# coding: utf-8
from __future__ import unicode_literals

import pytest

from spacy.util import compile_prefix_regex
from spacy.lang.punctuation import TOKENIZER_PREFIXES


PUNCT_OPEN = ['(', '[', '{', '*']
PUNCT_CLOSE = [')', ']', '}', '*']
PUNCT_PAIRED = [('(', ')'), ('[', ']'), ('{', '}'), ('*', '*')]

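# The tests below check that the English tokenizer splits leading, trailing,
# and paired punctuation characters into separate tokens.
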
@pytest.mark.parametrize('text', ["(", "((", "<"])
def test_en_tokenizer_handles_only_punct(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == len(text)


@pytest.mark.parametrize('punct', PUNCT_OPEN)
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_open_punct(en_tokenizer, punct, text):
    tokens = en_tokenizer(punct + text)
    assert len(tokens) == 2
    assert tokens[0].text == punct
    assert tokens[1].text == text


@pytest.mark.parametrize('punct', PUNCT_CLOSE)
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_close_punct(en_tokenizer, punct, text):
    tokens = en_tokenizer(text + punct)
    assert len(tokens) == 2
    assert tokens[0].text == text
    assert tokens[1].text == punct


@pytest.mark.parametrize('punct', PUNCT_OPEN)
@pytest.mark.parametrize('punct_add', ["`"])
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_two_diff_open_punct(en_tokenizer, punct, punct_add, text):
    tokens = en_tokenizer(punct + punct_add + text)
    assert len(tokens) == 3
    assert tokens[0].text == punct
    assert tokens[1].text == punct_add
    assert tokens[2].text == text


@pytest.mark.parametrize('punct', PUNCT_CLOSE)
@pytest.mark.parametrize('punct_add', ["'"])
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_two_diff_close_punct(en_tokenizer, punct, punct_add, text):
    tokens = en_tokenizer(text + punct + punct_add)
    assert len(tokens) == 3
    assert tokens[0].text == text
    assert tokens[1].text == punct
    assert tokens[2].text == punct_add


@pytest.mark.parametrize('punct', PUNCT_OPEN)
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_same_open_punct(en_tokenizer, punct, text):
    tokens = en_tokenizer(punct + punct + punct + text)
    assert len(tokens) == 4
    assert tokens[0].text == punct
    assert tokens[3].text == text


@pytest.mark.parametrize('punct', PUNCT_CLOSE)
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_same_close_punct(en_tokenizer, punct, text):
    tokens = en_tokenizer(text + punct + punct + punct)
    assert len(tokens) == 4
    assert tokens[0].text == text
    assert tokens[1].text == punct


@pytest.mark.parametrize('text', ["'The"])
def test_en_tokenizer_splits_open_apostrophe(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[0].text == "'"


@pytest.mark.parametrize('text', ["Hello''"])
def test_en_tokenizer_splits_double_end_quote(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 2
    tokens_punct = en_tokenizer("''")
    assert len(tokens_punct) == 1


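# Paired punctuation: an opening character and its matching closing character
# wrapped around the same token.
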
@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED)
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_splits_open_close_punct(en_tokenizer, punct_open,
                                              punct_close, text):
    tokens = en_tokenizer(punct_open + text + punct_close)
    assert len(tokens) == 3
    assert tokens[0].text == punct_open
    assert tokens[1].text == text
    assert tokens[2].text == punct_close


@pytest.mark.parametrize('punct_open,punct_close', PUNCT_PAIRED)
@pytest.mark.parametrize('punct_open2,punct_close2', [("`", "'")])
@pytest.mark.parametrize('text', ["Hello"])
def test_en_tokenizer_two_diff_punct(en_tokenizer, punct_open, punct_close,
                                     punct_open2, punct_close2, text):
    tokens = en_tokenizer(punct_open2 + punct_open + text + punct_close + punct_close2)
    assert len(tokens) == 5
    assert tokens[0].text == punct_open2
    assert tokens[1].text == punct_open
    assert tokens[2].text == text
    assert tokens[3].text == punct_close
    assert tokens[4].text == punct_close2


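# This test exercises the compiled prefix regex directly instead of running
# the full tokenizer.
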
@pytest.mark.parametrize('text,punct', [("(can't", "(")])
def test_en_tokenizer_splits_pre_punct_regex(text, punct):
    en_search_prefixes = compile_prefix_regex(TOKENIZER_PREFIXES).search
    match = en_search_prefixes(text)
    assert match.group() == punct


def test_en_tokenizer_splits_bracket_period(en_tokenizer):
    text = "(And a 6a.m. run through Washington Park)."
    tokens = en_tokenizer(text)
    assert tokens[len(tokens) - 1].text == "."