2017-10-14 14:28:46 +03:00
|
|
|
import pytest
|
2019-11-20 15:07:25 +03:00
|
|
|
import re
|
2018-07-25 00:38:44 +03:00
|
|
|
from spacy.lang.en import English
|
|
|
|
from spacy.tokenizer import Tokenizer
|
|
|
|
from spacy.util import compile_prefix_regex, compile_suffix_regex
|
|
|
|
from spacy.util import compile_infix_regex
|
2017-10-14 14:28:46 +03:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def custom_en_tokenizer(en_vocab):
    """Return a Tokenizer built on the English defaults, with a custom set of
    infix patterns and a `token_match` regex that protects the literal "a-b"."""
    infix_patterns = [
        r"\.\.\.+",
        r"(?<=[0-9])-(?=[0-9])",
        r"[0-9]+(,[0-9]+)+",
        r"[\[\]!&:,()\*—–\/-]",
    ]
    return Tokenizer(
        en_vocab,
        English.Defaults.tokenizer_exceptions,
        compile_prefix_regex(English.Defaults.prefixes).search,
        compile_suffix_regex(English.Defaults.suffixes).search,
        compile_infix_regex(infix_patterns).finditer,
        # token_match wins over infix rules, keeping "a-b" as one token
        token_match=re.compile("a-b").match,
    )
|
2017-10-14 14:28:46 +03:00
|
|
|
|
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
def test_en_customized_tokenizer_handles_infixes(custom_en_tokenizer):
    """The digit-hyphen-digit infix rule should split '10-county' apart."""
    text = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
    tokens = [token.text for token in custom_en_tokenizer(text)]
    expected = "The 8 and 10 - county definitions are not used for the greater Southern California Megaregion ."
    assert tokens == expected.split()
    # the trailing '-' may cause Assertion Error
    text = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
    tokens = [token.text for token in custom_en_tokenizer(text)]
    expected = "The 8 - and 10 - county definitions are not used for the greater Southern California Megaregion ."
    assert tokens == expected.split()
|
2019-11-20 15:07:25 +03:00
|
|
|
|
|
|
|
|
|
|
|
def test_en_customized_tokenizer_handles_token_match(custom_en_tokenizer):
    """`token_match` should keep 'a-b' whole despite the hyphen infix rule."""
    text = "The 8 and 10-county definitions a-b not used for the greater Southern California Megaregion."
    tokens = [token.text for token in custom_en_tokenizer(text)]
    expected = "The 8 and 10 - county definitions a-b not used for the greater Southern California Megaregion ."
    assert tokens == expected.split()
|
|
|
|
|
|
|
|
|
|
|
|
def test_en_customized_tokenizer_handles_rules(custom_en_tokenizer):
    """The default exception rules should keep the ':)' emoticon intact."""
    text = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)"
    tokens = [token.text for token in custom_en_tokenizer(text)]
    expected = "The 8 and 10 - county definitions are not used for the greater Southern California Megaregion . :)"
    assert tokens == expected.split()
|
|
|
|
|
|
|
|
|
|
|
|
def test_en_customized_tokenizer_handles_rules_property(custom_en_tokenizer):
    """Reassigning `rules` without the ':)' exception splits the emoticon."""
    text = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion. :)"
    remaining_rules = custom_en_tokenizer.rules
    del remaining_rules[":)"]
    custom_en_tokenizer.rules = remaining_rules
    tokens = [token.text for token in custom_en_tokenizer(text)]
    # without the exception, ':)' now falls to the punctuation infix rules
    expected = "The 8 and 10 - county definitions are not used for the greater Southern California Megaregion . : )"
    assert tokens == expected.split()
|