# coding: utf-8
from __future__ import unicode_literals
import pytest
from spacy.lang.en import English
from spacy.tokenizer import Tokenizer
from spacy.util import compile_prefix_regex, compile_suffix_regex
from spacy.util import compile_infix_regex
@pytest.fixture
def custom_en_tokenizer(en_vocab):
    """Build an English Tokenizer that keeps the default prefix/suffix
    rules but replaces the infix rules with a custom set, so the tests
    below can check infix splitting (ellipses, number ranges, punctuation).
    """
    prefix_re = compile_prefix_regex(English.Defaults.prefixes)
    suffix_re = compile_suffix_regex(English.Defaults.suffixes)
    # Raw strings: these patterns contain backslash escapes (\., \[, \*, \/)
    # that are invalid escape sequences in non-raw string literals
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    custom_infixes = [
        r"\.\.\.+",
        r"(?<=[0-9])-(?=[0-9])",
        r"[0-9]+(,[0-9]+)+",
        r"[\[\]!&:,()\*—–\/-]",
    ]
    infix_re = compile_infix_regex(custom_infixes)
    return Tokenizer(
        en_vocab,
        English.Defaults.tokenizer_exceptions,
        prefix_re.search,
        suffix_re.search,
        infix_re.finditer,
        token_match=None,
    )
def test_en_customized_tokenizer_handles_infixes(custom_en_tokenizer):
    """Hyphenated compounds like '10-county' must be split into three
    tokens by the custom infix rule '(?<=[0-9])-(?=[0-9])' plus the
    punctuation class, both with and without a dangling '8-' form."""
    text = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
    tokens = [tok.text for tok in custom_en_tokenizer(text)]
    expected = (
        "The 8 and 10 - county definitions are not used for the greater "
        "Southern California Megaregion ."
    ).split()
    assert tokens == expected
    # the trailing '-' may cause Assertion Error
    text = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
    tokens = [tok.text for tok in custom_en_tokenizer(text)]
    expected = (
        "The 8 - and 10 - county definitions are not used for the greater "
        "Southern California Megaregion ."
    ).split()
    assert tokens == expected