Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-25 17:36:30 +03:00)
Detect more empty matches in tokenizer.explain() (#4675)
* Detect more empty matches in tokenizer.explain()

* Include a few languages in explain non-slow tests

  Mark a few languages in tokenizer.explain() tests as not slow so they're run by default.
commit d7f32b285c
parent 5bf9ab5b03
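For context, the test change below relies on pytest's ability to attach marks to individual parametrize cases instead of marking the whole test slow. The following is a minimal, self-contained sketch of that pattern; the languages and the test body are illustrative only, and how "slow" cases are actually selected at run time depends on the project's own pytest configuration, which is not shown here:

    import pytest

    LANGUAGES = [
        "en",                                           # runs in the default test selection
        pytest.param("de", marks=pytest.mark.slow()),   # only runs when slow tests are enabled
        pytest.param("xx", marks=pytest.mark.xfail()),  # allowed to fail without failing the suite
    ]

    @pytest.mark.parametrize("lang", LANGUAGES)
    def test_lang_is_string(lang):
        assert isinstance(lang, str)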
spacy/tests/tokenizer/test_explain.py
@@ -8,57 +8,58 @@ from spacy.util import get_lang_class
 # "is" seems to confuse importlib, so we're also excluding it for now
 # excluded: ja, ru, th, uk, vi, zh, is
 LANGUAGES = [
-    "af",
-    "ar",
-    "bg",
+    pytest.param("fr", marks=pytest.mark.slow()),
+    pytest.param("af", marks=pytest.mark.slow()),
+    pytest.param("ar", marks=pytest.mark.slow()),
+    pytest.param("bg", marks=pytest.mark.slow()),
     "bn",
-    pytest.param("ca", marks=pytest.mark.xfail()),
-    "cs",
-    "da",
-    "de",
+    pytest.param("ca", marks=pytest.mark.slow()),
+    pytest.param("cs", marks=pytest.mark.slow()),
+    pytest.param("da", marks=pytest.mark.slow()),
+    pytest.param("de", marks=pytest.mark.slow()),
     "el",
     "en",
-    "es",
-    "et",
-    "fa",
-    "fi",
-    pytest.param("fr", marks=pytest.mark.xfail()),
-    "ga",
-    "he",
-    "hi",
-    "hr",
-    pytest.param("hu", marks=pytest.mark.xfail()),
-    "id",
-    "it",
-    "kn",
-    "lt",
-    "lv",
-    "nb",
-    "nl",
-    pytest.param("pl", marks=pytest.mark.xfail()),
-    "pt",
-    "ro",
-    "si",
-    "sk",
-    "sl",
-    "sq",
-    "sr",
-    "sv",
-    "ta",
-    "te",
-    "tl",
-    "tr",
-    "tt",
-    "ur",
+    pytest.param("es", marks=pytest.mark.slow()),
+    pytest.param("et", marks=pytest.mark.slow()),
+    pytest.param("fa", marks=pytest.mark.slow()),
+    pytest.param("fi", marks=pytest.mark.slow()),
+    "fr",
+    pytest.param("ga", marks=pytest.mark.slow()),
+    pytest.param("he", marks=pytest.mark.slow()),
+    pytest.param("hi", marks=pytest.mark.slow()),
+    pytest.param("hr", marks=pytest.mark.slow()),
+    "hu",
+    pytest.param("id", marks=pytest.mark.slow()),
+    pytest.param("it", marks=pytest.mark.slow()),
+    pytest.param("kn", marks=pytest.mark.slow()),
+    pytest.param("lb", marks=pytest.mark.slow()),
+    pytest.param("lt", marks=pytest.mark.slow()),
+    pytest.param("lv", marks=pytest.mark.slow()),
+    pytest.param("nb", marks=pytest.mark.slow()),
+    pytest.param("nl", marks=pytest.mark.slow()),
+    "pl",
+    pytest.param("pt", marks=pytest.mark.slow()),
+    pytest.param("ro", marks=pytest.mark.slow()),
+    pytest.param("si", marks=pytest.mark.slow()),
+    pytest.param("sk", marks=pytest.mark.slow()),
+    pytest.param("sl", marks=pytest.mark.slow()),
+    pytest.param("sq", marks=pytest.mark.slow()),
+    pytest.param("sr", marks=pytest.mark.slow()),
+    pytest.param("sv", marks=pytest.mark.slow()),
+    pytest.param("ta", marks=pytest.mark.slow()),
+    pytest.param("te", marks=pytest.mark.slow()),
+    pytest.param("tl", marks=pytest.mark.slow()),
+    pytest.param("tr", marks=pytest.mark.slow()),
+    pytest.param("tt", marks=pytest.mark.slow()),
+    pytest.param("ur", marks=pytest.mark.slow()),
 ]
 
 
-@pytest.mark.slow
 @pytest.mark.parametrize("lang", LANGUAGES)
 def test_tokenizer_explain(lang):
-    nlp = get_lang_class(lang)()
+    tokenizer = get_lang_class(lang).Defaults.create_tokenizer()
     examples = pytest.importorskip("spacy.lang.{}.examples".format(lang))
     for sentence in examples.sentences:
-        tokens = [t.text for t in nlp.tokenizer(sentence) if not t.is_space]
-        debug_tokens = [t[1] for t in nlp.tokenizer.explain(sentence)]
+        tokens = [t.text for t in tokenizer(sentence) if not t.is_space]
+        debug_tokens = [t[1] for t in tokenizer.explain(sentence)]
         assert tokens == debug_tokens
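As a reminder of what this test exercises: Tokenizer.explain(text) returns (pattern, token) pairs describing which rule (prefix, suffix, infix, special case, ...) produced each piece, and the test asserts that these pieces line up with the tokenizer's real output. A rough usage sketch; the pairs shown in the comment are illustrative, not a guaranteed output:

    import spacy

    nlp = spacy.blank("en")
    for pattern, token_text in nlp.tokenizer.explain("(Don't!)"):
        print(pattern, token_text)
    # Expected shape of the output: pairs such as ("PREFIX", "("),
    # ("SPECIAL-1", "Do"), ("SPECIAL-2", "n't"), ("SUFFIX", "!"), ("SUFFIX", ")")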
spacy/tokenizer.pyx
@@ -461,20 +461,20 @@ cdef class Tokenizer:
                         break
                     if prefix_search(substring):
                         split = prefix_search(substring).end()
+                        # break if pattern matches the empty string
+                        if split == 0:
+                            break
                         tokens.append(("PREFIX", substring[:split]))
                         substring = substring[split:]
                         if substring in special_cases:
                             continue
-                        # break if pattern matches the empty string
-                        if split == 0:
-                            break
                     if suffix_search(substring):
                         split = suffix_search(substring).start()
-                        suffixes.append(("SUFFIX", substring[split:]))
-                        substring = substring[:split]
                         # break if pattern matches the empty string
                         if split == len(substring):
                             break
+                        suffixes.append(("SUFFIX", substring[split:]))
+                        substring = substring[:split]
                 if substring in special_cases:
                     tokens.extend(("SPECIAL-" + str(i + 1), self.vocab.strings[e[ORTH]]) for i, e in enumerate(special_cases[substring]))
                     substring = ''
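The reordering above moves the empty-match check in front of the bookkeeping: when the prefix match is empty (split == 0) or the suffix match is empty (split == len(substring)), explain() now bails out before recording an empty "PREFIX"/"SUFFIX" piece or looping again on an unchanged substring. A toy, self-contained illustration of the prefix case; this is not spaCy code and the pattern is made up:

    import re

    prefix_search = re.compile(r'^\(?').search  # "(" is optional, so the pattern can match ""

    def explain_prefixes(substring):
        tokens = []
        while substring and prefix_search(substring):
            split = prefix_search(substring).end()
            if split == 0:          # empty match: stop before emitting anything
                break
            tokens.append(("PREFIX", substring[:split]))
            substring = substring[split:]
        return tokens, substring

    print(explain_prefixes("(hello"))   # ([('PREFIX', '(')], 'hello')
    print(explain_prefixes("hello"))    # ([], 'hello') -- no empty PREFIX entry

Checking before appending keeps the explained pieces consistent with the real tokenizer, which never emits empty tokens.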
spacy/tokenizer.pyx
@@ -485,8 +485,10 @@ cdef class Tokenizer:
                 infixes = infix_finditer(substring)
                 offset = 0
                 for match in infixes:
-                    tokens.append(("TOKEN", substring[offset : match.start()]))
-                    tokens.append(("INFIX", substring[match.start() : match.end()]))
+                    if substring[offset : match.start()]:
+                        tokens.append(("TOKEN", substring[offset : match.start()]))
+                    if substring[match.start() : match.end()]:
+                        tokens.append(("INFIX", substring[match.start() : match.end()]))
                     offset = match.end()
                 if substring[offset:]:
                     tokens.append(("TOKEN", substring[offset:]))
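The guards added in this last hunk do the same for infixes: a slice between matches, or the matched infix itself, is only recorded when it is non-empty, so an infix match at the very start of the substring or a zero-width match no longer yields an empty "TOKEN" or "INFIX" entry. A small standalone sketch of that logic, using a made-up infix pattern rather than spaCy's:

    import re

    infix_finditer = re.compile(r'-').finditer

    def explain_infixes(substring):
        tokens = []
        offset = 0
        for match in infix_finditer(substring):
            if substring[offset : match.start()]:
                tokens.append(("TOKEN", substring[offset : match.start()]))
            if substring[match.start() : match.end()]:
                tokens.append(("INFIX", substring[match.start() : match.end()]))
            offset = match.end()
        if substring[offset:]:
            tokens.append(("TOKEN", substring[offset:]))
        return tokens

    print(explain_infixes("-ish"))        # [('INFIX', '-'), ('TOKEN', 'ish')] -- no empty leading TOKEN
    print(explain_infixes("well-known"))  # [('TOKEN', 'well'), ('INFIX', '-'), ('TOKEN', 'known')]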