Mirror of https://github.com/explosion/spaCy.git (synced 2025-07-06 21:03:07 +03:00)
Update Tokenizer.explain for special cases with whitespace
Update `Tokenizer.explain` to skip special case matches if the exact text has not been matched due to intervening whitespace. Enable fuzzy `Tokenizer.explain` tests with additional whitespace normalization.
parent d717123819
commit e69348ca02
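For context, a minimal sketch (not part of the commit) of the comparison the fuzzy test below relies on: `Tokenizer.explain` returns `(pattern, token_text)` pairs, and the token texts are expected to line up with the tokenizer's own output. The language code and sample text are arbitrary choices for illustration.

    # Minimal sketch (assumed example, not from the commit): compare the
    # tokenizer's output with the token texts reported by Tokenizer.explain.
    import spacy

    nlp = spacy.blank("en")               # "en" is an arbitrary choice
    text = "Let's   go!"                  # extra whitespace on purpose
    tokens = [t.text for t in nlp.tokenizer(text) if not t.is_space]
    debug_tokens = [t[1] for t in nlp.tokenizer.explain(text)]
    print(tokens)        # tokens produced by the tokenizer itself
    print(debug_tokens)  # token texts from the (pattern, text) pairs of explain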
@@ -112,7 +112,6 @@ def sentence_strategy(draw: hypothesis.strategies.DrawFn, max_n_words: int = 4)
     return " ".join([token for token_pair in sentence for token in token_pair])
 
 
-@pytest.mark.xfail
 @pytest.mark.parametrize("lang", LANGUAGES)
 @hypothesis.given(sentence=sentence_strategy())
 def test_tokenizer_explain_fuzzy(lang: str, sentence: str) -> None:
@@ -123,6 +122,9 @@ def test_tokenizer_explain_fuzzy(lang: str, sentence: str) -> None:
     """
 
     tokenizer: Tokenizer = spacy.blank(lang).tokenizer
-    tokens = [t.text for t in tokenizer(sentence) if not t.is_space]
+    # Tokenizer.explain is not intended to handle whitespace or control
+    # characters in the same way as Tokenizer
+    sentence = re.sub(r"\s+", " ", sentence).strip()
+    tokens = [t.text for t in tokenizer(sentence)]
     debug_tokens = [t[1] for t in tokenizer.explain(sentence)]
     assert tokens == debug_tokens, f"{tokens}, {debug_tokens}, {sentence}"
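The normalization added to the fuzzy test collapses runs of whitespace before comparing, since `Tokenizer.explain` is not meant to reproduce the tokenizer's whitespace and control-character handling. A standalone sketch of that step (the sample string is made up):

    import re

    sentence = " foo\t bar\n  baz "   # hypothetical hypothesis-generated input
    normalized = re.sub(r"\s+", " ", sentence).strip()
    print(normalized)                 # "foo bar baz"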
@@ -730,6 +730,13 @@ cdef class Tokenizer:
             if i in spans_by_start:
                 span = spans_by_start[i]
                 exc = [d[ORTH] for d in special_cases[span.label_]]
-                for j, orth in enumerate(exc):
-                    final_tokens.append((f"SPECIAL-{j + 1}", self.vocab.strings[orth]))
-                i += len(span)
+                # The phrase matcher can overmatch for tokens separated by
+                # spaces in the text but not in the underlying rule, so skip
+                # cases where the texts aren't identical
+                if span.text != "".join([self.vocab.strings[orth] for orth in exc]):
+                    final_tokens.append(tokens[i])
+                    i += 1
+                else:
+                    for j, orth in enumerate(exc):
+                        final_tokens.append((f"SPECIAL-{j + 1}", self.vocab.strings[orth]))
+                    i += len(span)
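A standalone sketch (assumed example, not spaCy internals) of the guard added in `explain` above: a phrase-matcher candidate is only expanded into SPECIAL-N tokens when its surface text equals the concatenation of the rule's ORTH strings, which rules out matches that only line up because of intervening whitespace.

    # Hypothetical rule pieces and matched span texts, for illustration only.
    rule_orths = ["do", "n't"]

    for span_text in ["don't", "do n't"]:
        if span_text != "".join(rule_orths):
            # texts differ (e.g. a space crept in) -> keep the original token
            print(span_text, "-> skip special case")
        else:
            # exact match -> expand into SPECIAL-1, SPECIAL-2, ...
            print(span_text, "->", [f"SPECIAL-{j + 1}" for j in range(len(rule_orths))])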