spaCy/spacy/tests/tokenizer/test_tokenizer.py
Adriane Boyd f94168a41e
Backport bugfixes from v3.1.0 to v3.0 (#8739)
* Fix scoring normalization (#7629)

* fix scoring normalization

* score weights by total sum instead of per component

* cleanup

* more cleanup

* Use a context manager when reading model (fix #7036) (#8244)

* Fix other open calls without context managers (#8245)

* Don't add duplicate patterns all the time in EntityRuler (fix #8216) (#8246)

* Don't add duplicate patterns (fix #8216)

* Refactor EntityRuler init

This simplifies the EntityRuler init code. This is helpful as prep for
allowing the EntityRuler to reset itself.

* Make EntityRuler.clear reset matchers

Includes a new test for this.

* Tidy PhraseMatcher instantiation

Since the attr can be None safely now, the guard if is no longer
required here.

Also renamed the `_validate` attr. Maybe it's not needed?

* Fix NER test

* Add test to make sure patterns aren't increasing

* Move test to regression tests

* Exclude generated .cpp files from package (#8271)

* Fix non-deterministic deduplication in Greek lemmatizer (#8421)

* Fix setting empty entities in Example.from_dict (#8426)

* Filter W036 for entity ruler, etc. (#8424)

* Preserve paths.vectors/initialize.vectors setting in quickstart template

* Various fixes for spans in Docs.from_docs (#8487)

* Fix spans offsets if a doc ends in a single space and no space is
  inserted
* Also include spans key in merged doc for empty spans lists

* Fix duplicate spacy package CLI opts (#8551)

Use `-c` for `--code` and not additionally for `--create-meta`, in line
with the docs.

* Raise an error for textcat with <2 labels (#8584)

* Raise an error for textcat with <2 labels

Raise an error if initializing a `textcat` component without at least
two labels.

* Add similar note to docs

* Update positive_label description in API docs

* Add Macedonian models to website (#8637)

* Fix Azerbaijani init, extend lang init tests (#8656)

* Extend langs in initialize tests

* Fix az init

* Fix ru/uk lemmatizer mp with spawn (#8657)

Use an instance variable instead of a class variable for the morphological
analyzer so that multiprocessing with spawn is possible.

* Use 0-vector for OOV lexemes (#8639)

* Set version to v3.0.7

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Co-authored-by: Paul O'Leary McCann <polm@dampfkraft.com>
2021-07-19 09:20:40 +02:00

219 lines
6.4 KiB
Python

import pytest
import re
from spacy.vocab import Vocab
from spacy.tokenizer import Tokenizer
from spacy.util import ensure_path
from spacy.lang.en import English
def test_tokenizer_handles_no_word(tokenizer):
    """An empty string should produce an empty Doc."""
    doc = tokenizer("")
    assert len(doc) == 0
@pytest.mark.parametrize("text", ["lorem"])
def test_tokenizer_handles_single_word(tokenizer, text):
    """A lone word should surface unchanged as the first token."""
    doc = tokenizer(text)
    assert doc[0].text == text
def test_tokenizer_handles_punct(tokenizer):
    """Commas and trailing periods are split off into their own tokens."""
    doc = tokenizer("Lorem, ipsum.")
    texts = [t.text for t in doc]
    assert len(texts) == 4
    assert texts[0] == "Lorem"
    assert texts[1] == ","
    assert texts[2] == "ipsum"
    # sanity check: the punctuation token is distinct from the word
    assert texts[1] != "Lorem"
def test_tokenizer_handles_punct_braces(tokenizer):
    """Parentheses around a word are split into separate tokens."""
    doc = tokenizer("Lorem, (ipsum).")
    assert len(doc) == 6
def test_tokenizer_handles_digits(tokenizer):
    """A number like a year stays one token (hu/bn tokenize differently)."""
    skip_langs = ["hu", "bn"]
    doc = tokenizer("Lorem ipsum: 1984.")
    if doc[0].lang_ not in skip_langs:
        assert len(doc) == 5
        assert doc[0].text == "Lorem"
        assert doc[3].text == "1984"
@pytest.mark.parametrize(
    "text",
    ["google.com", "python.org", "spacy.io", "explosion.ai", "http://www.google.com"],
)
def test_tokenizer_keep_urls(tokenizer, text):
    """URL-like strings must not be split at their internal punctuation."""
    assert len(tokenizer(text)) == 1
@pytest.mark.parametrize("text", ["NASDAQ:GOOG"])
def test_tokenizer_colons(tokenizer, text):
    """A colon between alphabetic spans is split into three tokens."""
    assert len(tokenizer(text)) == 3
@pytest.mark.parametrize(
    "text", ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"]
)
def test_tokenizer_keeps_email(tokenizer, text):
    """Email addresses must remain a single token."""
    assert len(tokenizer(text)) == 1
def test_tokenizer_handles_long_text(tokenizer):
    """A multi-line paragraph tokenizes without error into many tokens."""
    text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit
Cras egestas orci non porttitor maximus.
Maecenas quis odio id dolor rhoncus dignissim. Curabitur sed velit at orci ultrices sagittis. Nulla commodo euismod arcu eget vulputate.
Phasellus tincidunt, augue quis porta finibus, massa sapien consectetur augue, non lacinia enim nibh eget ipsum. Vestibulum in bibendum mauris.
"Nullam porta fringilla enim, a dictum orci consequat in." Mauris nec malesuada justo."""
    doc = tokenizer(text)
    assert len(doc) > 5
@pytest.mark.parametrize("file_name", ["sun.txt"])
def test_tokenizer_handle_text_from_file(tokenizer, file_name):
    """A real text file read from disk yields a non-trivial token count."""
    path = ensure_path(__file__).parent / file_name
    with path.open("r", encoding="utf8") as file_:
        text = file_.read()
    assert len(text) != 0
    assert len(tokenizer(text)) > 100
def test_tokenizer_suspected_freeing_strings(tokenizer):
    """Tokenizing two similar texts must not corrupt earlier token strings."""
    doc_a = tokenizer("Lorem dolor sit amet, consectetur adipiscing elit.")
    doc_b = tokenizer("Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
    # both docs should still resolve their first token correctly
    assert doc_a[0].text == "Lorem"
    assert doc_b[0].text == "Lorem"
@pytest.mark.parametrize("text,tokens", [("lorem", [{"orth": "lo"}, {"orth": "rem"}])])
def test_tokenizer_add_special_case(tokenizer, text, tokens):
    """A special case splits the text into the specified token sequence."""
    tokenizer.add_special_case(text, tokens)
    doc = tokenizer(text)
    for i, spec in enumerate(tokens):
        assert doc[i].text == spec["orth"]
@pytest.mark.parametrize(
    "text,tokens",
    [
        ("lorem", [{"orth": "lo"}, {"orth": "re"}]),
        ("lorem", [{"orth": "lo", "tag": "A"}, {"orth": "rem"}]),
    ],
)
def test_tokenizer_validate_special_case(tokenizer, text, tokens):
    """An invalid special case (orths don't match, or unsupported attrs) raises."""
    with pytest.raises(ValueError):
        tokenizer.add_special_case(text, tokens)
@pytest.mark.parametrize(
    "text,tokens", [("lorem", [{"orth": "lo", "norm": "LO"}, {"orth": "rem"}])]
)
def test_tokenizer_add_special_case_tag(text, tokens):
    """Special cases can set extra attributes (e.g. norm) on produced tokens."""
    # build a bare tokenizer so no language defaults interfere
    tokenizer = Tokenizer(Vocab(), {}, None, None, None)
    tokenizer.add_special_case(text, tokens)
    doc = tokenizer(text)
    assert doc[0].text == tokens[0]["orth"]
    assert doc[0].norm_ == tokens[0]["norm"]
    assert doc[1].text == tokens[1]["orth"]
def test_tokenizer_special_cases_with_affixes(tokenizer):
    """Special cases still apply after prefix/suffix/infix splitting."""
    tokenizer.add_special_case("_SPECIAL_", [{"orth": "_SPECIAL_"}])
    tokenizer.add_special_case("A/B", [{"orth": "A/B"}])
    doc = tokenizer('(((_SPECIAL_ A/B, A/B-A/B")')
    expected = [
        "(",
        "(",
        "(",
        "_SPECIAL_",
        "A/B",
        ",",
        "A/B",
        "-",
        "A/B",
        '"',
        ")",
    ]
    assert [token.text for token in doc] == expected
def test_tokenizer_special_cases_with_affixes_preserve_spacy():
    """Special-case retokenization must reproduce the input text exactly."""
    tokenizer = English().tokenizer
    # start from a clean slate: drop all default special cases
    tokenizer.rules = {}
    # in-place modification (only merges)
    tokenizer.add_special_case("''", [{"ORTH": "''"}])
    assert tokenizer("''a'' ").text == "''a'' "
    # not in-place (splits and merges)
    tokenizer.add_special_case("ab", [{"ORTH": "a"}, {"ORTH": "b"}])
    text = "ab ab ab ''ab ab'' ab'' ''ab"
    assert tokenizer(text).text == text
def test_tokenizer_special_cases_with_period(tokenizer):
    """A special case followed by a period keeps the period as its own token."""
    tokenizer.add_special_case("_SPECIAL_", [{"orth": "_SPECIAL_"}])
    doc = tokenizer("_SPECIAL_.")
    assert [token.text for token in doc] == ["_SPECIAL_", "."]
def test_tokenizer_special_cases_idx(tokenizer):
    """Tokens produced by a special case get correct character offsets."""
    tokenizer.add_special_case("_ID'X_", [{"orth": "_ID"}, {"orth": "'X_"}])
    doc = tokenizer("the _ID'X_")
    # "_ID" starts right after "the " (offset 4), "'X_" three chars later
    assert doc[1].idx == 4
    assert doc[2].idx == 7
def test_tokenizer_special_cases_spaces(tokenizer):
    """A special case containing spaces merges the words into one token."""
    assert [t.text for t in tokenizer("a b c")] == ["a", "b", "c"]
    tokenizer.add_special_case("a b c", [{"ORTH": "a b c"}])
    assert [t.text for t in tokenizer("a b c")] == ["a b c"]
def test_tokenizer_flush_cache(en_vocab):
    """Reassigning suffix_search must invalidate the tokenizer's cache."""
    suffix_pattern = re.compile(r"[\.]$")
    tokenizer = Tokenizer(
        en_vocab,
        suffix_search=suffix_pattern.search,
    )
    assert [t.text for t in tokenizer("a.")] == ["a", "."]
    # with no suffix search, the same input must retokenize as one token
    tokenizer.suffix_search = None
    assert [t.text for t in tokenizer("a.")] == ["a."]
def test_tokenizer_flush_specials(en_vocab):
    """Resetting rules must flush cached special-case tokenizations."""
    suffix_pattern = re.compile(r"[\.]$")
    rules = {"a a": [{"ORTH": "a a"}]}
    tokenizer1 = Tokenizer(
        en_vocab,
        suffix_search=suffix_pattern.search,
        rules=rules,
    )
    # second tokenizer shares the vocab but has no special-case rules
    tokenizer2 = Tokenizer(
        en_vocab,
        suffix_search=suffix_pattern.search,
    )
    assert [t.text for t in tokenizer1("a a.")] == ["a a", "."]
    # clearing the rules must drop the cached "a a" merge
    tokenizer1.rules = {}
    assert [t.text for t in tokenizer1("a a.")] == ["a", "a", "."]