import warnings

import pytest

from spacy.language import Language
from spacy.lemmatizer import Lemmatizer
from spacy.lookups import Lookups
from spacy.tokens import Doc


@pytest.mark.skip(reason="We probably don't want to support this anymore in v3?")
def test_lemmatizer_reflects_lookups_changes():
    """Regression test: lookup tables added to (or loaded into) a vocab must be
    picked up by the lemmatizer immediately, including after deserialization."""

    def lemma_of(pipeline, word):
        # Lemma of a single-token doc built on the pipeline's vocab.
        return Doc(pipeline.vocab, words=[word])[0].lemma_

    first_nlp = Language()
    # No lookup table yet: the lemma falls back to the token text.
    assert lemma_of(first_nlp, "foo") == "foo"
    lookup = first_nlp.vocab.lookups.add_table("lemma_lookup")
    lookup["foo"] = "bar"
    assert lemma_of(first_nlp, "foo") == "bar"
    # Mutating the table retrieved later must also be visible.
    lookup = first_nlp.vocab.lookups.get_table("lemma_lookup")
    lookup["hello"] = "world"
    # The update to the table should be reflected in the lemmatizer
    assert lemma_of(first_nlp, "hello") == "world"
    second_nlp = Language()
    lookup = second_nlp.vocab.lookups.add_table("lemma_lookup")
    lookup["hello"] = "hi"
    assert lemma_of(second_nlp, "hello") == "hi"
    # Deserializing the first pipeline into the second must replace its lookups.
    serialized = first_nlp.to_bytes()
    second_nlp.from_bytes(serialized)
    # Make sure we have the previously saved lookup table
    assert "lemma_lookup" in second_nlp.vocab.lookups
    restored = second_nlp.vocab.lookups.get_table("lemma_lookup")
    assert len(restored) == 2
    assert restored["hello"] == "world"
    assert lemma_of(second_nlp, "foo") == "bar"
    assert lemma_of(second_nlp, "hello") == "world"


def test_tagger_warns_no_lookups():
    """The tagger should warn when trained on a vocab without lookup tables,
    and stay silent once "lemma_lookup" and a non-empty "lexeme_norm" exist."""
    nlp = Language()
    nlp.vocab.lookups = Lookups()
    assert not len(nlp.vocab.lookups)
    tagger = nlp.add_pipe("tagger")
    # Both the component and the pipeline entry point should warn.
    with pytest.warns(UserWarning):
        tagger.begin_training()
    with pytest.warns(UserWarning):
        nlp.begin_training()
    nlp.vocab.lookups.add_table("lemma_lookup")
    nlp.vocab.lookups.add_table("lexeme_norm")
    nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A"
    # ``pytest.warns(None)`` is deprecated (pytest 7) and removed (pytest 8);
    # record warnings with the stdlib instead and assert none were emitted.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        nlp.begin_training()
    assert not record
|
2020-06-29 15:16:57 +03:00
|
|
|
|
|
|
|
|
|
|
|
def test_lemmatizer_without_is_base_form_implementation():
    """A lemmatizer built with ``is_base_form=None`` must still resolve lemmas
    through the exception table."""
    # Norwegian example from #5658
    tables = Lookups()
    for table_name, contents in [
        ("lemma_rules", {"noun": []}),
        ("lemma_index", {"noun": {}}),
        ("lemma_exc", {"noun": {"formuesskatten": ["formuesskatt"]}}),
    ]:
        tables.add_table(table_name, contents)

    lemmatize = Lemmatizer(tables, is_base_form=None)
    morphology = {"Definite": "def", "Gender": "masc", "Number": "sing"}
    assert lemmatize("Formuesskatten", "noun", morphology) == ["formuesskatt"]