spacy/tests/test_lemmatizer.py


# coding: utf8
from __future__ import unicode_literals

import pytest

from spacy.tokens import Doc
from spacy.language import Language
from spacy.lookups import Lookups
from spacy.lemmatizer import Lemmatizer


def test_lemmatizer_reflects_lookups_changes():
    """Test for an issue that'd cause lookups available in a model loaded from
    disk to not be reflected in the lemmatizer."""
    nlp = Language()
    assert Doc(nlp.vocab, words=["foo"])[0].lemma_ == "foo"
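    # Adding a "lemma_lookup" table to the vocab's lookups should be picked up
    # by the lookup lemmatizer right away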
    table = nlp.vocab.lookups.add_table("lemma_lookup")
    table["foo"] = "bar"
    assert Doc(nlp.vocab, words=["foo"])[0].lemma_ == "bar"
    table = nlp.vocab.lookups.get_table("lemma_lookup")
    table["hello"] = "world"
    # The update to the table should be reflected in the lemmatizer
    assert Doc(nlp.vocab, words=["hello"])[0].lemma_ == "world"
    new_nlp = Language()
    table = new_nlp.vocab.lookups.add_table("lemma_lookup")
    table["hello"] = "hi"
    assert Doc(new_nlp.vocab, words=["hello"])[0].lemma_ == "hi"
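    # Serializing nlp and deserializing into new_nlp should replace new_nlp's
    # table, so "hello" maps to "world" again rather than "hi"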
    nlp_bytes = nlp.to_bytes()
    new_nlp.from_bytes(nlp_bytes)
    # Make sure we have the previously saved lookup table
assert "lemma_lookup" in new_nlp.vocab.lookups
assert len(new_nlp.vocab.lookups.get_table("lemma_lookup")) == 2
assert new_nlp.vocab.lookups.get_table("lemma_lookup")["hello"] == "world"
assert Doc(new_nlp.vocab, words=["foo"])[0].lemma_ == "bar"
assert Doc(new_nlp.vocab, words=["hello"])[0].lemma_ == "world"

def test_tagger_warns_no_lookups():
    nlp = Language()
    nlp.vocab.lookups = Lookups()
    assert not len(nlp.vocab.lookups)
    tagger = nlp.create_pipe("tagger")
    nlp.add_pipe(tagger)
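    # Without lemma_lookup and lexeme_norm tables, training should warn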
    with pytest.warns(UserWarning):
        nlp.begin_training()
    nlp.vocab.lookups.add_table("lemma_lookup")
    nlp.vocab.lookups.add_table("lexeme_norm")
    nlp.vocab.lookups.get_table("lexeme_norm")["a"] = "A"
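    # With the required tables in place, no warning should be raised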
    with pytest.warns(None) as record:
        nlp.begin_training()
    assert not record.list


def test_lemmatizer_without_is_base_form_implementation():
    # Norwegian example from #5658
    lookups = Lookups()
    lookups.add_table("lemma_rules", {"noun": []})
    lookups.add_table("lemma_index", {"noun": {}})
    lookups.add_table("lemma_exc", {"noun": {"formuesskatten": ["formuesskatt"]}})
    lemmatizer = Lemmatizer(lookups, is_base_form=None)
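    # Even with no is_base_form callable, the exception table should apply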
    assert lemmatizer(
        "Formuesskatten", "noun", {"Definite": "def", "Gender": "masc", "Number": "sing"}
    ) == ["formuesskatt"]