mirror of https://github.com/explosion/spaCy.git
synced 2024-12-26 18:06:29 +03:00
e962784531
Add Lemmatizer and simplify related components

* Add Lemmatizer and simplify related components
  * Add `Lemmatizer` pipe with `lookup` and `rule` modes using the `Lookups` tables.
  * Reduce `Tagger` to a simple tagger that sets `Token.tag` (no pos or lemma).
  * Reduce `Morphology` to only keep track of morph tags (no tag map, lemmatizer, or morph rules).
  * Remove the lemmatizer from `Vocab`.
  * Adjust many, many tests.

  Differences:
  * No default lookup lemmas.
  * No special treatment of TAG in `from_array` and similar is required.
  * Easier to modify labels in a `Tagger`.
  * No extra strings added from morphology / tag map.
* Fix test
* Initial fix for Lemmatizer config/serialization
* Adjust init test to be more generic
* Adjust init test to force empty Lookups
* Add simple cache to rule-based lemmatizer
* Convert language-specific lemmatizers to component lemmatizers; remove the previous lemmatizer class
* Fix French and Polish lemmatizers
* Remove outdated UPOS conversions
* Update Russian lemmatizer init in tests
* Add minimal init/run tests for custom lemmatizers
* Add option to overwrite existing lemmas
* Update mode setting, lookup loading, and caching
  * Make `mode` an immutable property.
  * Only enforce strict `load_lookups` for known supported modes.
  * Move caching into the individual `_lemmatize` methods.
  * Implement strict checks when the lang is not found in lookups.
* Fix tables/lookups in `make_lemmatizer`
* Reallow provided lookups and allow for stricter checks
* Add lookups asset to all Lemmatizer pipe tests
* Rename lookups in lemmatizer init test
* Clean up merge
* Refactor lookup table loading
  * Add a helper, `load_lemmatizer_lookups`, that loads required and optional lookups tables based on settings provided by a config.
  * Additional slight refactor of lookups: add `Lookups.set_table` to set a table from a provided `Table`, and reorder the class definitions to be able to specify the type as `Table`.
* Move registry assets into test methods
* Refactor lookups tables config: use class methods within `Lemmatizer` to provide the config for particular modes and to load the lookups from a config
* Add pipe and score to lemmatizer
* Simplify `Tagger.score`
* Add missing import
* Clean up imports and auto-format
* Remove unused kwarg
* Tidy up and auto-format
* Update docstrings for Lemmatizer; additionally modify the `is_base_form` API to take a `Token` instead of individual features
* Update docstrings
* Remove tag map values from `Tagger.add_label`
* Update API docs
* Fix relative link in the Lemmatizer API docs
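For orientation, here is a minimal sketch of how the new component is meant to be used, assuming the v3-style `add_pipe` config API described above and the released `Lemmatizer.initialize` signature (the exact hooks at this commit may differ). The toy `lemma_lookup` table is a stand-in for the full tables shipped in `spacy-lookups-data`, since there are no default lookup lemmas anymore:

```python
import spacy
from spacy.lookups import Lookups

# Build a blank English pipeline and add the new Lemmatizer in "lookup" mode.
nlp = spacy.blank("en")
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "lookup"})

# NOTE: assumes the released v3 initialization API; this commit may wire the
# tables up differently. The tiny table below is a hypothetical stand-in for
# the real "lemma_lookup" table from spacy-lookups-data.
lookups = Lookups()
lookups.add_table("lemma_lookup", {"dogs": "dog", "ran": "run"})
lemmatizer.initialize(lookups=lookups)

doc = nlp("dogs ran")
assert [token.lemma_ for token in doc] == ["dog", "run"]
```

In `rule` mode the component instead consults the `lemma_rules`, `lemma_exc`, and `lemma_index` tables together with the token's POS, memoizing results via the new cache; `Lemmatizer.is_base_form` now takes the `Token` itself rather than individual features.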
176 lines
6.2 KiB
Python
import pytest
from spacy import registry
from spacy.pipeline import Tagger, DependencyParser, EntityRecognizer
from spacy.pipeline import TextCategorizer, SentenceRecognizer
from spacy.pipeline.dep_parser import DEFAULT_PARSER_MODEL
from spacy.pipeline.tagger import DEFAULT_TAGGER_MODEL
from spacy.pipeline.textcat import DEFAULT_TEXTCAT_MODEL
from spacy.pipeline.senter import DEFAULT_SENTER_MODEL

from ..util import make_tempdir


test_parsers = [DependencyParser, EntityRecognizer]


@pytest.fixture
def parser(en_vocab):
    config = {
        "learn_tokens": False,
        "min_action_freq": 30,
        "update_with_oracle_cut_size": 100,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    parser = DependencyParser(en_vocab, model, **config)
    parser.add_label("nsubj")
    return parser


@pytest.fixture
def blank_parser(en_vocab):
    config = {
        "learn_tokens": False,
        "min_action_freq": 30,
        "update_with_oracle_cut_size": 100,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    parser = DependencyParser(en_vocab, model, **config)
    return parser


@pytest.fixture
def taggers(en_vocab):
    cfg = {"model": DEFAULT_TAGGER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    tagger1 = Tagger(en_vocab, model)
    tagger2 = Tagger(en_vocab, model)
    return tagger1, tagger2


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_bytes(en_vocab, Parser):
    config = {
        "learn_tokens": False,
        "min_action_freq": 0,
        "update_with_oracle_cut_size": 100,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    parser = Parser(en_vocab, model, **config)
    new_parser = Parser(en_vocab, model, **config)
    new_parser = new_parser.from_bytes(parser.to_bytes(exclude=["vocab"]))
    bytes_2 = new_parser.to_bytes(exclude=["vocab"])
    bytes_3 = parser.to_bytes(exclude=["vocab"])
    assert len(bytes_2) == len(bytes_3)
    assert bytes_2 == bytes_3


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_disk(en_vocab, Parser):
    config = {
        "learn_tokens": False,
        "min_action_freq": 0,
        "update_with_oracle_cut_size": 100,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    parser = Parser(en_vocab, model, **config)
    with make_tempdir() as d:
        file_path = d / "parser"
        parser.to_disk(file_path)
        parser_d = Parser(en_vocab, model, **config)
        parser_d = parser_d.from_disk(file_path)
        parser_bytes = parser.to_bytes(exclude=["model", "vocab"])
        parser_d_bytes = parser_d.to_bytes(exclude=["model", "vocab"])
        assert len(parser_bytes) == len(parser_d_bytes)
        assert parser_bytes == parser_d_bytes


def test_to_from_bytes(parser, blank_parser):
    assert parser.model is not True
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves != parser.moves.n_moves
    bytes_data = parser.to_bytes(exclude=["vocab"])
    # the blank parser needs to be resized before we can call from_bytes
    blank_parser.model.attrs["resize_output"](blank_parser.model, parser.moves.n_moves)
    blank_parser.from_bytes(bytes_data)
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves == parser.moves.n_moves


@pytest.mark.skip(
    reason="This seems to be a dict ordering bug somewhere. Only failing on some platforms."
)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
    tagger1 = taggers[0]
    tagger1_b = tagger1.to_bytes()
    tagger1 = tagger1.from_bytes(tagger1_b)
    assert tagger1.to_bytes() == tagger1_b
    cfg = {"model": DEFAULT_TAGGER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    new_tagger1 = Tagger(en_vocab, model).from_bytes(tagger1_b)
    new_tagger1_b = new_tagger1.to_bytes()
    assert len(new_tagger1_b) == len(tagger1_b)
    assert new_tagger1_b == tagger1_b


def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
    tagger1, tagger2 = taggers
    with make_tempdir() as d:
        file_path1 = d / "tagger1"
        file_path2 = d / "tagger2"
        tagger1.to_disk(file_path1)
        tagger2.to_disk(file_path2)
        cfg = {"model": DEFAULT_TAGGER_MODEL}
        model = registry.make_from_config(cfg, validate=True)["model"]
        tagger1_d = Tagger(en_vocab, model).from_disk(file_path1)
        tagger2_d = Tagger(en_vocab, model).from_disk(file_path2)
        assert tagger1_d.to_bytes() == tagger2_d.to_bytes()


def test_serialize_textcat_empty(en_vocab):
    # See issue #1105
    cfg = {"model": DEFAULT_TEXTCAT_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    textcat = TextCategorizer(en_vocab, model, labels=["ENTITY", "ACTION", "MODIFIER"])
    textcat.to_bytes(exclude=["vocab"])


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_pipe_exclude(en_vocab, Parser):
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    config = {
        "learn_tokens": False,
        "min_action_freq": 0,
        "update_with_oracle_cut_size": 100,
    }

    def get_new_parser():
        new_parser = Parser(en_vocab, model, **config)
        return new_parser

    parser = Parser(en_vocab, model, **config)
    parser.cfg["foo"] = "bar"
    new_parser = get_new_parser().from_bytes(parser.to_bytes(exclude=["vocab"]))
    assert "foo" in new_parser.cfg
    new_parser = get_new_parser().from_bytes(
        parser.to_bytes(exclude=["vocab"]), exclude=["cfg"]
    )
    assert "foo" not in new_parser.cfg
    new_parser = get_new_parser().from_bytes(
        parser.to_bytes(exclude=["cfg"]), exclude=["vocab"]
    )
    assert "foo" not in new_parser.cfg


def test_serialize_sentencerecognizer(en_vocab):
    cfg = {"model": DEFAULT_SENTER_MODEL}
    model = registry.make_from_config(cfg, validate=True)["model"]
    sr = SentenceRecognizer(en_vocab, model)
    sr_b = sr.to_bytes()
    sr_d = SentenceRecognizer(en_vocab, model).from_bytes(sr_b)
    assert sr.to_bytes() == sr_d.to_bytes()