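"""Roundtrip serialization tests for spaCy pipeline components: the parser,
entity recognizer, tagger, tensorizer, text categorizer and sentence
recognizer should survive to_bytes/from_bytes and to_disk/from_disk without
losing state."""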

import pytest
from spacy.pipeline import Tagger, DependencyParser, EntityRecognizer
from spacy.pipeline import Tensorizer, TextCategorizer, SentenceRecognizer
from spacy.ml.models.defaults import default_parser, default_tensorizer, default_tagger
from spacy.ml.models.defaults import default_textcat, default_senter

from ..util import make_tempdir


test_parsers = [DependencyParser, EntityRecognizer]
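

# Note: `en_vocab` below is itself a pytest fixture; it's assumed to be
# provided by the test suite's shared conftest.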


@pytest.fixture
def parser(en_vocab):
    parser = DependencyParser(en_vocab, default_parser())
    parser.add_label("nsubj")
    return parser


@pytest.fixture
def blank_parser(en_vocab):
    parser = DependencyParser(en_vocab, default_parser())
    return parser


@pytest.fixture
def taggers(en_vocab):
    model = default_tagger()
    tagger1 = Tagger(en_vocab, model)
    tagger2 = Tagger(en_vocab, model)
    return tagger1, tagger2
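

# Note: tagger1 and tagger2 above share a single model instance, so their
# serialized output should be identical; the tagger disk roundtrip test
# below relies on this.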


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_bytes(en_vocab, Parser):
    parser = Parser(en_vocab, default_parser())
    new_parser = Parser(en_vocab, default_parser())
    new_parser = new_parser.from_bytes(parser.to_bytes(exclude=["vocab"]))
    # A component restored from bytes should serialize back to identical bytes.
    bytes_2 = new_parser.to_bytes(exclude=["vocab"])
    bytes_3 = parser.to_bytes(exclude=["vocab"])
    assert len(bytes_2) == len(bytes_3)
    assert bytes_2 == bytes_3


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_parser_roundtrip_disk(en_vocab, Parser):
    parser = Parser(en_vocab, default_parser())
    with make_tempdir() as d:
        file_path = d / "parser"
        parser.to_disk(file_path)
        parser_d = Parser(en_vocab, default_parser())
        parser_d = parser_d.from_disk(file_path)
        # The model weights and shared vocab are excluded, so the byte
        # comparison covers only the component's own remaining data.
        parser_bytes = parser.to_bytes(exclude=["model", "vocab"])
        parser_d_bytes = parser_d.to_bytes(exclude=["model", "vocab"])
        assert len(parser_bytes) == len(parser_d_bytes)
        assert parser_bytes == parser_d_bytes


def test_to_from_bytes(parser, blank_parser):
    assert parser.model is not True
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves != parser.moves.n_moves
    bytes_data = parser.to_bytes(exclude=["vocab"])
    # The blank parser needs to be resized before we can call from_bytes:
    # the `parser` fixture added a label, so it has more moves (and a larger
    # model output) than the blank parser.
    blank_parser.model.attrs["resize_output"](blank_parser.model, parser.moves.n_moves)
    blank_parser.from_bytes(bytes_data)
    assert blank_parser.model is not True
    assert blank_parser.moves.n_moves == parser.moves.n_moves


@pytest.mark.skip(
    reason="This seems to be a dict ordering bug somewhere. Only failing on some platforms."
)
def test_serialize_tagger_roundtrip_bytes(en_vocab, taggers):
    tagger1 = taggers[0]
    tagger1_b = tagger1.to_bytes()
    tagger1 = tagger1.from_bytes(tagger1_b)
    assert tagger1.to_bytes() == tagger1_b
    new_tagger1 = Tagger(en_vocab, default_tagger()).from_bytes(tagger1_b)
    new_tagger1_b = new_tagger1.to_bytes()
    assert len(new_tagger1_b) == len(tagger1_b)
    assert new_tagger1_b == tagger1_b


def test_serialize_tagger_roundtrip_disk(en_vocab, taggers):
    tagger1, tagger2 = taggers
    with make_tempdir() as d:
        file_path1 = d / "tagger1"
        file_path2 = d / "tagger2"
        tagger1.to_disk(file_path1)
        tagger2.to_disk(file_path2)
        tagger1_d = Tagger(en_vocab, default_tagger()).from_disk(file_path1)
        tagger2_d = Tagger(en_vocab, default_tagger()).from_disk(file_path2)
        assert tagger1_d.to_bytes() == tagger2_d.to_bytes()
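

# The Tensorizer roundtrips below follow the same bytes/disk pattern as the
# parser and tagger tests; the shared vocab is excluded so the comparison
# covers only the component's own data.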


def test_serialize_tensorizer_roundtrip_bytes(en_vocab):
    tensorizer = Tensorizer(en_vocab, default_tensorizer())
    tensorizer_b = tensorizer.to_bytes(exclude=["vocab"])
    new_tensorizer = Tensorizer(en_vocab, default_tensorizer()).from_bytes(tensorizer_b)
    assert new_tensorizer.to_bytes(exclude=["vocab"]) == tensorizer_b


def test_serialize_tensorizer_roundtrip_disk(en_vocab):
    tensorizer = Tensorizer(en_vocab, default_tensorizer())
    with make_tempdir() as d:
        file_path = d / "tensorizer"
        tensorizer.to_disk(file_path)
        tensorizer_d = Tensorizer(en_vocab, default_tensorizer()).from_disk(file_path)
        assert tensorizer.to_bytes(exclude=["vocab"]) == tensorizer_d.to_bytes(
            exclude=["vocab"]
        )


def test_serialize_textcat_empty(en_vocab):
    # See issue #1105: an untrained TextCategorizer initialized with labels
    # should serialize without error.
    textcat = TextCategorizer(
        en_vocab, default_textcat(), labels=["ENTITY", "ACTION", "MODIFIER"]
    )
    textcat.to_bytes(exclude=["vocab"])


@pytest.mark.parametrize("Parser", test_parsers)
def test_serialize_pipe_exclude(en_vocab, Parser):
    def get_new_parser():
        new_parser = Parser(en_vocab, default_parser())
        return new_parser

    parser = Parser(en_vocab, default_parser())
    parser.cfg["foo"] = "bar"
    # The custom cfg entry survives a roundtrip when nothing is excluded ...
    new_parser = get_new_parser().from_bytes(parser.to_bytes(exclude=["vocab"]))
    assert "foo" in new_parser.cfg
    # ... but not when "cfg" is excluded on deserialization ...
    new_parser = get_new_parser().from_bytes(
        parser.to_bytes(exclude=["vocab"]), exclude=["cfg"]
    )
    assert "foo" not in new_parser.cfg
    # ... or on serialization.
    new_parser = get_new_parser().from_bytes(
        parser.to_bytes(exclude=["cfg"]), exclude=["vocab"]
    )
    assert "foo" not in new_parser.cfg
    # Keyword flags like cfg=False aren't supported and raise a ValueError.
    with pytest.raises(ValueError):
        parser.to_bytes(cfg=False, exclude=["vocab"])
    with pytest.raises(ValueError):
        get_new_parser().from_bytes(parser.to_bytes(exclude=["vocab"]), cfg=False)


def test_serialize_sentencerecognizer(en_vocab):
    sr = SentenceRecognizer(en_vocab, default_senter())
    sr_b = sr.to_bytes()
    sr_d = SentenceRecognizer(en_vocab, default_senter()).from_bytes(sr_b)
    assert sr.to_bytes() == sr_d.to_bytes()
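

# A disk roundtrip for SentenceRecognizer isn't covered above. A minimal
# sketch, assuming SentenceRecognizer supports the same to_disk/from_disk
# API as the other components, could look like this:
#
# def test_serialize_sentencerecognizer_roundtrip_disk(en_vocab):
#     sr = SentenceRecognizer(en_vocab, default_senter())
#     with make_tempdir() as d:
#         file_path = d / "senter"
#         sr.to_disk(file_path)
#         sr_d = SentenceRecognizer(en_vocab, default_senter()).from_disk(file_path)
#         assert sr.to_bytes() == sr_d.to_bytes()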