import pytest
from numpy.testing import assert_array_equal
from thinc.api import Config, get_current_ops

from spacy import util
from spacy.lang.en import English
from spacy.ml.models.tok2vec import (
    MaxoutWindowEncoder,
    MultiHashEmbed,
    build_Tok2Vec_model,
)
from spacy.pipeline.tok2vec import Tok2Vec, Tok2VecListener
from spacy.tokens import Doc
from spacy.training import Example
from spacy.util import registry
from spacy.vocab import Vocab

from ..util import add_vecs_to_vocab, get_batch, make_tempdir


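# Check that the tok2vec model handles an empty doc and returns a (0, width) output.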
def test_empty_doc():
    width = 128
    embed_size = 2000
    vocab = Vocab()
    doc = Doc(vocab, words=[])
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=[embed_size, embed_size, embed_size, embed_size],
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update([doc])
    assert len(vectors) == 1
    assert vectors[0].shape == (0, width)


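# Check that each output array matches its doc's length and the requested width
# across several batch sizes.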
@pytest.mark.parametrize(
    "batch_size,width,embed_size", [[1, 128, 2000], [2, 128, 2000], [3, 8, 63]]
)
def test_tok2vec_batch_sizes(batch_size, width, embed_size):
    batch = get_batch(batch_size)
    tok2vec = build_Tok2Vec_model(
        MultiHashEmbed(
            width=width,
            rows=[embed_size] * 4,
            include_static_vectors=False,
            attrs=["NORM", "PREFIX", "SUFFIX", "SHAPE"],
        ),
        MaxoutWindowEncoder(width=width, depth=4, window_size=1, maxout_pieces=3),
    )
    tok2vec.initialize()
    vectors, backprop = tok2vec.begin_update(batch)
    assert len(vectors) == len(batch)
    for doc_vec, doc in zip(vectors, batch):
        assert doc_vec.shape == (len(doc), width)


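# Run a small batch through each registered combination of embedding and
# encoding architectures and check the output shapes and backprop.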
@pytest.mark.slow
@pytest.mark.parametrize("width", [8])
@pytest.mark.parametrize(
    "embed_arch,embed_config",
    # fmt: off
    [
        ("spacy.MultiHashEmbed.v1", {"rows": [100, 100], "attrs": ["SHAPE", "LOWER"], "include_static_vectors": False}),
        ("spacy.MultiHashEmbed.v1", {"rows": [100, 20], "attrs": ["ORTH", "PREFIX"], "include_static_vectors": False}),
        ("spacy.CharacterEmbed.v1", {"rows": 100, "nM": 64, "nC": 8, "include_static_vectors": False}),
        ("spacy.CharacterEmbed.v1", {"rows": 100, "nM": 16, "nC": 2, "include_static_vectors": False}),
    ],
    # fmt: on
)
@pytest.mark.parametrize(
    "tok2vec_arch,encode_arch,encode_config",
    # fmt: off
    [
        ("spacy.Tok2Vec.v1", "spacy.MaxoutWindowEncoder.v1", {"window_size": 1, "maxout_pieces": 3, "depth": 2}),
        ("spacy.Tok2Vec.v2", "spacy.MaxoutWindowEncoder.v2", {"window_size": 1, "maxout_pieces": 3, "depth": 2}),
        ("spacy.Tok2Vec.v1", "spacy.MishWindowEncoder.v1", {"window_size": 1, "depth": 6}),
        ("spacy.Tok2Vec.v2", "spacy.MishWindowEncoder.v2", {"window_size": 1, "depth": 6}),
    ],
    # fmt: on
)
def test_tok2vec_configs(
    width, tok2vec_arch, embed_arch, embed_config, encode_arch, encode_config
):
    embed = registry.get("architectures", embed_arch)
    encode = registry.get("architectures", encode_arch)
    tok2vec_model = registry.get("architectures", tok2vec_arch)

    embed_config["width"] = width
    encode_config["width"] = width
    docs = get_batch(3)
    tok2vec = tok2vec_model(embed(**embed_config), encode(**encode_config))
    tok2vec.initialize(docs)
    vectors, backprop = tok2vec.begin_update(docs)
    assert len(vectors) == len(docs)
    assert vectors[0].shape == (len(docs[0]), width)
    backprop(vectors)


def test_init_tok2vec():
    # Simple test to initialize the default tok2vec
    nlp = English()
    tok2vec = nlp.add_pipe("tok2vec")
    assert tok2vec.listeners == []
    nlp.initialize()
    assert tok2vec.model.get_dim("nO")


cfg_string = """
    [nlp]
    lang = "en"
    pipeline = ["tok2vec","tagger"]

    [components]

    [components.tagger]
    factory = "tagger"

    [components.tagger.model]
    @architectures = "spacy.Tagger.v2"
    nO = null

    [components.tagger.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.tok2vec]
    factory = "tok2vec"

    [components.tok2vec.model]
    @architectures = "spacy.Tok2Vec.v2"

    [components.tok2vec.model.embed]
    @architectures = "spacy.MultiHashEmbed.v1"
    width = ${components.tok2vec.model.encode.width}
    rows = [2000, 1000, 1000, 1000]
    attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
    include_static_vectors = false

    [components.tok2vec.model.encode]
    @architectures = "spacy.MaxoutWindowEncoder.v2"
    width = 96
    depth = 4
    window_size = 1
    maxout_pieces = 3
    """


TRAIN_DATA = [
    (
        "I like green eggs",
        {"tags": ["N", "V", "J", "N"], "cats": {"preference": 1.0, "imperative": 0.0}},
    ),
    (
        "Eat blue ham",
        {"tags": ["V", "J", "N"], "cats": {"preference": 0.0, "imperative": 1.0}},
    ),
]


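# Train a tok2vec + tagger pipeline (optionally with static vectors) and check
# that the tagger's listener returns the same tensors that the tok2vec
# component wrote to doc.tensor.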
@pytest.mark.parametrize("with_vectors", (False, True))
def test_tok2vec_listener(with_vectors):
    orig_config = Config().from_str(cfg_string)
    orig_config["components"]["tok2vec"]["model"]["embed"][
        "include_static_vectors"
    ] = with_vectors
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)

    if with_vectors:
        ops = get_current_ops()
        vectors = [
            ("apple", ops.asarray([1, 2, 3])),
            ("orange", ops.asarray([-1, -2, -3])),
            ("and", ops.asarray([-1, -1, -1])),
            ("juice", ops.asarray([5, 5, 10])),
            ("pie", ops.asarray([7, 6.3, 8.9])),
        ]
        add_vecs_to_vocab(nlp.vocab, vectors)

    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    tagger_tok2vec = tagger.model.get_ref("tok2vec")
    assert isinstance(tok2vec, Tok2Vec)
    assert isinstance(tagger_tok2vec, Tok2VecListener)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
        for tag in t[1]["tags"]:
            tagger.add_label(tag)

    # Check that the Tok2Vec component finds its listeners
    optimizer = nlp.initialize(lambda: train_examples)
    assert tok2vec.listeners == [tagger_tok2vec]

    for i in range(5):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    doc = nlp("Running the pipeline as a whole.")
    doc_tensor = tagger_tok2vec.predict([doc])[0]
    ops = get_current_ops()
    assert_array_equal(ops.to_numpy(doc.tensor), ops.to_numpy(doc_tensor))

    # test with empty doc
    doc = nlp("")

    # TODO: should this warn or error?
    nlp.select_pipes(disable="tok2vec")
    assert nlp.pipe_names == ["tagger"]
    nlp("Running the pipeline with the Tok2Vec component disabled.")


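# Check that updating the tok2vec component leaves the downstream tagger's
# backprop callback in a usable state.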
def test_tok2vec_listener_callback():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "tagger"]
    tagger = nlp.get_pipe("tagger")
    tok2vec = nlp.get_pipe("tok2vec")
    docs = [nlp.make_doc("A random sentence")]
    tok2vec.model.initialize(X=docs)
    gold_array = [[1.0 for tag in ["V", "Z"]] for word in docs]
    label_sample = [tagger.model.ops.asarray(gold_array, dtype="float32")]
    tagger.model.initialize(X=docs, Y=label_sample)
    docs = [nlp.make_doc("Another entirely random sentence")]
    tok2vec.update([Example.from_dict(x, {}) for x in docs])
    Y, get_dX = tagger.model.begin_update(docs)
    # ensure that the backprop call works (and doesn't hit a 'None' callback)
    assert get_dX(Y) is not None


def test_tok2vec_listener_overfitting():
    """Test that a pipeline with a listener properly overfits, even if 'tok2vec' is in the annotating components"""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses, annotates=["tok2vec"])
    assert losses["tagger"] < 0.00001

    # test the trained model
    test_text = "I like blue eggs"
    doc = nlp(test_text)
    assert doc[0].tag_ == "N"
    assert doc[1].tag_ == "V"
    assert doc[2].tag_ == "J"
    assert doc[3].tag_ == "N"

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert doc2[0].tag_ == "N"
        assert doc2[1].tag_ == "V"
        assert doc2[2].tag_ == "J"
        assert doc2[3].tag_ == "N"


def test_tok2vec_frozen_not_annotating():
    """Test that a pipeline with a frozen tok2vec raises an error when the tok2vec is not annotating"""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for i in range(2):
        losses = {}
        with pytest.raises(
            ValueError, match=r"the tok2vec embedding layer is not updated"
        ):
            nlp.update(
                train_examples, sgd=optimizer, losses=losses, exclude=["tok2vec"]
            )


def test_tok2vec_frozen_overfitting():
    """Test that a pipeline with a frozen & annotating tok2vec can still overfit"""
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for i in range(100):
        losses = {}
        nlp.update(
            train_examples,
            sgd=optimizer,
            losses=losses,
            exclude=["tok2vec"],
            annotates=["tok2vec"],
        )
    assert losses["tagger"] < 0.0001

    # test the trained model
    test_text = "I like blue eggs"
    doc = nlp(test_text)
    assert doc[0].tag_ == "N"
    assert doc[1].tag_ == "V"
    assert doc[2].tag_ == "J"
    assert doc[3].tag_ == "N"

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert doc2[0].tag_ == "N"
        assert doc2[1].tag_ == "V"
        assert doc2[2].tag_ == "J"
        assert doc2[3].tag_ == "N"


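# Check that replace_listeners swaps the tagger's listener for an inline copy
# of the tok2vec model, rejects invalid arguments, and keeps the pipeline
# trainable afterwards.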
def test_replace_listeners():
    orig_config = Config().from_str(cfg_string)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    examples = [Example.from_dict(nlp.make_doc("x y"), {"tags": ["V", "Z"]})]
    nlp.initialize(lambda: examples)
    tok2vec = nlp.get_pipe("tok2vec")
    tagger = nlp.get_pipe("tagger")
    assert isinstance(tagger.model.layers[0], Tok2VecListener)
    assert tok2vec.listener_map["tagger"][0] == tagger.model.layers[0]
    assert (
        nlp.config["components"]["tok2vec"]["model"]["@architectures"]
        == "spacy.Tok2Vec.v2"
    )
    assert (
        nlp.config["components"]["tagger"]["model"]["tok2vec"]["@architectures"]
        == "spacy.Tok2VecListener.v1"
    )
    nlp.replace_listeners("tok2vec", "tagger", ["model.tok2vec"])
    assert not isinstance(tagger.model.layers[0], Tok2VecListener)
    t2v_cfg = nlp.config["components"]["tok2vec"]["model"]
    assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2"
    assert nlp.config["components"]["tagger"]["model"]["tok2vec"] == t2v_cfg
    with pytest.raises(ValueError):
        nlp.replace_listeners("invalid", "tagger", ["model.tok2vec"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "parser", ["model.tok2vec"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "tagger", ["model.yolo"])
    with pytest.raises(ValueError):
        nlp.replace_listeners("tok2vec", "tagger", ["model.tok2vec", "model.yolo"])
    # attempt training with the new pipeline
    optimizer = nlp.initialize(lambda: examples)
    for i in range(2):
        losses = {}
        nlp.update(examples, sgd=optimizer, losses=losses)
    assert losses["tok2vec"] == 0.0
    assert losses["tagger"] > 0.0


cfg_string_multi = """
    [nlp]
    lang = "en"
    pipeline = ["tok2vec","tagger", "ner"]

    [components]

    [components.tagger]
    factory = "tagger"

    [components.tagger.model]
    @architectures = "spacy.Tagger.v2"
    nO = null

    [components.tagger.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.ner]
    factory = "ner"

    [components.ner.model]
    @architectures = "spacy.TransitionBasedParser.v2"

    [components.ner.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.tok2vec]
    factory = "tok2vec"

    [components.tok2vec.model]
    @architectures = "spacy.Tok2Vec.v2"

    [components.tok2vec.model.embed]
    @architectures = "spacy.MultiHashEmbed.v1"
    width = ${components.tok2vec.model.encode.width}
    rows = [2000, 1000, 1000, 1000]
    attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
    include_static_vectors = false

    [components.tok2vec.model.encode]
    @architectures = "spacy.MaxoutWindowEncoder.v2"
    width = 96
    depth = 4
    window_size = 1
    maxout_pieces = 3
    """


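# Check that listeners can be replaced for components sourced from a pipeline
# saved to disk, via the "replace_listeners" setting in the config.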
def test_replace_listeners_from_config():
    orig_config = Config().from_str(cfg_string_multi)
    nlp = util.load_model_from_config(orig_config, auto_fill=True)
    annots = {"tags": ["V", "Z"], "entities": [(0, 1, "A"), (1, 2, "B")]}
    examples = [Example.from_dict(nlp.make_doc("x y"), annots)]
    nlp.initialize(lambda: examples)
    tok2vec = nlp.get_pipe("tok2vec")
    tagger = nlp.get_pipe("tagger")
    ner = nlp.get_pipe("ner")
    assert tok2vec.listening_components == ["tagger", "ner"]
    assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk())
    assert any(isinstance(node, Tok2VecListener) for node in tagger.model.walk())
    with make_tempdir() as dir_path:
        nlp.to_disk(dir_path)
        base_model = str(dir_path)
        new_config = {
            "nlp": {
                "lang": "en",
                "pipeline": ["tok2vec", "tagger2", "ner3", "tagger4"],
            },
            "components": {
                "tok2vec": {"source": base_model},
                "tagger2": {
                    "source": base_model,
                    "component": "tagger",
                    "replace_listeners": ["model.tok2vec"],
                },
                "ner3": {
                    "source": base_model,
                    "component": "ner",
                },
                "tagger4": {
                    "source": base_model,
                    "component": "tagger",
                },
            },
        }
        new_nlp = util.load_model_from_config(new_config, auto_fill=True)
        new_nlp.initialize(lambda: examples)
        tok2vec = new_nlp.get_pipe("tok2vec")
        tagger = new_nlp.get_pipe("tagger2")
        ner = new_nlp.get_pipe("ner3")
        assert "ner" not in new_nlp.pipe_names
        assert "tagger" not in new_nlp.pipe_names
        assert tok2vec.listening_components == ["ner3", "tagger4"]
        assert any(isinstance(node, Tok2VecListener) for node in ner.model.walk())
        assert not any(isinstance(node, Tok2VecListener) for node in tagger.model.walk())
        t2v_cfg = new_nlp.config["components"]["tok2vec"]["model"]
        assert t2v_cfg["@architectures"] == "spacy.Tok2Vec.v2"
        assert new_nlp.config["components"]["tagger2"]["model"]["tok2vec"] == t2v_cfg
        assert (
            new_nlp.config["components"]["ner3"]["model"]["tok2vec"]["@architectures"]
            == "spacy.Tok2VecListener.v1"
        )
        assert (
            new_nlp.config["components"]["tagger4"]["model"]["tok2vec"]["@architectures"]
            == "spacy.Tok2VecListener.v1"
        )


cfg_string_multi_textcat = """
    [nlp]
    lang = "en"
    pipeline = ["tok2vec","textcat_multilabel","tagger"]

    [components]

    [components.textcat_multilabel]
    factory = "textcat_multilabel"

    [components.textcat_multilabel.model]
    @architectures = "spacy.TextCatEnsemble.v2"
    nO = null

    [components.textcat_multilabel.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.textcat_multilabel.model.linear_model]
    @architectures = "spacy.TextCatBOW.v1"
    exclusive_classes = false
    ngram_size = 1
    no_output_layer = false

    [components.tagger]
    factory = "tagger"

    [components.tagger.model]
    @architectures = "spacy.Tagger.v2"
    nO = null

    [components.tagger.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.tok2vec]
    factory = "tok2vec"

    [components.tok2vec.model]
    @architectures = "spacy.Tok2Vec.v2"

    [components.tok2vec.model.embed]
    @architectures = "spacy.MultiHashEmbed.v1"
    width = ${components.tok2vec.model.encode.width}
    rows = [2000, 1000, 1000, 1000]
    attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
    include_static_vectors = false

    [components.tok2vec.model.encode]
    @architectures = "spacy.MaxoutWindowEncoder.v2"
    width = 96
    depth = 4
    window_size = 1
    maxout_pieces = 3
    """


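# Train a pipeline in which the textcat and the tagger both listen to a single
# shared tok2vec component, and check both components' predictions.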
def test_tok2vec_listeners_textcat():
    orig_config = Config().from_str(cfg_string_multi_textcat)
    nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp.pipe_names == ["tok2vec", "textcat_multilabel", "tagger"]
    tagger = nlp.get_pipe("tagger")
    textcat = nlp.get_pipe("textcat_multilabel")
    tok2vec = nlp.get_pipe("tok2vec")
    tagger_tok2vec = tagger.model.get_ref("tok2vec")
    textcat_tok2vec = textcat.model.get_ref("tok2vec")
    assert isinstance(tok2vec, Tok2Vec)
    assert isinstance(tagger_tok2vec, Tok2VecListener)
    assert isinstance(textcat_tok2vec, Tok2VecListener)
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))

    optimizer = nlp.initialize(lambda: train_examples)
    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    docs = list(nlp.pipe(["Eat blue ham", "I like green eggs"]))
    cats0 = docs[0].cats
    assert cats0["preference"] < 0.1
    assert cats0["imperative"] > 0.9
    cats1 = docs[1].cats
    assert cats1["preference"] > 0.1
    assert cats1["imperative"] < 0.9
    assert [t.tag_ for t in docs[0]] == ["V", "J", "N"]
    assert [t.tag_ for t in docs[1]] == ["N", "V", "J", "N"]


cfg_string_distillation = """
    [nlp]
    lang = "en"
    pipeline = ["tok2vec","tagger"]

    [components]

    [components.tagger]
    factory = "tagger"

    [components.tagger.model]
    @architectures = "spacy.Tagger.v2"
    nO = null

    [components.tagger.model.tok2vec]
    @architectures = "spacy.Tok2VecListener.v1"
    width = ${components.tok2vec.model.encode.width}

    [components.tok2vec]
    factory = "tok2vec"

    [components.tok2vec.model]
    @architectures = "spacy.Tok2Vec.v2"

    [components.tok2vec.model.embed]
    @architectures = "spacy.MultiHashEmbed.v2"
    width = ${components.tok2vec.model.encode.width}
    rows = [2000, 1000, 1000, 1000]
    attrs = ["NORM", "PREFIX", "SUFFIX", "SHAPE"]
    include_static_vectors = false

    [components.tok2vec.model.encode]
    @architectures = "spacy.MaxoutWindowEncoder.v2"
    width = 96
    depth = 4
    window_size = 1
    maxout_pieces = 3
    """


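# Check that distilling the student pipeline fills the examples' reference
# docs with the teacher's tensor annotations.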
def test_tok2vec_distillation_teacher_annotations():
    orig_config = Config().from_str(cfg_string_distillation)
    teacher_nlp = util.load_model_from_config(
        orig_config, auto_fill=True, validate=True
    )
    student_nlp = util.load_model_from_config(
        orig_config, auto_fill=True, validate=True
    )

    train_examples_teacher = []
    train_examples_student = []
    for t in TRAIN_DATA:
        train_examples_teacher.append(
            Example.from_dict(teacher_nlp.make_doc(t[0]), t[1])
        )
        train_examples_student.append(
            Example.from_dict(student_nlp.make_doc(t[0]), t[1])
        )

    optimizer = teacher_nlp.initialize(lambda: train_examples_teacher)
    student_nlp.initialize(lambda: train_examples_student)

    # Since Language.distill creates a copy of the examples to use as
    # its internal teacher/student docs, we'll need to monkey-patch the
    # tok2vec pipe's distill method.
    student_tok2vec = student_nlp.get_pipe("tok2vec")
    student_tok2vec._old_distill = student_tok2vec.distill

    def tok2vec_distill_wrapper(
        self,
        teacher_pipe,
        examples,
        **kwargs,
    ):
        assert all(not eg.reference.tensor.any() for eg in examples)
        out = self._old_distill(teacher_pipe, examples, **kwargs)
        assert all(eg.reference.tensor.any() for eg in examples)
        return out

    student_tok2vec.distill = tok2vec_distill_wrapper.__get__(student_tok2vec, Tok2Vec)
    student_nlp.distill(teacher_nlp, train_examples_student, sgd=optimizer, losses={})


def test_tok2vec_listener_source_link_name():
    """The component's internal name and the tok2vec listener map correspond
    to the most recently modified pipeline.
    """
    orig_config = Config().from_str(cfg_string_multi)
    nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]

    nlp2 = English()
    nlp2.add_pipe("tok2vec", source=nlp1)
    nlp2.add_pipe("tagger", name="tagger2", source=nlp1)

    # the component can't have the right name for both pipelines at once;
    # currently the most recently modified pipeline takes priority
    assert nlp1.get_pipe("tagger").name == nlp2.get_pipe("tagger2").name == "tagger2"

    # the tok2vec can't have the right listener map for both pipelines at
    # once; currently the most recently modified pipeline takes priority
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"]
    nlp2.add_pipe("ner", name="ner3", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2", "ner3"]
    nlp2.remove_pipe("ner3")
    assert nlp2.get_pipe("tok2vec").listening_components == ["tagger2"]
    nlp2.remove_pipe("tagger2")
    assert nlp2.get_pipe("tok2vec").listening_components == []

    # at this point the tok2vec component corresponds to nlp2
    assert nlp1.get_pipe("tok2vec").listening_components == []

    # modifying the nlp1 pipeline syncs the tok2vec listener map back to nlp1
    nlp1.add_pipe("sentencizer")
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]

    # modifying nlp2 syncs it back to nlp2
    nlp2.add_pipe("sentencizer")
    assert nlp1.get_pipe("tok2vec").listening_components == []


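# Check which listeners stay linked when sourcing components from a pipeline
# whose tagger listener was already replaced.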
def test_tok2vec_listener_source_replace_listeners():
    orig_config = Config().from_str(cfg_string_multi)
    nlp1 = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
    assert nlp1.get_pipe("tok2vec").listening_components == ["tagger", "ner"]
    nlp1.replace_listeners("tok2vec", "tagger", ["model.tok2vec"])
    assert nlp1.get_pipe("tok2vec").listening_components == ["ner"]

    nlp2 = English()
    nlp2.add_pipe("tok2vec", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == []
    nlp2.add_pipe("tagger", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == []
    nlp2.add_pipe("ner", name="ner2", source=nlp1)
    assert nlp2.get_pipe("tok2vec").listening_components == ["ner2"]
|