import pytest
import numpy
from numpy.testing import assert_array_equal, assert_almost_equal
from thinc.api import get_current_ops, Ragged, fix_random_seed

from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.tokens import SpanGroup
from spacy.tokens.span_groups import SpanGroups
from spacy.training import Example
from spacy.util import registry, make_tempdir

OPS = get_current_ops()

SPAN_KEY = "labeled_spans"
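
# Span annotations are given as character offsets (start_char, end_char, label),
# e.g. (7, 17, "PERSON") covers "Shaka Khan" in "Who is Shaka Khan?".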
TRAIN_DATA = [
    ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}),
    (
        "I like London and Berlin.",
        {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC")]}},
    ),
]

TRAIN_DATA_OVERLAPPING = [
    ("Who is Shaka Khan?", {"spans": {SPAN_KEY: [(7, 17, "PERSON")]}}),
    (
        "I like London and Berlin",
        {"spans": {SPAN_KEY: [(7, 13, "LOC"), (18, 24, "LOC"), (7, 24, "DOUBLE_LOC")]}},
    ),
    ("", {"spans": {SPAN_KEY: []}}),
]
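

# Helper to turn (text, annotation-dict) tuples into Example objects.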
def make_examples(nlp, data=TRAIN_DATA):
    train_examples = []
    for t in data:
        eg = Example.from_dict(nlp.make_doc(t[0]), t[1])
        train_examples.append(eg)
    return train_examples


def test_no_label():
    nlp = Language()
    nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    with pytest.raises(ValueError):
        nlp.initialize()


def test_no_resize():
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    spancat.add_label("Thing")
    spancat.add_label("Phrase")
    assert spancat.labels == ("Thing", "Phrase")
    nlp.initialize()
    assert spancat.model.get_dim("nO") == 2
    # this throws an error because the spancat can't be resized after initialization
    with pytest.raises(ValueError):
        spancat.add_label("Stuff")


def test_implicit_labels():
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    assert len(spancat.labels) == 0
    train_examples = make_examples(nlp)
    nlp.initialize(get_examples=lambda: train_examples)
    assert spancat.labels == ("PERSON", "LOC")


def test_explicit_labels():
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    assert len(spancat.labels) == 0
    spancat.add_label("PERSON")
    spancat.add_label("LOC")
    nlp.initialize()
    assert spancat.labels == ("PERSON", "LOC")


# TODO figure out why this is flaky
@pytest.mark.skip(reason="Test is unreliable for unknown reason")
def test_doc_gc():
    # If the Doc object is garbage collected, the spans won't be functional afterwards
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    spancat.add_label("PERSON")
    nlp.initialize()
    texts = [
        "Just a sentence.",
        "I like London and Berlin",
        "I like Berlin",
        "I eat ham.",
    ]
    all_spans = [doc.spans for doc in nlp.pipe(texts)]
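    # Keeping only the SpanGroups drops the last references to the Docs,
    # so the Docs can be garbage collected here.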
    for text, spangroups in zip(texts, all_spans):
        assert isinstance(spangroups, SpanGroups)
        for key, spangroup in spangroups.items():
            assert isinstance(spangroup, SpanGroup)
            # XXX This fails with length 0 sometimes
            assert len(spangroup) > 0
            with pytest.raises(RuntimeError):
                span = spangroup[0]


@pytest.mark.parametrize(
    "max_positive,nr_results", [(None, 4), (1, 2), (2, 3), (3, 4), (4, 4)]
)
def test_make_spangroup(max_positive, nr_results):
    fix_random_seed(0)
    nlp = Language()
    spancat = nlp.add_pipe(
        "spancat",
        config={"spans_key": SPAN_KEY, "threshold": 0.5, "max_positive": max_positive},
    )
    doc = nlp.make_doc("Greater London")
    ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2])
    indices = ngram_suggester([doc])[0].dataXd
    assert_array_equal(OPS.to_numpy(indices), numpy.asarray([[0, 1], [1, 2], [0, 2]]))
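    # Each row of `scores` corresponds to one suggested span, in the order
    # asserted above ("Greater", "London", "Greater London"); each column
    # corresponds to one entry in `labels`.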
    labels = ["Thing", "City", "Person", "GreatCity"]
    scores = numpy.asarray(
        [[0.2, 0.4, 0.3, 0.1], [0.1, 0.6, 0.2, 0.4], [0.8, 0.7, 0.3, 0.9]], dtype="f"
    )
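    # With threshold 0.5, "Greater" has no positive labels, "London" has one
    # (City, 0.6) and "Greater London" has three (Thing 0.8, City 0.7,
    # GreatCity 0.9); max_positive caps how many of those three are kept.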
    spangroup = spancat._make_span_group(doc, indices, scores, labels)
    assert len(spangroup) == nr_results

    # first span is always the second token "London"
    assert spangroup[0].text == "London"
    assert spangroup[0].label_ == "City"
    assert_almost_equal(0.6, spangroup.attrs["scores"][0], 5)

    # second span depends on the number of positives that were allowed
    assert spangroup[1].text == "Greater London"
    if max_positive == 1:
        assert spangroup[1].label_ == "GreatCity"
        assert_almost_equal(0.9, spangroup.attrs["scores"][1], 5)
    else:
        assert spangroup[1].label_ == "Thing"
        assert_almost_equal(0.8, spangroup.attrs["scores"][1], 5)

    if nr_results > 2:
        assert spangroup[2].text == "Greater London"
        if max_positive == 2:
            assert spangroup[2].label_ == "GreatCity"
            assert_almost_equal(0.9, spangroup.attrs["scores"][2], 5)
        else:
            assert spangroup[2].label_ == "City"
            assert_almost_equal(0.7, spangroup.attrs["scores"][2], 5)

    assert spangroup[-1].text == "Greater London"
    assert spangroup[-1].label_ == "GreatCity"
    assert_almost_equal(0.9, spangroup.attrs["scores"][-1], 5)


def test_ngram_suggester(en_tokenizer):
    # test different n-gram lengths
    for size in [1, 2, 3]:
        ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[size])
        docs = [
            en_tokenizer(text)
            for text in [
                "a",
                "a b",
                "a b c",
                "a b c d",
                "a b c d e",
                "a " * 100,
            ]
        ]
        ngrams = ngram_suggester(docs)
        # span sizes are correct
        for s in ngrams.data:
            assert s[1] - s[0] == size
        # spans are within docs
        offset = 0
        for i, doc in enumerate(docs):
            spans = ngrams.dataXd[offset : offset + ngrams.lengths[i]]
            spans_set = set()
            for span in spans:
                assert 0 <= span[0] < len(doc)
                assert 0 < span[1] <= len(doc)
                spans_set.add((int(span[0]), int(span[1])))
            # spans are unique
            assert spans.shape[0] == len(spans_set)
            offset += ngrams.lengths[i]
        # the number of spans is correct
        assert_array_equal(
            OPS.to_numpy(ngrams.lengths),
            [max(0, len(doc) - (size - 1)) for doc in docs],
        )

    # test 1-3-gram suggestions
    ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3])
    docs = [
        en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
    ]
    ngrams = ngram_suggester(docs)
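    # A doc of n tokens yields n unigrams, n - 1 bigrams and n - 2 trigrams,
    # so docs of 1-5 tokens produce 1, 3, 6, 9 and 12 suggestions.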
    assert_array_equal(OPS.to_numpy(ngrams.lengths), [1, 3, 6, 9, 12])
    assert_array_equal(
        OPS.to_numpy(ngrams.data),
        [
            # doc 0
            [0, 1],
            # doc 1
            [0, 1],
            [1, 2],
            [0, 2],
            # doc 2
            [0, 1],
            [1, 2],
            [2, 3],
            [0, 2],
            [1, 3],
            [0, 3],
            # doc 3
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4],
            [0, 2],
            [1, 3],
            [2, 4],
            [0, 3],
            [1, 4],
            # doc 4
            [0, 1],
            [1, 2],
            [2, 3],
            [3, 4],
            [4, 5],
            [0, 2],
            [1, 3],
            [2, 4],
            [3, 5],
            [0, 3],
            [1, 4],
            [2, 5],
        ],
    )

    # test some empty docs
    ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1])
    docs = [en_tokenizer(text) for text in ["", "a", ""]]
    ngrams = ngram_suggester(docs)
    assert_array_equal(OPS.to_numpy(ngrams.lengths), [len(doc) for doc in docs])

    # test all empty docs
    ngram_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1])
    docs = [en_tokenizer(text) for text in ["", "", ""]]
    ngrams = ngram_suggester(docs)
    assert_array_equal(OPS.to_numpy(ngrams.lengths), [len(doc) for doc in docs])


def test_ngram_sizes(en_tokenizer):
    # test that the range suggester works well
    size_suggester = registry.misc.get("spacy.ngram_suggester.v1")(sizes=[1, 2, 3])
    suggester_factory = registry.misc.get("spacy.ngram_range_suggester.v1")
    range_suggester = suggester_factory(min_size=1, max_size=3)
    docs = [
        en_tokenizer(text) for text in ["a", "a b", "a b c", "a b c d", "a b c d e"]
    ]
    ngrams_1 = size_suggester(docs)
    ngrams_2 = range_suggester(docs)
    assert_array_equal(OPS.to_numpy(ngrams_1.lengths), [1, 3, 6, 9, 12])
    assert_array_equal(OPS.to_numpy(ngrams_1.lengths), OPS.to_numpy(ngrams_2.lengths))
    assert_array_equal(OPS.to_numpy(ngrams_1.data), OPS.to_numpy(ngrams_2.data))

    # one more variation
    suggester_factory = registry.misc.get("spacy.ngram_range_suggester.v1")
    range_suggester = suggester_factory(min_size=2, max_size=4)
    ngrams_3 = range_suggester(docs)
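    # With min_size=2 unigrams are excluded, so docs of 1-5 tokens yield
    # 0, 1, 3, 6 and 9 suggestions of sizes 2-4.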
    assert_array_equal(OPS.to_numpy(ngrams_3.lengths), [0, 1, 3, 6, 9])


def test_overfitting_IO():
    # Simple test to try and quickly overfit the spancat component - ensuring the ML models work correctly
    fix_random_seed(0)
    nlp = English()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    train_examples = make_examples(nlp)
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert spancat.model.get_dim("nO") == 2
    assert set(spancat.labels) == {"LOC", "PERSON"}

    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["spancat"] < 0.01

    # test the trained model
    test_text = "I like London and Berlin"
    doc = nlp(test_text)
    assert doc.spans[spancat.key] == doc.spans[SPAN_KEY]
    spans = doc.spans[SPAN_KEY]
    assert len(spans) == 2
    assert len(spans.attrs["scores"]) == 2
    assert min(spans.attrs["scores"]) > 0.9
    assert set([span.text for span in spans]) == {"London", "Berlin"}
    assert set([span.label_ for span in spans]) == {"LOC"}

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        spans2 = doc2.spans[SPAN_KEY]
        assert len(spans2) == 2
        assert len(spans2.attrs["scores"]) == 2
        assert min(spans2.attrs["scores"]) > 0.9
        assert set([span.text for span in spans2]) == {"London", "Berlin"}
        assert set([span.label_ for span in spans2]) == {"LOC"}

    # Test scoring
    scores = nlp.evaluate(train_examples)
    assert f"spans_{SPAN_KEY}_f" in scores
    assert scores[f"spans_{SPAN_KEY}_p"] == 1.0
    assert scores[f"spans_{SPAN_KEY}_r"] == 1.0
    assert scores[f"spans_{SPAN_KEY}_f"] == 1.0

    # also test that the spancat works for just a single entity in a sentence
    doc = nlp("London")
    assert len(doc.spans[spancat.key]) == 1


def test_overfitting_IO_overlapping():
    # Test for overfitting on overlapping entities
    fix_random_seed(0)
    nlp = English()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})

    train_examples = make_examples(nlp, data=TRAIN_DATA_OVERLAPPING)
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert spancat.model.get_dim("nO") == 3
    assert set(spancat.labels) == {"PERSON", "LOC", "DOUBLE_LOC"}

    for i in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["spancat"] < 0.01

    # test the trained model
    test_text = "I like London and Berlin"
    doc = nlp(test_text)
    spans = doc.spans[SPAN_KEY]
    assert len(spans) == 3
    assert len(spans.attrs["scores"]) == 3
    assert min(spans.attrs["scores"]) > 0.9
    assert set([span.text for span in spans]) == {
        "London",
        "Berlin",
        "London and Berlin",
    }
    assert set([span.label_ for span in spans]) == {"LOC", "DOUBLE_LOC"}

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        spans2 = doc2.spans[SPAN_KEY]
        assert len(spans2) == 3
        assert len(spans2.attrs["scores"]) == 3
        assert min(spans2.attrs["scores"]) > 0.9
        assert set([span.text for span in spans2]) == {
            "London",
            "Berlin",
            "London and Berlin",
        }
        assert set([span.label_ for span in spans2]) == {"LOC", "DOUBLE_LOC"}


def test_zero_suggestions():
    # Test with a suggester that returns 0 suggestions

    @registry.misc("test_zero_suggester")
    def make_zero_suggester():
        def zero_suggester(docs, *, ops=None):
            if ops is None:
                ops = get_current_ops()
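            # Return a Ragged with no span indices and a suggestion count of
            # zero for every doc.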
            return Ragged(
                ops.xp.zeros((0, 0), dtype="i"), ops.xp.zeros((len(docs),), dtype="i")
            )

        return zero_suggester

    fix_random_seed(0)
    nlp = English()
    spancat = nlp.add_pipe(
        "spancat",
        config={"suggester": {"@misc": "test_zero_suggester"}, "spans_key": SPAN_KEY},
    )
    train_examples = make_examples(nlp)
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert spancat.model.get_dim("nO") == 2
    assert set(spancat.labels) == {"LOC", "PERSON"}

    nlp.update(train_examples, sgd=optimizer)


def test_set_candidates():
    nlp = Language()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    train_examples = make_examples(nlp)
    nlp.initialize(get_examples=lambda: train_examples)
    texts = [
        "Just a sentence.",
        "I like London and Berlin",
        "I like Berlin",
        "I eat ham.",
    ]

    docs = [nlp(text) for text in texts]
    spancat.set_candidates(docs)
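
    # "Just a sentence." has 4 tokens, so the default 1-3-gram suggester
    # proposes 4 + 3 + 2 = 9 candidates, unigrams first (index 4 is the
    # first bigram, "Just a").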
    assert len(docs) == len(texts)
    assert type(docs[0].spans["candidates"]) == SpanGroup
    assert len(docs[0].spans["candidates"]) == 9
    assert docs[0].spans["candidates"][0].text == "Just"
    assert docs[0].spans["candidates"][4].text == "Just a"


def test_save_activations():
    # Test if activations are correctly added to Doc when requested.
    nlp = English()
    spancat = nlp.add_pipe("spancat", config={"spans_key": SPAN_KEY})
    train_examples = make_examples(nlp)
    nlp.initialize(get_examples=lambda: train_examples)
    nO = spancat.model.get_dim("nO")
    assert nO == 2
    assert set(spancat.labels) == {"LOC", "PERSON"}

    doc = nlp("This is a test.")
    assert "spancat" not in doc.activations

    spancat.save_activations = True
    doc = nlp("This is a test.")
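    # The doc has 5 tokens, so the default 1-3-gram suggester proposes
    # 5 + 4 + 3 = 12 candidate spans: 12 (start, end) index pairs and
    # 12 rows of label scores.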
    assert set(doc.activations["spancat"].keys()) == {"indices", "scores"}
    assert doc.activations["spancat"]["indices"].shape == (12, 2)
    assert doc.activations["spancat"]["scores"].shape == (12, nO)