spaCy/spacy/tests/pipeline/test_senter.py
Daniël de Kok efdbb722c5
Store activations in Docs when save_activations is enabled (#11002)
* Store activations in Doc when `store_activations` is enabled

This change adds the new `activations` attribute to `Doc`. This
attribute can be used by trainable pipes to store their activations,
probabilities, and guesses for downstream users.

As an example, this change modifies the `tagger` and `senter` pipes to
add a `store_activations` option. When this option is enabled, the
probabilities and guesses are stored on the `Doc` during `set_annotations`.
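
For downstream users, usage would look roughly like the sketch below. The
names follow the final state of this series (`save_activations`,
`"probabilities"`, `"label_ids"`; see the renames further down) and mirror
the test at the bottom of this file:

    from spacy.lang.en import English

    nlp = English()
    senter = nlp.add_pipe("senter")
    nlp.initialize()  # in practice, initialize with examples or load a trained pipeline

    senter.save_activations = True
    doc = nlp("This is a test.")
    scores = doc.activations["senter"]["probabilities"]  # per-token probability matrix
    label_ids = doc.activations["senter"]["label_ids"]   # per-token argmax label ids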

* Change type of `store_activations` to `Union[bool, List[str]]`

When the value is:

- A `bool`: when `True`, all activations are stored.
- A `List[str]`: only the activations named in the list are stored.

* Formatting fixes in Tagger

* Support store_activations in spancat and morphologizer

* Make Doc.activations type visible to MyPy

* textcat/textcat_multilabel: add store_activations option

* trainable_lemmatizer/entity_linker: add store_activations option

* parser/ner: do not currently support returning activations

* Extend tagger and senter tests

So that they, like the other tests, also check that we get no
activations if no activations were requested.

* Document `Doc.activations` and `store_activations` in the relevant pipes

* Start errors/warnings at higher numbers to avoid merge conflicts

Between the master and v4 branches.

* Add `store_activations` to docstrings.

* Replace store_activations setter by set_store_activations method

Setters that take a different type than the getter returns are still
problematic for MyPy. Replace the setter with a method so that type inference
works everywhere.
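
To illustrate (a toy sketch, not the actual spaCy code), the setter-based
pattern and the method that replaces it:

    from typing import List, Union

    class Pipe:
        def __init__(self) -> None:
            self._store_activations: List[str] = []

        @property
        def store_activations(self) -> List[str]:
            return self._store_activations

        # Problematic: MyPy checks assignments against the getter's return type,
        # so `pipe.store_activations = True` is flagged even though the setter
        # accepts Union[bool, List[str]].
        @store_activations.setter
        def store_activations(self, value: Union[bool, List[str]]) -> None:
            self.set_store_activations(value)

        # Replacement: a plain method declares its own parameter type, so
        # `pipe.set_store_activations(True)` type-checks everywhere.
        def set_store_activations(self, value: Union[bool, List[str]]) -> None:
            if value is True:
                self._store_activations = ["probs", "guesses"]
            elif value is False:
                self._store_activations = []
            else:
                self._store_activations = list(value)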

* Use dict comprehension suggested by @svlandeg

* Revert "Use dict comprehension suggested by @svlandeg"

This reverts commit 6e7b958f70.

* EntityLinker: add type annotations to _add_activations

* _store_activations: make kwarg-only, remove doc_scores_lens arg

* set_annotations: add type annotations

* Apply suggestions from code review

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>

* TextCat.predict: return dict

* Make the `TrainablePipe.store_activations` property a bool

This means that we can also bring back the `store_activations` setter.

* Remove `TrainablePipe.activations`

We do not need to enumerate the activations anymore since `store_activations` is
a `bool`.

* Add type annotations for activations in predict/set_annotations

* Rename `TrainablePipe.store_activations` to `save_activations`

* Error E1400 is not used anymore

This error was used when activations were still `Union[bool, List[str]]`.

* Change wording in API docs after store -> save change

* docs: tag (save_)activations as new in spaCy 4.0

* Fix copied line in morphologizer activations test

* Don't train in any test_save_activations test

* Rename activations

- "probs" -> "probabilities"
- "guesses" -> "label_ids", except in the edit tree lemmatizer, where
  "guesses" -> "tree_ids".

* Remove unused W400 warning.

This warning was used when we still allowed the user to specify
which activations to save.

* Formatting fixes

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>

* Replace "kb_ids" by a constant

* spancat: replace a cast by an assertion

* Fix EOF spacing

* Fix comments in test_save_activations tests

* Do not set RNG seed in activation saving tests

* Revert "spancat: replace a cast by an assertion"

This reverts commit 0bd5730d16.

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
2022-09-13 09:51:12 +02:00


from typing import cast

import pytest
from numpy.testing import assert_equal

from spacy.attrs import SENT_START
from spacy import util
from spacy.training import Example
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline import TrainablePipe
from spacy.tests.util import make_tempdir


def test_label_types():
    nlp = Language()
    senter = nlp.add_pipe("senter")
    with pytest.raises(NotImplementedError):
        senter.add_label("A")


SENT_STARTS = [0] * 14
SENT_STARTS[0] = 1
SENT_STARTS[5] = 1
SENT_STARTS[9] = 1

TRAIN_DATA = [
    (
        "I like green eggs. Eat blue ham. I like purple eggs.",
        {"sent_starts": SENT_STARTS},
    ),
    (
        "She likes purple eggs. They hate ham. You like yellow eggs.",
        {"sent_starts": SENT_STARTS},
    ),
]


def test_initialize_examples():
    nlp = Language()
    nlp.add_pipe("senter")
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    # you shouldn't really call this more than once, but for testing it should be fine
    nlp.initialize()
    nlp.initialize(get_examples=lambda: train_examples)
    with pytest.raises(TypeError):
        nlp.initialize(get_examples=lambda: None)
    with pytest.raises(TypeError):
        nlp.initialize(get_examples=train_examples)


def test_overfitting_IO():
    # Simple test to try and quickly overfit the senter - ensuring the ML models work correctly
    nlp = English()
    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    # add some cases where SENT_START == -1
    train_examples[0].reference[10].is_sent_start = False
    train_examples[1].reference[1].is_sent_start = False
    train_examples[1].reference[11].is_sent_start = False
    nlp.add_pipe("senter")
    optimizer = nlp.initialize()

    for i in range(200):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["senter"] < 0.001

    # test the trained model
    test_text = TRAIN_DATA[0][0]
    doc = nlp(test_text)
    gold_sent_starts = [0] * 14
    gold_sent_starts[0] = 1
    gold_sent_starts[5] = 1
    gold_sent_starts[9] = 1
    assert [int(t.is_sent_start) for t in doc] == gold_sent_starts

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert [int(t.is_sent_start) for t in doc2] == gold_sent_starts

    # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
    texts = [
        "Just a sentence.",
        "Then one more sentence about London.",
        "Here is another one.",
        "I like London.",
    ]
    batch_deps_1 = [doc.to_array([SENT_START]) for doc in nlp.pipe(texts)]
    batch_deps_2 = [doc.to_array([SENT_START]) for doc in nlp.pipe(texts)]
    no_batch_deps = [
        doc.to_array([SENT_START]) for doc in [nlp(text) for text in texts]
    ]
    assert_equal(batch_deps_1, batch_deps_2)
    assert_equal(batch_deps_1, no_batch_deps)

    # test internal pipe labels vs. Language.pipe_labels with hidden labels
    assert nlp.get_pipe("senter").labels == ("I", "S")
    assert "senter" not in nlp.pipe_labels


def test_save_activations():
    # Test if activations are correctly added to Doc when requested.
    nlp = English()
    senter = cast(TrainablePipe, nlp.add_pipe("senter"))

    train_examples = []
    for t in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(t[0]), t[1]))
    nlp.initialize(get_examples=lambda: train_examples)
    nO = senter.model.get_dim("nO")

    doc = nlp("This is a test.")
    assert "senter" not in doc.activations

    senter.save_activations = True
    doc = nlp("This is a test.")
    assert "senter" in doc.activations
    assert set(doc.activations["senter"].keys()) == {"label_ids", "probabilities"}
    assert doc.activations["senter"]["probabilities"].shape == (5, nO)
    assert doc.activations["senter"]["label_ids"].shape == (5,)