import itertools

import numpy
import pytest
from numpy.testing import assert_equal
from thinc.api import Adam, fix_random_seed

from spacy import registry, util
from spacy.attrs import DEP, NORM
from spacy.lang.en import English
from spacy.tokens import Doc
from spacy.training import Example
from spacy.vocab import Vocab

from ...pipeline import DependencyParser
from ...pipeline.dep_parser import DEFAULT_PARSER_MODEL
from ...pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from ..util import apply_transition_sequence, make_tempdir


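# Small hand-annotated examples used to train or overfit the parser in the
# tests below.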
TRAIN_DATA = [
    (
        "They trade mortgage-backed securities.",
        {
            "heads": [1, 1, 4, 4, 5, 1, 1],
            "deps": ["nsubj", "ROOT", "compound", "punct", "nmod", "dobj", "punct"],
        },
    ),
    (
        "I like London and Berlin.",
        {
            "heads": [1, 1, 1, 2, 2, 1],
            "deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
        },
    ),
]

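# The same sentence annotated in two contradictory ways, used to make the beam
# parser produce uncertain scores.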
CONFLICTING_DATA = [
    (
        "I like London and Berlin.",
        {
            "heads": [1, 1, 1, 2, 2, 1],
            "deps": ["nsubj", "ROOT", "dobj", "cc", "conj", "punct"],
        },
    ),
    (
        "I like London and Berlin.",
        {
            "heads": [0, 0, 0, 0, 0, 0],
            "deps": ["ROOT", "nsubj", "nsubj", "cc", "conj", "punct"],
        },
    ),
]

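# Partially annotated example: None marks tokens without a gold head/dep.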
PARTIAL_DATA = [
    (
        "I like London.",
        {
            "heads": [1, 1, 1, None],
            "deps": ["nsubj", "ROOT", "dobj", None],
        },
    ),
]

PARSERS = ["parser"]  # TODO: Test beam_parser when ready

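# Tolerance for the beam head/label scores, which should be probabilities in [0, 1].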
eps = 0.1


@pytest.fixture
def vocab():
    return Vocab(lex_attr_getters={NORM: lambda s: s})


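# A minimal parser trained for a few updates on a four-token toy example, so
# that downstream tests have a parser with real (if tiny) weights.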
@pytest.fixture
def parser(vocab):
    vocab.strings.add("ROOT")
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    parser = DependencyParser(vocab, model)
    parser.cfg["token_vector_width"] = 4
    parser.cfg["hidden_width"] = 32
    # parser.add_label('right')
    parser.add_label("left")
    parser.initialize(lambda: [_parser_example(parser)])
    sgd = Adam(0.001)

    for i in range(10):
        losses = {}
        doc = Doc(vocab, words=["a", "b", "c", "d"])
        example = Example.from_dict(
            doc, {"heads": [1, 1, 3, 3], "deps": ["left", "ROOT", "left", "ROOT"]}
        )
        parser.update([example], sgd=sgd, losses=losses)
    return parser


def _parser_example(parser):
    doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
    gold = {"heads": [1, 1, 3, 3], "deps": ["right", "ROOT", "left", "ROOT"]}
    return Example.from_dict(doc, gold)


@pytest.mark.issue(2772)
def test_issue2772(en_vocab):
    """Test that deprojectivization doesn't mess up sentence boundaries."""
    # fmt: off
    words = ["When", "we", "write", "or", "communicate", "virtually", ",", "we", "can", "hide", "our", "true", "feelings", "."]
    # fmt: on
    # A tree with a non-projective (i.e. crossing) arc
    # The arcs (0, 4) and (2, 9) cross.
    heads = [4, 2, 9, 2, 2, 4, 9, 9, 9, 9, 12, 12, 9, 9]
    deps = ["dep"] * len(heads)
    doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
    assert doc[1].is_sent_start is False


@pytest.mark.issue(3830)
def test_issue3830_no_subtok():
    """Test that the parser doesn't have subtok label if learn_tokens=False."""
    config = {
        "learn_tokens": False,
    }
    model = registry.resolve({"model": DEFAULT_PARSER_MODEL}, validate=True)["model"]
    parser = DependencyParser(Vocab(), model, **config)
    parser.add_label("nsubj")
    assert "subtok" not in parser.labels
    parser.initialize(lambda: [_parser_example(parser)])
    assert "subtok" not in parser.labels


@pytest.mark.issue(3830)
def test_issue3830_with_subtok():
    """Test that the parser does have subtok label if learn_tokens=True."""
    config = {
        "learn_tokens": True,
    }
    model = registry.resolve({"model": DEFAULT_PARSER_MODEL}, validate=True)["model"]
    parser = DependencyParser(Vocab(), model, **config)
    parser.add_label("nsubj")
    assert "subtok" not in parser.labels
    parser.initialize(lambda: [_parser_example(parser)])
    assert "subtok" in parser.labels


@pytest.mark.issue(7716)
@pytest.mark.xfail(reason="Not fixed yet")
def test_partial_annotation(parser):
    doc = Doc(parser.vocab, words=["a", "b", "c", "d"])
    doc[2].is_sent_start = False
    # Note that if the following line is used, then doc[2].is_sent_start == False
    # doc[3].is_sent_start = False

    doc = parser(doc)
    assert doc[2].is_sent_start == False


def test_parser_root(en_vocab):
    words = ["i", "do", "n't", "have", "other", "assistance"]
    heads = [3, 3, 3, 3, 5, 3]
    deps = ["nsubj", "aux", "neg", "ROOT", "amod", "dobj"]
    doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
    for t in doc:
        assert t.dep != 0, t.text


@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
@pytest.mark.parametrize("words", [["Hello"]])
def test_parser_parse_one_word_sentence(en_vocab, en_parser, words):
    doc = Doc(en_vocab, words=words, heads=[0], deps=["ROOT"])
    assert len(doc) == 1
    with en_parser.step_through(doc) as _:  # noqa: F841
        pass
    assert doc[0].dep != 0


def test_parser_apply_actions(en_vocab, en_parser):
    words = ["I", "ate", "pizza"]
    words2 = ["Eat", "more", "pizza", "!"]
    doc1 = Doc(en_vocab, words=words)
    doc2 = Doc(en_vocab, words=words2)
    docs = [doc1, doc2]

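    # For the arc-eager transition system used by en_parser, the first argument
    # to add_action is the move type (0=SHIFT, 1=REDUCE, 2=LEFT-ARC,
    # 3=RIGHT-ARC) and the second is the dependency label.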
    moves = en_parser.moves
    moves.add_action(0, "")
    moves.add_action(1, "")
    moves.add_action(2, "nsubj")
    moves.add_action(3, "obj")
    moves.add_action(2, "amod")

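    # One array per transition step; each array holds one action ID per state
    # that is still active, so the arrays get shorter as parses finish.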
    actions = [
        numpy.array([0, 0], dtype="i"),
        numpy.array([2, 0], dtype="i"),
        numpy.array([0, 4], dtype="i"),
        numpy.array([3, 3], dtype="i"),
        numpy.array([1, 1], dtype="i"),
        numpy.array([1, 1], dtype="i"),
        numpy.array([0], dtype="i"),
        numpy.array([1], dtype="i"),
    ]

    states = moves.init_batch(docs)
    active_states = states

    for step_actions in actions:
        active_states = moves.apply_actions(active_states, step_actions)

    assert len(active_states) == 0

    for (state, doc) in zip(states, docs):
        moves.set_annotations(state, doc)

    assert docs[0][0].head.i == 1
    assert docs[0][0].dep_ == "nsubj"
    assert docs[0][1].head.i == 1
    assert docs[0][1].dep_ == "ROOT"
    assert docs[0][2].head.i == 1
    assert docs[0][2].dep_ == "obj"

    assert docs[1][0].head.i == 0
    assert docs[1][0].dep_ == "ROOT"
    assert docs[1][1].head.i == 2
    assert docs[1][1].dep_ == "amod"
    assert docs[1][2].head.i == 0
    assert docs[1][2].dep_ == "obj"


@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
def test_parser_initial(en_vocab, en_parser):
    words = ["I", "ate", "the", "pizza", "with", "anchovies", "."]
    transition = ["L-nsubj", "S", "L-det"]
    doc = Doc(en_vocab, words=words)
    apply_transition_sequence(en_parser, doc, transition)
    assert doc[0].head.i == 1
    assert doc[1].head.i == 1
    assert doc[2].head.i == 3
    assert doc[3].head.i == 3


def test_parser_parse_subtrees(en_vocab, en_parser):
    words = ["The", "four", "wheels", "on", "the", "bus", "turned", "quickly"]
    heads = [2, 2, 6, 2, 5, 3, 6, 6]
    deps = ["dep"] * len(heads)
    doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
    assert len(list(doc[2].lefts)) == 2
    assert len(list(doc[2].rights)) == 1
    assert len(list(doc[2].children)) == 3
    assert len(list(doc[5].lefts)) == 1
    assert len(list(doc[5].rights)) == 0
    assert len(list(doc[5].children)) == 1
    assert len(list(doc[2].subtree)) == 6


def test_parser_merge_pp(en_vocab):
    words = ["A", "phrase", "with", "another", "phrase", "occurs"]
    heads = [1, 5, 1, 4, 2, 5]
    deps = ["det", "nsubj", "prep", "det", "pobj", "ROOT"]
    pos = ["DET", "NOUN", "ADP", "DET", "NOUN", "VERB"]
    doc = Doc(en_vocab, words=words, deps=deps, heads=heads, pos=pos)
    with doc.retokenize() as retokenizer:
        for np in doc.noun_chunks:
            retokenizer.merge(np, attrs={"lemma": np.lemma_})
    assert doc[0].text == "A phrase"
    assert doc[1].text == "with"
    assert doc[2].text == "another phrase"
    assert doc[3].text == "occurs"


@pytest.mark.skip(
    reason="The step_through API was removed (but should be brought back)"
)
def test_parser_arc_eager_finalize_state(en_vocab, en_parser):
    words = ["a", "b", "c", "d", "e"]
    # right branching
    transition = ["R-nsubj", "D", "R-nsubj", "R-nsubj", "D", "R-ROOT"]
    tokens = Doc(en_vocab, words=words)
    apply_transition_sequence(en_parser, tokens, transition)

    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 2
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 4
    assert tokens[0].head.i == 0

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 0

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 2
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 4
    assert tokens[2].head.i == 0

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 2

    assert tokens[4].n_lefts == 0
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 4
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 2

    # left branching
    transition = ["S", "S", "S", "L-nsubj", "L-nsubj", "L-nsubj", "L-nsubj"]
    tokens = Doc(en_vocab, words=words)
    apply_transition_sequence(en_parser, tokens, transition)

    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 0
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 0
    assert tokens[0].head.i == 4

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 4

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 0
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 2
    assert tokens[2].head.i == 4

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 4

    assert tokens[4].n_lefts == 4
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 0
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 4


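# Check that sentence boundaries follow from the dependency annotation and
# that no token's head crosses a sentence boundary.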
def test_parser_set_sent_starts(en_vocab):
    # fmt: off
    words = ['Ein', 'Satz', '.', 'Außerdem', 'ist', 'Zimmer', 'davon', 'überzeugt', ',', 'dass', 'auch', 'epige-', '\n', 'netische', 'Mechanismen', 'eine', 'Rolle', 'spielen', ',', 'also', 'Vorgänge', ',', 'die', '\n', 'sich', 'darauf', 'auswirken', ',', 'welche', 'Gene', 'abgelesen', 'werden', 'und', '\n', 'welche', 'nicht', '.', '\n']
    heads = [1, 1, 1, 30, 4, 4, 7, 4, 7, 17, 14, 14, 11, 14, 17, 16, 17, 6, 17, 20, 11, 20, 26, 22, 26, 26, 20, 26, 29, 31, 31, 25, 31, 32, 17, 4, 4, 36]
    deps = ['nk', 'ROOT', 'punct', 'mo', 'ROOT', 'sb', 'op', 'pd', 'punct', 'cp', 'mo', 'nk', '', 'nk', 'sb', 'nk', 'oa', 're', 'punct', 'mo', 'app', 'punct', 'sb', '', 'oa', 'op', 'rc', 'punct', 'nk', 'sb', 'oc', 're', 'cd', '', 'oa', 'ng', 'punct', '']
    # fmt: on
    doc = Doc(en_vocab, words=words, deps=deps, heads=heads)
    for i in range(len(words)):
        if i == 0 or i == 3:
            assert doc[i].is_sent_start is True
        else:
            assert doc[i].is_sent_start is False
    for sent in doc.sents:
        for token in sent:
            assert token.head in sent


def test_parser_constructor(en_vocab):
    config = {
        "learn_tokens": False,
        "min_action_freq": 30,
        "update_with_oracle_cut_size": 100,
    }
    cfg = {"model": DEFAULT_PARSER_MODEL}
    model = registry.resolve(cfg, validate=True)["model"]
    DependencyParser(en_vocab, model, **config)
    DependencyParser(en_vocab, model)


@pytest.mark.parametrize("pipe_name", PARSERS)
def test_incomplete_data(pipe_name):
    # Test that the parser works with incomplete information
    nlp = English()
    parser = nlp.add_pipe(pipe_name)
    train_examples = []
    for text, annotations in PARTIAL_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            if dep is not None:
                parser.add_label(dep)
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    for i in range(150):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses[pipe_name] < 0.0001

    # test the trained model
    test_text = "I like securities."
    doc = nlp(test_text)
    assert doc[0].dep_ == "nsubj"
    assert doc[2].dep_ == "dobj"
    assert doc[0].head.i == 1
    assert doc[2].head.i == 1


@pytest.mark.parametrize(
    "pipe_name,max_moves", itertools.product(PARSERS, [0, 1, 5, 100])
)
def test_overfitting_IO(pipe_name, max_moves):
    fix_random_seed(0)
    # Simple test to try and quickly overfit the dependency parser (normal or beam)
    nlp = English()
    parser = nlp.add_pipe(pipe_name)
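    # max_moves sets update_with_oracle_cut_size: during training, long
    # transition sequences are cut into segments of at most this many moves
    # (0 effectively disables the cutting).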
    parser.cfg["update_with_oracle_cut_size"] = max_moves
    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            parser.add_label(dep)
    optimizer = nlp.initialize()
    # run overfitting
    for i in range(200):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses[pipe_name] < 0.0001
    # test the trained model
    test_text = "I like securities."
    doc = nlp(test_text)
    assert doc[0].dep_ == "nsubj"
    assert doc[2].dep_ == "dobj"
    assert doc[3].dep_ == "punct"
    assert doc[0].head.i == 1
    assert doc[2].head.i == 1
    assert doc[3].head.i == 1
    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        doc2 = nlp2(test_text)
        assert doc2[0].dep_ == "nsubj"
        assert doc2[2].dep_ == "dobj"
        assert doc2[3].dep_ == "punct"
        assert doc2[0].head.i == 1
        assert doc2[2].head.i == 1
        assert doc2[3].head.i == 1

    # Make sure that running pipe twice, or comparing to call, always amounts to the same predictions
    texts = [
        "Just a sentence.",
        "Then one more sentence about London.",
        "Here is another one.",
        "I like London.",
    ]
    batch_deps_1 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
    batch_deps_2 = [doc.to_array([DEP]) for doc in nlp.pipe(texts)]
    no_batch_deps = [doc.to_array([DEP]) for doc in [nlp(text) for text in texts]]
    assert_equal(batch_deps_1, batch_deps_2)
    assert_equal(batch_deps_1, no_batch_deps)


def test_is_distillable():
    nlp = English()
    parser = nlp.add_pipe("parser")
    assert parser.is_distillable


@pytest.mark.slow
@pytest.mark.parametrize("max_moves", [0, 1, 5, 100])
def test_distill(max_moves):
    teacher = English()
    teacher_parser = teacher.add_pipe("parser")
    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(teacher.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            teacher_parser.add_label(dep)

    optimizer = teacher.initialize(get_examples=lambda: train_examples)

    for i in range(200):
        losses = {}
        teacher.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["parser"] < 0.0001

    student = English()
    student_parser = student.add_pipe("parser")
    student_parser.cfg["update_with_oracle_cut_size"] = max_moves
    student_parser.initialize(
        get_examples=lambda: train_examples, labels=teacher_parser.label_data
    )

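    # Distillation runs on unannotated docs: the student is updated to match
    # the teacher's predictions rather than gold-standard parses.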
    distill_examples = [
        Example.from_dict(teacher.make_doc(t[0]), {}) for t in TRAIN_DATA
    ]

    for i in range(200):
        losses = {}
        student_parser.distill(
            teacher_parser, distill_examples, sgd=optimizer, losses=losses
        )
    assert losses["parser"] < 0.0001

    test_text = "I like securities."
    doc = student(test_text)
    assert doc[0].dep_ == "nsubj"
    assert doc[2].dep_ == "dobj"
    assert doc[3].dep_ == "punct"
    assert doc[0].head.i == 1
    assert doc[2].head.i == 1
    assert doc[3].head.i == 1


# fmt: off
@pytest.mark.slow
@pytest.mark.parametrize("pipe_name", ["parser", "beam_parser"])
@pytest.mark.parametrize(
    "parser_config",
    [
        # TODO: re-enable after we have a spacy-legacy release for v4. See
        # https://github.com/explosion/spacy-legacy/pull/36
        # ({"@architectures": "spacy.TransitionBasedParser.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}),
        ({"@architectures": "spacy.TransitionBasedParser.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": True}),
        ({"@architectures": "spacy.TransitionBasedParser.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2, "use_upper": False}),
        ({"@architectures": "spacy.TransitionBasedParser.v3", "tok2vec": DEFAULT_TOK2VEC_MODEL, "state_type": "parser", "extra_state_tokens": False, "hidden_width": 64, "maxout_pieces": 2}),
    ],
)
# fmt: on
def test_parser_configs(pipe_name, parser_config):
    pipe_config = {"model": parser_config}
    nlp = English()
    parser = nlp.add_pipe(pipe_name, config=pipe_config)
    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            parser.add_label(dep)
    optimizer = nlp.initialize()
    for i in range(5):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)


def test_beam_parser_scores():
    # Test that we can get confidence values out of the beam_parser pipe
    beam_width = 16
    beam_density = 0.0001
    nlp = English()
    config = {
        "beam_width": beam_width,
        "beam_density": beam_density,
    }
    parser = nlp.add_pipe("beam_parser", config=config)
    train_examples = []
    for text, annotations in CONFLICTING_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            parser.add_label(dep)
    optimizer = nlp.initialize()

    # update a bit with conflicting data
    for i in range(10):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    # test the scores from the beam
    test_text = "I like securities."
    doc = nlp.make_doc(test_text)
    docs = [doc]
    beams = parser.predict(docs)
    head_scores, label_scores = parser.scored_parses(beams)

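    # Each score is an estimated probability derived from the beam, so it
    # should lie within [0, 1] up to the eps tolerance.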
    for j in range(len(doc)):
        for label in parser.labels:
            label_score = label_scores[0][(j, label)]
            assert 0 - eps <= label_score <= 1 + eps
        for i in range(len(doc)):
            head_score = head_scores[0][(j, i)]
            assert 0 - eps <= head_score <= 1 + eps


def test_beam_overfitting_IO():
    # Simple test to try and quickly overfit the Beam dependency parser
    nlp = English()
    beam_width = 16
    beam_density = 0.0001
    config = {
        "beam_width": beam_width,
        "beam_density": beam_density,
    }
    parser = nlp.add_pipe("beam_parser", config=config)
    train_examples = []
    for text, annotations in TRAIN_DATA:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        for dep in annotations.get("deps", []):
            parser.add_label(dep)
    optimizer = nlp.initialize()
    # run overfitting
    for i in range(150):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["beam_parser"] < 0.0001
    # test the scores from the beam
    test_text = "I like securities."
    docs = [nlp.make_doc(test_text)]
    beams = parser.predict(docs)
    head_scores, label_scores = parser.scored_parses(beams)
    # we only processed one document
    head_scores = head_scores[0]
    label_scores = label_scores[0]
    # test label annotations: 0=nsubj, 2=dobj, 3=punct
    assert label_scores[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
    assert label_scores[(0, "dobj")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(0, "punct")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(2, "dobj")] == pytest.approx(1.0, abs=eps)
    assert label_scores[(2, "punct")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(3, "dobj")] == pytest.approx(0.0, abs=eps)
    assert label_scores[(3, "punct")] == pytest.approx(1.0, abs=eps)
    # test head annotations: the root is token at index 1
    assert head_scores[(0, 0)] == pytest.approx(0.0, abs=eps)
    assert head_scores[(0, 1)] == pytest.approx(1.0, abs=eps)
    assert head_scores[(0, 2)] == pytest.approx(0.0, abs=eps)
    assert head_scores[(2, 0)] == pytest.approx(0.0, abs=eps)
    assert head_scores[(2, 1)] == pytest.approx(1.0, abs=eps)
    assert head_scores[(2, 2)] == pytest.approx(0.0, abs=eps)
    assert head_scores[(3, 0)] == pytest.approx(0.0, abs=eps)
    assert head_scores[(3, 1)] == pytest.approx(1.0, abs=eps)
    assert head_scores[(3, 2)] == pytest.approx(0.0, abs=eps)

    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        nlp2 = util.load_model_from_path(tmp_dir)
        docs2 = [nlp2.make_doc(test_text)]
        parser2 = nlp2.get_pipe("beam_parser")
        beams2 = parser2.predict(docs2)
        head_scores2, label_scores2 = parser2.scored_parses(beams2)
        # we only processed one document
        head_scores2 = head_scores2[0]
        label_scores2 = label_scores2[0]
        # check the results again
        assert label_scores2[(0, "nsubj")] == pytest.approx(1.0, abs=eps)
        assert label_scores2[(0, "dobj")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(0, "punct")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(2, "nsubj")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(2, "dobj")] == pytest.approx(1.0, abs=eps)
        assert label_scores2[(2, "punct")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(3, "nsubj")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(3, "dobj")] == pytest.approx(0.0, abs=eps)
        assert label_scores2[(3, "punct")] == pytest.approx(1.0, abs=eps)
        assert head_scores2[(0, 0)] == pytest.approx(0.0, abs=eps)
        assert head_scores2[(0, 1)] == pytest.approx(1.0, abs=eps)
        assert head_scores2[(0, 2)] == pytest.approx(0.0, abs=eps)
        assert head_scores2[(2, 0)] == pytest.approx(0.0, abs=eps)
        assert head_scores2[(2, 1)] == pytest.approx(1.0, abs=eps)
        assert head_scores2[(2, 2)] == pytest.approx(0.0, abs=eps)
        assert head_scores2[(3, 0)] == pytest.approx(0.0, abs=eps)
        assert head_scores2[(3, 1)] == pytest.approx(1.0, abs=eps)
        assert head_scores2[(3, 2)] == pytest.approx(0.0, abs=eps)