mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-27 10:26:35 +03:00
Modernise parser tests and don't depend on models
This commit is contained in:
parent
342cb41782
commit
d0e37b5670
|
@ -1,102 +1,101 @@
|
||||||
|
# coding: utf-8
from __future__ import unicode_literals

import pytest

from ..util import get_doc, apply_transition_sequence
def test_parser_root(en_tokenizer):
    """Every token of a parsed doc must carry a dependency label.

    Reconstructed new-side version from the diff: instead of running the
    real English parser (old ``@pytest.mark.models`` test), the tree is
    built directly with ``get_doc`` from hand-written heads/deps, so the
    test no longer depends on trained models.
    """
    text = "i don't have other assistance"
    # Heads are relative offsets from each token to its head; 0 marks ROOT.
    heads = [3, 2, 1, 0, 1, -2]
    deps = ['nsubj', 'aux', 'neg', 'ROOT', 'amod', 'dobj']
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
    for t in doc:
        # dep == 0 would mean "no label assigned" — every token must be labelled.
        assert t.dep != 0, t.text
||||||
@pytest.mark.parametrize('text', ["Hello"])
def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
    """A one-word sentence must still receive a dependency label.

    Reconstructed new-side version from the diff: builds the single-token
    doc with ``get_doc`` (head 0, dep ROOT) and steps the parser through it
    with ``step_through`` instead of running the full tagger/parser
    pipeline from a trained model.
    """
    tokens = en_tokenizer(text)
    doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=[0], deps=['ROOT'])
    assert len(doc) == 1
    # Stepping through with no transitions should finalize the state and
    # leave the single token labelled (dep != 0).
    with en_parser.step_through(doc) as _:
        pass
    assert doc[0].dep != 0
||||||
def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
    """Finalizing the arc-eager state must yield consistent trees.

    Reconstructed new-side version from the diff: the old inline
    ``apply_transition_sequence`` helper was moved to ``..util`` and the
    test now uses the ``en_tokenizer``/``en_parser`` fixtures instead of a
    full model. Two shapes are checked on the same five-token text:
    a right-branching tree and a left-branching tree, verifying
    ``n_lefts``/``n_rights``, ``left_edge``/``right_edge`` and ``head``
    for every token.
    """
    text = "a b c d e"

    # right branching
    transition = ['R-nsubj', 'D', 'R-nsubj', 'R-nsubj', 'D', 'R-ROOT']
    tokens = en_tokenizer(text)
    apply_transition_sequence(en_parser, tokens, transition)

    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 2
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 4
    assert tokens[0].head.i == 0

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 0

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 2
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 4
    assert tokens[2].head.i == 0

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 2

    assert tokens[4].n_lefts == 0
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 4
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 2

    # left branching
    transition = ['S', 'S', 'S', 'L-nsubj', 'L-nsubj', 'L-nsubj', 'L-nsubj']
    tokens = en_tokenizer(text)
    apply_transition_sequence(en_parser, tokens, transition)

    assert tokens[0].n_lefts == 0
    assert tokens[0].n_rights == 0
    assert tokens[0].left_edge.i == 0
    assert tokens[0].right_edge.i == 0
    assert tokens[0].head.i == 4

    assert tokens[1].n_lefts == 0
    assert tokens[1].n_rights == 0
    assert tokens[1].left_edge.i == 1
    assert tokens[1].right_edge.i == 1
    assert tokens[1].head.i == 4

    assert tokens[2].n_lefts == 0
    assert tokens[2].n_rights == 0
    assert tokens[2].left_edge.i == 2
    assert tokens[2].right_edge.i == 2
    assert tokens[2].head.i == 4

    assert tokens[3].n_lefts == 0
    assert tokens[3].n_rights == 0
    assert tokens[3].left_edge.i == 3
    assert tokens[3].right_edge.i == 3
    assert tokens[3].head.i == 4

    # Final token collects all four left children and heads itself (ROOT).
    assert tokens[4].n_lefts == 4
    assert tokens[4].n_rights == 0
    assert tokens[4].left_edge.i == 0
    assert tokens[4].right_edge.i == 4
    assert tokens[4].head.i == 4
Loading…
Reference in New Issue
Block a user