Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-26 09:56:28 +03:00).
Commit d0e37b5670 — "Modernise parser tests and don't depend on models" (parent: 342cb41782).
Diff hunk: @@ -1,102 +1,101 @@
|
|||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..util import get_doc, apply_transition_sequence
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.models
|
||||
def test_root(EN):
|
||||
tokens = EN(u"i don't have other assistance")
|
||||
for t in tokens:
|
||||
assert t.dep != 0, t.orth_
|
||||
def test_parser_root(en_tokenizer):
|
||||
text = "i don't have other assistance"
|
||||
heads = [3, 2, 1, 0, 1, -2]
|
||||
deps = ['nsubj', 'aux', 'neg', 'ROOT', 'amod', 'dobj']
|
||||
tokens = en_tokenizer(text)
|
||||
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
|
||||
for t in doc:
|
||||
assert t.dep != 0, t.text
|
||||
|
||||
|
||||
@pytest.mark.models
|
||||
def test_one_word_sentence(EN):
|
||||
# one word sentence
|
||||
doc = EN.tokenizer.tokens_from_list(['Hello'])
|
||||
EN.tagger(doc)
|
||||
@pytest.mark.parametrize('text', ["Hello"])
|
||||
def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
|
||||
tokens = en_tokenizer(text)
|
||||
doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=[0], deps=['ROOT'])
|
||||
|
||||
assert len(doc) == 1
|
||||
with EN.parser.step_through(doc) as _:
|
||||
with en_parser.step_through(doc) as _:
|
||||
pass
|
||||
assert doc[0].dep != 0
|
||||
|
||||
|
||||
def apply_transition_sequence(model, doc, sequence):
|
||||
for action_name in sequence:
|
||||
if '-' in action_name:
|
||||
move, label = action_name.split('-')
|
||||
model.parser.add_label(label)
|
||||
with model.parser.step_through(doc) as stepwise:
|
||||
for transition in sequence:
|
||||
stepwise.transition(transition)
|
||||
def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
|
||||
text = "a b c d e"
|
||||
|
||||
|
||||
@pytest.mark.models
|
||||
def test_arc_eager_finalize_state(EN):
|
||||
# right branching
|
||||
example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
|
||||
apply_transition_sequence(EN, example, ['R-nsubj','D','R-nsubj','R-nsubj','D','R-ROOT'])
|
||||
transition = ['R-nsubj', 'D', 'R-nsubj', 'R-nsubj', 'D', 'R-ROOT']
|
||||
tokens = en_tokenizer(text)
|
||||
apply_transition_sequence(en_parser, tokens, transition)
|
||||
|
||||
assert example[0].n_lefts == 0
|
||||
assert example[0].n_rights == 2
|
||||
assert example[0].left_edge.i == 0
|
||||
assert example[0].right_edge.i == 4
|
||||
assert example[0].head.i == 0
|
||||
assert tokens[0].n_lefts == 0
|
||||
assert tokens[0].n_rights == 2
|
||||
assert tokens[0].left_edge.i == 0
|
||||
assert tokens[0].right_edge.i == 4
|
||||
assert tokens[0].head.i == 0
|
||||
|
||||
assert example[1].n_lefts == 0
|
||||
assert example[1].n_rights == 0
|
||||
assert example[1].left_edge.i == 1
|
||||
assert example[1].right_edge.i == 1
|
||||
assert example[1].head.i == 0
|
||||
assert tokens[1].n_lefts == 0
|
||||
assert tokens[1].n_rights == 0
|
||||
assert tokens[1].left_edge.i == 1
|
||||
assert tokens[1].right_edge.i == 1
|
||||
assert tokens[1].head.i == 0
|
||||
|
||||
assert example[2].n_lefts == 0
|
||||
assert example[2].n_rights == 2
|
||||
assert example[2].left_edge.i == 2
|
||||
assert example[2].right_edge.i == 4
|
||||
assert example[2].head.i == 0
|
||||
assert tokens[2].n_lefts == 0
|
||||
assert tokens[2].n_rights == 2
|
||||
assert tokens[2].left_edge.i == 2
|
||||
assert tokens[2].right_edge.i == 4
|
||||
assert tokens[2].head.i == 0
|
||||
|
||||
assert example[3].n_lefts == 0
|
||||
assert example[3].n_rights == 0
|
||||
assert example[3].left_edge.i == 3
|
||||
assert example[3].right_edge.i == 3
|
||||
assert example[3].head.i == 2
|
||||
assert tokens[3].n_lefts == 0
|
||||
assert tokens[3].n_rights == 0
|
||||
assert tokens[3].left_edge.i == 3
|
||||
assert tokens[3].right_edge.i == 3
|
||||
assert tokens[3].head.i == 2
|
||||
|
||||
assert example[4].n_lefts == 0
|
||||
assert example[4].n_rights == 0
|
||||
assert example[4].left_edge.i == 4
|
||||
assert example[4].right_edge.i == 4
|
||||
assert example[4].head.i == 2
|
||||
assert tokens[4].n_lefts == 0
|
||||
assert tokens[4].n_rights == 0
|
||||
assert tokens[4].left_edge.i == 4
|
||||
assert tokens[4].right_edge.i == 4
|
||||
assert tokens[4].head.i == 2
|
||||
|
||||
# left branching
|
||||
example = EN.tokenizer.tokens_from_list(u"a b c d e".split(' '))
|
||||
apply_transition_sequence(EN, example, ['S', 'S', 'S', 'L-nsubj','L-nsubj','L-nsubj', 'L-nsubj'])
|
||||
transition = ['S', 'S', 'S', 'L-nsubj','L-nsubj','L-nsubj', 'L-nsubj']
|
||||
tokens = en_tokenizer(text)
|
||||
apply_transition_sequence(en_parser, tokens, transition)
|
||||
|
||||
assert example[0].n_lefts == 0
|
||||
assert example[0].n_rights == 0
|
||||
assert example[0].left_edge.i == 0
|
||||
assert example[0].right_edge.i == 0
|
||||
assert example[0].head.i == 4
|
||||
assert tokens[0].n_lefts == 0
|
||||
assert tokens[0].n_rights == 0
|
||||
assert tokens[0].left_edge.i == 0
|
||||
assert tokens[0].right_edge.i == 0
|
||||
assert tokens[0].head.i == 4
|
||||
|
||||
assert example[1].n_lefts == 0
|
||||
assert example[1].n_rights == 0
|
||||
assert example[1].left_edge.i == 1
|
||||
assert example[1].right_edge.i == 1
|
||||
assert example[1].head.i == 4
|
||||
assert tokens[1].n_lefts == 0
|
||||
assert tokens[1].n_rights == 0
|
||||
assert tokens[1].left_edge.i == 1
|
||||
assert tokens[1].right_edge.i == 1
|
||||
assert tokens[1].head.i == 4
|
||||
|
||||
assert example[2].n_lefts == 0
|
||||
assert example[2].n_rights == 0
|
||||
assert example[2].left_edge.i == 2
|
||||
assert example[2].right_edge.i == 2
|
||||
assert example[2].head.i == 4
|
||||
assert tokens[2].n_lefts == 0
|
||||
assert tokens[2].n_rights == 0
|
||||
assert tokens[2].left_edge.i == 2
|
||||
assert tokens[2].right_edge.i == 2
|
||||
assert tokens[2].head.i == 4
|
||||
|
||||
assert example[3].n_lefts == 0
|
||||
assert example[3].n_rights == 0
|
||||
assert example[3].left_edge.i == 3
|
||||
assert example[3].right_edge.i == 3
|
||||
assert example[3].head.i == 4
|
||||
assert tokens[3].n_lefts == 0
|
||||
assert tokens[3].n_rights == 0
|
||||
assert tokens[3].left_edge.i == 3
|
||||
assert tokens[3].right_edge.i == 3
|
||||
assert tokens[3].head.i == 4
|
||||
|
||||
assert example[4].n_lefts == 4
|
||||
assert example[4].n_rights == 0
|
||||
assert example[4].left_edge.i == 0
|
||||
assert example[4].right_edge.i == 4
|
||||
assert example[4].head.i == 4
|
||||
assert tokens[4].n_lefts == 4
|
||||
assert tokens[4].n_rights == 0
|
||||
assert tokens[4].left_edge.i == 0
|
||||
assert tokens[4].right_edge.i == 4
|
||||
assert tokens[4].head.i == 4
|
||||
|
|
Loading…
Reference in New Issue
Block a user