From 1a3984742cfe31db85255e8a88371bb7cd57e89f Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Wed, 11 Jan 2017 23:53:08 +0100
Subject: [PATCH] Modernise sentence boundary detection tests and don't depend
 on models (where possible)

---
 spacy/tests/parser/test_sbd.py | 157 +++++++++------------------------
 1 file changed, 42 insertions(+), 115 deletions(-)

diff --git a/spacy/tests/parser/test_sbd.py b/spacy/tests/parser/test_sbd.py
index 8ba54060b..5ac6ecc6d 100644
--- a/spacy/tests/parser/test_sbd.py
+++ b/spacy/tests/parser/test_sbd.py
@@ -1,131 +1,58 @@
+# coding: utf-8
 from __future__ import unicode_literals

+from ...tokens import Doc
+from ..util import get_doc, apply_transition_sequence
+
 import pytest

-from spacy.tokens import Doc
-from spacy.syntax.nonproj import PseudoProjectivity


-@pytest.mark.models
-def test_single_period(EN):
-    string = 'A test sentence.'
-    words = EN(string)
-    assert len(words) == 4
-    assert len(list(words.sents)) == 1
-    assert sum(len(sent) for sent in words.sents) == len(words)
+@pytest.mark.parametrize('text', ["A test sentence"])
+@pytest.mark.parametrize('punct', ['.', '!', '?', ''])
+def test_parser_sbd_single_punct(en_tokenizer, text, punct):
+    heads = [2, 1, 0, -1] if punct else [2, 1, 0]
+    tokens = en_tokenizer(text + punct)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads)
+    assert len(doc) == (4 if punct else 3)
+    assert len(list(doc.sents)) == 1
+    assert sum(len(sent) for sent in doc.sents) == len(doc)


-@pytest.mark.models
-def test_single_no_period(EN):
-    string = 'A test sentence'
-    words = EN(string)
-    assert len(words) == 3
-    assert len(list(words.sents)) == 1
-    assert sum(len(sent) for sent in words.sents) == len(words)
+def test_parser_sentence_breaks(en_tokenizer, en_parser):
+    text = "This is a sentence . This is another one ."
+    heads = [1, 0, 1, -2, -3, 1, 0, 1, -2, -3]
+    deps = ['nsubj', 'ROOT', 'det', 'attr', 'punct', 'nsubj', 'ROOT', 'det',
+            'attr', 'punct']
+    transition = ['L-nsubj', 'S', 'L-det', 'R-attr', 'D', 'R-punct', 'B-ROOT',
+                  'L-nsubj', 'S', 'L-attr', 'R-attr', 'D', 'R-punct']
+    tokens = en_tokenizer(text)
+    doc = get_doc(tokens.vocab, [t.text for t in tokens], heads=heads, deps=deps)
+    apply_transition_sequence(en_parser, doc, transition)


-@pytest.mark.models
-def test_single_exclamation(EN):
-    string = 'A test sentence!'
-    words = EN(string)
-    assert len(words) == 4
-    assert len(list(words.sents)) == 1
-    assert sum(len(sent) for sent in words.sents) == len(words)
-
-
-@pytest.mark.models
-def test_single_question(EN):
-    string = 'A test sentence?'
-    words = EN(string, tag=False, parse=True)
-    assert len(words) == 4
-    assert len(list(words.sents)) == 1
-    assert sum(len(sent) for sent in words.sents) == len(words)
-
-
-@pytest.mark.models
-def test_sentence_breaks(EN):
-    doc = EN.tokenizer.tokens_from_list(u'This is a sentence . This is another one .'.split(' '))
-    EN.tagger(doc)
-    with EN.parser.step_through(doc) as stepwise:
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('L-nsubj')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('S')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('L-det')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('R-attr')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('D')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('R-punct')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('B-ROOT')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('L-nsubj')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('S')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('L-attr')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('R-attr')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('D')
-        assert EN.parser.moves.is_valid(stepwise.stcls,'B-ROOT')
-        stepwise.transition('R-punct')

     assert len(list(doc.sents)) == 2
-    for tok in doc:
-        assert tok.dep != 0 or tok.is_space
-    assert [ tok.head.i for tok in doc ] == [1,1,3,1,1,6,6,8,6,6]
+    for token in doc:
+        assert token.dep != 0 or token.is_space
+    assert [token.head.i for token in doc] == [1, 1, 3, 1, 1, 6, 6, 8, 6, 6]


-def apply_transition_sequence(model, doc, sequence):
-    with model.parser.step_through(doc) as stepwise:
-        for transition in sequence:
-            stepwise.transition(transition)
-
+# Currently, there's no way of setting the serializer data for the parser
+# without loading the models, so we can't remove the model dependency here yet.
 @pytest.mark.models
-def test_sbd_serialization_projective(EN):
-    """
-    test that before and after serialization, the sentence boundaries are the same.
-    """
-
-    example = EN.tokenizer.tokens_from_list(u"I bought a couch from IKEA. It was n't very comfortable .".split(' '))
-    EN.tagger(example)
-    apply_transition_sequence(EN, example, ['L-nsubj','S','L-det','R-dobj','D','R-prep','R-pobj','B-ROOT','L-nsubj','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
-
-    example_serialized = Doc(EN.vocab).from_bytes(example.to_bytes())
-
-    assert example.to_bytes() == example_serialized.to_bytes()
-    assert [s.text for s in example.sents] == [s.text for s in example_serialized.sents]
-
-
-
-# TODO:
-# @pytest.mark.models
-# def test_sbd_serialization_nonprojective(DE):
-#     """
-#     test that before and after serialization, the sentence boundaries are the same in a non-projective sentence.
-#     """
-#     example = EN.tokenizer.tokens_from_list(u"Den Mann hat Peter nicht gesehen . Er war zu langsam .".split(' '))
-#     EN.tagger(example)
-#     apply_transition_sequence(EN, example, ['L-nk','L-oa||oc','R-sb','D','S','L-ng','B-ROOT','L-nsubj','R-neg','D','S','L-advmod','R-acomp','D','R-punct'])
-#     print [(t.dep_,t.head.i) for t in example]
-
-#     example_serialized = Doc(EN.vocab).from_bytes(example.to_bytes())
-
-#     assert example.to_bytes() == example_serialized.to_bytes()
-#     assert [s.text for s in example.sents] == [s.text for s in example_serialized.sents]
-
-
-
-
-
-
-
-
-
-
-
+def test_parser_sbd_serialization_projective(EN):
+    """Test that before and after serialization, the sentence boundaries are
+    the same."""
+    text = "I bought a couch from IKEA It wasn't very comfortable."
+    transition = ['L-nsubj', 'S', 'L-det', 'R-dobj', 'D', 'R-prep', 'R-pobj',
+                  'B-ROOT', 'L-nsubj', 'R-neg', 'D', 'S', 'L-advmod',
+                  'R-acomp', 'D', 'R-punct']
+    doc = EN.tokenizer(text)
+    apply_transition_sequence(EN.parser, doc, transition)
+    doc_serialized = Doc(EN.vocab).from_bytes(doc.to_bytes())
+    assert doc.is_parsed
+    assert doc_serialized.is_parsed
+    assert doc.to_bytes() == doc_serialized.to_bytes()
+    assert [s.text for s in doc.sents] == [s.text for s in doc_serialized.sents]
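
Note on the heads arguments passed to get_doc above: the values read as offsets
relative to each token's index rather than absolute positions, with 0 marking a
root token. A minimal illustration of that assumed convention, checked against
the values and the head assertion in test_parser_sentence_breaks:

    # Assumed convention: heads[i] is the head's position relative to token i,
    # so 0 marks a sentence root.
    words = ['This', 'is', 'a', 'sentence', '.',
             'This', 'is', 'another', 'one', '.']
    heads = [1, 0, 1, -2, -3, 1, 0, 1, -2, -3]
    absolute_heads = [i + offset for i, offset in enumerate(heads)]
    # Matches the assertion on token.head.i in test_parser_sentence_breaks:
    assert absolute_heads == [1, 1, 3, 1, 1, 6, 6, 8, 6, 6]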
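The tests also rely on apply_transition_sequence, now imported from the shared
test utils instead of being defined locally. A minimal sketch of that helper,
assuming it simply generalises the local version deleted above to take the
parser directly rather than the full pipeline object:

    def apply_transition_sequence(parser, doc, sequence):
        # Drive the parser's transition system manually: 'S' (shift) and 'D'
        # (reduce) manipulate the stack, 'L-...'/'R-...' add a labelled
        # left/right arc, and 'B-...' inserts a sentence break.
        with parser.step_through(doc) as stepwise:
            for transition in sequence:
                stepwise.transition(transition)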