From 2b180ea03343dbc328cb1d81a62b2a719dd512b9 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sun, 21 Jun 2020 01:15:41 +0200
Subject: [PATCH] Update test

---
 spacy/tests/parser/test_parse.py | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/spacy/tests/parser/test_parse.py b/spacy/tests/parser/test_parse.py
index 80d91e7ae..0d9e257b9 100644
--- a/spacy/tests/parser/test_parse.py
+++ b/spacy/tests/parser/test_parse.py
@@ -22,7 +22,6 @@ TRAIN_DATA = [
 ]
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_root(en_tokenizer):
     text = "i don't have other assistance"
     heads = [3, 2, 1, 0, 1, -2]
@@ -33,9 +32,8 @@ def test_parser_root(en_tokenizer):
         assert t.dep != 0, t.text
 
 
-#@pytest.mark.xfail
+@pytest.mark.xfail
 #@pytest.mark.parametrize("text", ["Hello"])
-@pytest.mark.skip  # Segfault
 def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
     tokens = en_tokenizer(text)
     doc = get_doc(
@@ -48,7 +46,6 @@ def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
     assert doc[0].dep != 0
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_initial(en_tokenizer, en_parser):
     text = "I ate the pizza with anchovies."
     # heads = [1, 0, 1, -2, -3, -1, -5]
@@ -61,7 +58,6 @@ def test_parser_initial(en_tokenizer, en_parser):
     assert tokens[3].head.i == 3
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_parse_subtrees(en_tokenizer, en_parser):
     text = "The four wheels on the bus turned quickly"
     heads = [2, 1, 4, -1, 1, -2, 0, -1]
@@ -76,7 +72,6 @@ def test_parser_parse_subtrees(en_tokenizer, en_parser):
     assert len(list(doc[2].subtree)) == 6
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_merge_pp(en_tokenizer):
     text = "A phrase with another phrase occurs"
     heads = [1, 4, -1, 1, -2, 0]
@@ -95,7 +90,6 @@ def test_parser_merge_pp(en_tokenizer):
     assert doc[3].text == "occurs"
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
     text = "a b c d e"
 
@@ -170,7 +164,6 @@ def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
     assert tokens[4].head.i == 4
 
 
-@pytest.mark.skip  # Segfault
 def test_parser_set_sent_starts(en_vocab):
     # fmt: off
     words = ['Ein', 'Satz', '.', 'Außerdem', 'ist', 'Zimmer', 'davon', 'überzeugt', ',', 'dass', 'auch', 'epige-', '\n', 'netische', 'Mechanismen', 'eine', 'Rolle', 'spielen', ',', 'also', 'Vorgänge', ',', 'die', '\n', 'sich', 'darauf', 'auswirken', ',', 'welche', 'Gene', 'abgelesen', 'werden', 'und', '\n', 'welche', 'nicht', '.', '\n']
@@ -187,7 +180,6 @@ def test_parser_set_sent_starts(en_vocab):
         for token in sent:
             assert token.head in sent
 
-@pytest.mark.skip
 def test_overfitting_IO():
     # Simple test to try and quickly overfit the dependency parser - ensuring the ML models work correctly
     nlp = English()