Mirror of https://github.com/explosion/spaCy.git, synced 2025-02-14 10:30:34 +03:00
Update test
This commit is contained in:
parent 192b94f0a1
commit 2b180ea033
@@ -22,7 +22,6 @@ TRAIN_DATA = [
 ]
 
 
-@pytest.mark.skip # Segfault
 def test_parser_root(en_tokenizer):
     text = "i don't have other assistance"
     heads = [3, 2, 1, 0, 1, -2]
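The heads lists in these tests are relative offsets: each entry is the head's position minus the token's own position, with 0 marking the root. A minimal sketch of the conversion to absolute token indices; the relative_to_absolute helper is hypothetical, not part of the spaCy test suite:

    def relative_to_absolute(heads):
        # heads[i] is the signed offset from token i to its head; 0 means root
        return [i + offset for i, offset in enumerate(heads)]

    # "i do n't have other assistance" with heads [3, 2, 1, 0, 1, -2]:
    # every token attaches to "have" (index 3, the root) except "other",
    # which attaches to "assistance" (index 5)
    assert relative_to_absolute([3, 2, 1, 0, 1, -2]) == [3, 3, 3, 3, 5, 3]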
@@ -33,9 +32,8 @@ def test_parser_root(en_tokenizer):
         assert t.dep != 0, t.text
 
 
-#@pytest.mark.xfail
+@pytest.mark.xfail
 #@pytest.mark.parametrize("text", ["Hello"])
-@pytest.mark.skip # Segfault
 def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
     tokens = en_tokenizer(text)
     doc = get_doc(
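This hunk trades the hard skip for an expected failure. The difference matters for visibility: a skipped test never runs, while an xfail test still executes and is reported as XPASS the moment it starts passing, so both regressions and fixes surface. Note that xfail only helps once the segfault itself is gone, since a crash takes down the whole test process. A standalone illustration of the two markers:

    import pytest

    @pytest.mark.skip(reason="never executed at all")
    def test_skipped():
        assert False  # unreachable

    @pytest.mark.xfail(reason="executed; failure is tolerated, a pass is flagged")
    def test_expected_failure():
        assert False  # reported as XFAIL rather than as a failure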
@@ -48,7 +46,6 @@ def test_parser_parse_one_word_sentence(en_tokenizer, en_parser, text):
     assert doc[0].dep != 0
 
 
-@pytest.mark.skip # Segfault
 def test_parser_initial(en_tokenizer, en_parser):
     text = "I ate the pizza with anchovies."
     # heads = [1, 0, 1, -2, -3, -1, -5]
@@ -61,7 +58,6 @@ def test_parser_initial(en_tokenizer, en_parser):
     assert tokens[3].head.i == 3
 
 
-@pytest.mark.skip # Segfault
 def test_parser_parse_subtrees(en_tokenizer, en_parser):
     text = "The four wheels on the bus turned quickly"
     heads = [2, 1, 4, -1, 1, -2, 0, -1]
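test_parser_parse_subtrees checks Token.subtree, which yields a token together with all of its syntactic descendants. A self-contained sketch of the same assertion using the spaCy v3 Doc constructor, which takes absolute head indices rather than the relative offsets in the test; the uniform "dep" labels are placeholders:

    from spacy.tokens import Doc
    from spacy.vocab import Vocab

    words = ["The", "four", "wheels", "on", "the", "bus", "turned", "quickly"]
    # the relative offsets [2, 1, 4, -1, 1, -2, 0, -1] as absolute indices
    heads = [2, 2, 6, 2, 5, 3, 6, 6]
    doc = Doc(Vocab(), words=words, heads=heads, deps=["dep"] * len(words))

    # "wheels" heads the phrase "The four wheels on the bus": 6 tokens
    assert len(list(doc[2].subtree)) == 6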
@@ -76,7 +72,6 @@ def test_parser_parse_subtrees(en_tokenizer, en_parser):
     assert len(list(doc[2].subtree)) == 6
 
 
-@pytest.mark.skip # Segfault
 def test_parser_merge_pp(en_tokenizer):
     text = "A phrase with another phrase occurs"
     heads = [1, 4, -1, 1, -2, 0]
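The merge test depends on retokenization: once "A phrase" and "another phrase" are each collapsed into a single token, "occurs" moves from index 5 to index 3. In the public API this is done with Doc.retokenize; a minimal sketch with the two spans hard-coded rather than derived from a parse:

    import spacy

    nlp = spacy.blank("en")
    doc = nlp("A phrase with another phrase occurs")

    with doc.retokenize() as retokenizer:
        retokenizer.merge(doc[0:2])  # "A phrase"
        retokenizer.merge(doc[3:5])  # "another phrase"

    assert [t.text for t in doc] == ["A phrase", "with", "another phrase", "occurs"]
    assert doc[3].text == "occurs"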
@@ -95,7 +90,6 @@ def test_parser_merge_pp(en_tokenizer):
     assert doc[3].text == "occurs"
 
 
-@pytest.mark.skip # Segfault
 def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
     text = "a b c d e"
 
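The finalize-state test ends with assertions like tokens[4].head.i == 4: when the arc-eager transition system finalizes a parse, tokens left without an incoming arc become their own head, which spaCy treats as a root. The same convention shows up on any doc that carries no parse at all; a quick check, assuming a blank pipeline:

    import spacy

    nlp = spacy.blank("en")  # no parser in the pipeline
    doc = nlp("a b c d e")

    # with no arcs assigned, every token is its own head
    assert all(token.head.i == token.i for token in doc)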
@@ -170,7 +164,6 @@ def test_parser_arc_eager_finalize_state(en_tokenizer, en_parser):
     assert tokens[4].head.i == 4
 
 
-@pytest.mark.skip # Segfault
 def test_parser_set_sent_starts(en_vocab):
     # fmt: off
     words = ['Ein', 'Satz', '.', 'Außerdem', 'ist', 'Zimmer', 'davon', 'überzeugt', ',', 'dass', 'auch', 'epige-', '\n', 'netische', 'Mechanismen', 'eine', 'Rolle', 'spielen', ',', 'also', 'Vorgänge', ',', 'die', '\n', 'sich', 'darauf', 'auswirken', ',', 'welche', 'Gene', 'abgelesen', 'werden', 'und', '\n', 'welche', 'nicht', '.', '\n']
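test_parser_set_sent_starts pre-sets sentence boundaries on a multi-sentence German doc and then checks that the parser never attaches a token outside its own sentence (token.head in sent). Boundaries can be pre-set through Token.is_sent_start; a minimal sketch with hand-picked boundaries on a shortened version of the test text:

    import spacy

    nlp = spacy.blank("de")
    doc = nlp("Ein Satz . Außerdem ist Zimmer davon überzeugt .")

    # sentence starts: the first token, and any token following a period
    for i, token in enumerate(doc):
        token.is_sent_start = i == 0 or doc[i - 1].text == "."

    sents = list(doc.sents)
    assert len(sents) == 2
    assert sents[1].text.startswith("Außerdem")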
@@ -187,7 +180,6 @@ def test_parser_set_sent_starts(en_vocab):
         for token in sent:
             assert token.head in sent
 
-@pytest.mark.skip
 def test_overfitting_IO():
     # Simple test to try and quickly overfit the dependency parser - ensuring the ML models work correctly
     nlp = English()
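test_overfitting_IO follows spaCy's standard component-test pattern: train the parser on a handful of examples until it memorizes them, then serialize and reload to confirm the predictions survive the round trip. A condensed sketch against the released v3 training API; the single training example, iteration count, and final assertion are placeholders, not the file's actual TRAIN_DATA:

    import random
    import tempfile

    import spacy
    from spacy.lang.en import English
    from spacy.training import Example

    TRAIN_DATA = [
        (
            "They trade mortgage-backed securities.",
            {
                "heads": [1, 1, 4, 4, 5, 1, 1],
                "deps": ["nsubj", "ROOT", "compound", "punct", "nmod", "dobj", "punct"],
            },
        ),
    ]

    nlp = English()
    nlp.add_pipe("parser")
    train_examples = [
        Example.from_dict(nlp.make_doc(text), annots) for text, annots in TRAIN_DATA
    ]
    optimizer = nlp.initialize(get_examples=lambda: train_examples)

    for _ in range(100):  # enough iterations to overfit a single example
        random.shuffle(train_examples)
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)

    # round trip: a reloaded pipeline should reproduce the memorized parse
    with tempfile.TemporaryDirectory() as tmp_dir:
        nlp.to_disk(tmp_dir)
        reloaded = spacy.load(tmp_dir)
        doc = reloaded("They trade mortgage-backed securities.")
        assert doc[0].dep_ == "nsubj"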