Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-24 17:06:29 +03:00)
Decorate non-regression tests
parent 199943deb4
commit 91dec2c76e
@@ -119,6 +119,7 @@ def test_en_tokenizer_splits_period_abbr(en_tokenizer):
     assert tokens[4].text == "Mr."


+@pytest.mark.issue(225)
 @pytest.mark.xfail(reason="Issue #225 - not yet implemented")
 def test_en_tokenizer_splits_em_dash_infix(en_tokenizer):
     tokens = en_tokenizer(
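The new decorator is a custom pytest marker. As a minimal sketch (an assumption about the project setup, not spaCy's actual conftest), such an "issue" marker could be registered so pytest accepts @pytest.mark.issue(225) without an "unknown marker" warning:

# conftest.py -- hypothetical registration of the custom "issue" marker
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "issue(number): mark a non-regression test with the GitHub issue it covers",
    )

Once registered, running pytest -m issue selects only the decorated non-regression tests.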
@@ -4,6 +4,7 @@ from spacy.lang.punctuation import TOKENIZER_INFIXES
 from spacy.lang.char_classes import ALPHA


+@pytest.mark.issue(768)
 @pytest.mark.parametrize(
     "text,expected_tokens", [("l'avion", ["l'", "avion"]), ("j'ai", ["j'", "ai"])]
 )
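A hedged, self-contained sketch of the behaviour this parametrized test asserts, with spacy.blank("fr") standing in for the test's tokenizer fixture:

import spacy

# The French tokenizer should split elided forms such as "l'" and "j'" off
# the following word.
nlp = spacy.blank("fr")
for text, expected_tokens in [("l'avion", ["l'", "avion"]), ("j'ai", ["j'", "ai"])]:
    assert [t.text for t in nlp(text)] == expected_tokens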
@@ -370,6 +370,7 @@ def test_dependency_matcher_span_user_data(en_tokenizer):
         assert doc_t_i == span_t_i + offset


+@pytest.mark.issue(9263)
 def test_dependency_matcher_order_issue(en_tokenizer):
     # issue from #9263
     doc = en_tokenizer("I like text")
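A self-contained sketch of the DependencyMatcher API these tests exercise; the hand-built Doc, pattern and key name below are illustrative, not the test's actual fixtures:

from spacy.matcher import DependencyMatcher
from spacy.tokens import Doc
from spacy.vocab import Vocab

vocab = Vocab()
# Build a tiny parsed Doc by hand instead of running a parser.
doc = Doc(
    vocab,
    words=["I", "like", "text"],
    heads=[1, 1, 1],
    deps=["nsubj", "ROOT", "dobj"],
)
pattern = [
    {"RIGHT_ID": "verb", "RIGHT_ATTRS": {"DEP": "ROOT"}},
    {"LEFT_ID": "verb", "REL_OP": ">", "RIGHT_ID": "object", "RIGHT_ATTRS": {"DEP": "dobj"}},
]
matcher = DependencyMatcher(vocab)
matcher.add("VERB_OBJECT", [pattern])
matches = matcher(doc)  # list of (match_id, [token indices]) tuples
assert len(matches) == 1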
@@ -415,6 +416,7 @@ def test_dependency_matcher_order_issue(en_tokenizer):
     assert matches == []


+@pytest.mark.issue(9263)
 def test_dependency_matcher_remove(en_tokenizer):
     # issue from #9263
     doc = en_tokenizer("The red book")
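A hedged sketch of the pattern-removal behaviour this second #9263 test guards: after remove(), the key's patterns should produce no further matches. Again, the Doc and pattern are illustrative:

from spacy.matcher import DependencyMatcher
from spacy.tokens import Doc
from spacy.vocab import Vocab

vocab = Vocab()
doc = Doc(
    vocab,
    words=["The", "red", "book"],
    heads=[2, 2, 2],
    deps=["det", "amod", "ROOT"],
)
pattern = [
    {"RIGHT_ID": "noun", "RIGHT_ATTRS": {"DEP": "ROOT"}},
    {"LEFT_ID": "noun", "REL_OP": ">", "RIGHT_ID": "modifier", "RIGHT_ATTRS": {"DEP": "amod"}},
]
matcher = DependencyMatcher(vocab)
matcher.add("MODIFIER", [pattern])
assert len(matcher(doc)) == 1
matcher.remove("MODIFIER")   # removing the key should also drop its patterns
assert matcher(doc) == []    # no matches remain afterwards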
@@ -152,6 +152,7 @@ def test_operator_combos(en_vocab):
     assert not matches, (string, pattern_str)


+@pytest.mark.issue(1450)
 def test_matcher_end_zero_plus(en_vocab):
     """Test matcher works when patterns end with * operator. (issue 1450)"""
     matcher = Matcher(en_vocab)
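A self-contained sketch of the case issue #1450 covers: a Matcher pattern that ends with a zero-or-more operator should still match at the end of a Doc. The key name and tokens are illustrative:

from spacy.matcher import Matcher
from spacy.tokens import Doc
from spacy.vocab import Vocab

vocab = Vocab()
matcher = Matcher(vocab)
# Pattern ends with "*": match "a" followed by zero or more "b" tokens.
matcher.add("END_STAR", [[{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]])
doc = Doc(vocab, words=["a", "b", "b"])
matches = matcher(doc)  # (match_id, start, end) tuples for "a", "a b", "a b b"
assert len(matches) == 3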
@@ -162,6 +162,7 @@ def test_serialize_tagger_strings(en_vocab, de_vocab, taggers):
     assert label in tagger2.vocab.strings


+@pytest.mark.issue(1105)
 def test_serialize_textcat_empty(en_vocab):
     # See issue #1105
     cfg = {"model": DEFAULT_SINGLE_TEXTCAT_MODEL}
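A hedged sketch of the regression issue #1105 covers: serializing a text categorizer that has no labels yet should not crash. spacy.blank("en") and the "textcat" pipe stand in for the test's vocab fixture and registered default model:

import spacy

nlp = spacy.blank("en")
textcat = nlp.add_pipe("textcat")           # no labels added yet
data = textcat.to_bytes(exclude=["vocab"])  # should serialize without raising
assert isinstance(data, bytes)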