mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-26 18:06:29 +03:00
7d50804644
* Migrate regressions 1-1000 * Move serialize test to correct file * Remove tests that won't work in v3 * Migrate regressions 1000-1500 Removed regression test 1250 because v3 doesn't support the old LEX scheme anymore. * Add missing imports in serializer tests * Migrate tests 1500-2000 * Migrate regressions from 2000-2500 * Migrate regressions from 2501-3000 * Migrate regressions from 3000-3501 * Migrate regressions from 3501-4000 * Migrate regressions from 4001-4500 * Migrate regressions from 4501-5000 * Migrate regressions from 5001-5501 * Migrate regressions from 5501 to 7000 * Migrate regressions from 7001 to 8000 * Migrate remaining regression tests * Fixing missing imports * Update docs with new system [ci skip] * Update CONTRIBUTING.md - Fix formatting - Update wording * Remove lemmatizer tests in el lang * Move a few tests into the general tokenizer * Separate Doc and DocBin tests
98 lines
2.6 KiB
Python
98 lines
2.6 KiB
Python
import pytest
|
|
|
|
from spacy.attrs import IS_ALPHA, LEMMA, NORM, ORTH, intify_attrs
|
|
from spacy.lang.en.stop_words import STOP_WORDS
|
|
from spacy.lang.lex_attrs import is_ascii, is_currency, is_punct, is_stop
|
|
from spacy.lang.lex_attrs import like_url, word_shape
|
|
|
|
|
|
@pytest.mark.parametrize("word", ["the"])
@pytest.mark.issue(1889)
def test_issue1889(word):
    """Regression for #1889: stop-word lookup must be case-insensitive."""
    lowercase_is_stop = is_stop(word, STOP_WORDS)
    uppercase_is_stop = is_stop(word.upper(), STOP_WORDS)
    assert lowercase_is_stop == uppercase_is_stop
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["dog"])
def test_attrs_key(text):
    """String attribute names are mapped to their integer attribute IDs."""
    # Uppercase names map directly to the matching integer constants.
    for attrs_in, expected in (
        ({"ORTH": text}, {ORTH: text}),
        ({"NORM": text}, {NORM: text}),
    ):
        assert intify_attrs(attrs_in) == expected
    # Lowercase names work too; string values resolve through strings_map.
    assert intify_attrs({"lemma": text}, strings_map={text: 10}) == {LEMMA: 10}
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["dog"])
def test_attrs_idempotence(text):
    """Running intify_attrs on an already-intified dict is a no-op."""
    once = intify_attrs({"lemma": text, "is_alpha": True}, strings_map={text: 10})
    twice = intify_attrs(once)
    assert twice == {LEMMA: 10, IS_ALPHA: True}
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["dog"])
def test_attrs_do_deprecated(text):
    """With _do_deprecated=True, the legacy "F" key is treated as ORTH."""
    legacy_attrs = {"F": text, "is_alpha": True}
    int_attrs = intify_attrs(legacy_attrs, strings_map={text: 10}, _do_deprecated=True)
    assert int_attrs == {ORTH: 10, IS_ALPHA: True}
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,match", [(",", True), (" ", False), ("a", False)]
)
def test_lex_attrs_is_punct(text, match):
    # Only punctuation characters should be flagged; whitespace and letters are not.
    assert match == is_punct(text)
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,match", [(",", True), ("£", False), ("♥", False)]
)
def test_lex_attrs_is_ascii(text, match):
    # Characters outside the ASCII range must not be reported as ASCII.
    assert match == is_ascii(text)
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,match",
    [
        # Currency symbols are recognized...
        ("$", True),
        ("£", True),
        ("♥", False),
        ("€", True),
        ("¥", True),
        ("¢", True),
        # ...while letters, URLs, and ordinary words are not.
        ("a", False),
        ("www.google.com", False),
        ("dog", False),
    ],
)
def test_lex_attrs_is_currency(text, match):
    """is_currency flags currency symbols and nothing else."""
    assert match == is_currency(text)
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,match",
    [
        # URL-like strings should match...
        ("www.google.com", True),
        ("google.com", True),
        ("sydney.com", True),
        ("1abc2def.org", True),
        ("http://stupid", True),
        ("www.hi", True),
        ("example.com/example", True),
        # ...plain words and number/letter combinations should not.
        ("dog", False),
        ("1.2", False),
        ("1.a", False),
        ("hello.There", False),
    ],
)
def test_lex_attrs_like_url(text, match):
    """like_url accepts URL-shaped strings and rejects ordinary text."""
    assert match == like_url(text)
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,shape",
    [
        # Letters become x/X by case, digits become d; punctuation passes through.
        ("Nasa", "Xxxx"),
        ("capitalized", "xxxx"),
        ("999999999", "dddd"),
        ("C3P0", "XdXd"),
        (",", ","),
        ("\n", "\n"),
        ("``,-", "``,-"),
    ],
)
def test_lex_attrs_word_shape(text, shape):
    """word_shape maps tokens to their abstract character-class shape."""
    assert shape == word_shape(text)
|