Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-15 14:17:58 +03:00)
7d50804644
* Migrate regressions 1-1000
* Move serialize test to correct file
* Remove tests that won't work in v3
* Migrate regressions 1000-1500
  Removed regression test 1250 because v3 doesn't support the old LEX scheme anymore.
* Add missing imports in serializer tests
* Migrate tests 1500-2000
* Migrate regressions from 2000-2500
* Migrate regressions from 2501-3000
* Migrate regressions from 3000-3501
* Migrate regressions from 3501-4000
* Migrate regressions from 4001-4500
* Migrate regressions from 4501-5000
* Migrate regressions from 5001-5501
* Migrate regressions from 5501 to 7000
* Migrate regressions from 7001 to 8000
* Migrate remaining regression tests
* Fixing missing imports
* Update docs with new system [ci skip]
* Update CONTRIBUTING.md
  - Fix formatting
  - Update wording
* Remove lemmatizer tests in el lang
* Move a few tests into the general tokenizer
* Separate Doc and DocBin tests
78 lines · 2.4 KiB · Python
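These tests exercise spaCy's Swedish tokenizer and rely on the shared `sv_tokenizer` fixture from the test suite's conftest. For reference, a minimal sketch of how such a fixture can be defined, assuming the `get_lang_class` pattern that spaCy's test suite uses (an illustration, not the verbatim conftest contents):

import pytest
from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def sv_tokenizer():
    # Build the Swedish Language object once per test session and hand
    # its tokenizer to every test that requests this fixture.
    return get_lang_class("sv")().tokenizer

The test file itself: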
import pytest

# Shared cases: Swedish abbreviations ("bl.a.", "kl.", "p.g.a.") must survive
# tokenization as single tokens, including sentence-final edge cases.
SV_TOKEN_EXCEPTION_TESTS = [
    (
        "Smörsåsen används bl.a. till fisk",
        ["Smörsåsen", "används", "bl.a.", "till", "fisk"],
    ),
    (
        "Jag kommer först kl. 13 p.g.a. diverse förseningar",
        ["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
    ),
    (
        "Anders I. tycker om ord med i i.",
        ["Anders", "I.", "tycker", "om", "ord", "med", "i", "i", "."],
    ),
]


@pytest.mark.issue(805)
@pytest.mark.parametrize(
    "text,expected_tokens",
    [
        (
            "Smörsåsen används bl.a. till fisk",
            ["Smörsåsen", "används", "bl.a.", "till", "fisk"],
        ),
        (
            "Jag kommer först kl. 13 p.g.a. diverse förseningar",
            ["Jag", "kommer", "först", "kl.", "13", "p.g.a.", "diverse", "förseningar"],
        ),
    ],
)
def test_issue805(sv_tokenizer, text, expected_tokens):
    tokens = sv_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list


@pytest.mark.parametrize("text,expected_tokens", SV_TOKEN_EXCEPTION_TESTS)
def test_sv_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
    tokens = sv_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list


# Colloquial verb forms with the enclitic pronoun "u" ("du"), e.g.
# "Serru" = "ser du", should be split into verb + "u".
@pytest.mark.parametrize("text", ["driveru", "hajaru", "Serru", "Fixaru"])
def test_sv_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
    tokens = sv_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[1].text == "u"


# Unambiguous abbreviations stay a single token, trailing period included.
@pytest.mark.parametrize("text", ["bl.a", "m.a.o.", "Jan.", "Dec.", "kr.", "osv."])
def test_sv_tokenizer_handles_abbr(sv_tokenizer, text):
    tokens = sv_tokenizer(text)
    assert len(tokens) == 1


# Strings like "Jul." could be an abbreviation ("juli") or a word ("jul")
# followed by a sentence-final period; the tokenizer splits them in two.
@pytest.mark.parametrize("text", ["Jul.", "jul.", "sön.", "Sön."])
def test_sv_tokenizer_handles_ambiguous_abbr(sv_tokenizer, text):
    tokens = sv_tokenizer(text)
    assert len(tokens) == 2


def test_sv_tokenizer_handles_exc_in_text(sv_tokenizer):
    # The exception must also be recognized mid-sentence, not just in isolation.
    text = "Det är bl.a. inte meningen"
    tokens = sv_tokenizer(text)
    assert len(tokens) == 5
    assert tokens[2].text == "bl.a."


def test_sv_tokenizer_handles_custom_base_exc(sv_tokenizer):
    # Sentence-final "på." is a word plus punctuation, not an abbreviation,
    # so the period must be split off.
    text = "Här är något du kan titta på."
    tokens = sv_tokenizer(text)
    assert len(tokens) == 8
    assert tokens[6].text == "på"
    assert tokens[7].text == "."
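The abbreviation handling exercised above is driven by the Swedish tokenizer exception table (see spacy/lang/sv/tokenizer_exceptions.py in the repository). The same mechanism is exposed at runtime via `Tokenizer.add_special_case`. A minimal sketch, assuming spaCy is installed; the abbreviation "t.o.m." is picked purely for illustration and may already be covered by the built-in exceptions:

import spacy
from spacy.attrs import ORTH

nlp = spacy.blank("sv")
# Register "t.o.m." ("till och med") so it survives as a single token.
nlp.tokenizer.add_special_case("t.o.m.", [{ORTH: "t.o.m."}])

doc = nlp("Affären är öppen t.o.m. söndag")
assert [t.text for t in doc] == ["Affären", "är", "öppen", "t.o.m.", "söndag"]

Special cases are matched as exact strings before the general prefix/suffix/infix rules apply, which is why abbreviations with internal periods never get split.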