spaCy/spacy/tests/lang/pl/test_tokenizer.py

import pytest

DOT_TESTS = [
("tel.", ["tel", "."]),
2019-02-08 16:14:49 +03:00
("0 zł 99 gr", ["0", "", "99", "gr"]),
]
HYPHEN_TESTS = [
("cztero-", ["cztero-"]),
("jedno-", ["jedno-"]),
("dwu-", ["dwu-"]),
("trzy-", ["trzy-"]),
]
TESTCASES = DOT_TESTS + HYPHEN_TESTS
2019-02-08 16:14:49 +03:00
@pytest.mark.parametrize("text,expected_tokens", TESTCASES)
def test_tokenizer_handles_testcases(pl_tokenizer, text, expected_tokens):
    # Tokenize and compare the non-whitespace token texts to the expected list.
    tokens = pl_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list
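
# Note: the `pl_tokenizer` fixture is supplied by the shared test conftest,
# not defined in this file. A minimal sketch of how such a fixture is
# typically wired up (the exact conftest contents are an assumption, not
# verbatim source):
#
#     from spacy.util import get_lang_class
#
#     @pytest.fixture(scope="session")
#     def pl_tokenizer():
#         # Instantiate the Polish Language class and reuse its tokenizer.
#         return get_lang_class("pl")().tokenizer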