spaCy/spacy/tests/lang/pl/test_tokenizer.py
adrianeboyd 0061992d95
Update Polish tokenizer for UD_Polish-PDB (#5432)
Update Polish tokenizer for UD_Polish-PDB, which is a relatively major
change from the existing tokenizer. Unused exceptions files and
conflicting test cases removed.

Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
2020-05-19 15:59:55 +02:00

27 lines
611 B
Python

# coding: utf8
from __future__ import unicode_literals
import pytest
# Cases covering abbreviations with trailing dots and currency abbreviations.
# Format: (input text, expected non-whitespace token texts).
DOT_TESTS = [
    ("tel.", ["tel", "."]),
    # "zł" (złoty) must survive as its own token — the original line had an
    # empty string here, which a tokenizer can never produce.
    ("0 zł 99 gr", ["0", "zł", "99", "gr"]),
]
# Truncated compound prefixes ("cztero-" = "four-", etc.) must keep the
# trailing hyphen attached and come out as one single token.
HYPHEN_TESTS = [
    (prefix, [prefix])
    for prefix in ("cztero-", "jedno-", "dwu-", "trzy-")
]
# Every tokenizer case fed to the parametrized test below.
TESTCASES = [*DOT_TESTS, *HYPHEN_TESTS]
@pytest.mark.parametrize("text,expected_tokens", TESTCASES)
def test_tokenizer_handles_testcases(pl_tokenizer, text, expected_tokens):
    """Tokenize *text* with the Polish tokenizer fixture and check that the
    non-whitespace token texts match *expected_tokens* exactly, in order."""
    doc = pl_tokenizer(text)
    non_space = []
    for token in doc:
        if not token.is_space:
            non_space.append(token.text)
    assert non_space == expected_tokens