spaCy/spacy/tests/lang/ro/test_tokenizer.py
Jani Monoses ec62cadf4c Updates to Romanian support (#2354)
* Add back Romanian in conftest

* Romanian lex_attr

* More tokenizer exceptions for Romanian

* Add tests for some Romanian tokenizer exceptions
2018-05-24 11:40:00 +02:00

26 lines
806 B
Python

# coding: utf8
from __future__ import unicode_literals
import pytest
# Tokenizer fixtures: each entry pairs an input sentence with the exact
# token texts the Romanian tokenizer is expected to produce for it.

# Abbreviation handling: "str.", "nr.", "etc.", "ș.a.m.d.", "d.p.d.v."
# must survive as single tokens while the sentence-final "." splits off.
DEFAULT_TESTS = [
    (
        'Adresa este str. Principală nr. 5.',
        ['Adresa', 'este', 'str.', 'Principală', 'nr.', '5', '.'],
    ),
    ('Teste, etc.', ['Teste', ',', 'etc.']),
    ('Lista, ș.a.m.d.', ['Lista', ',', 'ș.a.m.d.']),
    ('Și d.p.d.v. al...', ['Și', 'd.p.d.v.', 'al', '...']),
]

# Ordinal suffixes ("4-a", "12-lea") must stay attached to their digits.
NUMBER_TESTS = [
    ('Clasa a 4-a.', ['Clasa', 'a', '4-a', '.']),
    ('Al 12-lea ceas.', ['Al', '12-lea', 'ceas', '.']),
]

# Combined suite consumed by the parametrized test in this module.
TESTCASES = DEFAULT_TESTS + NUMBER_TESTS
@pytest.mark.parametrize('text,expected_tokens', TESTCASES)
def test_tokenizer_handles_testcases(ro_tokenizer, text, expected_tokens):
    """Tokenize `text` with the Romanian tokenizer fixture and check that
    the non-whitespace token texts match `expected_tokens` exactly."""
    doc = ro_tokenizer(text)
    # Whitespace tokens are irrelevant to these cases, so filter them out
    # before comparing against the expected token list.
    observed = [token.text for token in doc if not token.is_space]
    assert expected_tokens == observed