|
|
|
# coding: utf8
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
|
|
# Fixture data for the Romanian tokenizer: each entry pairs an input text
# with the expected non-whitespace token texts.
TEST_CASES = [
    # abbreviation handling ("str.", "nr.", "etc.", "ș.a.m.d.", "d.p.d.v.")
    ('Adresa este str. Principală nr. 5.', ['Adresa', 'este', 'str.', 'Principală', 'nr.', '5', '.']),
    ('Teste, etc.', ['Teste', ',', 'etc.']),
    ('Lista, ș.a.m.d.', ['Lista', ',', 'ș.a.m.d.']),
    ('Și d.p.d.v. al...', ['Și', 'd.p.d.v.', 'al', '...']),
    # number tests: ordinal suffixes ("4-a", "12-lea") stay attached
    ('Clasa a 4-a.', ['Clasa', 'a', '4-a', '.']),
    ('Al 12-lea ceas.', ['Al', '12-lea', 'ceas', '.']),
]
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize('text,expected_tokens', TEST_CASES)
def test_ro_tokenizer_handles_testcases(ro_tokenizer, text, expected_tokens):
    """Tokenize `text` with the Romanian tokenizer fixture and check that
    the non-whitespace token texts match `expected_tokens` exactly."""
    doc = ro_tokenizer(text)
    # Whitespace tokens are irrelevant to these cases, so filter them out
    # before comparing.
    observed = [token.text for token in doc if not token.is_space]
    assert observed == expected_tokens
|