# coding: utf8
from __future__ import unicode_literals
import pytest


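# Each test case pairs a Finnish sentence containing abbreviations with the
# token texts the tokenizer is expected to produce. Periods that belong to
# abbreviations ('t.', 'siht.', 'n.') should stay attached to their tokens.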
ABBREVIATION_TESTS = [
    ('Hyvää uutta vuotta t. siht. Niemelä!',
     ['Hyvää', 'uutta', 'vuotta', 't.', 'siht.', 'Niemelä', '!']),
    ('Paino on n. 2.2 kg', ['Paino', 'on', 'n.', '2.2', 'kg'])
]


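# fi_tokenizer is assumed to be a pytest fixture (typically defined in the
# test suite's conftest.py) that constructs a Finnish-language tokenizer.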
@pytest.mark.parametrize('text,expected_tokens', ABBREVIATION_TESTS)
def test_tokenizer_handles_testcases(fi_tokenizer, text, expected_tokens):
    tokens = fi_tokenizer(text)
    # Drop whitespace tokens so the expected lists only need to enumerate
    # the meaningful tokens.
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list