2019-08-22 12:43:07 +03:00
|
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text,norms,lemmas",
    [
        # abbreviation -> expected norm(s) / lemma(s); each input is a single token
        ("о.г.", ["ове године"], ["ова година"]),
        ("чет.", ["четвртак"], ["четвртак"]),
        ("гђа", ["госпођа"], ["госпођа"]),
        ("ил'", ["или"], ["или"]),
    ],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
    """Serbian abbreviation exceptions are kept as one token with the right norm.

    NOTE(review): ``lemmas`` is parametrized but never asserted below —
    presumably retained for reference/documentation; confirm whether a
    ``token.lemma_`` check is intended here.
    """
    doc = sr_tokenizer(text)
    assert len(doc) == 1
    assert [t.norm_ for t in doc] == norms
|