Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-28 19:06:33 +03:00)
Commit b5af0fe836

This reverts commit 6f314f99c4. We are reverting this until we can support this normalization more consistently across vectors, training corpora, and lemmatizer data.
17 lines · 510 B · Python
import pytest


@pytest.mark.parametrize(
    "text,norms,lemmas",
    [
        ("о.г.", ["ове године"], ["ова година"]),
        ("чет.", ["четвртак"], ["четвртак"]),
        ("гђа", ["госпођа"], ["госпођа"]),
        ("ил'", ["или"], ["или"]),
    ],
)
def test_sr_tokenizer_abbrev_exceptions(sr_tokenizer, text, norms, lemmas):
    tokens = sr_tokenizer(text)
    assert len(tokens) == 1
    assert [token.norm_ for token in tokens] == norms
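The test depends on an sr_tokenizer pytest fixture provided elsewhere in the spaCy test suite's conftest. As a minimal, hypothetical sketch of how such a fixture could be supplied (not the repository's actual conftest), one can build it from a blank Serbian pipeline:

# Hypothetical conftest.py sketch: expose the tokenizer of a blank
# Serbian pipeline as a session-scoped fixture named sr_tokenizer.
import pytest
import spacy


@pytest.fixture(scope="session")
def sr_tokenizer():
    # spacy.blank("sr") creates a Serbian Language object with no trained
    # components; its .tokenizer already applies the abbreviation exceptions.
    return spacy.blank("sr").tokenizer

With a fixture like this in place, running pytest -k test_sr_tokenizer_abbrev_exceptions exercises each abbreviation case, checking that the text stays a single token and that token.norm_ matches the expected expansion.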