# coding: utf-8
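# Tests for the Russian lemmatizer (backed by pymorphy2): document-level
# lemmatization, noun and POS homonym disambiguation, and punctuation
# normalization.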
from __future__ import unicode_literals

import pytest

from spacy.lang.ru import Russian

from ...util import get_doc


@pytest.fixture
def ru_lemmatizer():
    # Skip the lemmatizer tests entirely if the pymorphy2 backend is missing.
    pytest.importorskip('pymorphy2')
    return Russian.Defaults.create_lemmatizer()


def test_ru_doc_lemmatization(ru_tokenizer):
    words = ['мама', 'мыла', 'раму']
    tags = ['NOUN__Animacy=Anim|Case=Nom|Gender=Fem|Number=Sing',
            'VERB__Aspect=Imp|Gender=Fem|Mood=Ind|Number=Sing|Tense=Past|VerbForm=Fin|Voice=Act',
            'NOUN__Animacy=Anim|Case=Acc|Gender=Fem|Number=Sing']
    doc = get_doc(ru_tokenizer.vocab, words=words, tags=tags)
    lemmas = [token.lemma_ for token in doc]
    assert lemmas == ['мама', 'мыть', 'рама']


@pytest.mark.parametrize('text,lemmas', [
    ('гвоздики', ['гвоздик', 'гвоздика']),
    ('люди', ['человек']),
    ('реки', ['река']),
    ('кольцо', ['кольцо']),
    ('пепперони', ['пепперони'])])
def test_ru_lemmatizer_noun_lemmas(ru_lemmatizer, text, lemmas):
    assert sorted(ru_lemmatizer.noun(text)) == lemmas


@pytest.mark.models('ru')
@pytest.mark.parametrize('text,pos,morphology,lemma', [
    ('рой', 'NOUN', None, 'рой'),
    ('рой', 'VERB', None, 'рыть'),
    ('клей', 'NOUN', None, 'клей'),
    ('клей', 'VERB', None, 'клеить'),
    ('три', 'NUM', None, 'три'),
    ('кос', 'NOUN', {'Number': 'Sing'}, 'кос'),
    ('кос', 'NOUN', {'Number': 'Plur'}, 'коса'),
    ('кос', 'ADJ', None, 'косой'),
    ('потом', 'NOUN', None, 'пот'),
    ('потом', 'ADV', None, 'потом')])
def test_ru_lemmatizer_works_with_different_pos_homonyms(ru_lemmatizer, text, pos, morphology, lemma):
    assert ru_lemmatizer(text, pos, morphology) == [lemma]


@pytest.mark.parametrize('text,morphology,lemma', [
    ('гвоздики', {'Gender': 'Fem'}, 'гвоздика'),
    ('гвоздики', {'Gender': 'Masc'}, 'гвоздик'),
    ('вина', {'Gender': 'Fem'}, 'вина'),
    ('вина', {'Gender': 'Neut'}, 'вино')])
def test_ru_lemmatizer_works_with_noun_homonyms(ru_lemmatizer, text, morphology, lemma):
    assert ru_lemmatizer.noun(text, morphology) == [lemma]


def test_ru_lemmatizer_punct(ru_lemmatizer):
    assert ru_lemmatizer.punct('«') == ['"']
    assert ru_lemmatizer.punct('»') == ['"']