# coding: utf-8
from __future__ import unicode_literals

import pytest

from ....tokens.doc import Doc
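

# The en_lemmatizer fixture builds a standalone lemmatizer from the English
# model's defaults; tests marked with models('en') are only expected to run
# when the English model is available.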
@pytest.fixture
def en_lemmatizer(EN):
    return EN.Defaults.create_lemmatizer()
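

# Lemmas should follow the assigned fine-grained tag: 'bleed' tagged as VBP
# keeps the lemma 'bleed'.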
@pytest.mark.models('en')
def test_doc_lemmatization(EN):
    doc = Doc(EN.vocab, words=['bleed'])
    doc[0].tag_ = 'VBP'
    assert doc[0].lemma_ == 'bleed'
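

# Noun lemmatization of regular and irregular plurals; an ambiguous form like
# 'axes' yields several candidate lemmas.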
@pytest.mark.models('en')
@pytest.mark.parametrize('text,lemmas', [("aardwolves", ["aardwolf"]),
                                         ("aardwolf", ["aardwolf"]),
                                         ("planets", ["planet"]),
                                         ("ring", ["ring"]),
                                         ("axes", ["axis", "axe", "ax"])])
def test_en_lemmatizer_noun_lemmas(en_lemmatizer, text, lemmas):
    assert en_lemmatizer.noun(text) == lemmas
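

# Nouns that are already in their base form should lemmatize to themselves.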
@pytest.mark.models('en')
@pytest.mark.parametrize('text,lemmas', [("bleed", ["bleed"]),
                                         ("feed", ["feed"]),
                                         ("need", ["need"]),
                                         ("ring", ["ring"])])
def test_en_lemmatizer_base_form_noun_lemmas(en_lemmatizer, text, lemmas):
    # Cases like this are problematic -- not clear what we should do to resolve
    # ambiguity?
    # ("axes", ["ax", "axes", "axis"])])
    assert en_lemmatizer.noun(text) == lemmas
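

# Base-form lookup with explicit morphological features ('number'); marked
# xfail, so these cases are currently expected to fail.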
@pytest.mark.xfail
@pytest.mark.models('en')
def test_en_lemmatizer_base_forms(en_lemmatizer):
    assert en_lemmatizer.noun('dive', {'number': 'sing'}) == ['dive']
    assert en_lemmatizer.noun('dive', {'number': 'plur'}) == ['diva']
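

# Verb lemmatization with a morphological feature: past-tense 'saw' should map
# to 'see'.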
@pytest.mark.models('en')
def test_en_lemmatizer_base_form_verb(en_lemmatizer):
    assert en_lemmatizer.verb('saw', {'verbform': 'past'}) == ['see']
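

# Curly quotation marks should be normalized to the plain ASCII double quote.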
@pytest.mark.models('en')
def test_en_lemmatizer_punct(en_lemmatizer):
    assert en_lemmatizer.punct('“') == ['"']
    assert en_lemmatizer.punct('”') == ['"']
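

# After running the tagger over a freshly made doc, every token should end up
# with a non-empty lemma.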
@pytest.mark.models('en')
def test_en_lemmatizer_lemma_assignment(EN):
    text = "Bananas in pyjamas are geese."
    doc = EN.make_doc(text)
    EN.tagger(doc)
    assert all(t.lemma_ != '' for t in doc)