mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-11 12:18:04 +03:00
bab9976d9a
* Adjust Table API and add docs * Add attributes and update description [ci skip] * Use strings.get_string_id instead of hash_string * Fix table method calls * Make orth arg in Lemmatizer.lookup optional Fall back to string, which is now handled by Table.__contains__ out-of-the-box * Fix method name * Auto-format
21 lines
861 B
Python
21 lines
861 B
Python
# coding: utf-8
|
|
from __future__ import unicode_literals
|
|
|
|
import pytest
|
|
|
|
# fmt: off
# Each case pairs a tokenized Lithuanian sentence with the lemmas the
# lookup lemmatizer is expected to produce, position for position.
TEST_CASES = [
    (
        ["Galime", "vadinti", "gerovės", "valstybe", ",", "turime",
         "išvystytą", "socialinę", "apsaugą", ",", "sveikatos", "apsaugą",
         "ir", "prieinamą", "švietimą", "."],
        ["galėti", "vadintas", "gerovė", "valstybė", ",", "turėti",
         "išvystytas", "socialinis", "apsauga", ",", "sveikata", "apsauga",
         "ir", "prieinamas", "švietimas", "."],
    ),
    (
        ["taip", ",", "uoliai", "tyrinėjau", "ir", "pasirinkau",
         "geriausią", "variantą", "."],
        ["taip", ",", "uolus", "tyrinėti", "ir", "pasirinkti",
         "geras", "variantas", "."],
    ),
]
# fmt: on
|
|
|
|
|
|
@pytest.mark.parametrize("tokens,lemmas", TEST_CASES)
def test_lt_lemmatizer(lt_lemmatizer, tokens, lemmas):
    """Look up each token in the lemmatizer's lookup table.

    Tokens absent from the table fall back to themselves via the
    ``get(token, token)`` default; the result must match ``lemmas``.
    """
    predicted = [lt_lemmatizer.lookup_table.get(token, token) for token in tokens]
    assert predicted == lemmas
|