Adds Turkish Lemmatization

Canbey Bilgili 2017-12-01 17:04:32 +03:00
parent a07b44fb47
commit abe098b255
6 changed files with 1337930 additions and 1 deletion

CONTRIBUTORS.md

@@ -12,6 +12,7 @@ This is a list of everyone who has made significant contributions to spaCy, in a
 * Ben Eyal, [@beneyal](https://github.com/beneyal)
 * Bhargav Srinivasa, [@bhargavvader](https://github.com/bhargavvader)
 * Bruno P. Kinoshita, [@kinow](https://github.com/kinow)
+* Canbey Bilgili, [@cbilgili](https://github.com/cbilgili)
 * Chris DuBois, [@chrisdubois](https://github.com/chrisdubois)
 * Christoph Schwienheer, [@chssch](https://github.com/chssch)
 * Dafne van Kuppevelt, [@dafnevk](https://github.com/dafnevk)

spacy/lang/tr/__init__.py

@@ -2,6 +2,7 @@
 from __future__ import unicode_literals
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
+from .lemmatizer import LOOKUP
 from .stop_words import STOP_WORDS
 from ..tokenizer_exceptions import BASE_EXCEPTIONS
@@ -17,6 +18,7 @@ class TurkishDefaults(Language.Defaults):
     lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
     tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
     stop_words = STOP_WORDS
+    lemma_lookup = LOOKUP

 class Turkish(Language):
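
A quick way to exercise this wiring (a sketch, not part of this commit; it assumes a spaCy 2.x install that includes this branch, and relies only on the tokenizer-level pipeline, just like the new tests further down):

# Sketch only, not part of this commit; assumes spaCy 2.x with this branch installed.
import spacy

nlp = spacy.blank('tr')        # builds Turkish from TurkishDefaults, including lemma_lookup
doc = nlp('evlerimizdeki')     # no tagger needed; the lemma comes from the lookup table
print(doc[0].lemma_)           # expected: 'ev' (the same pair appears in the new tests below)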

spacy/lang/tr/lemmatizer.py (new file, 1337907 additions; diff suppressed because it is too large)
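
The suppressed diff is the lookup table itself: a flat mapping from inflected surface forms to lemmas, exported as LOOKUP. A minimal sketch of its shape (the real file holds roughly 1.3 million pairs; the example entries here are taken from the new tests below):

# coding: utf8
# Minimal sketch of the shape of LOOKUP in spacy/lang/tr/lemmatizer.py.
# The real table has ~1.3 million entries; these pairs are copied from the tests below.
from __future__ import unicode_literals

LOOKUP = {
    "evlerimizdeki": "ev",
    "bitirmeliyiz": "bitir",
    "isteklerimizi": "istek",
    "karşılaştırmamızın": "karşılaştır",
    "çoğulculuktan": "çoğulcu",
}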

spacy/tests/conftest.py

@@ -15,7 +15,7 @@ from .. import util
 # here if it's using spaCy's tokenizer (not a different library)
 # TODO: re-implement generic tokenizer tests
 _languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
-              'it', 'nb', 'nl', 'pl', 'pt', 'ru', 'sv', 'xx']
+              'it', 'nb', 'nl', 'pl', 'pt', 'ru', 'sv', 'tr', 'xx']
 _models = {'en': ['en_core_web_sm'],
            'de': ['de_core_news_md'],
            'fr': ['fr_core_news_sm'],
@@ -142,6 +142,10 @@ def th_tokenizer():
     pythainlp = pytest.importorskip("pythainlp")
     return util.get_lang_class('th').Defaults.create_tokenizer()

+@pytest.fixture
+def tr_tokenizer():
+    return util.get_lang_class('tr').Defaults.create_tokenizer()
+
 @pytest.fixture
 def ru_tokenizer():



@@ -0,0 +1,15 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+@pytest.mark.parametrize('string,lemma', [('evlerimizdeki', 'ev'),
+                                          ('işlerimizi', 'iş'),
+                                          ('biran', 'biran'),
+                                          ('bitirmeliyiz', 'bitir'),
+                                          ('isteklerimizi', 'istek'),
+                                          ('karşılaştırmamızın', 'karşılaştır'),
+                                          ('çoğulculuktan', 'çoğulcu')])
+def test_lemmatizer_lookup_assigns(tr_tokenizer, string, lemma):
+    tokens = tr_tokenizer(string)
+    assert tokens[0].lemma_ == lemma
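
The ('biran', 'biran') pair is consistent with the usual behaviour of lookup lemmatization: a form that is not found in the table is returned unchanged rather than left empty. A simplified sketch of that logic (not spaCy's actual implementation):

# Simplified sketch of lookup lemmatization with a fallback; not spaCy's actual code.
def lookup_lemma(string, table):
    # Known form -> mapped lemma; unknown form -> the form itself.
    return table.get(string, string)

assert lookup_lemma('evlerimizdeki', {'evlerimizdeki': 'ev'}) == 'ev'
assert lookup_lemma('biran', {'evlerimizdeki': 'ev'}) == 'biran'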