Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-24 00:46:28 +03:00)
Adds Turkish Lemmatization
This commit is contained in:
parent a07b44fb47
commit abe098b255
CONTRIBUTORS.md
@@ -12,6 +12,7 @@ This is a list of everyone who has made significant contributions to spaCy, in a
 * Ben Eyal, [@beneyal](https://github.com/beneyal)
 * Bhargav Srinivasa, [@bhargavvader](https://github.com/bhargavvader)
 * Bruno P. Kinoshita, [@kinow](https://github.com/kinow)
+* Canbey Bilgili, [@cbilgili](https://github.com/cbilgili)
 * Chris DuBois, [@chrisdubois](https://github.com/chrisdubois)
 * Christoph Schwienheer, [@chssch](https://github.com/chssch)
 * Dafne van Kuppevelt, [@dafnevk](https://github.com/dafnevk)
spacy/lang/tr/__init__.py
@@ -2,6 +2,7 @@
 from __future__ import unicode_literals

 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
+from .lemmatizer import LOOKUP
 from .stop_words import STOP_WORDS

 from ..tokenizer_exceptions import BASE_EXCEPTIONS
@@ -17,6 +18,7 @@ class TurkishDefaults(Language.Defaults):
     lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)
     tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
     stop_words = STOP_WORDS
+    lemma_lookup = LOOKUP


 class Turkish(Language):
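With lemma_lookup wired into the language defaults, the lookup table is picked up as soon as the Turkish class is instantiated. A minimal usage sketch, assuming spaCy with this commit installed; the exact lemmas depend entirely on the contents of LOOKUP, and the expected values below are taken from the test cases added in this commit:

# Minimal usage sketch: a blank Turkish pipeline already assigns lemmas via
# the lookup table exposed through TurkishDefaults.lemma_lookup.
from spacy.lang.tr import Turkish

nlp = Turkish()
doc = nlp("isteklerimizi bitirmeliyiz")
for token in doc:
    print(token.text, token.lemma_)  # 'isteklerimizi' -> 'istek', 'bitirmeliyiz' -> 'bitir'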
spacy/lang/tr/lemmatizer.py (new file, 1337907 lines)
File diff suppressed because it is too large.
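Since the diff for the new lemmatizer module is suppressed, here is a hedged sketch of what a lookup table of this kind typically looks like. The entries are illustrative only, mirroring the test cases added in this commit, not the actual contents of the file, and the helper function is hypothetical:

# Illustrative-only sketch of a lookup lemmatizer table. The real LOOKUP in
# spacy/lang/tr/lemmatizer.py is a very large mapping of surface form -> lemma.
LOOKUP = {
    "evlerimizdeki": "ev",
    "işlerimizi": "iş",
    "bitirmeliyiz": "bitir",
    "isteklerimizi": "istek",
    "karşılaştırmamızın": "karşılaştır",
    "çoğulculuktan": "çoğulcu",
}

def lookup_lemma(word):
    # Unknown forms fall back to the surface form itself, which would explain
    # the 'biran' -> 'biran' case in the tests below.
    return LOOKUP.get(word, word)

assert lookup_lemma("evlerimizdeki") == "ev"
assert lookup_lemma("biran") == "biran"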
spacy/tests/conftest.py
@@ -15,7 +15,7 @@ from .. import util
 # here if it's using spaCy's tokenizer (not a different library)
 # TODO: re-implement generic tokenizer tests
 _languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
-              'it', 'nb', 'nl', 'pl', 'pt', 'ru', 'sv', 'xx']
+              'it', 'nb', 'nl', 'pl', 'pt', 'ru', 'sv', 'tr', 'xx']
 _models = {'en': ['en_core_web_sm'],
            'de': ['de_core_news_md'],
            'fr': ['fr_core_news_sm'],
@@ -142,6 +142,10 @@ def th_tokenizer():
     pythainlp = pytest.importorskip("pythainlp")
     return util.get_lang_class('th').Defaults.create_tokenizer()

+@pytest.fixture
+def tr_tokenizer():
+    return util.get_lang_class('tr').Defaults.create_tokenizer()
+

 @pytest.fixture
 def ru_tokenizer():
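As a quick illustration of how the new fixture is consumed, any test under spacy/tests/ can request tr_tokenizer by argument name and pytest will build the Turkish tokenizer through the fixture above. The test name and sentence here are made up for illustration and are not part of the commit:

# Hypothetical example test (not in this commit): pytest injects tr_tokenizer
# by name from spacy/tests/conftest.py.
def test_tr_tokenizer_handles_simple_text(tr_tokenizer):
    tokens = tr_tokenizer("Evlerimizdeki işlerimizi bitirmeliyiz.")
    assert tokens[0].text == "Evlerimizdeki"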
spacy/tests/lang/tr/__init__.py (new file, empty)

spacy/tests/lang/tr/test_lemmatization.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+@pytest.mark.parametrize('string,lemma', [('evlerimizdeki', 'ev'),
+                                          ('işlerimizi', 'iş'),
+                                          ('biran', 'biran'),
+                                          ('bitirmeliyiz', 'bitir'),
+                                          ('isteklerimizi', 'istek'),
+                                          ('karşılaştırmamızın', 'karşılaştır'),
+                                          ('çoğulculuktan', 'çoğulcu')])
+def test_lemmatizer_lookup_assigns(tr_tokenizer, string, lemma):
+    tokens = tr_tokenizer(string)
+    assert tokens[0].lemma_ == lemma