# coding: utf8
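# Ukrainian language data: wires the Ukrainian tokenizer exceptions, stop
# words, lexical attributes and lemmatizer into a Language subclass.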
from __future__ import unicode_literals

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...util import update_exc, add_lookups
from ...language import Language
from ...attrs import LANG, NORM
from .lemmatizer import UkrainianLemmatizer


class UkrainianDefaults(Language.Defaults):
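    """Language defaults for Ukrainian: lexical attribute getters, tokenizer
    exceptions, stop words and the lemmatizer factory."""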
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "uk"
    # Add the shared base norm exceptions as a lookup on top of the default
    # NORM getter.
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    lex_attr_getters.update(LEX_ATTRS)
    # Merge the shared base tokenizer exceptions with the Ukrainian-specific ones.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS

    @classmethod
    def create_lemmatizer(cls, nlp=None, **kwargs):
        # Override the default lemmatizer factory with the Ukrainian
        # lemmatizer defined in lemmatizer.py.
        return UkrainianLemmatizer()


class Ukrainian(Language):
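    """The Ukrainian language class, built on the defaults above.

    Minimal usage sketch (assumes this module is part of a spaCy install;
    the lemmatizer additionally expects the pymorphy2 package with its
    Ukrainian dictionaries to be available):

        from spacy.lang.uk import Ukrainian

        nlp = Ukrainian()
        doc = nlp("Привіт, світ!")
        print([t.text for t in doc])
    """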
    lang = "uk"
    Defaults = UkrainianDefaults


__all__ = ["Ukrainian"]