# coding: utf8
from __future__ import unicode_literals

# Hungarian-specific language data
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .lemmatizer import LOOKUP

# Shared, language-independent data and helpers
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...lemmatizerlookup import Lemmatizer
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class HungarianDefaults(Language.Defaults):
    # Copy the shared lexical attribute getters, set the language ID and
    # extend the NORM getter with the base norm exceptions
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'hu'
    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM],
                                         BASE_NORMS)

    # Merge the shared tokenizer exceptions with the Hungarian-specific ones
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = set(STOP_WORDS)
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    token_match = TOKEN_MATCH

    @classmethod
    def create_lemmatizer(cls, nlp=None):
        # Hungarian lemmatization is a plain table lookup
        return Lemmatizer(LOOKUP)


class Hungarian(Language):
    lang = 'hu'
    Defaults = HungarianDefaults


__all__ = ['Hungarian']
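

# A minimal usage sketch, not part of the upstream module: instantiating the
# class defined above builds a blank Hungarian pipeline with these defaults
# (assuming this file is importable as spacy.lang.hu in the surrounding
# package layout). Kept as a comment so importing the package stays
# side-effect free:
#
#     from spacy.lang.hu import Hungarian
#     nlp = Hungarian()
#     doc = nlp(u'Szia világ! Ez egy mondat.')
#     print([token.text for token in doc])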