Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-13 13:17:06 +03:00)
Commit 417d45f5d0: Don't create lookup lemmatizer within Language class and just pass in the data so it can be set on Token creation
35 lines · 1.1 KiB · Python
# coding: utf8
from __future__ import unicode_literals

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
from .stop_words import STOP_WORDS
from .lemmatizer import LOOKUP

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class HungarianDefaults(Language.Defaults):
    # Language-specific lexical attribute getters: set LANG to 'hu' and
    # merge the shared base norm exceptions into the NORM getter.
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'hu'
    lex_attr_getters[NORM] = add_lookups(Language.Defaults.lex_attr_getters[NORM], BASE_NORMS)

    # Tokenizer data: Hungarian exceptions layered over the base exceptions,
    # plus language-specific punctuation rules and the URL-style token match.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = set(STOP_WORDS)
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    token_match = TOKEN_MATCH

    # Raw lookup table passed in as data, rather than wrapped in a
    # lemmatizer object here, so lemmas can be set on Token creation.
    lemma_lookup = dict(LOOKUP)


class Hungarian(Language):
    lang = 'hu'
    Defaults = HungarianDefaults


__all__ = ['Hungarian']
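As the commit message describes, the lookup table is exposed as plain data (lemma_lookup) on the Defaults instead of being built into a lemmatizer inside the Language class, so lemmas can be resolved when Token objects are created. Below is a minimal usage sketch, assuming the spaCy v2-style API this file targets, where the Language subclass is importable from spacy.lang.hu and can be instantiated directly; the example sentence is illustrative only.

    # A minimal sketch, assuming a spaCy v2-style API.
    from spacy.lang.hu import Hungarian

    nlp = Hungarian()                    # bare Language object, no pipeline components
    doc = nlp(u'Ez egy példa mondat.')   # tokenize a Hungarian sentence
    for token in doc:
        # With no tagger in the pipeline, lemma_ falls back to the
        # LOOKUP table supplied via HungarianDefaults.lemma_lookup.
        print(token.text, token.lemma_)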