spaCy/spacy/lang/id/__init__.py


# coding: utf8
from __future__ import unicode_literals
from .stop_words import STOP_WORDS
from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
from .norm_exceptions import NORM_EXCEPTIONS
from .lemmatizer import LOOKUP
from .lex_attrs import LEX_ATTRS
from .syntax_iterators import SYNTAX_ITERATORS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...language import Language
from ...lemmatizerlookup import Lemmatizer
from ...attrs import LANG
from ...util import update_exc


class IndonesianDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: 'id'
    lex_attr_getters.update(LEX_ATTRS)
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = set(STOP_WORDS)
    token_match = TOKEN_MATCH
    prefixes = tuple(TOKENIZER_PREFIXES)
    suffixes = tuple(TOKENIZER_SUFFIXES)
    infixes = tuple(TOKENIZER_INFIXES)
    syntax_iterators = dict(SYNTAX_ITERATORS)

    @classmethod
    def create_lemmatizer(cls, nlp=None):
        return Lemmatizer(LOOKUP)


class Indonesian(Language):
    lang = 'id'
    Defaults = IndonesianDefaults


__all__ = ['Indonesian']
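

# Usage sketch (illustrative only, not part of the original module): assuming this
# package is importable as spacy.lang.id, the Indonesian class can be instantiated
# directly to get a blank pipeline that uses the defaults defined above
# (tokenizer exceptions, stop words, prefix/suffix/infix rules, lookup lemmatizer).
#
#     from spacy.lang.id import Indonesian
#
#     nlp = Indonesian()
#     doc = nlp(u"Ini contoh kalimat bahasa Indonesia.")
#     tokens = [t.text for t in doc]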