# coding: utf8
from __future__ import unicode_literals

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .morph_rules import MORPH_RULES
from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC

from ..language_data import BASE_EXCEPTIONS
from ..language import Language
from ..attrs import LANG
from ..util import update_exc


class English(Language):
    lang = 'en'

    class Defaults(Language.Defaults):
        # Start from the shared lexical attribute getters and mark every
        # lexeme produced by this class as English.
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'en'

        # Merge the shared base exceptions with the English-specific ones.
        tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
        tag_map = dict(TAG_MAP)
        stop_words = set(STOP_WORDS)
        morph_rules = dict(MORPH_RULES)

        # Data driving the rule-based lemmatizer.
        lemma_rules = dict(LEMMA_RULES)
        lemma_index = dict(LEMMA_INDEX)
        lemma_exc = dict(LEMMA_EXC)


__all__ = ['English']
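
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal, hypothetical example of constructing the English class and
# tokenizing a string. Depending on the spaCy version this module ships
# with, instantiating English() may expect model data to be installed
# first, so treat this as a sketch rather than a guaranteed entry point.
if __name__ == '__main__':
    nlp = English()
    doc = nlp(u"Apples aren't oranges.")
    # The tokenizer exceptions merged above are what split "aren't"
    # into "are" + "n't".
    print([token.text for token in doc])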