# encoding: utf8
from __future__ import unicode_literals, print_function
from os import path
from ..language import Language
from ..lemmatizer import Lemmatizer
from ..vocab import Vocab
from ..tokenizer import Tokenizer
from ..attrs import LANG
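# The star import supplies the English language data bound below:
# TOKENIZER_EXCEPTIONS, TAG_MAP, STOP_WORDS and LEMMA_RULES.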
from .language_data import *


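# English is a thin subclass: it declares the language ID and plugs the
# English language data into the shared Language machinery via Defaults.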
class English(Language):
    lang = 'en'
    class Defaults(Language.Defaults):
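        # Copy the shared lexical-attribute getters so the override below
        # doesn't mutate Language.Defaults, then pin the LANG attribute
        # so every lexeme created under these defaults reports 'en'.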
        lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
        lex_attr_getters[LANG] = lambda text: 'en'
        tokenizer_exceptions = TOKENIZER_EXCEPTIONS
        tag_map = TAG_MAP
        stop_words = STOP_WORDS
        lemma_rules = LEMMA_RULES
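

# Minimal usage sketch: assumes the English model data has already been
# installed (with the v1-era CLI, e.g. `python -m spacy.en.download`).
# English() then loads the default vocab, tokenizer and processing pipeline.
if __name__ == '__main__':
    nlp = English()
    doc = nlp(u'Apples and oranges are similar.')
    for token in doc:
        # Print the annotations produced by the default pipeline.
        print(token.text, token.tag_)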