Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-11 04:08:09 +03:00)
2f981d5af1
Remove corpus-specific tag maps from the language data for languages without custom tokenizers. For languages with custom word segmenters that also provide tags (Japanese and Korean), the tag maps for the custom tokenizers are kept as the default. The default tag map for languages without custom tokenizers is now the default tag map from `lang/tag_map.py`, UPOS -> UPOS.
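For illustration only: the "UPOS -> UPOS" default described above is an identity mapping in which each coarse Universal POS tag simply maps to itself. A minimal sketch, assuming the spaCy v2-style tag-map format where each tag key maps to a dict of attributes (the actual default lives in `lang/tag_map.py`):

from spacy.symbols import POS, ADJ, ADP, ADV, AUX, CCONJ, DET, INTJ, NOUN
from spacy.symbols import NUM, PART, PRON, PROPN, PUNCT, SCONJ, SYM, VERB, X

# Identity mapping: every Universal POS tag maps to itself as the coarse tag.
# Sketch of the idea from the commit message, not the verbatim spaCy file.
TAG_MAP = {
    "ADJ": {POS: ADJ},
    "ADP": {POS: ADP},
    "ADV": {POS: ADV},
    "AUX": {POS: AUX},
    "CCONJ": {POS: CCONJ},
    "DET": {POS: DET},
    "INTJ": {POS: INTJ},
    "NOUN": {POS: NOUN},
    "NUM": {POS: NUM},
    "PART": {POS: PART},
    "PRON": {POS: PRON},
    "PROPN": {POS: PROPN},
    "PUNCT": {POS: PUNCT},
    "SCONJ": {POS: SCONJ},
    "SYM": {POS: SYM},
    "VERB": {POS: VERB},
    "X": {POS: X},
}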
36 lines
1.1 KiB
Python
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .punctuation import TOKENIZER_SUFFIXES

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups

# Lemma data note:
# Original pairs downloaded from http://www.lexiconista.com/datasets/lemmatization/
# Replaced characters using cedillas with the correct ones (ș and ț)


class RomanianDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "ro"
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS
    prefixes = TOKENIZER_PREFIXES
    suffixes = TOKENIZER_SUFFIXES
    infixes = TOKENIZER_INFIXES


class Romanian(Language):
    lang = "ro"
    Defaults = RomanianDefaults


__all__ = ["Romanian"]
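A short usage sketch, assuming this module is importable as spacy.lang.ro under the spaCy v2-style API; the sample sentence is illustrative only:

from spacy.lang.ro import Romanian

# Create a blank Romanian pipeline using the defaults defined above
# (tokenizer exceptions, stop words, punctuation rules).
nlp = Romanian()
doc = nlp("Aceasta este o propoziție în limba română.")
print([token.text for token in doc])
print([token.is_stop for token in doc])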