mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-11 20:28:20 +03:00
2f981d5af1
Remove corpus-specific tag maps from the language data for languages without custom tokenizers. For languages with custom word segmenters that also provide tags (Japanese and Korean), the tag maps for the custom tokenizers are kept as the default. The default tag maps for languages without custom tokenizers are now the default tag map from `lang/tag_map.py`, UPOS -> UPOS.
36 lines
1.1 KiB
Python
36 lines
1.1 KiB
Python
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_INFIXES
from .punctuation import TOKENIZER_SUFFIXES
from .stop_words import STOP_WORDS
from .morph_rules import MORPH_RULES
from .syntax_iterators import SYNTAX_ITERATORS
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class NorwegianDefaults(Language.Defaults):
    """Language defaults for Norwegian Bokmål ("nb").

    Bundles the tokenizer rules, stop words, morphology rules and
    syntax iterators shipped with the Norwegian language data.
    """

    # Copy the shared lexical attribute getters, then override the
    # language code and extend the NORM getter with the base norm
    # exception lookups.
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "nb"
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )

    # Shared tokenizer exceptions merged with the Norwegian-specific ones.
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)

    # Punctuation handling for the tokenizer.
    prefixes = TOKENIZER_PREFIXES
    suffixes = TOKENIZER_SUFFIXES
    infixes = TOKENIZER_INFIXES

    # Remaining language data tables.
    stop_words = STOP_WORDS
    morph_rules = MORPH_RULES
    syntax_iterators = SYNTAX_ITERATORS


class Norwegian(Language):
    """The Norwegian Bokmål language pipeline ("nb")."""

    lang = "nb"
    Defaults = NorwegianDefaults


__all__ = ["Norwegian"]
|