Mirror of https://github.com/explosion/spaCy.git, synced 2024-11-14 13:47:13 +03:00
e0cf4796a5
* Add default to util.get_entry_point
* Tidy up entry points
* Read lookups from entry points
* Remove lookup tables and related tests
* Add lookups install option
* Remove lemmatizer tests
* Remove logic to process language data files
* Update setup.cfg
51 lines
1.5 KiB
Python
# coding: utf8
from __future__ import unicode_literals

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .norm_exceptions import NORM_EXCEPTIONS
from .punctuation import TOKENIZER_INFIXES
from .tag_map import TAG_MAP
from .stop_words import STOP_WORDS
from .syntax_iterators import SYNTAX_ITERATORS

from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ..norm_exceptions import BASE_NORMS
from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups


class GermanDefaults(Language.Defaults):
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters[LANG] = lambda text: "de"
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], NORM_EXCEPTIONS, BASE_NORMS
    )
    tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
    infixes = TOKENIZER_INFIXES
    tag_map = TAG_MAP
    stop_words = STOP_WORDS
    syntax_iterators = SYNTAX_ITERATORS
    single_orth_variants = [
        {"tags": ["$("], "variants": ["…", "..."]},
        {"tags": ["$("], "variants": ["-", "—", "–", "--", "---", "——"]},
    ]
    paired_orth_variants = [
        {
            "tags": ["$("],
            "variants": [("'", "'"), (",", "'"), ("‚", "‘"), ("›", "‹"), ("‹", "›")],
        },
        {
            "tags": ["$("],
            "variants": [("``", "''"), ('"', '"'), ("„", "“"), ("»", "«"), ("«", "»")],
        },
    ]


class German(Language):
    lang = "de"
    Defaults = GermanDefaults


__all__ = ["German"]
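For context, a minimal usage sketch of the module shown above (by its relative imports this is the German language package, spacy/lang/de/__init__.py, in a spaCy v2.x tree). The German class bundles GermanDefaults, so constructing it directly gives a blank German pipeline with these tokenizer exceptions, norm lookups, stop words and orth variants applied. The example sentence is only illustrative; spacy.blank("de") resolves to the same class through the language registry.

    # Minimal usage sketch, assuming spaCy v2.x is installed and exposes
    # this module as spacy.lang.de.
    from spacy.lang.de import German

    # A blank pipeline: tokenizer with German defaults, no trained components.
    nlp = German()
    doc = nlp("Das ist ein Satz.")
    print([token.text for token in doc])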