mirror of https://github.com/explosion/spaCy.git
synced 2024-12-27 10:26:35 +03:00
5ca7dd0f94
* Improve load_language_data helper
* WIP: Add Lookups implementation
* Start moving lemma data over to JSON
* WIP: move data over for more languages
* Convert more languages
* Fix lemmatizer fixtures in tests
* Finish conversion
* Auto-format JSON files
* Fix test for now
* Make sure tables are stored on instance
41 lines
1.3 KiB
Python
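The commit message above describes moving the lemmatizer data out of Python modules into the JSON files referenced under resources in the file below, managed through a new Lookups container. As a hedged illustration only, this sketch shows how such a JSON table could be loaded and queried; the spacy.lookups.Lookups API shown (add_table/get_table) is the one that later stabilized in spaCy 2.2, and the table layout (keyed by coarse POS tag) is an assumption, not code from this commit.

import json

from spacy.lookups import Lookups  # API as it later shipped; an assumption for this commit

# Load the Persian lemma exceptions that now live in JSON rather than a .py module
with open("lemmatizer/lemma_exc.json", encoding="utf8") as f:
    lemma_exc = json.load(f)

# Keep the table on a Lookups instance so it is stored with the pipeline
lookups = Lookups()
lookups.add_table("lemma_exc", lemma_exc)

# A lemmatizer would consult exceptions like these before falling back to suffix rules
exc_table = lookups.get_table("lemma_exc")
print(exc_table.get("verb", {}).get("رفتم", "<no exception entry>"))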
# coding: utf8
from __future__ import unicode_literals

from ...language import Language
from ...attrs import LANG, NORM
from ...util import update_exc, add_lookups
from ..norm_exceptions import BASE_NORMS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .tag_map import TAG_MAP
from .punctuation import TOKENIZER_SUFFIXES


class PersianDefaults(Language.Defaults):
    # Start from the shared lexical attribute getters, then layer the
    # Persian-specific ones on top
    lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
    lex_attr_getters.update(LEX_ATTRS)
    lex_attr_getters[NORM] = add_lookups(
        Language.Defaults.lex_attr_getters[NORM], BASE_NORMS
    )
    lex_attr_getters[LANG] = lambda text: "fa"
    tokenizer_exceptions = update_exc(TOKENIZER_EXCEPTIONS)
    stop_words = STOP_WORDS
    tag_map = TAG_MAP
    suffixes = TOKENIZER_SUFFIXES
    # Persian is written right-to-left and has no case distinction
    writing_system = {"direction": "rtl", "has_case": False, "has_letters": True}
    # extracted from Mojgan Seraji's Persian Universal Dependencies Corpus
    resources = {
        "lemma_rules": "lemmatizer/lemma_rules.json",
        "lemma_index": "lemmatizer/lemma_index.json",
        "lemma_exc": "lemmatizer/lemma_exc.json",
    }


class Persian(Language):
    lang = "fa"
    Defaults = PersianDefaults


__all__ = ["Persian"]
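For context, a minimal usage sketch of the class defined above, following the spaCy v2-era pattern of instantiating a blank language class directly (no statistical model, just the tokenizer, stop words, and lexical attributes configured in PersianDefaults); the example sentence is arbitrary.

from spacy.lang.fa import Persian

nlp = Persian()  # blank Persian pipeline built from PersianDefaults
doc = nlp("این یک جمله است.")

for token in doc:
    # is_stop is driven by STOP_WORDS; norm_ by the NORM getter added above
    print(token.text, token.is_stop, token.norm_)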