spaCy/spacy/lang/es/__init__.py
Adriane Boyd d17afb4826
Add Spanish rule-based lemmatizer (#6833)
* Initial Spanish lemmatizer

* Handle merged verb+pron(s) multi-word tokens

* Use VERB for AUX rule lookup

* Add morph to lemma cache key

* Fix aux lookups, minor refactoring

* Improve verb+pron handling

* Move verb+pron handling into its own method
* Check for exceptions (primarily for se)
* Collect pronouns in the same (not reversed) order

* Only add modified possible lemmas
2021-01-27 19:21:35 +08:00
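For context, the verb+pron handling mentioned in the commit message strips enclitic pronouns off merged verb forms before the verb part is lemmatized, keeping the pronouns in their surface (not reversed) order. Below is a rough, illustrative sketch of that idea only; it is not the actual SpanishLemmatizer code, which lives in spacy/lang/es/lemmatizer.py and additionally checks the token's morphology, handles exceptions (primarily for "se"), and uses the VERB rules when looking up AUX.

# Illustrative sketch only (not the real SpanishLemmatizer): split trailing
# clitic pronouns off a merged verb+pron form, collecting them in surface
# (not reversed) order. The real lemmatizer only applies this to tokens the
# tagger has analysed as merged verb+pronoun multi-word tokens.
ENCLITICS = ["nos", "los", "las", "les", "os", "me", "te", "se", "lo", "la", "le"]


def split_enclitics(form: str):
    prons = []
    stripped = True
    while stripped:
        stripped = False
        for pron in ENCLITICS:
            if form.endswith(pron) and len(form) > len(pron):
                prons.insert(0, pron)  # prepend so pronouns stay in surface order
                form = form[: -len(pron)]
                stripped = True
                break
    return form, prons


print(split_enclitics("dámelo"))  # ('dá', ['me', 'lo'])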


from typing import Optional

from thinc.api import Model

from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
from .stop_words import STOP_WORDS
from .lex_attrs import LEX_ATTRS
from .lemmatizer import SpanishLemmatizer
from .syntax_iterators import SYNTAX_ITERATORS
from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
from ...language import Language


class SpanishDefaults(Language.Defaults):
    tokenizer_exceptions = TOKENIZER_EXCEPTIONS
    infixes = TOKENIZER_INFIXES
    suffixes = TOKENIZER_SUFFIXES
    lex_attr_getters = LEX_ATTRS
    syntax_iterators = SYNTAX_ITERATORS
    stop_words = STOP_WORDS


class Spanish(Language):
    lang = "es"
    Defaults = SpanishDefaults


@Spanish.factory(
    "lemmatizer",
    assigns=["token.lemma"],
    default_config={"model": None, "mode": "rule", "overwrite": False},
    default_score_weights={"lemma_acc": 1.0},
)
def make_lemmatizer(
    nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool
):
    return SpanishLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)


__all__ = ["Spanish"]
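
A minimal usage sketch for the "lemmatizer" factory registered above (not part of this module). It assumes spaCy and the Spanish tables from spacy-lookups-data are installed; in a realistic pipeline a tagger or morphologizer runs before the lemmatizer so that tokens carry the POS and morph features the Spanish rules rely on.

# Minimal usage sketch; assumes spacy-lookups-data is installed.
import spacy

nlp = spacy.blank("es")  # builds the Spanish class defined above
lemmatizer = nlp.add_pipe("lemmatizer", config={"mode": "rule", "overwrite": False})
lemmatizer.initialize()  # loads the lookup/rule tables for Spanish

# Without a preceding tagger/morphologizer the rule lemmatizer has no POS
# information to work with, so use a trained Spanish pipeline in practice.
doc = nlp("Las casas")
print([(token.text, token.lemma_) for token in doc])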