mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-14 13:47:13 +03:00
e962784531
* Add Lemmatizer and simplify related components * Add `Lemmatizer` pipe with `lookup` and `rule` modes using the `Lookups` tables. * Reduce `Tagger` to a simple tagger that sets `Token.tag` (no pos or lemma) * Reduce `Morphology` to only keep track of morph tags (no tag map, lemmatizer, or morph rules) * Remove lemmatizer from `Vocab` * Adjust many many tests Differences: * No default lookup lemmas * No special treatment of TAG in `from_array` and similar required * Easier to modify labels in a `Tagger` * No extra strings added from morphology / tag map * Fix test * Initial fix for Lemmatizer config/serialization * Adjust init test to be more generic * Adjust init test to force empty Lookups * Add simple cache to rule-based lemmatizer * Convert language-specific lemmatizers Convert language-specific lemmatizers to component lemmatizers. Remove previous lemmatizer class. * Fix French and Polish lemmatizers * Remove outdated UPOS conversions * Update Russian lemmatizer init in tests * Add minimal init/run tests for custom lemmatizers * Add option to overwrite existing lemmas * Update mode setting, lookup loading, and caching * Make `mode` an immutable property * Only enforce strict `load_lookups` for known supported modes * Move caching into individual `_lemmatize` methods * Implement strict when lang is not found in lookups * Fix tables/lookups in make_lemmatizer * Reallow provided lookups and allow for stricter checks * Add lookups asset to all Lemmatizer pipe tests * Rename lookups in lemmatizer init test * Clean up merge * Refactor lookup table loading * Add helper `load_lemmatizer_lookups` that loads required and optional lookups tables based on settings provided by a config. 
Additional slight refactor of lookups: * Add `Lookups.set_table` to set a table from a provided `Table` * Reorder class definitions to be able to specify type as `Table` * Move registry assets into test methods * Refactor lookups tables config Use class methods within `Lemmatizer` to provide the config for particular modes and to load the lookups from a config. * Add pipe and score to lemmatizer * Simplify Tagger.score * Add missing import * Clean up imports and auto-format * Remove unused kwarg * Tidy up and auto-format * Update docstrings for Lemmatizer Update docstrings for Lemmatizer. Additionally modify `is_base_form` API to take `Token` instead of individual features. * Update docstrings * Remove tag map values from Tagger.add_label * Update API docs * Fix relative link in Lemmatizer API docs
88 lines
2.9 KiB
Python
88 lines
2.9 KiB
Python
from typing import List, Dict
|
|
|
|
from ...pipeline import Lemmatizer
|
|
from ...tokens import Token
|
|
|
|
|
|
class FrenchLemmatizer(Lemmatizer):
    """
    French language lemmatizer applies the default rule based lemmatization
    procedure with some modifications for better French language support.

    The parts of speech 'ADV', 'PRON', 'DET', 'ADP' and 'AUX' are added to use
    the rule-based lemmatization. As a last resort, the lemmatizer checks in
    the lookup table.
    """

    @classmethod
    def get_lookups_config(cls, mode: str) -> Dict:
        """Return the lookups table configuration for the given mode.

        mode (str): The lemmatizer mode.
        RETURNS (Dict): Required and optional table names for the mode.
            In "rule" mode, French additionally requires "lemma_lookup"
            (used as a fallback) on top of the usual rule tables; any
            other mode defers to the base class.
        """
        if mode == "rule":
            return {
                "required_tables": [
                    "lemma_lookup",
                    "lemma_rules",
                    "lemma_exc",
                    "lemma_index",
                ],
                "optional_tables": [],
            }
        else:
            return super().get_lookups_config(mode)

    def rule_lemmatize(self, token: Token) -> List[str]:
        """Lemmatize using rules and exceptions, with a lookup-table fallback.

        token (Token): The token to lemmatize.
        RETURNS (List[str]): Candidate lemmas, deduplicated with order
            preserved. Results are cached per (orth, pos) pair.
        """
        cache_key = (token.orth, token.pos)
        if cache_key in self.cache:
            return self.cache[cache_key]
        string = token.text
        univ_pos = token.pos_.lower()
        # Whitespace-like tokens: the lowercased text is its own lemma.
        if univ_pos in ("", "eol", "space"):
            return [string.lower()]
        # Fall back to the lookup table when rules are unavailable or the
        # POS is not covered by the rule-based procedure.
        elif "lemma_rules" not in self.lookups or univ_pos not in (
            "noun",
            "verb",
            "adj",
            "adp",
            "adv",
            "aux",
            "cconj",
            "det",
            "pron",
            "punct",
            "sconj",
        ):
            return self.lookup_lemmatize(token)
        index_table = self.lookups.get_table("lemma_index", {})
        exc_table = self.lookups.get_table("lemma_exc", {})
        rules_table = self.lookups.get_table("lemma_rules", {})
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        index = index_table.get(univ_pos, {})
        exceptions = exc_table.get(univ_pos, {})
        rules = rules_table.get(univ_pos, [])
        string = string.lower()
        forms = []
        # A form already listed in the index is considered its own lemma.
        if string in index:
            forms.append(string)
            self.cache[cache_key] = forms
            return forms
        forms.extend(exceptions.get(string, []))
        oov_forms = []
        if not forms:
            # Apply suffix-rewrite rules; forms confirmed by the index (or
            # non-alphabetic ones) are accepted, the rest are kept as
            # out-of-vocabulary candidates.
            for old, new in rules:
                if string.endswith(old):
                    form = string[: len(string) - len(old)] + new
                    if not form:
                        pass
                    elif form in index or not form.isalpha():
                        forms.append(form)
                    else:
                        oov_forms.append(form)
        if not forms:
            forms.extend(oov_forms)
        # Last resorts: the lookup table, then the lowercased surface form.
        if not forms and string in lookup_table.keys():
            forms.append(self.lookup_lemmatize(token)[0])
        if not forms:
            forms.append(string)
        # BUGFIX: `list(set(forms))` made the result order nondeterministic
        # across runs (set order depends on string hashing) and that order
        # was then cached. `dict.fromkeys` deduplicates while preserving
        # the order in which candidate lemmas were produced.
        forms = list(dict.fromkeys(forms))
        self.cache[cache_key] = forms
        return forms