from typing import List, Tuple

from ...pipeline import Lemmatizer
from ...tokens import Token


class FrenchLemmatizer(Lemmatizer):
    """
    The French language lemmatizer applies the default rule-based
    lemmatization procedure with some modifications for better French
    language support.

    The parts of speech 'ADV', 'PRON', 'DET', 'ADP' and 'AUX' are added to use
    the rule-based lemmatization. As a last resort, the lemmatizer checks the
    lookup table.
    """

    @classmethod
    def get_lookups_config(cls, mode: str) -> Tuple[List[str], List[str]]:
        # In "rule" mode, all four lookup tables are required; other modes
        # use the defaults from the base Lemmatizer.
        if mode == "rule":
            required = ["lemma_lookup", "lemma_rules", "lemma_exc", "lemma_index"]
            return (required, [])
        else:
            return super().get_lookups_config(mode)
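
    # A hypothetical wiring sketch, not part of the original module: the
    # lemma_* tables above are distributed via the spacy-lookups-data
    # package, and with it installed, rule mode can be enabled on a blank
    # French pipeline roughly like this:
    #
    #     import spacy
    #
    #     nlp = spacy.blank("fr")
    #     nlp.add_pipe("lemmatizer", config={"mode": "rule"})
    #     nlp.initialize()  # loads the required lemma_* tables
    #
    # Rule mode reads Token.pos, so tokens also need POS tags (e.g. from a
    # trained tagger or morphologizer) before the lemmas are meaningful.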

    def rule_lemmatize(self, token: Token) -> List[str]:
        # Reuse a cached result for this (orth, pos) pair if available.
        cache_key = (token.orth, token.pos)
        if cache_key in self.cache:
            return self.cache[cache_key]
        string = token.text
        univ_pos = token.pos_.lower()
        if univ_pos in ("", "eol", "space"):
            return [string.lower()]
        # Fall back to the lookup table when no rules are available or the
        # part of speech is not covered by the rule tables.
        elif "lemma_rules" not in self.lookups or univ_pos not in (
            "noun",
            "verb",
            "adj",
            "adp",
            "adv",
            "aux",
            "cconj",
            "det",
            "pron",
            "punct",
            "sconj",
        ):
            return self.lookup_lemmatize(token)
        index_table = self.lookups.get_table("lemma_index", {})
        exc_table = self.lookups.get_table("lemma_exc", {})
        rules_table = self.lookups.get_table("lemma_rules", {})
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        index = index_table.get(univ_pos, {})
        exceptions = exc_table.get(univ_pos, {})
        rules = rules_table.get(univ_pos, [])
        string = string.lower()
        forms = []
        # A form that is listed in the index is already a lemma.
        if string in index:
            forms.append(string)
            self.cache[cache_key] = forms
            return forms
        forms.extend(exceptions.get(string, []))
        oov_forms = []
        if not forms:
            # Apply the suffix rules: keep forms that appear in the index or
            # are non-alphabetic, and set the rest aside as out-of-vocabulary.
            for old, new in rules:
                if string.endswith(old):
                    form = string[: len(string) - len(old)] + new
                    if not form:
                        pass
                    elif form in index or not form.isalpha():
                        forms.append(form)
                    else:
                        oov_forms.append(form)
        if not forms:
            forms.extend(oov_forms)
        # As a last resort, check the lookup table, then fall back to the
        # lowercased surface form itself.
        if not forms and string in lookup_table.keys():
            forms.append(self.lookup_lemmatize(token)[0])
        if not forms:
            forms.append(string)
        forms = list(set(forms))
        self.cache[cache_key] = forms
        return forms
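
# A minimal end-to-end sketch (an assumption, not part of this module): with
# a trained French pipeline such as "fr_core_news_sm" installed, the
# lemmatizer runs as a regular pipeline component:
#
#     import spacy
#
#     nlp = spacy.load("fr_core_news_sm")
#     doc = nlp("Les chats mangent les souris.")
#     print([(t.text, t.lemma_) for t in doc])
#     # e.g. [('Les', 'le'), ('chats', 'chat'), ('mangent', 'manger'), ...]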