spaCy/spacy/lang/pl/lemmatizer.py
Adriane Boyd e962784531
Add Lemmatizer and simplify related components (#5848)
* Add Lemmatizer and simplify related components

* Add `Lemmatizer` pipe with `lookup` and `rule` modes using the
`Lookups` tables (a short usage sketch follows this list).
* Reduce `Tagger` to a simple tagger that sets `Token.tag` (no pos or lemma)
* Reduce `Morphology` to only keep track of morph tags (no tag map, lemmatizer,
or morph rules)
* Remove lemmatizer from `Vocab`
* Adjust many many tests
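
A rough sketch of the new component in use (not code from this commit;
it assumes the spaCy v3 `add_pipe` config API and that the language's lookup
tables are available from the spacy-lookups-data package):

# Hypothetical usage sketch: add the new Lemmatizer component in either mode.
import spacy

nlp = spacy.blank("en")
# "lookup" mode: lemmas come straight from a lookup table
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
# "rule" mode would instead apply suffix rules plus exception tables:
# nlp.add_pipe("lemmatizer", config={"mode": "rule"})
nlp.initialize()  # loads the required lookup tables

doc = nlp("The cats were running")
print([token.lemma_ for token in doc])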

Differences:

* No default lookup lemmas
* No special treatment of TAG in `from_array` and similar required
* Easier to modify labels in a `Tagger`
* No extra strings added from morphology / tag map

* Fix test

* Initial fix for Lemmatizer config/serialization

* Adjust init test to be more generic

* Adjust init test to force empty Lookups

* Add simple cache to rule-based lemmatizer

* Convert language-specific lemmatizers

Convert language-specific lemmatizers to component lemmatizers. Remove
previous lemmatizer class.
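
For illustration, a simplified sketch of what registering a language-specific
lemmatizer as a component factory can look like under the v3 factory API
(names and defaults below are illustrative, not copied from the PR):

# Illustrative sketch of registering a language-specific lemmatizer subclass
# as a pipeline factory. The factory name "pl_lookup_lemmatizer" is invented
# here to avoid clashing with the built-in "lemmatizer" factory.
from typing import Optional

from thinc.api import Model

from spacy.lang.pl import Polish
from spacy.lang.pl.lemmatizer import PolishLemmatizer
from spacy.language import Language


@Polish.factory(
    "pl_lookup_lemmatizer",
    assigns=["token.lemma"],
    default_config={"model": None, "mode": "lookup", "overwrite": False},
)
def make_pl_lemmatizer(nlp: Language, model: Optional[Model], name: str, mode: str, overwrite: bool):
    return PolishLemmatizer(nlp.vocab, model, name, mode=mode, overwrite=overwrite)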

* Fix French and Polish lemmatizers

* Remove outdated UPOS conversions

* Update Russian lemmatizer init in tests

* Add minimal init/run tests for custom lemmatizers

* Add option to overwrite existing lemmas

* Update mode setting, lookup loading, and caching

* Make `mode` an immutable property
* Only enforce strict `load_lookups` for known supported modes
* Move caching into individual `_lemmatize` methods
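
A hedged sketch of the pattern described above (simplified names, not the
actual implementation):

# Illustrative only: a read-only `mode` property plus a per-method cache keyed
# on the token's text and coarse POS tag.
from typing import Dict, List, Tuple


class SketchLemmatizer:
    def __init__(self, mode: str = "lookup"):
        self._mode = mode
        self.cache: Dict[Tuple[str, str], List[str]] = {}

    @property
    def mode(self) -> str:
        # no setter, so the mode cannot be changed after construction
        return self._mode

    def rule_lemmatize(self, text: str, pos: str) -> List[str]:
        key = (text, pos)
        if key in self.cache:
            return self.cache[key]
        lemmas = [text.lower()]  # stand-in for the real rule-based logic
        self.cache[key] = lemmas
        return lemmas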

* Implement strict when lang is not found in lookups

* Fix tables/lookups in make_lemmatizer

* Reallow provided lookups and allow for stricter checks

* Add lookups asset to all Lemmatizer pipe tests

* Rename lookups in lemmatizer init test

* Clean up merge

* Refactor lookup table loading

* Add helper function `load_lemmatizer_lookups` that loads required and
optional lookups tables based on settings provided by a config.

Additional slight refactor of lookups:

* Add `Lookups.set_table` to set a table from a provided `Table`
* Reorder class definitions to be able to specify type as `Table`
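
For example (a sketch; the table name and contents are invented):

# Sketch of attaching a prebuilt Table to a Lookups container via set_table.
from spacy.lookups import Lookups, Table

lookups = Lookups()
table = Table.from_dict({"cats": "cat", "ran": "run"}, name="lemma_lookup")
lookups.set_table("lemma_lookup", table)
print(lookups.get_table("lemma_lookup").get("cats"))  # "cat"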

* Move registry assets into test methods

* Refactor lookups tables config

Use class methods within `Lemmatizer` to provide the config for
particular modes and to load the lookups from a config.

* Add pipe and score to lemmatizer

* Simplify Tagger.score

* Add missing import

* Clean up imports and auto-format

* Remove unused kwarg

* Tidy up and auto-format

* Update docstrings for Lemmatizer

Update docstrings for Lemmatizer.

Additionally modify `is_base_form` API to take `Token` instead of
individual features.
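
A minimal sketch of the new-style hook (the base-form check itself is invented
for illustration):

# is_base_form now receives the Token, which carries its own POS and
# morphological features, instead of separate univ_pos/morphology arguments.
from spacy.pipeline import Lemmatizer
from spacy.tokens import Token


class CustomLemmatizer(Lemmatizer):
    def is_base_form(self, token: Token) -> bool:
        # e.g. treat a singular noun as already being its own lemma
        morph = token.morph.to_dict()
        return token.pos_ == "NOUN" and morph.get("Number") == "Sing"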

* Update docstrings

* Remove tag map values from Tagger.add_label

* Update API docs

* Fix relative link in Lemmatizer API docs
2020-08-07 15:27:13 +02:00

92 lines · 3.5 KiB · Python

from typing import List, Dict

from ...pipeline import Lemmatizer
from ...tokens import Token


class PolishLemmatizer(Lemmatizer):
    # This lemmatizer implements lookup lemmatization based on the Morfeusz
    # dictionary (morfeusz.sgjp.pl/en) by Institute of Computer Science PAS.
    # It utilizes some prefix based improvements for verb and adjective
    # lemmatization, as well as case-sensitive lemmatization for nouns.

    @classmethod
    def get_lookups_config(cls, mode: str) -> Dict:
        if mode == "lookup":
            return {
                "required_tables": [
                    "lemma_lookup_adj",
                    "lemma_lookup_adp",
                    "lemma_lookup_adv",
                    "lemma_lookup_aux",
                    "lemma_lookup_noun",
                    "lemma_lookup_num",
                    "lemma_lookup_part",
                    "lemma_lookup_pron",
                    "lemma_lookup_verb",
                ]
            }
        else:
            return super().get_lookups_config(mode)

    def lookup_lemmatize(self, token: Token) -> List[str]:
        string = token.text
        univ_pos = token.pos_
        morphology = token.morph.to_dict()
        lookup_pos = univ_pos.lower()
        if univ_pos == "PROPN":
            lookup_pos = "noun"
        lookup_table = self.lookups.get_table("lemma_lookup_" + lookup_pos, {})
        if univ_pos == "NOUN":
            return self.lemmatize_noun(string, morphology, lookup_table)
        if univ_pos != "PROPN":
            string = string.lower()
        if univ_pos == "ADJ":
            return self.lemmatize_adj(string, morphology, lookup_table)
        elif univ_pos == "VERB":
            return self.lemmatize_verb(string, morphology, lookup_table)
        return [lookup_table.get(string, string.lower())]

    def lemmatize_adj(
        self, string: str, morphology: dict, lookup_table: Dict[str, str]
    ) -> List[str]:
        # this method utilizes different procedures for adjectives
        # with 'nie' and 'naj' prefixes
        if string[:3] == "nie":
            search_string = string[3:]
            if search_string[:3] == "naj":
                naj_search_string = search_string[3:]
                if naj_search_string in lookup_table:
                    return [lookup_table[naj_search_string]]
            if search_string in lookup_table:
                return [lookup_table[search_string]]
        if string[:3] == "naj":
            naj_search_string = string[3:]
            if naj_search_string in lookup_table:
                return [lookup_table[naj_search_string]]
        return [lookup_table.get(string, string)]

    def lemmatize_verb(
        self, string: str, morphology: dict, lookup_table: Dict[str, str]
    ) -> List[str]:
        # this method utilizes a different procedure for verbs
        # with 'nie' prefix
        if string[:3] == "nie":
            search_string = string[3:]
            if search_string in lookup_table:
                return [lookup_table[search_string]]
        return [lookup_table.get(string, string)]

    def lemmatize_noun(
        self, string: str, morphology: dict, lookup_table: Dict[str, str]
    ) -> List[str]:
        # this method is case-sensitive, in order to work
        # for incorrectly tagged proper names
        if string != string.lower():
            if string.lower() in lookup_table:
                return [lookup_table[string.lower()]]
            elif string in lookup_table:
                return [lookup_table[string]]
            return [string.lower()]
        return [lookup_table.get(string, string)]
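
A hypothetical usage sketch (not part of this module), assuming the Polish
lookup tables are installed via the spacy-lookups-data package and the spaCy
v3 pipeline API:

import spacy

nlp = spacy.blank("pl")
nlp.add_pipe("lemmatizer")  # Polish registers its own lemmatizer factory
nlp.initialize()            # loads the per-POS lookup tables

doc = nlp("Koty śpią")
print([(token.text, token.lemma_) for token in doc])
# With no tagger/morphologizer in the pipeline, token.pos_ is empty, so
# lookup_lemmatize falls back to the lowercased form; in a full pipeline the
# coarse POS tag selects the matching per-POS table.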