from typing import Optional, Any, Dict, Iterator

from thinc.api import Config

from .stop_words import STOP_WORDS
from .tag_map import TAG_MAP
from .lex_attrs import LEX_ATTRS
from ...language import Language
from ...tokens import Doc
from ...compat import copy_reg
from ...symbols import POS
from ...util import DummyTokenizer, registry

DEFAULT_CONFIG = """
[nlp]

[nlp.tokenizer]
@tokenizers = "spacy.ko.KoreanTokenizer"
"""

@registry.tokenizers("spacy.ko.KoreanTokenizer")
def create_tokenizer():
    def korean_tokenizer_factory(nlp):
        return KoreanTokenizer(nlp)

    return korean_tokenizer_factory

class KoreanTokenizer(DummyTokenizer):
    def __init__(self, nlp: Optional[Language] = None):
        self.vocab = nlp.vocab
        MeCab = try_mecab_import()
        self.mecab_tokenizer = MeCab("-F%f[0],%f[7]")

    def __del__(self):
        self.mecab_tokenizer.__del__()
    def __call__(self, text: str) -> Doc:
        dtokens = list(self.detailed_tokens(text))
        surfaces = [dt["surface"] for dt in dtokens]
        doc = Doc(self.vocab, words=surfaces, spaces=list(check_spaces(text, surfaces)))
        for token, dtoken in zip(doc, dtokens):
            first_tag, sep, eomi_tags = dtoken["tag"].partition("+")
            token.tag_ = first_tag  # stem or pre-final ending
            token.pos = TAG_MAP[token.tag_][POS]
            token.lemma_ = dtoken["lemma"]
        doc.user_data["full_tags"] = [dt["tag"] for dt in dtokens]
        return doc
    def detailed_tokens(self, text: str) -> Iterator[Dict[str, Any]]:
        # MeCab feature fields: POS tag [0], semantic class [1],
        # final consonant (jongseong) flag [2], reading [3], type [4],
        # first POS [5], last POS [6], expression [7], *
        for node in self.mecab_tokenizer.parse(text, as_nodes=True):
            if node.is_eos():
                break
            surface = node.surface
            feature = node.feature
            tag, _, expr = feature.partition(",")
            lemma, _, remainder = expr.partition("/")
            if lemma == "*":
                lemma = surface
            yield {"surface": surface, "lemma": lemma, "tag": tag}

class KoreanDefaults(Language.Defaults):
    config = Config().from_str(DEFAULT_CONFIG)
    lex_attr_getters = LEX_ATTRS
    stop_words = STOP_WORDS
    writing_system = {"direction": "ltr", "has_case": False, "has_letters": False}

class Korean(Language):
    lang = "ko"
    Defaults = KoreanDefaults

def try_mecab_import():
    try:
        from natto import MeCab

        return MeCab
    except ImportError:
        raise ImportError(
            "Korean support requires [mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), "
            "[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
            "and [natto-py](https://github.com/buruzaemon/natto-py)"
        ) from None

def check_spaces(text, tokens):
    prev_end = -1
    start = 0
    for token in tokens:
        idx = text.find(token, start)
        if prev_end > 0:
            yield prev_end != idx
        prev_end = idx + len(token)
        start = prev_end
    if start > 0:
        yield False

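# Worked example of check_spaces (added for illustration; not in the original
# source). For text "안녕 하세요" split into the tokens below, the generator
# yields one whitespace flag per token, matching the `spaces` argument that
# Doc receives in KoreanTokenizer.__call__:
#
#     >>> list(check_spaces("안녕 하세요", ["안녕", "하", "세요"]))
#     [True, False, False]
#
# Only the first token is followed by whitespace in the original text, so only
# its flag is True.
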
def pickle_korean(instance):
    return Korean, tuple()


copy_reg.pickle(Korean, pickle_korean)

__all__ = ["Korean"]
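

# Usage sketch (illustrative, not part of the original file). It assumes
# mecab-ko, mecab-ko-dic, and natto-py are installed. spacy.blank("ko") builds
# a blank Korean pipeline from DEFAULT_CONFIG above, so the custom
# KoreanTokenizer is used automatically and sets tag, pos, and lemma per token:
#
#     import spacy
#
#     nlp = spacy.blank("ko")
#     doc = nlp("안녕하세요.")
#     print([(t.text, t.tag_, t.lemma_) for t in doc])
#     print(doc.user_data["full_tags"])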