Mirror of https://github.com/explosion/spaCy.git, synced 2024-12-29 19:36:31 +03:00

Commit 1ee6541ab0
* Use morph for extra Japanese tokenizer info

  Previously, Japanese tokenizer info that didn't correspond to Token fields was put in user data. Since spaCy core should avoid touching user data, this moves most information to the Token.morph attribute. It also adds the normalized form, which wasn't exposed before.

  The subtokens, which are a list of full tokens, are still added to user data, except with the default tokenizer granularity. With the default tokenizer settings the subtokens are all None, so in this case the user data is simply not set.

* Update tests

  Also adds a new test for norm data.

* Update docs

* Add Japanese morphologizer factory

  Set the default to `extend=True` so that the morphologizer does not clobber the values set by the tokenizer.

* Use the norm_ field for normalized forms

  Before this commit, normalized forms were put in the "norm" field in the morph attributes. I am not sure why I did that instead of using the token norm; I think I just forgot about it.

* Skip test if sudachipy is not installed

* Fix import

Co-authored-by: Adriane Boyd <adrianeboyd@gmail.com>
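As a rough illustration (not part of the commit), here is a minimal sketch of reading the relocated info, assuming SudachiPy and a Japanese dictionary are installed; the morph key name "Reading" is an assumption about what the tokenizer stores, not something the commit message confirms:

import spacy

# A blank Japanese pipeline uses the SudachiPy-backed tokenizer.
nlp = spacy.blank("ja")
doc = nlp("新しいコンピュータを買いました")

for token in doc:
    # Extra tokenizer info now lives on Token.morph rather than user data.
    print(token.text, token.morph)
    # Normalized forms are exposed on the standard norm_ attribute.
    print(token.text, token.norm_)
    # Individual morph fields can be fetched by key; "Reading" is an
    # assumed field name here.
    print(token.morph.get("Reading"))

Per the factory change described above, a morphologizer added later in a Japanese pipeline defaults to `extend=True`, so it adds to these values rather than overwriting them.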
25 lines · 694 B · Python
import pytest


@pytest.mark.parametrize(
    "word,lemma",
    [("新しく", "新しい"), ("赤く", "赤い"), ("すごく", "すごい"), ("いただきました", "いただく"), ("なった", "なる")],
)
def test_ja_lemmatizer_assigns(ja_tokenizer, word, lemma):
    test_lemma = ja_tokenizer(word)[0].lemma_
    assert test_lemma == lemma


@pytest.mark.parametrize(
    "word,norm",
    [
        ("SUMMER", "サマー"),
        ("食べ物", "食べ物"),
        ("綜合", "総合"),
        ("コンピュータ", "コンピューター"),
    ],
)
def test_ja_lemmatizer_norm(ja_tokenizer, word, norm):
    test_norm = ja_tokenizer(word)[0].norm_
    assert test_norm == norm
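The tests above cover lemma_ and norm_; a sketch, in the same style, of how the relocated morph info could be checked, assuming the tokenizer stores a "Reading" field (the key name is an assumption, not confirmed by the commit):

def test_ja_tokenizer_sets_morph(ja_tokenizer):
    # Extra tokenizer info should now be on Token.morph rather than in
    # user data; "Reading" is an assumed field name.
    token = ja_tokenizer("食べ物")[0]
    assert "Reading" in token.morph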