mirror of
https://github.com/explosion/spaCy.git
synced 2025-02-11 09:00:36 +03:00
Switch to the (confusingly-named) mecab-ko python module for default Korean tokenization. Maintain the previous `natto-py` tokenizer as `spacy.KoreanNattoTokenizer.v1`.
18 lines
622 B
Python
18 lines
622 B
Python
import pytest
|
|
|
|
|
|
@pytest.mark.parametrize(
    "word,lemma", [("새로운", "새롭"), ("빨간", "빨갛"), ("클수록", "크"), ("뭡니까", "뭣"), ("됐다", "되")]
)
def test_ko_lemmatizer_assigns(ko_tokenizer, word, lemma):
    """The default (mecab-ko) tokenizer assigns the dictionary base form as lemma."""
    # Single-word input: the first token's lemma should be the base form.
    assert ko_tokenizer(word)[0].lemma_ == lemma
|
|
|
|
|
|
@pytest.mark.parametrize(
    "word,lemma", [("새로운", "새롭"), ("빨간", "빨갛"), ("클수록", "크"), ("뭡니까", "뭣"), ("됐다", "되")]
)
def test_ko_lemmatizer_natto_assigns(ko_tokenizer_natto, word, lemma):
    """The legacy natto-py tokenizer also assigns the dictionary base form as lemma."""
    # Same expectations as the mecab-ko test: first token carries the base form.
    assert ko_tokenizer_natto(word)[0].lemma_ == lemma
|