import pytest
from thinc.api import ConfigValidationError
from spacy.lang.zh import Chinese, _get_pkuseg_trie_data
# fmt: off
TEXTS = ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",)
JIEBA_TOKENIZER_TESTS = [
    (TEXTS[0],
     ['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
      '的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做',
      '为', '母语', '。']),
]
PKUSEG_TOKENIZER_TESTS = [
    (TEXTS[0],
     ['作为', '语言', '而言', ',', '为', '世界', '使用', '人数', '最多',
      '的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做为',
      '母语', '。']),
]
# fmt: on
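# Note the expected difference between the two segmenters on the same
# sentence above: jieba yields 人 / 数最多 and 做 / 为, while pkuseg yields
# 人数 / 最多 and keeps 做为 together.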


@pytest.mark.parametrize("text", TEXTS)
def test_zh_tokenizer_char(zh_tokenizer_char, text):
    tokens = [token.text for token in zh_tokenizer_char(text)]
    assert tokens == list(text)


@pytest.mark.parametrize("text,expected_tokens", JIEBA_TOKENIZER_TESTS)
def test_zh_tokenizer_jieba(zh_tokenizer_jieba, text, expected_tokens):
    tokens = [token.text for token in zh_tokenizer_jieba(text)]
    assert tokens == expected_tokens


@pytest.mark.parametrize("text,expected_tokens", PKUSEG_TOKENIZER_TESTS)
def test_zh_tokenizer_pkuseg(zh_tokenizer_pkuseg, text, expected_tokens):
    tokens = [token.text for token in zh_tokenizer_pkuseg(text)]
    assert tokens == expected_tokens


def test_zh_tokenizer_pkuseg_user_dict(zh_tokenizer_pkuseg, zh_tokenizer_char):
    user_dict = _get_pkuseg_trie_data(zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie)
    zh_tokenizer_pkuseg.pkuseg_update_user_dict(["nonsense_asdf"])
    updated_user_dict = _get_pkuseg_trie_data(
        zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie
    )
    assert len(user_dict) == len(updated_user_dict) - 1

    # reset user dict
    zh_tokenizer_pkuseg.pkuseg_update_user_dict([], reset=True)
    reset_user_dict = _get_pkuseg_trie_data(
        zh_tokenizer_pkuseg.pkuseg_seg.preprocesser.trie
    )
    assert len(reset_user_dict) == 0

    # warn if not relevant
    with pytest.warns(UserWarning):
        zh_tokenizer_char.pkuseg_update_user_dict(["nonsense_asdf"])


def test_zh_extra_spaces(zh_tokenizer_char):
    # note: three spaces after "I"
    tokens = zh_tokenizer_char("I   like cheese.")
    assert tokens[1].orth_ == "   "


def test_zh_unsupported_segmenter():
    config = {"nlp": {"tokenizer": {"segmenter": "unk"}}}
    with pytest.raises(ConfigValidationError):
        Chinese.from_config(config)


def test_zh_uninitialized_pkuseg():
    config = {"nlp": {"tokenizer": {"segmenter": "char"}}}
    nlp = Chinese.from_config(config)
    nlp.tokenizer.segmenter = "pkuseg"
    with pytest.raises(ValueError):
        nlp("test")
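

# A minimal sketch (not part of the original suite): the same config pattern
# used above should also select the jieba segmenter. The value "jieba" is
# assumed to be valid alongside "char" and "pkuseg", and the test is skipped
# when the jieba package is not installed.
def test_zh_jieba_segmenter_from_config_sketch():
    pytest.importorskip("jieba")
    config = {"nlp": {"tokenizer": {"segmenter": "jieba"}}}
    nlp = Chinese.from_config(config)
    assert nlp.tokenizer.segmenter == "jieba"
    # joining the token texts should reproduce the input (no whitespace here)
    tokens = [token.text for token in nlp("测试")]
    assert "".join(tokens) == "测试"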