mirror of https://github.com/explosion/spaCy.git (synced 2024-11-14 21:57:15 +03:00)
0b9a5f4074
Rework Chinese language initialization

* Create a `ChineseTokenizer` class
* Modify jieba post-processing to handle whitespace correctly
* Modify non-jieba character tokenization to handle whitespace correctly
* Add a `create_tokenizer()` method to `ChineseDefaults`
* Load lexical attributes
* Update Chinese tag_map for UD v2
* Add very basic Chinese tests
* Test tokenization with and without jieba
* Test `like_num` attribute
* Fix try_jieba_import()
* Fix zh code formatting
32 lines
992 B
Python
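The commit message above reworks how the Chinese pipeline is initialized. A minimal usage sketch under those assumptions (the `Chinese` class in `spacy.lang.zh`, and the `use_jieba` flag that the test file below toggles on the tokenizer):

from spacy.lang.zh import Chinese

nlp = Chinese()  # ChineseDefaults.create_tokenizer() builds the jieba-backed tokenizer
doc = nlp("目前世界有五分之一人口做为母语。")
print([t.text for t in doc])  # word-level tokens from jieba segmentation

# Disabling jieba falls back to character-level tokenization,
# mirroring what test_zh_tokenizer checks below.
nlp.tokenizer.use_jieba = False
doc = nlp("目前世界有五分之一人口做为母语。")
print([t.text for t in doc])  # one token per character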
# coding: utf-8
from __future__ import unicode_literals

import pytest

# fmt: off
TOKENIZER_TESTS = [
    ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",
     ['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
      '的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做',
      '为', '母语', '。']),
]
# fmt: on


@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_zh_tokenizer(zh_tokenizer, text, expected_tokens):
    # Without jieba, the tokenizer falls back to one token per character.
    zh_tokenizer.use_jieba = False
    tokens = [token.text for token in zh_tokenizer(text)]
    assert tokens == list(text)

    # With jieba enabled, the text is segmented into words.
    zh_tokenizer.use_jieba = True
    tokens = [token.text for token in zh_tokenizer(text)]
    assert tokens == expected_tokens


def test_extra_spaces(zh_tokenizer):
    # note: three spaces after "I"
    tokens = zh_tokenizer("I   like cheese.")
    # One space acts as the separator; the two extra spaces are kept as a whitespace token.
    assert tokens[1].orth_ == "  "
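The `zh_tokenizer` fixture these tests rely on is not defined in this file; it comes from the shared test conftest. A minimal sketch of such a fixture, assuming spaCy's `get_lang_class` helper and the `create_tokenizer()` method that the commit adds to `ChineseDefaults`:

import pytest

from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def zh_tokenizer():
    # Skip the Chinese tests entirely if jieba is not installed.
    pytest.importorskip("jieba")
    return get_lang_class("zh").Defaults.create_tokenizer()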