spaCy/spacy/tests/lang/zh/test_tokenizer.py

import pytest


# fmt: off
TOKENIZER_TESTS = [
    ("作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。",
     ['作为', '语言', '而言', ',', '为', '世界', '使用', '人', '数最多',
      '的', '语言', ',', '目前', '世界', '有', '五分之一', '人口', '做',
      '为', '母语', '。']),
]
# fmt: on
@pytest.mark.parametrize("text,expected_tokens", TOKENIZER_TESTS)
def test_zh_tokenizer(zh_tokenizer, text, expected_tokens):
    # with jieba disabled, the tokenizer falls back to one token per character
    zh_tokenizer.use_jieba = False
    tokens = [token.text for token in zh_tokenizer(text)]
    assert tokens == list(text)

    # with jieba enabled, the text is segmented into words
    zh_tokenizer.use_jieba = True
    tokens = [token.text for token in zh_tokenizer(text)]
    assert tokens == expected_tokens


def test_extra_spaces(zh_tokenizer):
    # note: three spaces after "I"
    tokens = zh_tokenizer("I   like cheese.")
    # the first space separates the tokens; the two extra spaces
    # become a whitespace token of their own
    assert tokens[1].orth_ == "  "
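

# The `zh_tokenizer` argument above is a pytest fixture provided by the test
# suite's conftest.py. Below is a minimal sketch of such a fixture, assuming
# the spaCy v2-era API (`spacy.util.get_lang_class` and
# `Defaults.create_tokenizer()`); the actual conftest code may differ.
from spacy.util import get_lang_class


@pytest.fixture(scope="session")
def zh_tokenizer():
    # skip the Chinese tests entirely if the jieba segmenter isn't installed
    pytest.importorskip("jieba")
    return get_lang_class("zh").Defaults.create_tokenizer()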