spaCy/spacy/tests/ja/test_tokenizer.py
Paul O'Leary McCann bcf2b9b4f5 Update tagger & tokenizer tests
The tagger test is now parametrized and covers two sentences for broader tag coverage.

The tokenizer tests are updated to reflect differences in tokenization
between IPAdic and Unidic. -POLM
2017-08-22 00:03:11 +09:00
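
The expected segmentations below depend on which dictionary MeCab loads. A minimal sketch of inspecting that difference directly, assuming mecab-python3 is installed (the UniDic path in the commented line is hypothetical and system-dependent):

import MeCab

text = "東京タワーの近くに住んでいます。"

# Wakati (space-separated) output with the default dictionary, commonly
# IPAdic; this matches the expected tokens in the test file below.
print(MeCab.Tagger("-Owakati").parse(text).strip())

# To compare against UniDic, point MeCab at a UniDic install:
# print(MeCab.Tagger("-Owakati -d /usr/local/lib/mecab/dic/unidic").parse(text).strip())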


# coding: utf-8
from __future__ import unicode_literals

import pytest

# Each case pairs an input sentence with the token texts the Japanese
# tokenizer is expected to produce.
TOKENIZER_TESTS = [
    ("日本語だよ", ['日本', '語', 'だ', 'よ']),
    ("東京タワーの近くに住んでいます。", ['東京', 'タワー', 'の', '近く', 'に', '住ん', 'で', 'い', 'ます', '。']),
    ("吾輩は猫である。", ['吾輩', 'は', '猫', 'で', 'ある', '。']),
    ("月に代わって、お仕置きよ!", ['月', 'に', '代わっ', 'て', '、', 'お', '仕置き', 'よ', '!']),
    ("すもももももももものうち", ['すもも', 'も', 'もも', 'も', 'もも', 'の', 'うち'])
]


@pytest.mark.parametrize('text,expected_tokens', TOKENIZER_TESTS)
def test_japanese_tokenizer(ja_tokenizer, text, expected_tokens):
    tokens = [token.text for token in ja_tokenizer(text)]
    assert tokens == expected_tokens
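
The test relies on a ja_tokenizer fixture defined in the suite's conftest, which is not shown here. A minimal sketch of how such a fixture could look, assuming the spacy.ja.Japanese language class from this era of the codebase (the actual fixture may differ):

import pytest


@pytest.fixture
def ja_tokenizer():
    # Skip the Japanese tests when the MeCab bindings are unavailable,
    # since the tokenizer wraps MeCab; import spaCy's Japanese class
    # only after the check so collection doesn't fail.
    pytest.importorskip("MeCab")
    from spacy.ja import Japanese
    return Japanese.Defaults.create_tokenizer()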