From c33619339217dbeff75243d7493dc60685ddf28c Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Thu, 29 Jun 2017 00:09:40 +0900
Subject: [PATCH] Parametrize and extend Japanese tokenizer tests

---
 spacy/tests/ja/test_tokenizer.py | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/spacy/tests/ja/test_tokenizer.py b/spacy/tests/ja/test_tokenizer.py
index 8d45c822d..58700b353 100644
--- a/spacy/tests/ja/test_tokenizer.py
+++ b/spacy/tests/ja/test_tokenizer.py
@@ -3,6 +3,15 @@ from __future__ import unicode_literals
 
 import pytest
 
-def test_japanese_tokenizer(ja_tokenizer):
-    tokens = ja_tokenizer("日本語だよ")
-    assert len(tokens) == 3
+TOKENIZER_TESTS = [
+    ("日本語だよ", ['日本語', 'だ', 'よ']),
+    ("東京タワーの近くに住んでいます。", ['東京', 'タワー', 'の', '近く', 'に', '住ん', 'で', 'い', 'ます', '。']),
+    ("吾輩は猫である。", ['吾輩', 'は', '猫', 'で', 'ある', '。']),
+    ("月に代わって、お仕置きよ!", ['月', 'に', '代わっ', 'て', '、', 'お仕置き', 'よ', '!']),
+    ("すもももももももものうち", ['すもも', 'も', 'もも', 'も', 'もも', 'の', 'うち'])
+]
+
+@pytest.mark.parametrize('text,expected_tokens', TOKENIZER_TESTS)
+def test_japanese_tokenizer(ja_tokenizer, text, expected_tokens):
+    tokens = [token.text for token in ja_tokenizer(text)]
+    assert tokens == expected_tokens
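
Note: the parametrized test relies on the shared ja_tokenizer pytest fixture defined elsewhere in the test suite's conftest, which this patch does not touch. The sketch below shows roughly what such a fixture could look like; the import path (spacy.ja.Japanese), the MeCab importorskip guard, and the Defaults.create_tokenizer() call are assumptions about the surrounding test setup, not code from this change.

    # Hypothetical sketch of the ja_tokenizer fixture assumed by these tests;
    # the real definition lives in the suite's conftest and may differ.
    import pytest


    @pytest.fixture
    def ja_tokenizer():
        # Assumption: the Japanese tokenizer is backed by MeCab, so skip the
        # Japanese tests entirely when the MeCab bindings are not installed.
        pytest.importorskip("MeCab")
        from spacy.ja import Japanese
        return Japanese.Defaults.create_tokenizer()

With such a fixture in place, pytest expands TOKENIZER_TESTS into one test case per (text, expected_tokens) pair, so each failing sentence is reported individually instead of aborting a single monolithic test.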