# Source: spaCy — spacy/tests/lang/ga/test_tokenizer.py
# (GitHub page residue removed; original last modified 2017-09-11)
# coding: utf8
from __future__ import unicode_literals
import pytest
# Irish (ga) tokenizer-exception fixtures: pairs of (raw text, expected tokens).
# Covers the elided-article split ("B'fhearr" -> "B'", "fhearr") and the
# abbreviation "m.sh." ("mar shampla", i.e. "e.g."), which must stay one token.
GA_TOKEN_EXCEPTION_TESTS = [
    ("B'fhearr fanacht as amharc", ["B'", "fhearr", "fanacht", "as", "amharc"]),
    (
        "Daoine a bhfuil Gaeilge acu, m.sh. tusa agus mise",
        ["Daoine", "a", "bhfuil", "Gaeilge", "acu", ",", "m.sh.", "tusa", "agus", "mise"],
    ),
]
@pytest.mark.parametrize('text,expected_tokens', GA_TOKEN_EXCEPTION_TESTS)
def test_tokenizer_handles_exception_cases(ga_tokenizer, text, expected_tokens):
    """Check the Irish tokenizer splits each fixture text into the expected tokens.

    Whitespace tokens are dropped before comparison, so only the visible
    token texts are checked against the fixture's expected list.
    """
    doc = ga_tokenizer(text)
    observed = [token.text for token in doc if not token.is_space]
    assert observed == expected_tokens