From c283e9edfe9618e5b48193dad4b0b1844ffee72a Mon Sep 17 00:00:00 2001
From: Jim O'Regan
Date: Mon, 11 Sep 2017 08:57:48 +0100
Subject: [PATCH] first stab at test

---
 spacy/tests/lang/ga/__init__.py       |  0
 spacy/tests/lang/ga/test_tokenizer.py | 18 ++++++++++++++++++
 2 files changed, 18 insertions(+)
 create mode 100644 spacy/tests/lang/ga/__init__.py
 create mode 100644 spacy/tests/lang/ga/test_tokenizer.py

diff --git a/spacy/tests/lang/ga/__init__.py b/spacy/tests/lang/ga/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/spacy/tests/lang/ga/test_tokenizer.py b/spacy/tests/lang/ga/test_tokenizer.py
new file mode 100644
index 000000000..fe5cb0b2f
--- /dev/null
+++ b/spacy/tests/lang/ga/test_tokenizer.py
@@ -0,0 +1,18 @@
+# coding: utf8
+from __future__ import unicode_literals
+
+import pytest
+
+
+GA_TOKEN_EXCEPTION_TESTS = [
+    ('B\'fhearr fanacht as amharc', ['B\'', 'fhearr', 'fanacht', 'as', 'amharc']),
+    ('Daoine a bhfuil Gaeilge acu, m.sh. tusa agus mise', ['Daoine', 'a', 'bhfuil', 'Gaeilge', 'acu', ',', 'm.sh.', 'tusa', 'agus', 'mise'])
+]
+
+
+@pytest.mark.parametrize('text,expected_tokens', GA_TOKEN_EXCEPTION_TESTS)
+def test_tokenizer_handles_exception_cases(ga_tokenizer, text, expected_tokens):
+    tokens = ga_tokenizer(text)
+    token_list = [token.text for token in tokens if not token.is_space]
+    assert expected_tokens == token_list
+
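
Note: the original draft defined the test data as SV_TOKEN_EXCEPTION_TESTS (apparently copied from the Swedish tests) while the parametrize decorator referenced GA_TOKEN_EXCEPTION_TESTS, which would raise a NameError at collection time; the patch above uses the GA_ name consistently. The test also depends on a ga_tokenizer fixture that this patch does not add, so pytest would report "fixture 'ga_tokenizer' not found" unless it is provided by the suite's shared spacy/tests/conftest.py. A minimal sketch of what that fixture could look like, modelled on the pattern the other per-language tokenizer fixtures in spaCy follow at this time (treat the exact wiring as an assumption, not part of this patch):

# Hypothetical conftest.py addition, not included in the patch above.
import pytest

from spacy.util import get_lang_class


@pytest.fixture
def ga_tokenizer():
    # Look up the Irish language class by its ISO code and build its
    # default tokenizer, so the suite only loads 'ga' when these tests run.
    return get_lang_class('ga').Defaults.create_tokenizer()

With a fixture like that in place, running pytest spacy/tests/lang/ga/test_tokenizer.py would execute both parametrized exception cases.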