diff --git a/spacy/tests/conftest.py b/spacy/tests/conftest.py
index 3d9e0adcc..d21d7d313 100644
--- a/spacy/tests/conftest.py
+++ b/spacy/tests/conftest.py
@@ -68,6 +68,11 @@ def fi_tokenizer():
     return Finnish.Defaults.create_tokenizer()
 
 
+@pytest.fixture
+def sv_tokenizer():
+    return Swedish.Defaults.create_tokenizer()
+
+
 @pytest.fixture
 def stringstore():
     return StringStore()
diff --git a/spacy/tests/regression/test_issue805.py b/spacy/tests/regression/test_issue805.py
new file mode 100644
index 000000000..f23aff426
--- /dev/null
+++ b/spacy/tests/regression/test_issue805.py
@@ -0,0 +1,15 @@
+# encoding: utf8
+from __future__ import unicode_literals
+
+import pytest
+
+SV_TOKEN_EXCEPTION_TESTS = [
+    ('Smörsåsen används bl.a. till fisk', ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']),
+    ('Jag kommer först kl. 13 p.g.a. diverse förseningar', ['Jag', 'kommer', 'först', 'kl.', '13', 'p.g.a.', 'diverse', 'förseningar'])
+]
+
+@pytest.mark.parametrize('text,expected_tokens', SV_TOKEN_EXCEPTION_TESTS)
+def test_issue805(sv_tokenizer, text, expected_tokens):
+    tokens = sv_tokenizer(text)
+    token_list = [token.text for token in tokens if not token.is_space]
+    assert expected_tokens == token_list
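
For anyone reviewing this change, here is a minimal sketch of what the new `sv_tokenizer` fixture and regression test exercise, runnable outside the test suite. The import path `spacy.sv` is an assumption based on the spaCy 1.x package layout and is not part of this diff; the fixture call and the expected tokens are taken directly from the patch above.

```python
# Minimal sketch: verify Swedish abbreviation handling interactively.
# Assumption: spaCy 1.x layout, where the Swedish language class is
# importable from spacy.sv (this import is not confirmed by the diff).
from spacy.sv import Swedish

tokenizer = Swedish.Defaults.create_tokenizer()
doc = tokenizer('Smörsåsen används bl.a. till fisk')

# Abbreviations such as 'bl.a.' should survive as single tokens,
# which is exactly what test_issue805 asserts.
print([token.text for token in doc])
# expected: ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']
```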