Mirror of https://github.com/explosion/spaCy.git, synced 2024-11-12 04:38:28 +03:00
1914c488d3
* Exceptions for single-letter words ending a sentence: sentences ending in "i." (as in "... peka i.") or "m." (as in "... än 2000 m.") should be tokenized as two separate tokens.
* Add test
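For context, a minimal sketch of the behaviour the commit message describes, assuming spaCy v3's blank Swedish pipeline (spacy.blank("sv")); the example sentence is invented for illustration and is not part of the test file below:

import spacy

# Assumption: a blank Swedish pipeline carries the tokenizer rules this
# commit touches; the sentence is an invented example.
nlp = spacy.blank("sv")
doc = nlp("Vägen är längre än 2000 m.")
print([t.text for t in doc][-2:])
# Expected per the commit message: ['m', '.']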
26 lines
985 B
Python
# coding: utf8
from __future__ import unicode_literals

import pytest


# Each case pairs an input sentence with the expected token texts.
SV_TOKEN_EXCEPTION_TESTS = [
    ('Smörsåsen används bl.a. till fisk', ['Smörsåsen', 'används', 'bl.a.', 'till', 'fisk']),
    ('Jag kommer först kl. 13 p.g.a. diverse förseningar', ['Jag', 'kommer', 'först', 'kl.', '13', 'p.g.a.', 'diverse', 'förseningar']),
    ('Anders I. tycker om ord med i i.', ["Anders", "I.", "tycker", "om", "ord", "med", "i", "i", "."])
]


@pytest.mark.parametrize('text,expected_tokens', SV_TOKEN_EXCEPTION_TESTS)
def test_tokenizer_handles_exception_cases(sv_tokenizer, text, expected_tokens):
    tokens = sv_tokenizer(text)
    token_list = [token.text for token in tokens if not token.is_space]
    assert expected_tokens == token_list


# Colloquial verb forms with an enclitic "u" ("du") should be split in two.
@pytest.mark.parametrize('text', ["driveru", "hajaru", "Serru", "Fixaru"])
def test_tokenizer_handles_verb_exceptions(sv_tokenizer, text):
    tokens = sv_tokenizer(text)
    assert len(tokens) == 2
    assert tokens[1].text == "u"
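The sv_tokenizer fixture is not defined in this file; in the spaCy test suite it is provided by a shared conftest.py. A minimal sketch of such a fixture, assuming a modern spaCy install (the repository's own conftest may construct it differently):

import pytest
from spacy.lang.sv import Swedish


@pytest.fixture
def sv_tokenizer():
    # The tokenizer of a bare Swedish Language object; no pipeline
    # components are needed for these tokenization tests.
    return Swedish().tokenizer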