# coding: utf-8
from __future__ import unicode_literals

import pytest
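
# The `de_tokenizer` fixture is assumed to be provided by the shared
# conftest.py of the test suite, which builds a German tokenizer.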


@pytest.mark.parametrize("text", ["(unter)"])
def test_de_tokenizer_splits_no_special(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["unter'm"])
def test_de_tokenizer_splits_no_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 2


@pytest.mark.parametrize("text", ["(unter'm"])
def test_de_tokenizer_splits_prefix_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["unter'm)"])
def test_de_tokenizer_splits_suffix_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["(unter'm)"])
def test_de_tokenizer_splits_even_wrap(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 4


@pytest.mark.parametrize("text", ["(unter'm?)"])
def test_de_tokenizer_splits_uneven_wrap(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 5
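

# Abbreviation handling: "z.B." is expected to remain a single token, while
# "zb." and any attached punctuation are split off.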


@pytest.mark.parametrize("text,length", [("z.B.", 1), ("zb.", 2), ("(z.B.", 2)])
def test_de_tokenizer_splits_prefix_interact(de_tokenizer, text, length):
    tokens = de_tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize("text", ["z.B.)"])
def test_de_tokenizer_splits_suffix_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 2


@pytest.mark.parametrize("text", ["(z.B.)"])
def test_de_tokenizer_splits_even_wrap_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["(z.B.?)"])
def test_de_tokenizer_splits_uneven_wrap_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 4


@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_de_tokenizer_splits_numeric_range(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["blau.Rot", "Hallo.Welt"])
def test_de_tokenizer_splits_period_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize("text", ["Hallo,Welt", "eins,zwei"])
def test_de_tokenizer_splits_comma_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[0].text == text.split(",")[0]
    assert tokens[1].text == ","
    assert tokens[2].text == text.split(",")[1]


@pytest.mark.parametrize("text", ["blau...Rot", "blau...rot"])
def test_de_tokenizer_splits_ellipsis_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3
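

# Hyphenated compounds stay intact as single tokens; a double hyphen ("--") is
# treated as an infix and split out.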


@pytest.mark.parametrize("text", ["Islam-Konferenz", "Ost-West-Konflikt"])
def test_de_tokenizer_keeps_hyphens(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 1


def test_de_tokenizer_splits_double_hyphen_infix(de_tokenizer):
    tokens = de_tokenizer("Viele Regeln--wie die Bindestrich-Regeln--sind kompliziert.")
    assert len(tokens) == 10
    assert tokens[0].text == "Viele"
    assert tokens[1].text == "Regeln"
    assert tokens[2].text == "--"
    assert tokens[3].text == "wie"
    assert tokens[4].text == "die"
    assert tokens[5].text == "Bindestrich-Regeln"
    assert tokens[6].text == "--"
    assert tokens[7].text == "sind"
    assert tokens[8].text == "kompliziert"
    assert tokens[9].text == "."