# coding: utf-8
"""Test that tokenizer prefixes, suffixes and infixes are handled correctly."""

from __future__ import unicode_literals

import pytest

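# Note: `de_tokenizer` is a pytest fixture assumed to be provided elsewhere in
# the test suite (typically a conftest.py) and to return the German tokenizer.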
@pytest.mark.parametrize('text', ["(unter)"])
def test_tokenizer_splits_no_special(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["unter'm"])
def test_tokenizer_splits_no_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 2


@pytest.mark.parametrize('text', ["(unter'm"])
def test_tokenizer_splits_prefix_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["unter'm)"])
def test_tokenizer_splits_suffix_punct(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["(unter'm)"])
def test_tokenizer_splits_even_wrap(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 4


@pytest.mark.parametrize('text', ["(unter'm?)"])
def test_tokenizer_splits_uneven_wrap(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 5

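# The following tests check how prefix/suffix punctuation splitting interacts
# with abbreviations like "z.B." (presumably handled as tokenizer exceptions),
# which should remain a single token even when wrapped in punctuation.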
@pytest.mark.parametrize('text,length', [("z.B.", 1), ("zb.", 2), ("(z.B.", 2)])
def test_tokenizer_splits_prefix_interact(de_tokenizer, text, length):
    tokens = de_tokenizer(text)
    assert len(tokens) == length


@pytest.mark.parametrize('text', ["z.B.)"])
def test_tokenizer_splits_suffix_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 2


@pytest.mark.parametrize('text', ["(z.B.)"])
def test_tokenizer_splits_even_wrap_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["(z.B.?)"])
def test_tokenizer_splits_uneven_wrap_interact(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 4

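# The following tests cover infix splitting: hyphens between words, hyphens in
# numeric ranges, and periods, commas and ellipses between tokens with no
# surrounding whitespace should each become their own token.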
@pytest.mark.parametrize('text', ["blau-rot"])
def test_tokenizer_splits_hyphens(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_tokenizer_splits_numeric_range(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["blau.Rot", "Hallo.Welt"])
def test_tokenizer_splits_period_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3


@pytest.mark.parametrize('text', ["Hallo,Welt", "eins,zwei"])
def test_tokenizer_splits_comma_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[0].text == text.split(",")[0]
    assert tokens[1].text == ","
    assert tokens[2].text == text.split(",")[1]


@pytest.mark.parametrize('text', ["blau...Rot", "blau...rot"])
def test_tokenizer_splits_ellipsis_infix(de_tokenizer, text):
    tokens = de_tokenizer(text)
    assert len(tokens) == 3

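# Full-sentence check: double hyphens ("--") used parenthetically are split off
# as their own tokens, as is the single hyphen inside "Bindestrich-Regeln".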
def test_tokenizer_splits_double_hyphen_infix(de_tokenizer):
    tokens = de_tokenizer("Viele Regeln--wie die Bindestrich-Regeln--sind kompliziert.")
    assert len(tokens) == 12
    assert tokens[0].text == "Viele"
    assert tokens[1].text == "Regeln"
    assert tokens[2].text == "--"
    assert tokens[3].text == "wie"
    assert tokens[4].text == "die"
    assert tokens[5].text == "Bindestrich"
    assert tokens[6].text == "-"
    assert tokens[7].text == "Regeln"
    assert tokens[8].text == "--"
    assert tokens[9].text == "sind"
    assert tokens[10].text == "kompliziert"