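# Tests for the English tokenizer's prefix, suffix, and infix handling:
# how punctuation splits off around contractions, abbreviations, hyphens,
# and dashes.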
import pytest


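# Parenthesized plain word: "(" and ")" split off around "can", giving 3 tokens.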
@pytest.mark.parametrize("text", ["(can)"])
def test_en_tokenizer_splits_no_special(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


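# Bare contraction: "can't" splits into two tokens (spaCy handles contractions
# via tokenizer exceptions), with no punctuation to strip.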
@pytest.mark.parametrize("text", ["can't"])
def test_en_tokenizer_splits_no_punct(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 2


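# Prefix "(" splits off and the contraction is still split: 3 tokens in total.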
@pytest.mark.parametrize("text", ["(can't"])
def test_en_tokenizer_splits_prefix_punct(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


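# Suffix ")" splits off and the contraction is still split: 3 tokens in total.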
@pytest.mark.parametrize("text", ["can't)"])
def test_en_tokenizer_splits_suffix_punct(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


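# Contraction wrapped in matching brackets: 4 tokens.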
@pytest.mark.parametrize("text", ["(can't)"])
def test_en_tokenizer_splits_even_wrap(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 4


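# Wrapped contraction with an extra "?" before the closing bracket: 5 tokens.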
@pytest.mark.parametrize("text", ["(can't?)"])
def test_en_tokenizer_splits_uneven_wrap(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 5


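# Prefix punctuation interacting with abbreviations: "U.S." stays one token,
# lowercase "us." does not, and a leading "(" splits off.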
@pytest.mark.parametrize("text,length", [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
def test_en_tokenizer_splits_prefix_interact(en_tokenizer, text, length):
    tokens = en_tokenizer(text)
    assert len(tokens) == length


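# Suffix ")" splits off while the "U.S." abbreviation stays intact.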
@pytest.mark.parametrize("text", ["U.S.)"])
def test_en_tokenizer_splits_suffix_interact(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 2


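# "U.S." wrapped in brackets keeps the abbreviation whole: 3 tokens.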
@pytest.mark.parametrize("text", ["(U.S.)"])
def test_en_tokenizer_splits_even_wrap_interact(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


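# Wrapped "U.S." with a trailing "?": 4 tokens.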
@pytest.mark.parametrize("text", ["(U.S.?)"])
def test_en_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 4


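# Infix hyphen splits "best-known" into word, hyphen, word.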
@pytest.mark.parametrize("text", ["best-known"])
def test_en_tokenizer_splits_hyphens(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


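# Hyphenated numeric ranges split into number, hyphen, number.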
@pytest.mark.parametrize("text", ["0.1-13.5", "0.0-0.1", "103.27-300"])
def test_en_tokenizer_splits_numeric_range(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


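# A period with no following space acts as an infix between two words: 3 tokens.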
@pytest.mark.parametrize("text", ["best.Known", "Hello.World"])
def test_en_tokenizer_splits_period_infix(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


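# An infix comma splits off as its own token, leaving both surrounding words intact.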
@pytest.mark.parametrize("text", ["Hello,world", "one,two"])
def test_en_tokenizer_splits_comma_infix(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3
    assert tokens[0].text == text.split(",")[0]
    assert tokens[1].text == ","
    assert tokens[2].text == text.split(",")[1]


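# An infix ellipsis is kept as a single "..." token between the two words.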
@pytest.mark.parametrize("text", ["best...Known", "best...known"])
def test_en_tokenizer_splits_ellipsis_infix(en_tokenizer, text):
    tokens = en_tokenizer(text)
    assert len(tokens) == 3


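# "--" is treated as a single infix token, while the lone hyphen in "well-bred"
# still splits the compound.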
def test_en_tokenizer_splits_double_hyphen_infix(en_tokenizer):
    tokens = en_tokenizer("No decent--let alone well-bred--people.")
    assert tokens[0].text == "No"
    assert tokens[1].text == "decent"
    assert tokens[2].text == "--"
    assert tokens[3].text == "let"
    assert tokens[4].text == "alone"
    assert tokens[5].text == "well"
    assert tokens[6].text == "-"
    assert tokens[7].text == "bred"
    assert tokens[8].text == "--"
    assert tokens[9].text == "people"


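# The sentence-final period splits off "Tuesday", but the "Mr." abbreviation
# is kept whole.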
def test_en_tokenizer_splits_period_abbr(en_tokenizer):
    text = "Today is Tuesday.Mr."
    tokens = en_tokenizer(text)
    assert len(tokens) == 5
    assert tokens[0].text == "Today"
    assert tokens[1].text == "is"
    assert tokens[2].text == "Tuesday"
    assert tokens[3].text == "."
    assert tokens[4].text == "Mr."


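# Em dashes between sentences should be split off as infix tokens; xfail until
# issue #225 is fixed.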
@pytest.mark.issue(225)
@pytest.mark.xfail(reason="Issue #225 - not yet implemented")
def test_en_tokenizer_splits_em_dash_infix(en_tokenizer):
    tokens = en_tokenizer(
        """Will this road take me to Puddleton?\u2014No, """
        """you'll have to walk there.\u2014Ariel."""
    )
    assert tokens[6].text == "Puddleton"
    assert tokens[7].text == "?"
    assert tokens[8].text == "\u2014"


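# A trailing period after "_MATH_" splits off as its own token (length 3 -> 4).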
@pytest.mark.parametrize("text,length", [("_MATH_", 3), ("_MATH_.", 4)])
def test_final_period(en_tokenizer, text, length):
    tokens = en_tokenizer(text)
    assert len(tokens) == length