From 8a74129cdf09b63759d1786c0c40914e9f95af6d Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Thu, 5 Jan 2017 13:13:12 +0100
Subject: [PATCH] Modernize and merge tokenizer tests for
 prefixes/suffixes/infixes

---
 spacy/tests/tokenizer/test_infix.py           |  62 --------
 .../tokenizer/test_prefix_suffix_infix.py     | 148 ++++++++++++++++++
 spacy/tests/tokenizer/test_special_affix.py   |  46 ------
 spacy/tests/tokenizer/test_tokenizer.py       |   8 -
 4 files changed, 148 insertions(+), 116 deletions(-)
 delete mode 100644 spacy/tests/tokenizer/test_infix.py
 create mode 100644 spacy/tests/tokenizer/test_prefix_suffix_infix.py
 delete mode 100644 spacy/tests/tokenizer/test_special_affix.py

diff --git a/spacy/tests/tokenizer/test_infix.py b/spacy/tests/tokenizer/test_infix.py
deleted file mode 100644
index d197e79ea..000000000
--- a/spacy/tests/tokenizer/test_infix.py
+++ /dev/null
@@ -1,62 +0,0 @@
-from __future__ import unicode_literals
-
-import pytest
-
-
-@pytest.mark.parametrize('text', ["best-known"])
-def test_tokenizer_splits_hyphens(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
-def test_tokenizer_splits_numeric_range(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
-def test_tokenizer_splits_period(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
-def test_tokenizer_splits_comma(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-    assert tokens[0].text == text.split(",")[0]
-    assert tokens[1].text == ","
-    assert tokens[2].text == text.split(",")[1]
-
-
-@pytest.mark.parametrize('text', ["best...Known", "best...known"])
-def test_tokenizer_splits_ellipsis(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["google.com", "python.org", "spacy.io", "explosion.ai"])
-def test_tokenizer_keep_urls(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 1
-
-
-@pytest.mark.parametrize('text', ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"])
-def test_tokenizer_keeps_email(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 1
-
-
-def test_tokenizer_splits_double_hyphen(en_tokenizer):
-    tokens = en_tokenizer("No decent--let alone well-bred--people.")
-    assert tokens[0].text == "No"
-    assert tokens[1].text == "decent"
-    assert tokens[2].text == "--"
-    assert tokens[3].text == "let"
-    assert tokens[4].text == "alone"
-    assert tokens[5].text == "well"
-    assert tokens[6].text == "-"
-    assert tokens[7].text == "bred"
-    assert tokens[8].text == "--"
-    assert tokens[9].text == "people"
diff --git a/spacy/tests/tokenizer/test_prefix_suffix_infix.py b/spacy/tests/tokenizer/test_prefix_suffix_infix.py
new file mode 100644
index 000000000..d6963ada1
--- /dev/null
+++ b/spacy/tests/tokenizer/test_prefix_suffix_infix.py
@@ -0,0 +1,148 @@
+# coding: utf-8
+"""Test that tokenizer prefixes, suffixes and infixes are handled correctly."""
+
+
+from __future__ import unicode_literals
+
+import pytest
+
+
+@pytest.mark.parametrize('text', ["(can)"])
+def test_tokenizer_splits_no_special(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["can't"])
+def test_tokenizer_splits_no_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 2
+
+
+@pytest.mark.parametrize('text', ["(can't"])
+def test_tokenizer_splits_prefix_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["can't)"])
+def test_tokenizer_splits_suffix_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["(can't)"])
+def test_tokenizer_splits_even_wrap(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 4
+
+
+@pytest.mark.parametrize('text', ["(can't?)"])
+def test_tokenizer_splits_uneven_wrap(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 5
+
+
+@pytest.mark.parametrize('text,length', [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
+def test_tokenizer_splits_prefix_interact(en_tokenizer, text, length):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == length
+
+
+@pytest.mark.parametrize('text', ["U.S.)"])
+def test_tokenizer_splits_suffix_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 2
+
+
+@pytest.mark.parametrize('text', ["(U.S.)"])
+def test_tokenizer_splits_even_wrap_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["(U.S.?)"])
+def test_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 4
+
+
+@pytest.mark.parametrize('text', ["best-known"])
+def test_tokenizer_splits_hyphens(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
+def test_tokenizer_splits_numeric_range(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
+def test_tokenizer_splits_period_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
+def test_tokenizer_splits_comma_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+    assert tokens[0].text == text.split(",")[0]
+    assert tokens[1].text == ","
+    assert tokens[2].text == text.split(",")[1]
+
+
+@pytest.mark.parametrize('text', ["best...Known", "best...known"])
+def test_tokenizer_splits_ellipsis_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["google.com", "python.org", "spacy.io", "explosion.ai"])
+def test_tokenizer_keep_urls(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 1
+
+
+@pytest.mark.parametrize('text', ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"])
+def test_tokenizer_keeps_email(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 1
+
+
+def test_tokenizer_splits_double_hyphen_infix(en_tokenizer):
+    tokens = en_tokenizer("No decent--let alone well-bred--people.")
+    assert tokens[0].text == "No"
+    assert tokens[1].text == "decent"
+    assert tokens[2].text == "--"
+    assert tokens[3].text == "let"
+    assert tokens[4].text == "alone"
+    assert tokens[5].text == "well"
+    assert tokens[6].text == "-"
+    assert tokens[7].text == "bred"
+    assert tokens[8].text == "--"
+    assert tokens[9].text == "people"
+
+
+@pytest.mark.xfail
+def test_tokenizer_splits_period_abbr(en_tokenizer):
+    text = "Today is Tuesday.Mr."
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 5
+    assert tokens[0].text == "Today"
+    assert tokens[1].text == "is"
+    assert tokens[2].text == "Tuesday"
+    assert tokens[3].text == "."
+    assert tokens[4].text == "Mr."
+
+
+@pytest.mark.xfail
+def test_tokenizer_splits_em_dash_infix(en_tokenizer):
+    # Re Issue #225
+    tokens = en_tokenizer("""Will this road take me to Puddleton?\u2014No, """
+                          """you'll have to walk there.\u2014Ariel.""")
+    assert tokens[6].text == "Puddleton"
+    assert tokens[7].text == "?"
+    assert tokens[8].text == "\u2014"
diff --git a/spacy/tests/tokenizer/test_special_affix.py b/spacy/tests/tokenizer/test_special_affix.py
deleted file mode 100644
index 62cf114f1..000000000
--- a/spacy/tests/tokenizer/test_special_affix.py
+++ /dev/null
@@ -1,46 +0,0 @@
-"""Test entries in the tokenization special-case interacting with prefix
-and suffix punctuation."""
-from __future__ import unicode_literals
-import pytest
-
-
-def test_no_special(en_tokenizer):
-    assert len(en_tokenizer("(can)")) == 3
-
-
-def test_no_punct(en_tokenizer):
-    assert len(en_tokenizer("can't")) == 2
-
-
-def test_prefix(en_tokenizer):
-    assert len(en_tokenizer("(can't")) == 3
-
-
-def test_suffix(en_tokenizer):
-    assert len(en_tokenizer("can't)")) == 3
-
-
-def test_wrap(en_tokenizer):
-    assert len(en_tokenizer("(can't)")) == 4
-
-
-def test_uneven_wrap(en_tokenizer):
-    assert len(en_tokenizer("(can't?)")) == 5
-
-
-def test_prefix_interact(en_tokenizer):
-    assert len(en_tokenizer("U.S.")) == 1
-    assert len(en_tokenizer("us.")) == 2
-    assert len(en_tokenizer("(U.S.")) == 2
-
-
-def test_suffix_interact(en_tokenizer):
-    assert len(en_tokenizer("U.S.)")) == 2
-
-
-def test_even_wrap_interact(en_tokenizer):
-    assert len(en_tokenizer("(U.S.)")) == 3
-
-
-def test_uneven_wrap_interact(en_tokenizer):
-    assert len(en_tokenizer("(U.S.?)")) == 4
diff --git a/spacy/tests/tokenizer/test_tokenizer.py b/spacy/tests/tokenizer/test_tokenizer.py
index 091561ae3..45e8cf70e 100644
--- a/spacy/tests/tokenizer/test_tokenizer.py
+++ b/spacy/tests/tokenizer/test_tokenizer.py
@@ -157,14 +157,6 @@ def test_two_whitespace(en_tokenizer):
     assert repr(tokens.text_with_ws) == repr(orig_str)
 
 
-@pytest.mark.xfail
-def test_em_dash_infix(en_tokenizer):
-    # Re Issue #225
-    tokens = en_tokenizer('''Will this road take me to Puddleton?\u2014No, '''
-                          '''you'll have to walk there.\u2014Ariel.''')
-    assert tokens[6].text == 'Puddleton'
-    assert tokens[7].text == '?'
-    assert tokens[8].text == '\u2014'
 
 #def test_cnts7():
 #    text = 'But then the 6,000-year ice age came...'
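
Note: every test in this patch depends on a shared en_tokenizer fixture that the
patch itself does not define; in spaCy's test suite it lives in the shared
conftest.py. For context, below is a minimal sketch of such a fixture. It
assumes the spaCy 1.x API of early 2017 (the spacy.en import path and the
English.Defaults.create_tokenizer() call); both are assumptions based on the
suite's conventions, not part of this patch.

    # conftest.py (sketch only; assumes spaCy 1.x, not taken from this patch)
    import pytest

    from spacy.en import English


    @pytest.fixture
    def en_tokenizer():
        # Build a bare rule-based tokenizer without loading any statistical
        # models, so the prefix/suffix/infix rules can be tested in isolation
        # and the parametrized cases above run quickly.
        return English.Defaults.create_tokenizer()

With a fixture like this in place, running pytest on
spacy/tests/tokenizer/test_prefix_suffix_infix.py collects and executes all of
the parametrized cases added by this patch.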