Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-26 01:46:28 +03:00)
Modernize and merge tokenizer tests for prefixes/suffixes/infixes
This commit is contained in:
parent 0e65dca9a5
commit 8a74129cdf
@@ -1,62 +0,0 @@ (file deleted)
-from __future__ import unicode_literals
-
-import pytest
-
-
-@pytest.mark.parametrize('text', ["best-known"])
-def test_tokenizer_splits_hyphens(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
-def test_tokenizer_splits_numeric_range(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
-def test_tokenizer_splits_period(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
-def test_tokenizer_splits_comma(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-    assert tokens[0].text == text.split(",")[0]
-    assert tokens[1].text == ","
-    assert tokens[2].text == text.split(",")[1]
-
-
-@pytest.mark.parametrize('text', ["best...Known", "best...known"])
-def test_tokenizer_splits_ellipsis(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 3
-
-
-@pytest.mark.parametrize('text', ["google.com", "python.org", "spacy.io", "explosion.ai"])
-def test_tokenizer_keep_urls(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 1
-
-
-@pytest.mark.parametrize('text', ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"])
-def test_tokenizer_keeps_email(en_tokenizer, text):
-    tokens = en_tokenizer(text)
-    assert len(tokens) == 1
-
-
-def test_tokenizer_splits_double_hyphen(en_tokenizer):
-    tokens = en_tokenizer("No decent--let alone well-bred--people.")
-    assert tokens[0].text == "No"
-    assert tokens[1].text == "decent"
-    assert tokens[2].text == "--"
-    assert tokens[3].text == "let"
-    assert tokens[4].text == "alone"
-    assert tokens[5].text == "well"
-    assert tokens[6].text == "-"
-    assert tokens[7].text == "bred"
-    assert tokens[8].text == "--"
-    assert tokens[9].text == "people"
spacy/tests/tokenizer/test_prefix_suffix_infix.py (new file, 148 lines)
@@ -0,0 +1,148 @@
+# coding: utf-8
+"""Test that tokenizer prefixes, suffixes and infixes are handled correctly."""
+
+
+from __future__ import unicode_literals
+
+import pytest
+
+
+@pytest.mark.parametrize('text', ["(can)"])
+def test_tokenizer_splits_no_special(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["can't"])
+def test_tokenizer_splits_no_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 2
+
+
+@pytest.mark.parametrize('text', ["(can't"])
+def test_tokenizer_splits_prefix_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["can't)"])
+def test_tokenizer_splits_suffix_punct(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["(can't)"])
+def test_tokenizer_splits_even_wrap(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 4
+
+
+@pytest.mark.parametrize('text', ["(can't?)"])
+def test_tokenizer_splits_uneven_wrap(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 5
+
+
+@pytest.mark.parametrize('text,length', [("U.S.", 1), ("us.", 2), ("(U.S.", 2)])
+def test_tokenizer_splits_prefix_interact(en_tokenizer, text, length):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == length
+
+
+@pytest.mark.parametrize('text', ["U.S.)"])
+def test_tokenizer_splits_suffix_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 2
+
+
+@pytest.mark.parametrize('text', ["(U.S.)"])
+def test_tokenizer_splits_even_wrap_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["(U.S.?)"])
+def test_tokenizer_splits_uneven_wrap_interact(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 4
+
+
+@pytest.mark.parametrize('text', ["best-known"])
+def test_tokenizer_splits_hyphens(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["0.1-13.5", "0.0-0.1", "103.27-300"])
+def test_tokenizer_splits_numeric_range(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["best.Known", "Hello.World"])
+def test_tokenizer_splits_period_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["Hello,world", "one,two"])
+def test_tokenizer_splits_comma_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+    assert tokens[0].text == text.split(",")[0]
+    assert tokens[1].text == ","
+    assert tokens[2].text == text.split(",")[1]
+
+
+@pytest.mark.parametrize('text', ["best...Known", "best...known"])
+def test_tokenizer_splits_ellipsis_infix(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 3
+
+
+@pytest.mark.parametrize('text', ["google.com", "python.org", "spacy.io", "explosion.ai"])
+def test_tokenizer_keep_urls(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 1
+
+
+@pytest.mark.parametrize('text', ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"])
+def test_tokenizer_keeps_email(en_tokenizer, text):
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 1
+
+
+def test_tokenizer_splits_double_hyphen_infix(en_tokenizer):
+    tokens = en_tokenizer("No decent--let alone well-bred--people.")
+    assert tokens[0].text == "No"
+    assert tokens[1].text == "decent"
+    assert tokens[2].text == "--"
+    assert tokens[3].text == "let"
+    assert tokens[4].text == "alone"
+    assert tokens[5].text == "well"
+    assert tokens[6].text == "-"
+    assert tokens[7].text == "bred"
+    assert tokens[8].text == "--"
+    assert tokens[9].text == "people"
+
+
+@pytest.mark.xfail
+def test_tokenizer_splits_period_abbr(en_tokenizer):
+    text = "Today is Tuesday.Mr."
+    tokens = en_tokenizer(text)
+    assert len(tokens) == 5
+    assert tokens[0].text == "Today"
+    assert tokens[1].text == "is"
+    assert tokens[2].text == "Tuesday"
+    assert tokens[3].text == "."
+    assert tokens[4].text == "Mr."
+
+
+@pytest.mark.xfail
+def test_tokenizer_splits_em_dash_infix(en_tokenizer):
+    # Re Issue #225
+    tokens = en_tokenizer("""Will this road take me to Puddleton?\u2014No, """
+                          """you'll have to walk there.\u2014Ariel.""")
+    assert tokens[6].text == "Puddleton"
+    assert tokens[7].text == "?"
+    assert tokens[8].text == "\u2014"
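All of these tests take an en_tokenizer argument, which is a pytest fixture provided by the test suite's shared conftest.py rather than by this file. As a rough sketch only (the spacy.en.English import and the English.Defaults.create_tokenizer() call below assume the spaCy 1.x API of this era, not the exact conftest contents), the fixture amounts to:

import pytest

from spacy.en import English  # assumption: spaCy 1.x-style import path


@pytest.fixture
def en_tokenizer():
    # A bare rule-based tokenizer is enough for these tests; no tagger,
    # parser or entity recognizer is loaded.
    return English.Defaults.create_tokenizer()

Because the fixture returns only the tokenizer, the whole module runs quickly and exercises nothing but the prefix, suffix and infix rules.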
@@ -1,46 +0,0 @@ (file deleted)
-"""Test entries in the tokenization special-case interacting with prefix
-and suffix punctuation."""
-from __future__ import unicode_literals
-import pytest
-
-
-def test_no_special(en_tokenizer):
-    assert len(en_tokenizer("(can)")) == 3
-
-
-def test_no_punct(en_tokenizer):
-    assert len(en_tokenizer("can't")) == 2
-
-
-def test_prefix(en_tokenizer):
-    assert len(en_tokenizer("(can't")) == 3
-
-
-def test_suffix(en_tokenizer):
-    assert len(en_tokenizer("can't)")) == 3
-
-
-def test_wrap(en_tokenizer):
-    assert len(en_tokenizer("(can't)")) == 4
-
-
-def test_uneven_wrap(en_tokenizer):
-    assert len(en_tokenizer("(can't?)")) == 5
-
-
-def test_prefix_interact(en_tokenizer):
-    assert len(en_tokenizer("U.S.")) == 1
-    assert len(en_tokenizer("us.")) == 2
-    assert len(en_tokenizer("(U.S.")) == 2
-
-
-def test_suffix_interact(en_tokenizer):
-    assert len(en_tokenizer("U.S.)")) == 2
-
-
-def test_even_wrap_interact(en_tokenizer):
-    assert len(en_tokenizer("(U.S.)")) == 3
-
-
-def test_uneven_wrap_interact(en_tokenizer):
-    assert len(en_tokenizer("(U.S.?)")) == 4
@@ -157,14 +157,6 @@ def test_two_whitespace(en_tokenizer)
     assert repr(tokens.text_with_ws) == repr(orig_str)
 
 
-@pytest.mark.xfail
-def test_em_dash_infix(en_tokenizer):
-    # Re Issue #225
-    tokens = en_tokenizer('''Will this road take me to Puddleton?\u2014No, '''
-                          '''you'll have to walk there.\u2014Ariel.''')
-    assert tokens[6].text == 'Puddleton'
-    assert tokens[7].text == '?'
-    assert tokens[8].text == '\u2014'
 
 #def test_cnts7():
 #    text = 'But then the 6,000-year ice age came...'
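When one of the count assertions above looks surprising, printing the actual split is the quickest way to see which prefix, suffix or infix rule fired. A small illustrative snippet (same hedged spaCy 1.x-style setup as sketched earlier; the exact token texts can differ between spaCy versions):

from spacy.en import English  # assumption: spaCy 1.x-style import path

tokenizer = English.Defaults.create_tokenizer()

# Print the pieces each test input is split into.
for text in ["(can't)", "U.S.)", "best-known", "Hello,world"]:
    tokens = tokenizer(text)
    print(text, "->", [t.text for t in tokens], len(tokens))

# "(can't)" should come out as 4 tokens, e.g. "(", "ca", "n't", ")",
# matching test_tokenizer_splits_even_wrap above.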