# Tests for URL handling in the tokenizer (spacy/tests/tokenizer/test_urls.py).
# coding: utf-8
from __future__ import unicode_literals
import pytest
# URLs that the tokenizer is expected to keep intact as a single token.
URLS_BASIC = [
    "http://www.nytimes.com/2016/04/20/us/politics/new-york-primary-preview.html?hp&action=click&pgtype=Homepage&clickSource=story-heading&module=a-lede-package-region&region=top-news&WT.nav=top-news&_r=0",
    "www.red-stars.com",
    "mailto:foo.bar@baz.com",
]

# Larger URL set, exercised only by the slow (exhaustive) tests below.
URLS_FULL = URLS_BASIC + [
    "mailto:foo-bar@baz-co.com",
    "www.google.com?q=google",
    "http://foo.com/blah_(wikipedia)#cite-1",
]

# Punctuation we expect to be split away *before* the URL.
PREFIXES = ["(", '"', ">"]

# Punctuation we expect to be split away *after* the URL.
SUFFIXES = ['"', ":", ">"]
@pytest.mark.parametrize("url", URLS_BASIC)
def test_tokenizer_handles_simple_url(tokenizer, url):
    """A bare URL must come out of the tokenizer as exactly one token."""
    doc = tokenizer(url)
    assert len(doc) == 1
    assert doc[0].text == url
@pytest.mark.parametrize("url", URLS_BASIC)
def test_tokenizer_handles_simple_surround_url(tokenizer, url):
    """A parenthesized URL splits into the open paren, the URL, and the close paren."""
    doc = tokenizer("({})".format(url))
    texts = [token.text for token in doc]
    assert texts == ["(", url, ")"]
@pytest.mark.slow
@pytest.mark.parametrize("prefix", PREFIXES)
@pytest.mark.parametrize("url", URLS_FULL)
def test_tokenizer_handles_prefixed_url(tokenizer, prefix, url):
    """Leading punctuation must be split off into its own token before the URL."""
    doc = tokenizer(prefix + url)
    assert [token.text for token in doc] == [prefix, url]
    assert len(doc) == 2
@pytest.mark.slow
@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("url", URLS_FULL)
def test_tokenizer_handles_suffixed_url(tokenizer, url, suffix):
    """Trailing punctuation must be split off into its own token after the URL."""
    doc = tokenizer(url + suffix)
    assert [token.text for token in doc] == [url, suffix]
    assert len(doc) == 2
@pytest.mark.slow
@pytest.mark.parametrize("prefix", PREFIXES)
@pytest.mark.parametrize("suffix", SUFFIXES)
@pytest.mark.parametrize("url", URLS_FULL)
def test_tokenizer_handles_surround_url(tokenizer, prefix, suffix, url):
    """Punctuation on both sides of a URL is split off, leaving three tokens."""
    doc = tokenizer(prefix + url + suffix)
    assert [token.text for token in doc] == [prefix, url, suffix]
    assert len(doc) == 3
@pytest.mark.slow
@pytest.mark.parametrize("prefix1", PREFIXES)
@pytest.mark.parametrize("prefix2", PREFIXES)
@pytest.mark.parametrize("url", URLS_FULL)
def test_tokenizer_handles_two_prefix_url(tokenizer, prefix1, prefix2, url):
    """Two stacked punctuation prefixes each become their own token."""
    doc = tokenizer(prefix1 + prefix2 + url)
    assert [token.text for token in doc] == [prefix1, prefix2, url]
    assert len(doc) == 3
@pytest.mark.slow
@pytest.mark.parametrize("suffix1", SUFFIXES)
@pytest.mark.parametrize("suffix2", SUFFIXES)
@pytest.mark.parametrize("url", URLS_FULL)
def test_tokenizer_handles_two_suffix_url(tokenizer, suffix1, suffix2, url):
    """Two stacked punctuation suffixes each become their own token.

    Renamed from the copy-pasted ``test_tokenizer_handles_two_prefix_url``:
    the duplicate name silently rebound the earlier function, so pytest
    never collected the two-prefix test at all.
    """
    tokens = tokenizer(url + suffix1 + suffix2)
    assert len(tokens) == 3
    assert tokens[0].text == url
    assert tokens[1].text == suffix1
    assert tokens[2].text == suffix2