Mirror of https://github.com/explosion/spaCy.git
Commit 46dfe773e1:
* replace unicode categories with raw list of code points
* simplify ranges
* fix variable-length quotes
* remove redundant regular expression
* small cleanup of regexp notations
* quotes and alpha as ranges instead of alternations
* remove most regexp dependencies and features
* exponential backtracking - unit tests
* rewrite expression with pathological backtracking
* disable double-hyphen tests for now
* test additional variants of repeating punctuation
* remove regex and redundant backslashes from load_reddit script
* small typo fixes
* disable double-punctuation test for Russian
* clean up old comments
* format block code
* final cleanup
* naming consistency
* French strings as unicode for Python 2 support
* French regular expression made case-insensitive
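The backtracking items above concern regular expressions whose nested quantifiers let Python's regex engine take exponential time on non-matching input. A minimal sketch of the failure mode and the usual fix, using a textbook pattern rather than the actual spaCy expression:

    import re

    # Nested quantifiers: once the trailing "b" fails to match, the engine
    # retries exponentially many ways of splitting the "a" run between the
    # inner "a+" and the outer "(...)+".
    pathological = re.compile(r"(a+)+b")
    # pathological.search("a" * 30 + "c")  # effectively never returns

    # The same strings are matched by a single quantifier, which backtracks
    # linearly and fails fast.
    safe = re.compile(r"a+b")
    assert safe.search("aaab")
    assert safe.search("a" * 30 + "c") is None

Per the messages above, the commit applies this kind of rewrite to the tokenizer's punctuation rules and adds unit tests that would hang if a pathological form were reintroduced.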
123 lines · 3.7 KiB · Python
# coding: utf-8
from __future__ import unicode_literals

import pytest
from spacy.vocab import Vocab
from spacy.tokenizer import Tokenizer
from spacy.util import ensure_path
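
# NOTE: the `tokenizer` fixture used throughout these tests is assumed to be
# provided by the suite's shared conftest.py.
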
def test_tokenizer_handles_no_word(tokenizer):
    tokens = tokenizer("")
    assert len(tokens) == 0


@pytest.mark.parametrize("text", ["lorem"])
|
|
def test_tokenizer_handles_single_word(tokenizer, text):
|
|
tokens = tokenizer(text)
|
|
assert tokens[0].text == text
|
|
|
|
|
|
def test_tokenizer_handles_punct(tokenizer):
    text = "Lorem, ipsum."
    tokens = tokenizer(text)
    assert len(tokens) == 4
    assert tokens[0].text == "Lorem"
    assert tokens[1].text == ","
    assert tokens[2].text == "ipsum"
    assert tokens[1].text != "Lorem"


def test_tokenizer_handles_punct_braces(tokenizer):
    text = "Lorem, (ipsum)."
    tokens = tokenizer(text)
    assert len(tokens) == 6


def test_tokenizer_handles_digits(tokenizer):
    # "hu" and "bn" split "1984." differently, so they skip the exact checks.
    exceptions = ["hu", "bn"]
    text = "Lorem ipsum: 1984."
    tokens = tokenizer(text)

    if tokens[0].lang_ not in exceptions:
        assert len(tokens) == 5
        assert tokens[0].text == "Lorem"
        assert tokens[3].text == "1984"


@pytest.mark.parametrize(
    "text",
    ["google.com", "python.org", "spacy.io", "explosion.ai", "http://www.google.com"],
)
def test_tokenizer_keep_urls(tokenizer, text):
    tokens = tokenizer(text)
    assert len(tokens) == 1


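# URLs stay single tokens via the tokenizer's token_match pattern, which in
# this era of spaCy includes a URL regex.

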
@pytest.mark.parametrize("text", ["NASDAQ:GOOG"])
|
|
def test_tokenizer_colons(tokenizer, text):
|
|
tokens = tokenizer(text)
|
|
assert len(tokens) == 3
|
|
|
|
|
|
@pytest.mark.parametrize(
    "text", ["hello123@example.com", "hi+there@gmail.it", "matt@explosion.ai"]
)
def test_tokenizer_keeps_email(tokenizer, text):
    tokens = tokenizer(text)
    assert len(tokens) == 1


def test_tokenizer_handles_long_text(tokenizer):
    text = """Lorem ipsum dolor sit amet, consectetur adipiscing elit

Cras egestas orci non porttitor maximus.
Maecenas quis odio id dolor rhoncus dignissim. Curabitur sed velit at orci ultrices sagittis. Nulla commodo euismod arcu eget vulputate.

Phasellus tincidunt, augue quis porta finibus, massa sapien consectetur augue, non lacinia enim nibh eget ipsum. Vestibulum in bibendum mauris.

"Nullam porta fringilla enim, a dictum orci consequat in." Mauris nec malesuada justo."""

    tokens = tokenizer(text)
    assert len(tokens) > 5


@pytest.mark.parametrize("file_name", ["sun.txt"])
|
|
def test_tokenizer_handle_text_from_file(tokenizer, file_name):
|
|
loc = ensure_path(__file__).parent / file_name
|
|
text = loc.open("r", encoding="utf8").read()
|
|
assert len(text) != 0
|
|
tokens = tokenizer(text)
|
|
assert len(tokens) > 100
|
|
|
|
|
|
def test_tokenizer_suspected_freeing_strings(tokenizer):
    # Regression check: tokenizing a second, similar text must not invalidate
    # strings referenced by tokens of the first.
    text1 = "Lorem dolor sit amet, consectetur adipiscing elit."
    text2 = "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
    tokens1 = tokenizer(text1)
    tokens2 = tokenizer(text2)
    assert tokens1[0].text == "Lorem"
    assert tokens2[0].text == "Lorem"


@pytest.mark.parametrize("text,tokens", [("lorem", [{"orth": "lo"}, {"orth": "rem"}])])
|
|
def test_tokenizer_add_special_case(tokenizer, text, tokens):
|
|
tokenizer.add_special_case(text, tokens)
|
|
doc = tokenizer(text)
|
|
assert doc[0].text == tokens[0]["orth"]
|
|
assert doc[1].text == tokens[1]["orth"]
|
|
|
|
|
|
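# For context: the same API registers real exceptions in spaCy, e.g. a
# hypothetical contraction rule (not part of this file):
#
#     tokenizer.add_special_case("don't", [{"orth": "do"}, {"orth": "n't"}])

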
@pytest.mark.parametrize(
    "text,tokens", [("lorem", [{"orth": "lo", "tag": "NN"}, {"orth": "rem"}])]
)
def test_tokenizer_add_special_case_tag(text, tokens):
    vocab = Vocab(tag_map={"NN": {"pos": "NOUN"}})
    # A bare tokenizer: empty rules, no prefix/suffix/infix patterns.
    tokenizer = Tokenizer(vocab, {}, None, None, None)
    tokenizer.add_special_case(text, tokens)
    doc = tokenizer(text)
    assert doc[0].text == tokens[0]["orth"]
    assert doc[0].tag_ == tokens[0]["tag"]
    assert doc[0].pos_ == "NOUN"
    assert doc[1].text == tokens[1]["orth"]
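

# To run just this module (assuming the usual repo layout):
#     pytest spacy/tests/tokenizer/test_tokenizer.py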