import pytest
import re
from spacy.tokens import Doc
from spacy.vocab import Vocab
from spacy.lang.en import English
from spacy.lang.lex_attrs import LEX_ATTRS
from spacy.matcher import Matcher
from spacy.tokenizer import Tokenizer
from spacy.symbols import ORTH, LEMMA, POS


@pytest.mark.issue(1061)
def test_issue1061():
    """Test special-case works after tokenizing. Was caching problem."""
    text = "I like _MATH_ even _MATH_ when _MATH_, except when _MATH_ is _MATH_! but not _MATH_."
    tokenizer = English().tokenizer
    doc = tokenizer(text)
    assert "MATH" in [w.text for w in doc]
    assert "_MATH_" not in [w.text for w in doc]

    tokenizer.add_special_case("_MATH_", [{ORTH: "_MATH_"}])
    doc = tokenizer(text)
    assert "_MATH_" in [w.text for w in doc]
    assert "MATH" not in [w.text for w in doc]

    # For sanity, check it works when pipeline is clean.
    tokenizer = English().tokenizer
    tokenizer.add_special_case("_MATH_", [{ORTH: "_MATH_"}])
    doc = tokenizer(text)
    assert "_MATH_" in [w.text for w in doc]
    assert "MATH" not in [w.text for w in doc]


@pytest.mark.skip(
    reason="Can not be fixed without variable-width look-behind (which we don't want)"
)
@pytest.mark.issue(1235)
def test_issue1235():
    """Test that "g" is not split off if preceded by a number and a letter."""
    nlp = English()
    testwords = "e2g 2g 52g"
    doc = nlp(testwords)
    assert len(doc) == 5
    assert doc[0].text == "e2g"
    assert doc[1].text == "2"
    assert doc[2].text == "g"
    assert doc[3].text == "52"
    assert doc[4].text == "g"


@pytest.mark.issue(1242)
def test_issue1242():
    """Test that empty strings are handled by both nlp() and nlp.pipe()."""
    nlp = English()
    doc = nlp("")
    assert len(doc) == 0
    docs = list(nlp.pipe(["", "hello"]))
    assert len(docs[0]) == 0
    assert len(docs[1]) == 1


@pytest.mark.skip(reason="v3 no longer supports LEMMA/POS in tokenizer special cases")
@pytest.mark.issue(1250)
def test_issue1250():
    """Test cached special cases."""
    special_case = [{ORTH: "reimbur", LEMMA: "reimburse", POS: "VERB"}]
    nlp = English()
    nlp.tokenizer.add_special_case("reimbur", special_case)
    lemmas = [w.lemma_ for w in nlp("reimbur, reimbur...")]
    assert lemmas == ["reimburse", ",", "reimburse", "..."]
    lemmas = [w.lemma_ for w in nlp("reimbur, reimbur...")]
    assert lemmas == ["reimburse", ",", "reimburse", "..."]


@pytest.mark.issue(1257)
def test_issue1257():
    """Test that tokens compare correctly."""
    doc1 = Doc(Vocab(), words=["a", "b", "c"])
    doc2 = Doc(Vocab(), words=["a", "c", "e"])
    assert doc1[0] != doc2[0]
    assert not doc1[0] == doc2[0]


@pytest.mark.issue(1375)
def test_issue1375():
    """Test that token.nbor() raises IndexError for out-of-bounds access."""
    doc = Doc(Vocab(), words=["0", "1", "2"])
    with pytest.raises(IndexError):
        assert doc[0].nbor(-1)
    assert doc[1].nbor(-1).text == "0"
    with pytest.raises(IndexError):
        assert doc[2].nbor(1)
    assert doc[1].nbor(1).text == "2"


@pytest.mark.issue(1434)
def test_issue1434():
    """Test matches occur when optional element at end of short doc."""
    pattern = [{"ORTH": "Hello"}, {"IS_ALPHA": True, "OP": "?"}]
    vocab = Vocab(lex_attr_getters=LEX_ATTRS)
    hello_world = Doc(vocab, words=["Hello", "World"])
    hello = Doc(vocab, words=["Hello"])
    matcher = Matcher(vocab)
    matcher.add("MyMatcher", [pattern])
    matches = matcher(hello_world)
    assert matches
    matches = matcher(hello)
    assert matches


@pytest.mark.parametrize(
    "string,start,end",
    [
        ("a", 0, 1),
        ("a b", 0, 2),
        ("a c", 0, 1),
        ("a b c", 0, 2),
        ("a b b c", 0, 3),
        ("a b b", 0, 3),
    ],
)
@pytest.mark.issue(1450)
def test_issue1450(string, start, end):
    """Test matcher works when patterns end with * operator."""
    pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
    matcher = Matcher(Vocab())
    matcher.add("TSTEND", [pattern])
    doc = Doc(Vocab(), words=string.split())
    matches = matcher(doc)
    if start is None or end is None:
        assert matches == []
    assert matches[-1][1] == start
    assert matches[-1][2] == end


@pytest.mark.issue(1488)
def test_issue1488():
    """Test that a custom tokenizer with prefix, suffix, infix and token_match rules does not produce empty tokens."""
    prefix_re = re.compile(r"""[\[\("']""")
    suffix_re = re.compile(r"""[\]\)"']""")
    infix_re = re.compile(r"""[-~\.]""")
    simple_url_re = re.compile(r"""^https?://""")

    def my_tokenizer(nlp):
        return Tokenizer(
            nlp.vocab,
            {},
            prefix_search=prefix_re.search,
            suffix_search=suffix_re.search,
            infix_finditer=infix_re.finditer,
            token_match=simple_url_re.match,
        )

    nlp = English()
    nlp.tokenizer = my_tokenizer(nlp)
    doc = nlp("This is a test.")
    for token in doc:
        assert token.text


@pytest.mark.issue(1494)
def test_issue1494():
    """Test that a custom infix_finditer pattern is applied by the tokenizer."""
    infix_re = re.compile(r"""[^a-z]""")
    test_cases = [
        ("token 123test", ["token", "1", "2", "3", "test"]),
        ("token 1test", ["token", "1test"]),
        ("hello...test", ["hello", ".", ".", ".", "test"]),
    ]

    def new_tokenizer(nlp):
        return Tokenizer(nlp.vocab, {}, infix_finditer=infix_re.finditer)

    nlp = English()
    nlp.tokenizer = new_tokenizer(nlp)
    for text, expected in test_cases:
        assert [token.text for token in nlp(text)] == expected