2018-07-25 00:38:44 +03:00
|
|
|
import pytest
|
2019-03-11 03:31:21 +03:00
|
|
|
import numpy
|
2018-07-25 00:38:44 +03:00
|
|
|
from spacy.tokens import Doc
|
2019-07-10 13:49:18 +03:00
|
|
|
from spacy.matcher import Matcher
|
2018-07-25 00:38:44 +03:00
|
|
|
from spacy.displacy import render
|
2020-09-09 11:31:03 +03:00
|
|
|
from spacy.training import iob_to_biluo
|
2018-09-27 17:41:57 +03:00
|
|
|
from spacy.lang.it import Italian
|
2019-02-21 00:10:13 +03:00
|
|
|
from spacy.lang.en import English
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2020-09-21 21:43:54 +03:00
|
|
|
from ..util import add_vecs_to_vocab
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
|
2020-07-20 15:49:54 +03:00
|
|
|
@pytest.mark.skip(
    reason="Can not be fixed without iterative looping between prefix/suffix and infix"
)
@pytest.mark.issue(2070)
def test_issue2070():
    """A dot immediately followed by a quote should be handled appropriately.

    Known limitation: the dot is split off correctly, but the prefix/suffix
    rules are not re-applied afterwards, so the quote stays attached to the
    remaining token.
    """
    pipeline = English()
    parsed = pipeline('First sentence."A quoted sentence" he said ...')
    assert len(parsed) == 11
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2179)
def test_issue2179():
    """Test that spurious 'extra_labels' aren't created when initializing NER."""
    # Build a pipeline whose NER component has one custom label.
    source_nlp = Italian()
    source_ner = source_nlp.add_pipe("ner")
    source_ner.add_label("CITIZENSHIP")
    source_nlp.initialize()
    # A fresh pipeline starts with no NER labels at all.
    target_nlp = Italian()
    target_nlp.add_pipe("ner")
    assert len(target_nlp.get_pipe("ner").labels) == 0
    # Resize the fresh model so it can receive the serialized weights.
    target_model = target_nlp.get_pipe("ner").model
    n_moves = source_nlp.get_pipe("ner").moves.n_moves
    target_model.attrs["resize_output"](target_model, n_moves)
    # Round-trip through bytes: the label set must transfer cleanly.
    target_nlp.from_bytes(source_nlp.to_bytes())
    assert "extra_labels" not in target_nlp.get_pipe("ner").cfg
    assert target_nlp.get_pipe("ner").labels == ("CITIZENSHIP",)
|
2018-09-27 17:41:57 +03:00
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2203)
def test_issue2203(en_vocab):
    """Test that lemmas are set correctly in doc.from_array."""
    words = ["I", "'ll", "survive"]
    tags = ["PRP", "MD", "VB"]
    lemmas = ["-PRON-", "will", "survive"]
    # Intern tag and lemma strings so they can be stored as hash IDs.
    tag_hashes = [en_vocab.strings.add(t) for t in tags]
    lemma_hashes = [en_vocab.strings.add(lem) for lem in lemmas]
    doc = Doc(en_vocab, words=words)
    # Work around lemma corruption problem and set lemmas after tags
    doc.from_array("TAG", numpy.array(tag_hashes, dtype="uint64"))
    doc.from_array("LEMMA", numpy.array(lemma_hashes, dtype="uint64"))
    assert [token.tag_ for token in doc] == tags
    assert [token.lemma_ for token in doc] == lemmas
    # We need to serialize both tag and lemma, since this is what causes the bug
    serialized = doc.to_array(["TAG", "LEMMA"])
    roundtripped = Doc(doc.vocab, words=words).from_array(["TAG", "LEMMA"], serialized)
    assert [token.tag_ for token in roundtripped] == tags
    assert [token.lemma_ for token in roundtripped] == lemmas
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2219)
def test_issue2219(en_vocab):
    """Token similarity should be symmetric: sim(a, b) == sim(b, a)."""
    vectors = [("a", [1, 2, 3]), ("letter", [4, 5, 6])]
    add_vecs_to_vocab(en_vocab, vectors)
    first_word, _ = vectors[0]
    second_word, _ = vectors[1]
    doc = Doc(en_vocab, words=[first_word, second_word])
    assert doc[0].similarity(doc[1]) == doc[1].similarity(doc[0])
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2361)
def test_issue2361(de_vocab):
    """Test that displaCy escapes HTML-special token texts in its output.

    The raw tokens ``< > & "`` must appear in the rendered markup as their
    escaped entity forms, never verbatim (which would break the HTML).
    """
    # Expected escaped entities for each of the raw characters below.
    # (The previous tuple was corrupted to a literal `"""`, which is a
    # syntax error and would have asserted nothing useful.)
    chars = ("&lt;", "&gt;", "&amp;", "&quot;")
    words = ["<", ">", "&", '"']
    doc = Doc(de_vocab, words=words, deps=["dep"] * len(words))
    html = render(doc)
    for char in chars:
        assert char in html
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2385)
def test_issue2385():
    """Test that IOB tags are correctly converted to BILUO tags."""
    cases = [
        # fix bug in labels with a 'b' character
        (
            ("B-BRAWLER", "I-BRAWLER", "I-BRAWLER"),
            ["B-BRAWLER", "I-BRAWLER", "L-BRAWLER"],
        ),
        # maintain support for iob1 format
        (("I-ORG", "I-ORG", "B-ORG"), ["B-ORG", "L-ORG", "U-ORG"]),
        # maintain support for iob2 format
        (
            ("B-PERSON", "I-PERSON", "B-PERSON"),
            ["B-PERSON", "L-PERSON", "U-PERSON"],
        ),
    ]
    for iob_tags, expected_biluo in cases:
        assert iob_to_biluo(iob_tags) == expected_biluo
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize(
    "tags",
    [
        ("B-ORG", "L-ORG"),
        ("B-PERSON", "I-PERSON", "L-PERSON"),
        ("U-BRAWLER", "U-BRAWLER"),
    ],
)
@pytest.mark.issue(2385)
def test_issue2385_biluo(tags):
    """Test that BILUO-compatible tags aren't modified."""
    converted = iob_to_biluo(tags)
    assert converted == list(tags)
|
2018-09-28 16:18:30 +03:00
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2396)
def test_issue2396(en_vocab):
    """Lowest-common-ancestor matrix should agree between Doc and full Span."""
    words = ["She", "created", "a", "test", "for", "spacy"]
    heads = [1, 1, 3, 1, 3, 4]
    deps = ["dep"] * len(heads)
    # Expected LCA matrix: entry [i][j] is the index of the lowest common
    # ancestor of tokens i and j under the dependency tree above.
    expected = numpy.array(
        [
            [0, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1],
            [1, 1, 2, 3, 3, 3],
            [1, 1, 3, 3, 3, 3],
            [1, 1, 3, 3, 4, 4],
            [1, 1, 3, 3, 4, 5],
        ],
        dtype=numpy.int32,
    )
    doc = Doc(en_vocab, words=words, heads=heads, deps=deps)
    whole_span = doc[:]
    assert (doc.get_lca_matrix() == expected).all()
    assert (whole_span.get_lca_matrix() == expected).all()
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2464)
def test_issue2464(en_vocab):
    """Test problem with successive ?. This is the same bug, so putting it here."""
    doc = Doc(en_vocab, words=["a", "b"])
    matcher = Matcher(en_vocab)
    # Two consecutive optional tokens: expect matches for "a", "b", and "a b".
    matcher.add("4", [[{"OP": "?"}, {"OP": "?"}]])
    results = matcher(doc)
    assert len(results) == 3
|
|
|
|
|
|
|
|
|
2021-11-05 04:27:19 +03:00
|
|
|
@pytest.mark.issue(2482)
def test_issue2482():
    """Test we can serialize and deserialize a blank NER or parser model."""
    nlp = Italian()
    nlp.add_pipe("ner")
    serialized = nlp.to_bytes()
    # Loading the bytes into a fresh pipeline must not raise.
    Italian().from_bytes(serialized)
|