2017-01-13 16:29:54 +03:00
|
|
|
# coding: utf-8
|
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2015-09-14 10:48:13 +03:00
|
|
|
import pytest
|
2018-07-25 00:38:44 +03:00
|
|
|
import numpy
|
|
|
|
from numpy.testing import assert_allclose
|
|
|
|
from spacy._ml import cosine
|
|
|
|
from spacy.vocab import Vocab
|
|
|
|
from spacy.vectors import Vectors
|
|
|
|
from spacy.tokenizer import Tokenizer
|
|
|
|
from spacy.strings import hash_string
|
|
|
|
from spacy.tokens import Doc
|
|
|
|
|
|
|
|
from ..util import add_vecs_to_vocab
|
2015-09-14 10:48:13 +03:00
|
|
|
|
2017-01-13 16:29:54 +03:00
|
|
|
|
|
|
|
@pytest.fixture
def strings():
    """Words that have entries in the test vector tables."""
    return "apple orange".split()
|
2017-01-13 16:29:54 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2017-08-19 21:34:58 +03:00
|
|
|
@pytest.fixture
def vectors():
    """(word, vector) pairs used to seed the English vocab."""
    words = ["apple", "orange", "and", "juice", "pie"]
    rows = [
        [1, 2, 3],
        [-1, -2, -3],
        [-1, -1, -1],
        [5, 5, 10],
        [7, 6.3, 8.9],
    ]
    return list(zip(words, rows))
|
2017-08-19 21:34:58 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2018-04-20 23:04:14 +03:00
|
|
|
@pytest.fixture
def ngrams_vectors():
    """(ngram, vector) pairs: the word "apple" plus sub-word ngrams of it."""
    ngrams = ["apple", "app", "ppl", "pl"]
    rows = [
        [1, 2, 3],
        [-0.1, -0.2, -0.3],
        [-0.2, -0.3, -0.4],
        [0.7, 0.8, 0.9],
    ]
    return list(zip(ngrams, rows))
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
|
2018-04-20 23:04:14 +03:00
|
|
|
@pytest.fixture()
def ngrams_vocab(en_vocab, ngrams_vectors):
    """The shared English vocab with the ngram vectors installed."""
    # Mutates en_vocab in place; the fixture returns the same object.
    add_vecs_to_vocab(en_vocab, ngrams_vectors)
    return en_vocab
|
2017-08-19 21:34:58 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2017-06-05 13:32:49 +03:00
|
|
|
@pytest.fixture
def data():
    """A small float32 array with one row per word in `strings`."""
    return numpy.array([[0.0, 1.0, 2.0], [3.0, -2.0, 4.0]], dtype="f")
|
2017-06-05 13:32:49 +03:00
|
|
|
|
2019-10-18 12:27:38 +03:00
|
|
|
|
2019-10-17 00:18:55 +03:00
|
|
|
@pytest.fixture
def most_similar_vectors_data():
    """Four distinct float32 rows for exercising Vectors.most_similar."""
    rows = [
        [0.0, 1.0, 2.0],
        [1.0, -2.0, 4.0],
        [1.0, 1.0, -1.0],
        [2.0, 3.0, 1.0],
    ]
    return numpy.array(rows, dtype="f")
|
2019-10-17 00:18:55 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2018-03-31 14:28:25 +03:00
|
|
|
@pytest.fixture
def resize_data():
    """A 2x2 float32 array used as the target of Vectors.resize."""
    return numpy.array([[0.0, 1.0], [2.0, 3.0]], dtype="f")
|
2017-06-05 13:32:49 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2017-08-19 21:34:58 +03:00
|
|
|
@pytest.fixture()
def vocab(en_vocab, vectors):
    """The shared English vocab with the word vectors installed."""
    # Mutates en_vocab in place; the fixture returns the same object.
    add_vecs_to_vocab(en_vocab, vectors)
    return en_vocab
|
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
@pytest.fixture()
def tokenizer_v(vocab):
    """A Tokenizer over `vocab` with no special-case rules or affix patterns."""
    tokenizer = Tokenizer(vocab, {}, None, None, None)
    return tokenizer
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
def test_init_vectors_with_resize_shape(strings, resize_data):
    """A shape-initialized table adopts the new shape after resize()."""
    original_shape = (len(strings), 3)
    table = Vectors(shape=original_shape)
    table.resize(shape=resize_data.shape)
    assert table.shape == resize_data.shape
    assert table.shape != original_shape
|
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
def test_init_vectors_with_resize_data(data, resize_data):
    """A data-initialized table adopts the new shape after resize()."""
    table = Vectors(data=data)
    table.resize(shape=resize_data.shape)
    assert table.shape == resize_data.shape
    assert table.shape != data.shape
|
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
def test_get_vector_resize(strings, data, resize_data):
    """After a resize, keys added by row index map onto the resized rows."""
    table = Vectors(data=data)
    table.resize(shape=resize_data.shape)
    keys = [hash_string(s) for s in strings]
    for row, key in enumerate(keys):
        table.add(key, row=row)

    # Each key retrieves exactly its own row of the resized data.
    assert list(table[keys[0]]) == list(resize_data[0])
    assert list(table[keys[0]]) != list(resize_data[1])
    assert list(table[keys[1]]) != list(resize_data[0])
    assert list(table[keys[1]]) == list(resize_data[1])
|
2017-08-19 21:34:58 +03:00
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2017-06-05 13:32:49 +03:00
|
|
|
def test_init_vectors_with_data(strings, data):
    """A data-initialized table reports the data's shape."""
    assert Vectors(data=data).shape == data.shape
|
|
|
|
|
2018-07-25 00:38:44 +03:00
|
|
|
|
2017-10-31 20:25:08 +03:00
|
|
|
def test_init_vectors_with_shape(strings):
    """A shape-initialized table reports the requested shape."""
    rows, width = len(strings), 3
    table = Vectors(shape=(rows, width))
    assert table.shape == (rows, width)
|
|
|
|
|
|
|
|
|
|
|
|
def test_get_vector(strings, data):
    """Keys added with explicit row indices retrieve exactly those rows.

    Mirrors test_get_vector_resize by checking all four key/row
    combinations; the original omitted the `strings[1] == data[1]` case.
    """
    v = Vectors(data=data)
    strings = [hash_string(s) for s in strings]
    for i, string in enumerate(strings):
        v.add(string, row=i)
    assert list(v[strings[0]]) == list(data[0])
    assert list(v[strings[0]]) != list(data[1])
    assert list(v[strings[1]]) != list(data[0])
    # Previously missing: the second key must match its own row, too.
    assert list(v[strings[1]]) == list(data[1])
|
|
|
|
|
|
|
|
|
|
|
|
def test_set_vector(strings, data):
    """Assigning through __setitem__ overwrites a key's row in place."""
    orig = data.copy()
    table = Vectors(data=data)
    keys = [hash_string(s) for s in strings]
    for row, key in enumerate(keys):
        table.add(key, row=row)

    # Before the write: first key holds the first original row.
    assert list(table[keys[0]]) == list(orig[0])
    assert list(table[keys[0]]) != list(orig[1])
    table[keys[0]] = data[1]
    # After the write: first key now holds the second original row.
    assert list(table[keys[0]]) == list(orig[1])
    assert list(table[keys[0]]) != list(orig[0])
|
|
|
|
|
|
|
|
|
2019-10-17 00:18:55 +03:00
|
|
|
def test_vectors_most_similar(most_similar_vectors_data):
    """Each vector's nearest neighbour (with sort=True) is itself."""
    table = Vectors(data=most_similar_vectors_data)
    _, best_rows, _ = table.most_similar(table.data, batch_size=2, n=2, sort=True)
    for query_row, neighbours in enumerate(best_rows):
        assert neighbours[0] == query_row
|
|
|
|
|
|
|
|
|
2019-10-22 19:18:43 +03:00
|
|
|
def test_vectors_most_similar_identical():
    """Test that most similar identical vectors are assigned a score of 1.0."""
    cases = [
        # Exact score must be 1.0, not 1.0000002.
        [[4, 2, 2, 2], [4, 2, 2, 2], [1, 1, 1, 1]],
        # Exact score must be 1.0, not 0.9999999.
        [[1, 2, 3], [1, 2, 3], [1, 1, 1]],
    ]
    for rows in cases:
        data = numpy.asarray(rows, dtype="f")
        table = Vectors(data=data, keys=["A", "B", "C"])
        _, _, scores = table.most_similar(numpy.asarray([rows[0]], dtype="f"))
        assert scores[0][0] == 1.0
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", ["apple and orange"])
def test_vectors_token_vector(tokenizer_v, vectors, text):
    """Tokens expose the vectors registered for their text."""
    doc = tokenizer_v(text)
    # doc[0] is "apple", doc[2] is "orange" (doc[1] is "and").
    assert (doc[0].text, list(doc[0].vector)) == vectors[0]
    assert (doc[2].text, list(doc[2].vector)) == vectors[1]
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", ["apple"])
def test_vectors__ngrams_word(ngrams_vocab, ngrams_vectors, text):
    """A word with its own entry gets that vector, not an ngram mix."""
    expected = list(ngrams_vectors[0][1])
    assert list(ngrams_vocab.get_vector(text)) == expected
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.parametrize("text", ["applpie"])
def test_vectors__ngrams_subword(ngrams_vocab, ngrams_vectors, text):
    """An OOV word's vector is composed from its known ngram vectors.

    "applpie" has no entry of its own; its vector should be the mean of
    the matching ngram rows ("app", "ppl", "pl" -> fixture indices 1-3).
    The extra 1 and 6 arguments are forwarded to get_vector (presumably
    min/max ngram length -- confirm against Vocab.get_vector).
    """
    truth = list(ngrams_vocab.get_vector(text, 1, 6))
    # Element-wise mean of the three ngram vectors.  (The original
    # wrapped this comprehension in a redundant list(...) call.)
    expected = [
        (
            ngrams_vectors[1][1][i]
            + ngrams_vectors[2][1][i]
            + ngrams_vectors[3][1][i]
        )
        / 3
        for i in range(len(ngrams_vectors[1][1]))
    ]
    # Compare element-wise within a small float tolerance.
    assert len(truth) == len(expected)
    for got, want in zip(truth, expected):
        assert abs(got - want) < 1e-6
|
|
|
|
|
2018-04-20 23:04:14 +03:00
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", ["apple", "orange"])
def test_vectors_lexeme_vector(vocab, text):
    """A lexeme with a vector has a non-empty vector and nonzero norm."""
    lexeme = vocab[text]
    assert list(lexeme.vector)
    assert lexeme.vector_norm
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "and", "orange"]])
def test_vectors_doc_vector(vocab, text):
    """A doc over vector-bearing words has a vector and nonzero norm."""
    document = Doc(vocab, words=text)
    assert list(document.vector)
    assert document.vector_norm
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "and", "orange"]])
def test_vectors_span_vector(vocab, text):
    """A span over vector-bearing words has a vector and nonzero norm."""
    document = Doc(vocab, words=text)
    span = document[0:2]
    assert list(span.vector)
    assert span.vector_norm
|
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", ["apple orange"])
def test_vectors_token_token_similarity(tokenizer_v, text):
    """Token-token similarity is symmetric and strictly inside (-1, 1)."""
    doc = tokenizer_v(text)
    forward = doc[0].similarity(doc[1])
    backward = doc[1].similarity(doc[0])
    assert forward == backward
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text1,text2", [("apple", "orange")])
def test_vectors_token_lexeme_similarity(tokenizer_v, vocab, text1, text2):
    """Token-lexeme similarity is symmetric and strictly inside (-1, 1)."""
    token = tokenizer_v(text1)
    lex = vocab[text2]
    forward = token.similarity(lex)
    assert forward == lex.similarity(token)
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_token_span_similarity(vocab, text):
    """Token-span similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    forward = doc[0].similarity(doc[1:3])
    backward = doc[1:3].similarity(doc[0])
    assert forward == backward
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_token_doc_similarity(vocab, text):
    """Token-doc similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    forward = doc[0].similarity(doc)
    assert forward == doc.similarity(doc[0])
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_lexeme_span_similarity(vocab, text):
    """Lexeme-span similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    lex = vocab[text[0]]
    assert lex.similarity(doc[1:3]) == doc[1:3].similarity(lex)
    # Fixed: the range check previously tested doc.similarity(doc[1:3]),
    # which is not the lexeme-span similarity this test is about.
    assert -1.0 < lex.similarity(doc[1:3]) < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text1,text2", [("apple", "orange")])
def test_vectors_lexeme_lexeme_similarity(vocab, text1, text2):
    """Lexeme-lexeme similarity is symmetric and strictly inside (-1, 1)."""
    lex1 = vocab[text1]
    lex2 = vocab[text2]
    forward = lex1.similarity(lex2)
    assert forward == lex2.similarity(lex1)
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_lexeme_doc_similarity(vocab, text):
    """Lexeme-doc similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    lex = vocab[text[0]]
    forward = lex.similarity(doc)
    assert forward == doc.similarity(lex)
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_span_span_similarity(vocab, text):
    """Span-span similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    # The similarity calls emit a UserWarning, so they must run inside
    # the pytest.warns block; the assertions themselves can run after.
    with pytest.warns(UserWarning):
        forward = doc[0:2].similarity(doc[1:3])
        backward = doc[1:3].similarity(doc[0:2])
    assert forward == backward
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize("text", [["apple", "orange", "juice"]])
def test_vectors_span_doc_similarity(vocab, text):
    """Span-doc similarity is symmetric and strictly inside (-1, 1)."""
    doc = Doc(vocab, words=text)
    # The similarity calls emit a UserWarning, so they must run inside
    # the pytest.warns block; the assertions themselves can run after.
    with pytest.warns(UserWarning):
        forward = doc[0:2].similarity(doc)
        backward = doc.similarity(doc[0:2])
    assert forward == backward
    assert -1.0 < forward < 1.0
|
2017-08-19 21:34:58 +03:00
|
|
|
|
|
|
|
|
2018-11-27 03:09:36 +03:00
|
|
|
@pytest.mark.parametrize(
    "text1,text2", [(["apple", "and", "apple", "pie"], ["orange", "juice"])]
)
def test_vectors_doc_doc_similarity(vocab, text1, text2):
    """Doc-doc similarity is symmetric and strictly inside (-1, 1)."""
    doc1 = Doc(vocab, words=text1)
    doc2 = Doc(vocab, words=text2)
    forward = doc1.similarity(doc2)
    assert forward == doc2.similarity(doc1)
    assert -1.0 < forward < 1.0
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
|
|
|
|
def test_vocab_add_vector():
    """set_vector stores a per-word vector retrievable via the lexeme."""
    vocab = Vocab(vectors_name="test_vocab_add_vector")
    data = numpy.ndarray((5, 3), dtype="f")
    data[0] = 1.0
    data[1] = 2.0
    vocab.set_vector("cat", data[0])
    vocab.set_vector("dog", data[1])
    # Each lexeme reads back the row it was assigned.
    assert list(vocab["cat"].vector) == [1.0, 1.0, 1.0]
    assert list(vocab["dog"].vector) == [2.0, 2.0, 2.0]
|
2018-07-25 00:38:44 +03:00
|
|
|
|
|
|
|
|
|
|
|
def test_vocab_prune_vectors():
    """prune_vectors keeps the top rows and remaps pruned words.

    With three vectors pruned to two, "kitten" should be remapped to its
    nearest kept neighbour, "cat", at their cosine similarity.
    """
    vocab = Vocab(vectors_name="test_vocab_prune_vectors")
    words = ("cat", "dog", "kitten")
    # Touch the lexemes first so they exist in the vocab.
    for word in words:
        _ = vocab[word]  # noqa: F841
    data = numpy.ndarray((5, 3), dtype="f")
    data[0] = [1.0, 1.2, 1.1]
    data[1] = [0.3, 1.3, 1.0]
    data[2] = [0.9, 1.22, 1.05]
    for row, word in enumerate(words):
        vocab.set_vector(word, data[row])

    remap = vocab.prune_vectors(2, batch_size=2)
    assert list(remap.keys()) == ["kitten"]
    neighbour, similarity = list(remap.values())[0]
    assert neighbour == "cat", remap
    assert_allclose(similarity, cosine(data[0], data[2]), atol=1e-4, rtol=1e-3)
|