diff --git a/spacy/lexeme.pyx b/spacy/lexeme.pyx
index fe396b7a2..4f614e6fd 100644
--- a/spacy/lexeme.pyx
+++ b/spacy/lexeme.pyx
@@ -125,11 +125,10 @@ cdef class Lexeme:
         if self.vector_norm == 0 or other.vector_norm == 0:
             user_warning(Warnings.W008.format(obj='Lexeme'))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)
-        return (xp.dot(self.vector, other.vector) /
-                (self.vector_norm * other.vector_norm))
+        return (xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm))
 
     def to_bytes(self):
         lex_data = Lexeme.c_to_bytes(self.c)
diff --git a/spacy/tokens/doc.pyx b/spacy/tokens/doc.pyx
index 328b5e627..2bef44cbc 100644
--- a/spacy/tokens/doc.pyx
+++ b/spacy/tokens/doc.pyx
@@ -329,10 +329,10 @@ cdef class Doc:
         if self.vector_norm == 0 or other.vector_norm == 0:
             user_warning(Warnings.W008.format(obj='Doc'))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)
-        return xp.dot(self.vector, other.vector) / (self.vector_norm * other.vector_norm)
+        return xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm)
 
     property has_vector:
         """A boolean value indicating whether a word vector is associated with
diff --git a/spacy/tokens/span.pyx b/spacy/tokens/span.pyx
index 1c3b9373b..1450eb214 100644
--- a/spacy/tokens/span.pyx
+++ b/spacy/tokens/span.pyx
@@ -234,10 +234,10 @@ cdef class Span:
         if self.vector_norm == 0.0 or other.vector_norm == 0.0:
             user_warning(Warnings.W008.format(obj='Span'))
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)
-        return xp.dot(self.vector, other.vector) / (self.vector_norm * other.vector_norm)
+        return xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm)
 
     cpdef np.ndarray to_array(self, object py_attr_ids):
         """Given a list of M attribute IDs, export the tokens to a numpy
diff --git a/spacy/tokens/token.pyx b/spacy/tokens/token.pyx
index 35b7360db..00de4897c 100644
--- a/spacy/tokens/token.pyx
+++ b/spacy/tokens/token.pyx
@@ -172,8 +172,7 @@ cdef class Token:
             return 0.0
         vector = self.vector
         xp = get_array_module(vector)
-        return (xp.dot(vector, other.vector) /
-                (self.vector_norm * other.vector_norm))
+        return (xp.dot(vector, other.vector) / (self.vector_norm * other.vector_norm))
 
     property lex_id:
         """RETURNS (int): Sequential ID of the token's lexical type."""