# cython: infer_types=True, profile=True
from typing import Iterator

from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from cpython.exc cimport PyErr_SetFromErrno
from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
from libc.stdint cimport int32_t, int64_t
from libcpp.vector cimport vector

from pathlib import Path
import warnings
from os import path

from .typedefs cimport hash_t
from .errors import Errors, Warnings


cdef class Candidate:
    """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
    to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
    algorithm which will disambiguate the various candidates to the correct one.
    Each candidate (alias, entity) pair is assigned a certain prior probability.

    DOCS: https://spacy.io/api/kb/#candidate_init
    """

    def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
        self.kb = kb
        self.entity_hash = entity_hash
        self.entity_freq = entity_freq
        self.entity_vector = entity_vector
        self.alias_hash = alias_hash
        self.prior_prob = prior_prob

    @property
    def entity(self):
        """RETURNS (uint64): hash of the entity's KB ID/name"""
        return self.entity_hash

    @property
    def entity_(self):
        """RETURNS (str): ID/name of this entity in the KB"""
        return self.kb.vocab.strings[self.entity_hash]

    @property
    def alias(self):
        """RETURNS (uint64): hash of the alias"""
        return self.alias_hash

    @property
    def alias_(self):
        """RETURNS (str): ID of the original alias"""
        return self.kb.vocab.strings[self.alias_hash]

    @property
    def entity_freq(self):
        return self.entity_freq

    @property
    def entity_vector(self):
        return self.entity_vector

    @property
    def prior_prob(self):
        return self.prior_prob


def get_candidates(KnowledgeBase kb, span) -> Iterator[Candidate]:
    """
    Return candidate entities for a given span by using the text of the span as the alias
    and fetching appropriate entries from the index.
    This particular function is optimized to work with the built-in KB functionality,
    but any other custom candidate generation method can be used in combination with the KB as well.
    """
    return kb.get_alias_candidates(span.text)
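
# A minimal sketch of a custom candidate generator (illustrative, not executed
# here): any callable with the same (kb, span) signature can stand in for
# `get_candidates`, e.g. one that normalizes the mention text before lookup.
#
#     def get_lowercased_candidates(kb, span):
#         return kb.get_alias_candidates(span.text.lower())

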
cdef class KnowledgeBase:
    """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
    to support entity linking of named entities to real-world concepts.

    DOCS: https://spacy.io/api/kb
    """

    def __init__(self, Vocab vocab, entity_vector_length):
        """Create a KnowledgeBase."""
        self.mem = Pool()
        self.entity_vector_length = entity_vector_length
        self._entry_index = PreshMap()
        self._alias_index = PreshMap()
        self.vocab = vocab
        self.vocab.strings.add("")
        self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
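
    # Usage sketch (illustrative, not executed here): `vocab` is assumed to
    # come from an existing pipeline, e.g. `nlp.vocab`.
    #
    #     kb = KnowledgeBase(vocab, entity_vector_length=3)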

    @property
    def entity_vector_length(self):
        """RETURNS (uint64): length of the entity vectors"""
        return self.entity_vector_length

    def __len__(self):
        return self.get_size_entities()

    def get_size_entities(self):
        return len(self._entry_index)

    def get_entity_strings(self):
        return [self.vocab.strings[x] for x in self._entry_index]

    def get_size_aliases(self):
        return len(self._alias_index)

    def get_alias_strings(self):
        return [self.vocab.strings[x] for x in self._alias_index]

    def add_entity(self, unicode entity, float freq, vector[float] entity_vector):
        """
        Add an entity to the KB, optionally specifying its log probability based on corpus frequency.
        Return the hash of the entity ID/name at the end.
        """
        cdef hash_t entity_hash = self.vocab.strings.add(entity)

        # Return if this entity was added before
        if entity_hash in self._entry_index:
            warnings.warn(Warnings.W018.format(entity=entity))
            return

        # Raise an error if the provided entity vector is not of the correct length
        if len(entity_vector) != self.entity_vector_length:
            raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length))

        vector_index = self.c_add_vector(entity_vector=entity_vector)

        new_index = self.c_add_entity(entity_hash=entity_hash,
                                      freq=freq,
                                      vector_index=vector_index,
                                      feats_row=-1)  # Features table currently not implemented
        self._entry_index[entity_hash] = new_index

        return entity_hash
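
    # Example sketch (illustrative values): the vector must have exactly
    # `entity_vector_length` elements, and "Q42" is a WikiData-style ID.
    #
    #     kb.add_entity(entity="Q42", freq=32.0, entity_vector=[0.1, 0.2, 0.3])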

    cpdef set_entities(self, entity_list, freq_list, vector_list):
        if len(entity_list) != len(freq_list) or len(entity_list) != len(vector_list):
            raise ValueError(Errors.E140)

        nr_entities = len(set(entity_list))
        self._entry_index = PreshMap(nr_entities+1)
        self._entries = entry_vec(nr_entities+1)

        i = 0
        nr_written = 0
        cdef KBEntryC entry
        cdef hash_t entity_hash
        while i < len(entity_list):
            # only process this entity if its unique ID hasn't been added before
            entity_hash = self.vocab.strings.add(entity_list[i])
            if entity_hash in self._entry_index:
                warnings.warn(Warnings.W018.format(entity=entity_list[i]))

            else:
                entity_vector = vector_list[i]
                if len(entity_vector) != self.entity_vector_length:
                    raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length))

                entry.entity_hash = entity_hash
                entry.freq = freq_list[i]

                vector_index = self.c_add_vector(entity_vector=vector_list[i])
                entry.vector_index = vector_index

                entry.feats_row = -1  # Features table currently not implemented

                # use a separate write cursor so the entries stay contiguous
                # even when duplicate IDs are warned about and skipped above
                nr_written += 1
                self._entries[nr_written] = entry
                self._entry_index[entity_hash] = nr_written

            i += 1
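
    # Bulk-load sketch (illustrative values): the three lists are aligned
    # element-wise, otherwise Errors.E140 is raised.
    #
    #     kb.set_entities(entity_list=["Q42", "Q66"],
    #                     freq_list=[32.0, 17.0],
    #                     vector_list=[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])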

    def contains_entity(self, unicode entity):
        cdef hash_t entity_hash = self.vocab.strings.add(entity)
        return entity_hash in self._entry_index

    def contains_alias(self, unicode alias):
        cdef hash_t alias_hash = self.vocab.strings.add(alias)
        return alias_hash in self._alias_index

    def add_alias(self, unicode alias, entities, probabilities):
        """
        For a given alias, add its potential entities and prior probabilities to the KB.
        Return the alias_hash at the end.
        """
        # Throw an error if the lengths of entities and probabilities are not the same
        if len(entities) != len(probabilities):
            raise ValueError(Errors.E132.format(alias=alias,
                                                entities_length=len(entities),
                                                probabilities_length=len(probabilities)))

        # Throw an error if the probabilities sum up to more than 1 (allow for some rounding errors)
        prob_sum = sum(probabilities)
        if prob_sum > 1.00001:
            raise ValueError(Errors.E133.format(alias=alias, sum=prob_sum))

        cdef hash_t alias_hash = self.vocab.strings.add(alias)

        # Check whether this alias was added before
        if alias_hash in self._alias_index:
            warnings.warn(Warnings.W017.format(alias=alias))
            return

        cdef vector[int64_t] entry_indices
        cdef vector[float] probs

        for entity, prob in zip(entities, probabilities):
            entity_hash = self.vocab.strings[entity]
            if entity_hash not in self._entry_index:
                raise ValueError(Errors.E134.format(entity=entity))

            entry_index = <int64_t>self._entry_index.get(entity_hash)
            entry_indices.push_back(int(entry_index))
            probs.push_back(float(prob))

        new_index = self.c_add_aliases(alias_hash=alias_hash, entry_indices=entry_indices, probs=probs)
        self._alias_index[alias_hash] = new_index

        return alias_hash
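
    # Example sketch (illustrative values): all entities must already exist in
    # the KB, and the priors may sum to at most 1.
    #
    #     kb.add_alias(alias="Douglas", entities=["Q42", "Q66"],
    #                  probabilities=[0.6, 0.3])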

    def append_alias(self, unicode alias, unicode entity, float prior_prob, ignore_warnings=False):
        """
        For an alias already existing in the KB, extend its potential entities with one more.
        Raise an error if either the alias or the entity is unknown to the KB,
        and throw a warning when the (entity, alias) combination was already previously recorded.
        Raise an error if adding this entity's prior probability would push the priors for this alias over a sum of 1.
        For efficiency, it's best to use the method `add_alias` as much as possible instead of this one.
        """
        # Check if the alias exists in the KB
        cdef hash_t alias_hash = self.vocab.strings[alias]
        if alias_hash not in self._alias_index:
            raise ValueError(Errors.E176.format(alias=alias))

        # Check if the entity exists in the KB
        cdef hash_t entity_hash = self.vocab.strings[entity]
        if entity_hash not in self._entry_index:
            raise ValueError(Errors.E134.format(entity=entity))
        entry_index = <int64_t>self._entry_index.get(entity_hash)

        # Throw an error if the prior probabilities (including the new one) sum up to more than 1
        alias_index = <int64_t>self._alias_index.get(alias_hash)
        alias_entry = self._aliases_table[alias_index]
        current_sum = sum([p for p in alias_entry.probs])
        new_sum = current_sum + prior_prob

        if new_sum > 1.00001:
            raise ValueError(Errors.E133.format(alias=alias, sum=new_sum))

        entry_indices = alias_entry.entry_indices

        is_present = False
        for i in range(entry_indices.size()):
            if entry_indices[i] == int(entry_index):
                is_present = True

        if is_present:
            if not ignore_warnings:
                warnings.warn(Warnings.W024.format(entity=entity, alias=alias))
        else:
            entry_indices.push_back(int(entry_index))
            alias_entry.entry_indices = entry_indices

            probs = alias_entry.probs
            probs.push_back(float(prior_prob))
            alias_entry.probs = probs
            self._aliases_table[alias_index] = alias_entry
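
    # Example sketch (illustrative values): extend an existing alias with one
    # more entity, as long as the priors still sum to at most 1.
    #
    #     kb.append_alias(alias="Douglas", entity="Q27", prior_prob=0.1)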

    def get_alias_candidates(self, unicode alias) -> Iterator[Candidate]:
        """
        Return candidate entities for an alias. Each candidate defines the entity, the original alias,
        and the prior probability of that alias resolving to that entity.
        If the alias is not known in the KB, an empty list is returned.
        """
        cdef hash_t alias_hash = self.vocab.strings[alias]
        if alias_hash not in self._alias_index:
            return []
        alias_index = <int64_t>self._alias_index.get(alias_hash)
        alias_entry = self._aliases_table[alias_index]

        return [Candidate(kb=self,
                          entity_hash=self._entries[entry_index].entity_hash,
                          entity_freq=self._entries[entry_index].freq,
                          entity_vector=self._vectors_table[self._entries[entry_index].vector_index],
                          alias_hash=alias_hash,
                          prior_prob=prior_prob)
                for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs)
                if entry_index != 0]
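
    # Retrieval sketch: each returned `Candidate` exposes the entity, the
    # original alias and the prior probability, e.g.
    #
    #     for c in kb.get_alias_candidates("Douglas"):
    #         print(c.entity_, c.alias_, c.prior_prob)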

    def get_vector(self, unicode entity):
        cdef hash_t entity_hash = self.vocab.strings[entity]

        # Return a zero vector if this entity is unknown in this KB
        if entity_hash not in self._entry_index:
            return [0] * self.entity_vector_length
        entry_index = self._entry_index[entity_hash]

        return self._vectors_table[self._entries[entry_index].vector_index]

    def get_prior_prob(self, unicode entity, unicode alias):
        """Return the prior probability of a given alias being linked to a given entity,
        or return 0.0 when this combination is not known in the knowledge base"""
        cdef hash_t alias_hash = self.vocab.strings[alias]
        cdef hash_t entity_hash = self.vocab.strings[entity]

        if entity_hash not in self._entry_index or alias_hash not in self._alias_index:
            return 0.0

        alias_index = <int64_t>self._alias_index.get(alias_hash)
        entry_index = self._entry_index[entity_hash]

        alias_entry = self._aliases_table[alias_index]
        for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs):
            if self._entries[entry_index].entity_hash == entity_hash:
                return prior_prob

        return 0.0
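
    # Query sketch (illustrative IDs):
    #
    #     kb.get_vector("Q42")                 # entity vector, or a zero vector
    #     kb.get_prior_prob("Q42", "Douglas")  # 0.0 if the pair is unknown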

    def to_disk(self, loc):
        cdef Writer writer = Writer(loc)
        writer.write_header(self.get_size_entities(), self.entity_vector_length)

        # dumping the entity vectors in their original order
        i = 0
        for entity_vector in self._vectors_table:
            for element in entity_vector:
                writer.write_vector_element(element)
            i = i+1

        # dumping the entry records in the order in which they are in the _entries vector.
        # index 0 is a dummy object not stored in the _entry_index and can be ignored.
        i = 1
        for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]):
            entry = self._entries[entry_index]
            assert entry.entity_hash == entry_hash
            assert entry_index == i
            writer.write_entry(entry.entity_hash, entry.freq, entry.vector_index)
            i = i+1

        writer.write_alias_length(self.get_size_aliases())

        # dumping the aliases in the order in which they are in the _aliases_table.
        # index 0 is a dummy object not stored in the _alias_index and can be ignored.
        i = 1
        for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]):
            alias = self._aliases_table[alias_index]
            assert alias_index == i

            candidate_length = len(alias.entry_indices)
            writer.write_alias_header(alias_hash, candidate_length)

            for j in range(0, candidate_length):
                writer.write_alias(alias.entry_indices[j], alias.probs[j])

            i = i+1

        writer.close()
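
    # Round-trip sketch (illustrative path): the header restores
    # `entity_vector_length`, so the value passed at construction time is
    # overwritten by `from_disk`.
    #
    #     kb.to_disk("/path/to/kb")
    #     kb2 = KnowledgeBase(vocab, entity_vector_length=1)
    #     kb2.from_disk("/path/to/kb")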

    cpdef from_disk(self, loc):
        cdef hash_t entity_hash
        cdef hash_t alias_hash
        cdef int64_t entry_index
        cdef float freq, prob
        cdef int32_t vector_index
        cdef KBEntryC entry
        cdef AliasC alias
        cdef float vector_element

        cdef Reader reader = Reader(loc)

        # STEP 0: load header and initialize KB
        cdef int64_t nr_entities
        cdef int64_t entity_vector_length
        reader.read_header(&nr_entities, &entity_vector_length)

        self.entity_vector_length = entity_vector_length
        self._entry_index = PreshMap(nr_entities+1)
        self._entries = entry_vec(nr_entities+1)
        self._vectors_table = float_matrix(nr_entities+1)

        # STEP 1: load entity vectors
        cdef int i = 0
        cdef int j = 0
        while i < nr_entities:
            entity_vector = float_vec(entity_vector_length)
            j = 0
            while j < entity_vector_length:
                reader.read_vector_element(&vector_element)
                entity_vector[j] = vector_element
                j = j+1
            self._vectors_table[i] = entity_vector
            i = i+1

        # STEP 2: load entities
        # we assume that the entity data was written in sequence
        # index 0 is a dummy object not stored in the _entry_index and can be ignored.
        i = 1
        while i <= nr_entities:
            reader.read_entry(&entity_hash, &freq, &vector_index)

            entry.entity_hash = entity_hash
            entry.freq = freq
            entry.vector_index = vector_index
            entry.feats_row = -1  # Features table currently not implemented

            self._entries[i] = entry
            self._entry_index[entity_hash] = i

            i += 1

        # check that all entities were read in properly
        assert nr_entities == self.get_size_entities()

        # STEP 3: load aliases
        cdef int64_t nr_aliases
        reader.read_alias_length(&nr_aliases)
        self._alias_index = PreshMap(nr_aliases+1)
        self._aliases_table = alias_vec(nr_aliases+1)

        cdef int64_t nr_candidates
        cdef vector[int64_t] entry_indices
        cdef vector[float] probs

        i = 1
        # we assume the alias data was written in sequence
        # index 0 is a dummy object not stored in the _alias_index and can be ignored.
        while i <= nr_aliases:
            reader.read_alias_header(&alias_hash, &nr_candidates)
            entry_indices = vector[int64_t](nr_candidates)
            probs = vector[float](nr_candidates)

            for j in range(0, nr_candidates):
                reader.read_alias(&entry_index, &prob)
                entry_indices[j] = entry_index
                probs[j] = prob

            alias.entry_indices = entry_indices
            alias.probs = probs

            self._aliases_table[i] = alias
            self._alias_index[alias_hash] = i

            i += 1

        # check that all aliases were read in properly
        assert nr_aliases == self.get_size_aliases()


cdef class Writer:
    def __init__(self, object loc):
        if isinstance(loc, Path):
            loc = bytes(loc)
        if path.exists(loc):
            if path.isdir(loc):
                raise ValueError(Errors.E928.format(loc=loc))
        cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc
        self._fp = fopen(<char*>bytes_loc, 'wb')
        if not self._fp:
            raise IOError(Errors.E146.format(path=loc))
        fseek(self._fp, 0, 0)

    def close(self):
        cdef size_t status = fclose(self._fp)
        assert status == 0

    cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1:
        self._write(&nr_entries, sizeof(nr_entries))
        self._write(&entity_vector_length, sizeof(entity_vector_length))

    cdef int write_vector_element(self, float element) except -1:
        self._write(&element, sizeof(element))

    cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1:
        self._write(&entry_hash, sizeof(entry_hash))
        self._write(&entry_freq, sizeof(entry_freq))
        self._write(&vector_index, sizeof(vector_index))
        # Features table currently not implemented and not written to file

    cdef int write_alias_length(self, int64_t alias_length) except -1:
        self._write(&alias_length, sizeof(alias_length))

    cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1:
        self._write(&alias_hash, sizeof(alias_hash))
        self._write(&candidate_length, sizeof(candidate_length))

    cdef int write_alias(self, int64_t entry_index, float prob) except -1:
        self._write(&entry_index, sizeof(entry_index))
        self._write(&prob, sizeof(prob))

    cdef int _write(self, void* value, size_t size) except -1:
        status = fwrite(value, size, 1, self._fp)
        assert status == 1, status
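
    # The binary layout produced by the methods above, in write order:
    #   header:  nr_entries (int64), entity_vector_length (int64)
    #   vectors: the entity vector elements, one float each
    #   entries: per entity: entity_hash (hash_t), freq (float), vector_index (int32)
    #   aliases: alias count (int64), then per alias: alias_hash (hash_t) and
    #            candidate count (int64), followed by one (entry_index, prob)
    #            pair (int64, float) per candidate

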
cdef class Reader:
    def __init__(self, object loc):
        if isinstance(loc, Path):
            loc = bytes(loc)
        if not path.exists(loc):
            raise ValueError(Errors.E929.format(loc=loc))
        if path.isdir(loc):
            raise ValueError(Errors.E928.format(loc=loc))
        cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc
        self._fp = fopen(<char*>bytes_loc, 'rb')
        if not self._fp:
            PyErr_SetFromErrno(IOError)
        status = fseek(self._fp, 0, 0)  # this can be 0 if there is no header

    def __dealloc__(self):
        fclose(self._fp)

    cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1:
        status = self._read(nr_entries, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="header"))

        status = self._read(entity_vector_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector length"))

    cdef int read_vector_element(self, float* element) except -1:
        status = self._read(element, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector element"))

    cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1:
        status = self._read(entity_hash, sizeof(hash_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entity hash"))

        status = self._read(freq, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entity freq"))

        status = self._read(vector_index, sizeof(int32_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector index"))

        if feof(self._fp):
            return 0
        else:
            return 1

    cdef int read_alias_length(self, int64_t* alias_length) except -1:
        status = self._read(alias_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="alias length"))

    cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1:
        status = self._read(alias_hash, sizeof(hash_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="alias hash"))

        status = self._read(candidate_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="candidate length"))

    cdef int read_alias(self, int64_t* entry_index, float* prob) except -1:
        status = self._read(entry_index, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entry index"))

        status = self._read(prob, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="prior probability"))

    cdef int _read(self, void* value, size_t size) except -1:
        status = fread(value, size, 1, self._fp)
        return status