# cython: infer_types=True
# cython: profile=True
# coding: utf8
from spacy.errors import Errors, Warnings, user_warning

from pathlib import Path
from cymem.cymem cimport Pool
from preshed.maps cimport PreshMap
from cpython.exc cimport PyErr_SetFromErrno
from libc.stdio cimport fopen, fclose, fread, fwrite, feof, fseek
from libc.stdint cimport int32_t, int64_t
from .typedefs cimport hash_t
from os import path

from libcpp.vector cimport vector


cdef class Candidate:
    """A `Candidate` object refers to a textual mention (`alias`) that may or may not be resolved
    to a specific `entity` from a Knowledge Base. This will be used as input for the entity linking
    algorithm which will disambiguate the various candidates to the correct one.
    Each candidate (alias, entity) pair is assigned a certain prior probability.

    DOCS: https://spacy.io/api/kb/#candidate_init
    """

    def __init__(self, KnowledgeBase kb, entity_hash, entity_freq, entity_vector, alias_hash, prior_prob):
        self.kb = kb
        self.entity_hash = entity_hash
        self.entity_freq = entity_freq
        self.entity_vector = entity_vector
        self.alias_hash = alias_hash
        self.prior_prob = prior_prob

    @property
    def entity(self):
        """RETURNS (uint64): hash of the entity's KB ID/name"""
        return self.entity_hash

    @property
    def entity_(self):
        """RETURNS (unicode): ID/name of this entity in the KB"""
        return self.kb.vocab.strings[self.entity_hash]

    @property
    def alias(self):
        """RETURNS (uint64): hash of the alias"""
        return self.alias_hash

    @property
    def alias_(self):
        """RETURNS (unicode): ID of the original alias"""
        return self.kb.vocab.strings[self.alias_hash]
    # Within this cdef class, `self.entity_freq` etc. resolve to the C-level
    # attributes declared in kb.pxd; these properties just expose them to Python.
    @property
    def entity_freq(self):
        return self.entity_freq

    @property
    def entity_vector(self):
        return self.entity_vector

    @property
    def prior_prob(self):
        return self.prior_prob
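
# A minimal usage sketch (illustrative only; assumes a `kb` built elsewhere):
# the unsuffixed properties return uint64 hashes, while the underscore-suffixed
# variants resolve them through `kb.vocab.strings`:
#
#     c = kb.get_candidates("Douglas")[0]
#     assert c.entity_ == kb.vocab.strings[c.entity]
#     assert c.alias_ == kb.vocab.strings[c.alias]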


cdef class KnowledgeBase:
    """A `KnowledgeBase` instance stores unique identifiers for entities and their textual aliases,
    to support entity linking of named entities to real-world concepts.

    DOCS: https://spacy.io/api/kb
    """

    def __init__(self, Vocab vocab, entity_vector_length=64):
        self.vocab = vocab
        self.mem = Pool()
        self.entity_vector_length = entity_vector_length

        self._entry_index = PreshMap()
        self._alias_index = PreshMap()

        self.vocab.strings.add("")
        self._create_empty_vectors(dummy_hash=self.vocab.strings[""])
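
    # A minimal construction sketch (assumes an `nlp` pipeline from elsewhere;
    # values are illustrative):
    #
    #     kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=3)
    #     assert len(kb) == 0 and kb.get_size_aliases() == 0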

    @property
    def entity_vector_length(self):
        """RETURNS (uint64): length of the entity vectors"""
        return self.entity_vector_length

    def __len__(self):
        return self.get_size_entities()

    def get_size_entities(self):
        return len(self._entry_index)

    def get_entity_strings(self):
        return [self.vocab.strings[x] for x in self._entry_index]

    def get_size_aliases(self):
        return len(self._alias_index)

    def get_alias_strings(self):
        return [self.vocab.strings[x] for x in self._alias_index]

    def add_entity(self, unicode entity, float freq, vector[float] entity_vector):
        """
        Add an entity to the KB, optionally specifying its log probability based on corpus frequency.
        Return the hash of the entity ID/name at the end.
        """
        cdef hash_t entity_hash = self.vocab.strings.add(entity)

        # Return if this entity was added before
        if entity_hash in self._entry_index:
            user_warning(Warnings.W018.format(entity=entity))
            return

        # Raise an error if the provided entity vector is not of the correct length
        if len(entity_vector) != self.entity_vector_length:
            raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length))

        vector_index = self.c_add_vector(entity_vector=entity_vector)

        new_index = self.c_add_entity(entity_hash=entity_hash,
                                      freq=freq,
                                      vector_index=vector_index,
                                      feats_row=-1)  # Features table currently not implemented
        self._entry_index[entity_hash] = new_index

        return entity_hash
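
    # Sketch: adding a single entity (hypothetical Wikidata-style ID and a toy
    # vector matching entity_vector_length=3):
    #
    #     kb.add_entity(entity="Q42", freq=12.0, entity_vector=[1.0, 2.0, 3.0])
    #     assert kb.contains_entity("Q42")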

    cpdef set_entities(self, entity_list, freq_list, vector_list):
        if len(entity_list) != len(freq_list) or len(entity_list) != len(vector_list):
            raise ValueError(Errors.E140)

        nr_entities = len(set(entity_list))
        self._entry_index = PreshMap(nr_entities+1)
        self._entries = entry_vec(nr_entities+1)

        i = 0
        # track the write position separately from i, so that skipped duplicates
        # don't leave gaps in (or overflow) the entries vector
        new_index = 1
        cdef KBEntryC entry
        cdef hash_t entity_hash
        while i < len(entity_list):
            # only process this entity if its unique ID hasn't been added before
            entity_hash = self.vocab.strings.add(entity_list[i])
            if entity_hash in self._entry_index:
                user_warning(Warnings.W018.format(entity=entity_list[i]))
            else:
                entity_vector = vector_list[i]
                if len(entity_vector) != self.entity_vector_length:
                    raise ValueError(Errors.E141.format(found=len(entity_vector), required=self.entity_vector_length))

                entry.entity_hash = entity_hash
                entry.freq = freq_list[i]

                vector_index = self.c_add_vector(entity_vector=vector_list[i])
                entry.vector_index = vector_index

                entry.feats_row = -1  # Features table currently not implemented

                self._entries[new_index] = entry
                self._entry_index[entity_hash] = new_index
                new_index += 1
            i += 1
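
    # Sketch of the bulk path; the three lists must stay aligned index-by-index
    # (IDs and numbers are illustrative):
    #
    #     kb.set_entities(entity_list=["Q42", "Q64"],
    #                     freq_list=[12.0, 8.0],
    #                     vector_list=[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])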

    def contains_entity(self, unicode entity):
        cdef hash_t entity_hash = self.vocab.strings.add(entity)
        return entity_hash in self._entry_index

    def contains_alias(self, unicode alias):
        cdef hash_t alias_hash = self.vocab.strings.add(alias)
        return alias_hash in self._alias_index
    def add_alias(self, unicode alias, entities, probabilities):
        """
        For a given alias, add its potential entities and prior probabilities to the KB.
        Return the alias_hash at the end.
        """
        # Throw an error if the lengths of entities and probabilities are not the same
        if not len(entities) == len(probabilities):
            raise ValueError(Errors.E132.format(alias=alias,
                                                entities_length=len(entities),
                                                probabilities_length=len(probabilities)))

        # Throw an error if the probabilities sum up to more than 1 (allow for some rounding errors)
        prob_sum = sum(probabilities)
        if prob_sum > 1.00001:
            raise ValueError(Errors.E133.format(alias=alias, sum=prob_sum))

        cdef hash_t alias_hash = self.vocab.strings.add(alias)

        # Check whether this alias was added before
        if alias_hash in self._alias_index:
            user_warning(Warnings.W017.format(alias=alias))
            return

        cdef vector[int64_t] entry_indices
        cdef vector[float] probs

        for entity, prob in zip(entities, probabilities):
            entity_hash = self.vocab.strings[entity]
            if entity_hash not in self._entry_index:
                raise ValueError(Errors.E134.format(entity=entity))

            entry_index = <int64_t>self._entry_index.get(entity_hash)
            entry_indices.push_back(int(entry_index))
            probs.push_back(float(prob))

        new_index = self.c_add_aliases(alias_hash=alias_hash, entry_indices=entry_indices, probs=probs)
        self._alias_index[alias_hash] = new_index

        return alias_hash
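
    # Sketch: one alias with two candidate entities; the priors may sum to at
    # most 1, and both entities must already be in the KB:
    #
    #     kb.add_alias(alias="Douglas", entities=["Q42", "Q64"],
    #                  probabilities=[0.8, 0.2])
    #     assert kb.contains_alias("Douglas")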

    def append_alias(self, unicode alias, unicode entity, float prior_prob, ignore_warnings=False):
        """
        For an alias already existing in the KB, extend its list of potential entities with one more.
        Throw a warning if either the alias or the entity is unknown,
        or when the combination was already previously recorded.
        Throw an error if adding this entity's prior probability would push the sum above 1.
        For efficiency, it's best to use the method `add_alias` as much as possible instead of this one.
        """
        # Check if the alias exists in the KB
        cdef hash_t alias_hash = self.vocab.strings[alias]
        if alias_hash not in self._alias_index:
            raise ValueError(Errors.E176.format(alias=alias))

        # Check if the entity exists in the KB
        cdef hash_t entity_hash = self.vocab.strings[entity]
        if entity_hash not in self._entry_index:
            raise ValueError(Errors.E134.format(entity=entity))
        entry_index = <int64_t>self._entry_index.get(entity_hash)

        # Throw an error if the prior probabilities (including the new one) sum up to more than 1
        alias_index = <int64_t>self._alias_index.get(alias_hash)
        alias_entry = self._aliases_table[alias_index]
        current_sum = sum([p for p in alias_entry.probs])
        new_sum = current_sum + prior_prob

        if new_sum > 1.00001:
            raise ValueError(Errors.E133.format(alias=alias, sum=new_sum))

        entry_indices = alias_entry.entry_indices

        is_present = False
        for i in range(entry_indices.size()):
            if entry_indices[i] == int(entry_index):
                is_present = True
                break

        if is_present:
            if not ignore_warnings:
                user_warning(Warnings.W024.format(entity=entity, alias=alias))
        else:
            entry_indices.push_back(int(entry_index))
            alias_entry.entry_indices = entry_indices

            probs = alias_entry.probs
            probs.push_back(float(prior_prob))
            alias_entry.probs = probs
            self._aliases_table[alias_index] = alias_entry
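
    # Sketch: appending one extra candidate to an existing alias, provided the
    # prior probabilities still sum to at most 1 ("Q17" is a hypothetical ID
    # that must already be in the KB):
    #
    #     kb.append_alias(alias="Douglas", entity="Q17", prior_prob=0.05)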

    def get_candidates(self, unicode alias):
        """
        Return candidate entities for an alias. Each candidate defines the entity, the original alias,
        and the prior probability of that alias resolving to that entity.
        If the alias is not known in the KB, an empty list is returned.
        """
        cdef hash_t alias_hash = self.vocab.strings[alias]
        if alias_hash not in self._alias_index:
            return []

        alias_index = <int64_t>self._alias_index.get(alias_hash)
        alias_entry = self._aliases_table[alias_index]

        return [Candidate(kb=self,
                          entity_hash=self._entries[entry_index].entity_hash,
                          entity_freq=self._entries[entry_index].freq,
                          entity_vector=self._vectors_table[self._entries[entry_index].vector_index],
                          alias_hash=alias_hash,
                          prior_prob=prior_prob)
                for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs)
                if entry_index != 0]
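
    # Sketch: each returned `Candidate` bundles entity, alias and prior, which
    # an entity linker can then disambiguate (the threshold is illustrative):
    #
    #     likely = [c for c in kb.get_candidates("Douglas")
    #               if c.prior_prob > 0.1]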

    def get_vector(self, unicode entity):
        cdef hash_t entity_hash = self.vocab.strings[entity]

        # Return a zero vector if this entity is unknown in this KB
        if entity_hash not in self._entry_index:
            return [0] * self.entity_vector_length

        entry_index = self._entry_index[entity_hash]
        return self._vectors_table[self._entries[entry_index].vector_index]

    def get_prior_prob(self, unicode entity, unicode alias):
        """ Return the prior probability of a given alias being linked to a given entity,
        or return 0.0 when this combination is not known in the knowledge base."""
        cdef hash_t alias_hash = self.vocab.strings[alias]
        cdef hash_t entity_hash = self.vocab.strings[entity]

        if entity_hash not in self._entry_index or alias_hash not in self._alias_index:
            return 0.0

        alias_index = <int64_t>self._alias_index.get(alias_hash)
        alias_entry = self._aliases_table[alias_index]
        for (entry_index, prior_prob) in zip(alias_entry.entry_indices, alias_entry.probs):
            if self._entries[entry_index].entity_hash == entity_hash:
                return prior_prob

        return 0.0
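
    # Sketch: lookups degrade gracefully for unknown combinations (values are
    # illustrative):
    #
    #     kb.get_prior_prob(entity="Q42", alias="Douglas")    # e.g. 0.8
    #     kb.get_prior_prob(entity="Q42", alias="unseen")     # 0.0
    #     kb.get_vector("not-in-kb")                          # zero vector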

    def dump(self, loc):
        cdef Writer writer = Writer(loc)
        writer.write_header(self.get_size_entities(), self.entity_vector_length)

        # dumping the entity vectors in their original order
        i = 0
        for entity_vector in self._vectors_table:
            for element in entity_vector:
                writer.write_vector_element(element)
            i = i+1

        # dumping the entry records in the order in which they are in the _entries vector.
        # index 0 is a dummy object not stored in the _entry_index and can be ignored.
        i = 1
        for entry_hash, entry_index in sorted(self._entry_index.items(), key=lambda x: x[1]):
            entry = self._entries[entry_index]
            assert entry.entity_hash == entry_hash
            assert entry_index == i
            writer.write_entry(entry.entity_hash, entry.freq, entry.vector_index)
            i = i+1

        writer.write_alias_length(self.get_size_aliases())

        # dumping the aliases in the order in which they are in the _alias_index vector.
        # index 0 is a dummy object not stored in the _aliases_table and can be ignored.
        i = 1
        for alias_hash, alias_index in sorted(self._alias_index.items(), key=lambda x: x[1]):
            alias = self._aliases_table[alias_index]
            assert alias_index == i

            candidate_length = len(alias.entry_indices)
            writer.write_alias_header(alias_hash, candidate_length)
            for j in range(0, candidate_length):
                writer.write_alias(alias.entry_indices[j], alias.probs[j])

            i = i+1

        writer.close()

    cpdef load_bulk(self, loc):
        cdef hash_t entity_hash
        cdef hash_t alias_hash
        cdef int64_t entry_index
        cdef float freq, prob
        cdef int32_t vector_index
        cdef KBEntryC entry
        cdef AliasC alias
        cdef float vector_element

        cdef Reader reader = Reader(loc)

        # STEP 0: load header and initialize KB
        cdef int64_t nr_entities
        cdef int64_t entity_vector_length
        reader.read_header(&nr_entities, &entity_vector_length)

        self.entity_vector_length = entity_vector_length
        self._entry_index = PreshMap(nr_entities+1)
        self._entries = entry_vec(nr_entities+1)
        self._vectors_table = float_matrix(nr_entities+1)

        # STEP 1: load entity vectors
        cdef int i = 0
        cdef int j = 0
        while i < nr_entities:
            entity_vector = float_vec(entity_vector_length)
            j = 0
            while j < entity_vector_length:
                reader.read_vector_element(&vector_element)
                entity_vector[j] = vector_element
                j = j+1
            self._vectors_table[i] = entity_vector
            i = i+1

        # STEP 2: load entities
        # we assume that the entity data was written in sequence
        # index 0 is a dummy object not stored in the _entry_index and can be ignored.
        i = 1
        while i <= nr_entities:
            reader.read_entry(&entity_hash, &freq, &vector_index)

            entry.entity_hash = entity_hash
            entry.freq = freq
            entry.vector_index = vector_index
            entry.feats_row = -1  # Features table currently not implemented

            self._entries[i] = entry
            self._entry_index[entity_hash] = i

            i += 1

        # check that all entities were read in properly
        assert nr_entities == self.get_size_entities()

        # STEP 3: load aliases
        cdef int64_t nr_aliases
        reader.read_alias_length(&nr_aliases)
        self._alias_index = PreshMap(nr_aliases+1)
        self._aliases_table = alias_vec(nr_aliases+1)

        cdef int64_t nr_candidates
        cdef vector[int64_t] entry_indices
        cdef vector[float] probs

        i = 1
        # we assume the alias data was written in sequence
        # index 0 is a dummy object not stored in the _alias_index and can be ignored.
        while i <= nr_aliases:
            reader.read_alias_header(&alias_hash, &nr_candidates)
            entry_indices = vector[int64_t](nr_candidates)
            probs = vector[float](nr_candidates)

            for j in range(0, nr_candidates):
                reader.read_alias(&entry_index, &prob)
                entry_indices[j] = entry_index
                probs[j] = prob

            alias.entry_indices = entry_indices
            alias.probs = probs

            self._aliases_table[i] = alias
            self._alias_index[alias_hash] = i

            i += 1

        # check that all aliases were read in properly
        assert nr_aliases == self.get_size_aliases()
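
    # Sketch of a round trip through the binary format produced by `Writer`
    # below and consumed by `Reader` (the path is illustrative):
    #
    #     kb.dump("/tmp/kb")
    #     kb2 = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=3)
    #     kb2.load_bulk("/tmp/kb")
    #     assert len(kb2) == len(kb)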


cdef class Writer:
    def __init__(self, object loc):
        if path.exists(loc):
            assert not path.isdir(loc), "%s is a directory." % loc
        if isinstance(loc, Path):
            loc = bytes(loc)
        cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc
        self._fp = fopen(<char*>bytes_loc, 'wb')
        if not self._fp:
            raise IOError(Errors.E146.format(path=loc))
        fseek(self._fp, 0, 0)

    def close(self):
        cdef size_t status = fclose(self._fp)
        assert status == 0

    cdef int write_header(self, int64_t nr_entries, int64_t entity_vector_length) except -1:
        self._write(&nr_entries, sizeof(nr_entries))
        self._write(&entity_vector_length, sizeof(entity_vector_length))

    cdef int write_vector_element(self, float element) except -1:
        self._write(&element, sizeof(element))

    cdef int write_entry(self, hash_t entry_hash, float entry_freq, int32_t vector_index) except -1:
        self._write(&entry_hash, sizeof(entry_hash))
        self._write(&entry_freq, sizeof(entry_freq))
        self._write(&vector_index, sizeof(vector_index))
        # Features table currently not implemented and not written to file

    cdef int write_alias_length(self, int64_t alias_length) except -1:
        self._write(&alias_length, sizeof(alias_length))

    cdef int write_alias_header(self, hash_t alias_hash, int64_t candidate_length) except -1:
        self._write(&alias_hash, sizeof(alias_hash))
        self._write(&candidate_length, sizeof(candidate_length))

    cdef int write_alias(self, int64_t entry_index, float prob) except -1:
        self._write(&entry_index, sizeof(entry_index))
        self._write(&prob, sizeof(prob))

    cdef int _write(self, void* value, size_t size) except -1:
        status = fwrite(value, size, 1, self._fp)
        assert status == 1, status


cdef class Reader:
    def __init__(self, object loc):
        assert path.exists(loc)
        assert not path.isdir(loc)
        if isinstance(loc, Path):
            loc = bytes(loc)
        cdef bytes bytes_loc = loc.encode('utf8') if type(loc) == unicode else loc
        self._fp = fopen(<char*>bytes_loc, 'rb')
        if not self._fp:
            PyErr_SetFromErrno(IOError)
        status = fseek(self._fp, 0, 0)  # this can be 0 if there is no header

    def __dealloc__(self):
        fclose(self._fp)

    cdef int read_header(self, int64_t* nr_entries, int64_t* entity_vector_length) except -1:
        status = self._read(nr_entries, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="header"))

        status = self._read(entity_vector_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector length"))

    cdef int read_vector_element(self, float* element) except -1:
        status = self._read(element, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector element"))

    cdef int read_entry(self, hash_t* entity_hash, float* freq, int32_t* vector_index) except -1:
        status = self._read(entity_hash, sizeof(hash_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entity hash"))

        status = self._read(freq, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entity freq"))

        status = self._read(vector_index, sizeof(int32_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="vector index"))

        if feof(self._fp):
            return 0
        else:
            return 1

    cdef int read_alias_length(self, int64_t* alias_length) except -1:
        status = self._read(alias_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="alias length"))

    cdef int read_alias_header(self, hash_t* alias_hash, int64_t* candidate_length) except -1:
        status = self._read(alias_hash, sizeof(hash_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="alias hash"))

        status = self._read(candidate_length, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="candidate length"))

    cdef int read_alias(self, int64_t* entry_index, float* prob) except -1:
        status = self._read(entry_index, sizeof(int64_t))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="entry index"))

        status = self._read(prob, sizeof(float))
        if status < 1:
            if feof(self._fp):
                return 0  # end of file
            raise IOError(Errors.E145.format(param="prior probability"))

    cdef int _read(self, void* value, size_t size) except -1:
        status = fread(value, size, 1, self._fp)
        return status