Restore dense_hash_map for cache dictionary. Seems to double efficiency

Matthew Honnibal 2014-09-12 02:23:51 +02:00
parent 3c928fb5e0
commit 073ee0de63
5 changed files with 57 additions and 16 deletions
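
A note on the claim in the commit message: the previous cache was a Python dict keyed by the token hash, so every probe boxed a uint64_t key into a heap-allocated PyLong and paid refcounting and GIL costs, while dense_hash_map stores the key/value pairs flat in one open-addressed array. A rough way to feel the dict-side overhead (hypothetical micro-benchmark harness, plain Python, no numbers claimed):

    import random
    import timeit

    # Simulate the old cache: 64-bit hash keys in a Python dict.
    keys = [random.getrandbits(64) | 1 for _ in range(100000)]
    cache = {k: 1 for k in keys}  # each key is a boxed PyLong object

    # Every lookup re-hashes a PyObject and touches refcounts; the C++
    # dense_hash_map replacement probes a flat array of POD pairs instead.
    print(timeit.timeit(lambda: [cache[k] for k in keys], number=100))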

View File

@@ -224,7 +224,7 @@ cdef class English(Language):
     fl_is_digit = Flag_IsDigit
     v_shape = View_WordShape
     def __cinit__(self, name, user_string_features, user_flag_features):
-        self.cache = {}
+        self.cache.set_empty_key(0)
         lang_data = util.read_lang_data(name)
         rules, words, probs, clusters, case_stats, tag_stats = lang_data
         self.lexicon = lang.Lexicon(words, probs, clusters, case_stats, tag_stats,
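
For background, since both this hunk and the matching __cinit__ hunk below start with the same call: sparsehash's dense_hash_map requires set_empty_key() to be called, with a key value that will never be stored, before the first insert. Using 0 works because the keys are hash64 outputs and the code assumes a real token never hashes to exactly 0. A minimal standalone sketch of the contract (file and names illustrative, not part of the commit; needs the sparsehash headers and C++ compilation):

    # dense_cache.pyx -- illustrative sketch, not part of the commit.
    from libc.stdint cimport uint64_t

    cdef extern from "sparsehash/dense_hash_map" namespace "google":
        cdef cppclass dense_hash_map[K, D]:
            void set_empty_key(K&)
            D& operator[](K&) nogil

    cdef dense_hash_map[uint64_t, size_t] cache
    cache.set_empty_key(0)   # mandatory before any insert; key 0 is now reserved
    cache[0x9e3779b97f4a7c15] = 42
    assert cache[0x9e3779b97f4a7c15] == 42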

View File

@@ -3,14 +3,52 @@ from libc.stdint cimport uint64_t
 from spacy.word cimport Lexeme
 from spacy.tokens cimport Tokens
 
+from libcpp.utility cimport pair
+from libcpp.vector cimport vector
+from libc.stdint cimport uint64_t, int64_t
 
-cdef struct Flags:
-    size_t is_alpha
-    size_t can_noun
-
-
-cdef struct ViewIDs:
-    size_t canon_form
+
+cdef extern from "sparsehash/dense_hash_map" namespace "google":
+    cdef cppclass dense_hash_map[K, D]:
+        K& key_type
+        D& data_type
+        pair[K, D]& value_type
+        uint64_t size_type
+        cppclass iterator:
+            pair[K, D]& operator*() nogil
+            iterator operator++() nogil
+            iterator operator--() nogil
+            bint operator==(iterator) nogil
+            bint operator!=(iterator) nogil
+        iterator begin()
+        iterator end()
+        uint64_t size()
+        uint64_t max_size()
+        bint empty()
+        uint64_t bucket_count()
+        uint64_t bucket_size(uint64_t i)
+        uint64_t bucket(K& key)
+        double max_load_factor()
+        void max_load_vactor(double new_grow)
+        double min_load_factor()
+        double min_load_factor(double new_grow)
+        void set_resizing_parameters(double shrink, double grow)
+        void resize(uint64_t n)
+        void rehash(uint64_t n)
+        dense_hash_map()
+        dense_hash_map(uint64_t n)
+        void swap(dense_hash_map&)
+        pair[iterator, bint] insert(pair[K, D]) nogil
+        void set_empty_key(K&)
+        void set_deleted_key(K& key)
+        void clear_deleted_key()
+        void erase(iterator pos)
+        uint64_t erase(K& k)
+        void erase(iterator first, iterator last)
+        void clear()
+        void clear_no_resize()
+        pair[iterator, iterator] equal_range(K& k)
+        D& operator[](K&) nogil
 
 
 cdef class Lexicon:
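
Worth noting in the extern block above: insert and operator[] carry nogil, so cache probes can in principle run with the GIL released. A hypothetical helper (not in the commit) that assumes the extern declaration above is cimported:

    from libc.stdint cimport uint64_t

    cdef size_t cache_get(dense_hash_map[uint64_t, size_t]& cache, uint64_t key) nogil:
        # Safe without the GIL because operator[] is declared nogil above.
        # A miss default-inserts 0, which callers can treat as NULL.
        return cache[key]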
@@ -27,13 +65,14 @@ cdef class Lexicon:
 
 cdef class Language:
     cdef unicode name
-    cdef dict cache
+    cdef dense_hash_map[uint64_t, size_t] cache
+    cdef size_t cache_size
     cpdef readonly Lexicon lexicon
     cpdef readonly object tokens_class
 
     cpdef Tokens tokenize(self, unicode text)
     cpdef Lexeme lookup(self, unicode text)
 
-    cdef _tokenize(self, Tokens tokens, unicode string)
+    cdef _tokenize(self, Tokens tokens, Py_UNICODE* characters, size_t length)
     cdef list _split(self, unicode string)
     cdef int _split_one(self, unicode word)

View File

@@ -40,7 +40,8 @@ cdef class Language:
         if string_features is None:
            string_features = []
         self.name = name
-        self.cache = {}
+        self.cache.set_empty_key(0)
+        self.cache_size = 0
         lang_data = read_lang_data(name)
         rules, words, probs, clusters, case_stats, tag_stats = lang_data
         self.lexicon = Lexicon(words, probs, clusters, case_stats, tag_stats,
@@ -102,11 +103,10 @@ cdef class Language:
     cdef _tokenize(self, Tokens tokens, Py_UNICODE* characters, size_t length):
         cdef uint64_t hashed = hash64(characters, length * sizeof(Py_UNICODE), 0)
         cdef unicode string
-        cdef LexemeC** lexemes
         cdef bint free_chunk = False
         cdef size_t i = 0
-        if hashed in self.cache:
-            lexemes = <LexemeC**><size_t>self.cache[hashed]
+        cdef LexemeC** lexemes = <LexemeC**>self.cache[hashed]
+        if lexemes is not NULL:
             while lexemes[i] != NULL:
                 tokens.push_back(lexemes[i])
                 i += 1
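
The rewritten lookup relies on a behavioral difference from the old dict: `hashed in self.cache` touched nothing on a miss, whereas dense_hash_map's operator[] default-inserts a zero-valued entry for any unseen key and returns it, so a miss comes back as NULL and also leaves a slot behind. A small demonstration (illustrative; assumes the extern block from the hunk above):

    cdef dense_hash_map[uint64_t, size_t] m
    m.set_empty_key(0)
    assert m[123] == 0    # miss: a default entry (0) is inserted and returned
    assert m.size() == 1  # side effect: the missed key now occupies a slot
    m[123] = 7
    assert m[123] == 7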
@@ -121,8 +121,9 @@ cdef class Language:
             # The intuition here is that if an element belongs in the cache, it
             # has several chances to get in. And if the cache is large, we less
             # believe that the element belongs there.
-            if not self.cache or random.random() < (100000.0 / len(self.cache)):
+            if self.cache_size == 0 or random.random() < (100000.0 / self.cache_size):
                 self.cache[hashed] = <size_t>lexemes
+                self.cache_size += 1
             else:
                 free(lexemes)
 
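
The admission rule above gives each candidate probability min(1, 100000/size) of entering the cache on a given miss, so the first 100,000 distinct strings are always admitted and, past that, a string seen k times gets k independent chances while one-off strings are increasingly likely to be freed. A quick simulation of the growth curve (plain Python, not from the commit):

    import random

    for trials in (10**4, 10**5, 10**6):
        size = 0
        for _ in range(trials):
            if size == 0 or random.random() < 100000.0 / size:
                size += 1
        print(trials, size)
    # Growth is linear up to 100,000 entries, then slows to roughly
    # sqrt(2 * 100000 * trials), keeping the cache biased toward
    # frequently recurring strings.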
@@ -172,6 +173,7 @@ cdef class Language:
             lexemes[i + 1] = NULL
             hashed = hash64(<Py_UNICODE*>string, len(string) * sizeof(Py_UNICODE), 0)
             self.cache[hashed] = <size_t>lexemes
+            self.cache_size += 1
 
 
 cdef class Lexicon:

View File

@@ -5,7 +5,7 @@ cdef class Tokens:
     cdef size_t size
     cdef LexemeC** lexemes
 
-    cdef push_back(self, LexemeC* lexeme)
+    cdef int push_back(self, LexemeC* lexeme) except -1
 
     cpdef unicode string(self, size_t i)
     cpdef double prob(self, size_t i)

View File

@@ -44,7 +44,7 @@ cdef class Tokens:
     def append(self, Lexeme lexeme):
         self.push_back(lexeme._c)
 
-    cdef push_back(self, LexemeC* lexeme):
+    cdef int push_back(self, LexemeC* lexeme) except -1:
         if (self.size + 1) == self.length:
             self.size *= 2
             self.lexemes = <LexemeC**>realloc(self.lexemes, self.size * sizeof(LexemeC*))
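
The new except -1 signature is what lets a C-level push_back fail loudly: a plain cdef function with a non-object return type cannot propagate a Python exception to its caller (Cython prints and discards it), while except -1 makes the generated C return -1 and re-raise. Note also what looks like a size/length mix-up in the context lines (self.size, not self.length, is doubled and passed to realloc). A hedged sketch of the corrected pattern, with illustrative names and a stand-in struct, not the commit's exact body:

    # sketch.pyx -- hypothetical, not the commit's exact body.
    from libc.stdlib cimport realloc

    cdef struct LexemeC:
        int dummy  # stand-in; the real struct lives elsewhere in spacy

    cdef class TokenVector:  # illustrative name, not the commit's Tokens class
        cdef size_t size, length
        cdef LexemeC** lexemes

        cdef int push_back(self, LexemeC* lexeme) except -1:
            cdef LexemeC** new_mem
            if (self.size + 1) >= self.length:
                # Grow the capacity (length), not the element count (size).
                self.length = self.length * 2 if self.length else 8
                new_mem = <LexemeC**>realloc(self.lexemes,
                                             self.length * sizeof(LexemeC*))
                if new_mem is NULL:
                    # C callers see -1; Python callers see the exception.
                    raise MemoryError()
                self.lexemes = new_mem
            self.lexemes[self.size] = lexeme
            self.size += 1
            return 0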