Changes to vocab for new stringstore scheme

Matthew Honnibal 2016-09-30 20:01:19 +02:00
parent 21e90d7d0b
commit a51149a717


@@ -104,7 +104,7 @@ cdef class Vocab:
         # Need to rethink this.
         for name in symbols.NAMES + list(sorted(tag_map.keys())):
             if name:
-                _ = self.strings[name]
+                self.strings.intern(name)
         self.lex_attr_getters = lex_attr_getters
         self.morphology = Morphology(self.strings, tag_map, lemmatizer)
         self.serializer_freqs = serializer_freqs
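
For context on this first hunk: the old code interned a string implicitly, as a side effect of `self.strings[name]`, while the new scheme makes interning an explicit `intern()` call. A minimal plain-Python sketch of the difference follows; `ToyStringStore` and its methods are hypothetical stand-ins for illustration, not spaCy's actual StringStore.

# Hypothetical toy store; illustrates implicit vs. explicit interning only.
class ToyStringStore(object):
    def __init__(self):
        self._ids = {}        # string -> integer id
        self._strings = []    # integer id -> string

    def intern(self, string):
        # Explicit interning: add the string if unseen, return its id.
        if string not in self._ids:
            self._ids[string] = len(self._strings)
            self._strings.append(string)
        return self._ids[string]

    def __getitem__(self, string):
        # Old-style access: interning happens as a side effect of lookup.
        return self.intern(string)

store = ToyStringStore()
_ = store["NOUN"]       # old pattern in the hunk above
store.intern("NOUN")    # new pattern: the intent (interning) is explicit
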
@@ -159,28 +159,38 @@ cdef class Vocab:
         if lex != NULL:
             return lex
         else:
-            return self._new_lexeme(mem, self.strings[orth])
+            return self._new_lexeme(mem, self.strings.decode_int(orth))
 
     cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL:
+        # TODO: This is all quite a mess, and doesn't work currently.
+        # The current state is:
+        # - mem is None: Denotes that the string should be interned within the StringStore
+        # - mem is self.mem: Denotes that the string will be permanently available,
+        #   but that it will be stored within the *Vocab*
+        # - mem is Doc.mem: Denotes that the string will 'belong' to the Doc
         cdef hash_t key
-        cdef bint is_oov = mem is not self.mem
-        if len(string) < 3 or not is_oov:
-            mem = self.mem
-        lex = <LexemeC*>mem.alloc(sizeof(LexemeC), 1)
+        if mem is None:
+            lex = <LexemeC*>self.mem.alloc(sizeof(LexemeC), 1)
+        else:
+            lex = <LexemeC*>mem.alloc(sizeof(LexemeC), 1)
+        print("Add new lexeme", "id(mem)", id(mem), "id(self.mem)", id(self.mem))
         lex.orth = self.strings.intern(string, mem=mem)
         lex.length = len(string)
         lex.id = self.length
-        lex.vector = <float*>mem.alloc(self.vectors_length, sizeof(float))
+        if mem is None:
+            lex.vector = <float*>self.mem.alloc(self.vectors_length, sizeof(float))
+        else:
+            lex.vector = <float*>mem.alloc(self.vectors_length, sizeof(float))
         if self.lex_attr_getters is not None:
             for attr, func in self.lex_attr_getters.items():
                 value = func(string)
                 if isinstance(value, unicode):
-                    value = self.strings.intern(value)
+                    value = self.strings.intern(value, mem=mem)
                 if attr == PROB:
                     lex.prob = value
                 else:
                     Lexeme.set_struct_attr(lex, attr, value)
-        if is_oov:
+        if mem not in (None, self.mem):
            lex.id = 0
         else:
             key = hash_string(string)
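
The TODO comment in this hunk is the heart of the new scheme: the `mem` argument to `intern()` decides who owns the interned string. `None` means the StringStore keeps it, `self.mem` means it lives as long as the Vocab, and a Doc's pool means it disappears with that Doc, which is the out-of-vocabulary case and why such lexemes get `lex.id = 0` instead of an entry in the permanent table. Below is a rough plain-Python sketch of that ownership idea, collapsing the first two cases into "permanent"; the names `ToyOwnedStore` and `release_pool` are hypothetical, not spaCy's or cymem's API.

import itertools

class ToyOwnedStore(object):
    """Hypothetical sketch of mem-based string ownership; not spaCy's API."""
    def __init__(self, vocab_pool):
        self.vocab_pool = vocab_pool
        self._next_id = itertools.count(1)
        self.permanent = {}   # interned with mem=None or mem=vocab_pool
        self.by_pool = {}     # interned with a Doc's pool; freed with it

    def intern(self, string, mem=None):
        if mem is None or mem is self.vocab_pool:
            table = self.permanent
        else:
            table = self.by_pool.setdefault(id(mem), {})
        if string not in table:
            table[string] = next(self._next_id)
        return table[string]

    def release_pool(self, mem):
        # When a Doc's pool is freed, the strings it owned go with it.
        self.by_pool.pop(id(mem), None)

vocab_pool, doc_pool = object(), object()
store = ToyOwnedStore(vocab_pool)
store.intern(u"the", mem=None)          # permanent, owned by the store
store.intern(u"cat", mem=vocab_pool)    # permanent, owned by the Vocab
store.intern(u"zxqv", mem=doc_pool)     # transient, gone when the Doc is freed
store.release_pool(doc_pool)
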
@@ -221,8 +231,9 @@ cdef class Vocab:
         instantiation.
         '''
         cdef attr_t orth
-        if type(id_or_string) == unicode:
-            orth = self.strings.intern(id_or_string)
+        if isinstance(id_or_string, basestring):
+            print("Intern", id_or_string, id(self.mem))
+            orth = self.strings.intern(id_or_string, mem=self.mem)
         else:
             orth = id_or_string
         return Lexeme(self, orth)
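
One note on this hunk: under Python 2 (still supported by spaCy in 2016), `type(id_or_string) == unicode` rejects plain byte strings, while `isinstance(id_or_string, basestring)` accepts both `str` and `unicode`, so any string input takes the interning branch and only integer ids fall through. A tiny Python 2-style sketch of the same dispatch; `toy_getitem` is illustrative, not the Vocab API.

def toy_getitem(store, id_or_string):
    # Strings (bytes or unicode under Python 2) are interned to get an id;
    # anything else is assumed to already be an integer orth id.
    if isinstance(id_or_string, basestring):
        return store.intern(id_or_string)
    return id_or_string
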
@@ -234,7 +245,7 @@ cdef class Vocab:
             token = &tokens[i]
            # Set the special tokens up to have morphology and lemmas if
            # specified, otherwise use the part-of-speech tag (if specified)
-            token.lex = <LexemeC*>self.get(self.mem, props['F'])
+            token.lex = <LexemeC*>self.get(None, props['F'])
             if 'pos' in props:
                 self.morphology.assign_tag(token, props['pos'])
             if 'L' in props:
@@ -345,7 +356,7 @@ cdef class Vocab:
                 raise VectorReadError.mismatched_sizes(file_, line_num,
                                                        vec_len, len(pieces))
             orth = self.strings[word_str]
-            lexeme = <LexemeC*><void*>self.get_by_orth(self.mem, orth)
+            lexeme = <LexemeC*><void*>self.get_by_orth(None, orth)
             lexeme.vector = <float*>self.mem.alloc(self.vectors_length, sizeof(float))
             for i, val_str in enumerate(pieces):
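
The last two hunks make the same move: call sites that used to pass `self.mem` into `get()` / `get_by_orth()` now pass `None`, so strings for special-case tokens and for words loaded from a vectors file are interned permanently by the StringStore itself rather than allocated against the Vocab's pool. In terms of the hypothetical ToyOwnedStore sketch above, the change at these call sites is roughly:

store = ToyOwnedStore(vocab_pool=object())

# Old call sites: intern against the Vocab's own pool.
store.intern(u"don't", mem=store.vocab_pool)

# New call sites: pass None and let the StringStore own the string outright.
store.intern(u"don't", mem=None)

Both end up permanent in the toy sketch; the distinction the diff cares about is which allocator owns the underlying bytes, which matters once strings can also be Doc-owned.
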