diff --git a/spacy/lang.pxd b/spacy/lang.pxd
index 4ccc0f078..93d813d1c 100644
--- a/spacy/lang.pxd
+++ b/spacy/lang.pxd
@@ -42,5 +42,5 @@ cdef class Language:
     cpdef Tokens tokenize(self, unicode text)
     cpdef Lexeme lookup(self, unicode text)
 
-    cdef int _tokenize(self, vector[LexemeC*] *tokens_v, String* string)
+    cdef int _tokenize(self, vector[LexemeC*] *tokens_v, String* string) except -1
     cdef int _split_one(self, Py_UNICODE* characters, size_t length)
diff --git a/spacy/lang.pyx b/spacy/lang.pyx
index b8fd5368d..bd205b233 100644
--- a/spacy/lang.pyx
+++ b/spacy/lang.pyx
@@ -190,7 +190,7 @@ cdef class Language:
         self._tokenize(tokens.v, &span)
         return tokens
 
-    cdef int _tokenize(self, vector[LexemeC*] *tokens_v, String* string):
+    cdef int _tokenize(self, vector[LexemeC*] *tokens_v, String* string) except -1:
         cdef LexemeC** lexemes = <LexemeC**>self.cache.get(string.key)
         cdef size_t i
         if lexemes != NULL:
@@ -204,6 +204,7 @@ cdef class Language:
         cdef int split
         cdef int remaining = string.n
         cdef String prefix
+        cdef LexemeC* lexeme
         while remaining >= 1:
             split = self._split_one(string.chars, string.n)
             remaining -= split
@@ -215,11 +216,13 @@ cdef class Language:
                     tokens_v.push_back(lexemes[i])
                     i += 1
             else:
-                tokens_v.push_back(self.lexicon.get(&prefix))
-        lexemes = <LexemeC**>calloc(tokens_v.size() - first_token, sizeof(LexemeC*))
+                lexeme = self.lexicon.get(&prefix)
+                tokens_v.push_back(lexeme)
+        lexemes = <LexemeC**>calloc((tokens_v.size() - first_token) + 1, sizeof(LexemeC*))
         cdef size_t j
         for i, j in enumerate(range(first_token, tokens_v.size())):
             lexemes[i] = tokens_v[0][j]
+        lexemes[i+1] = NULL
         self.cache.set(key, lexemes)
 
     cdef int _split_one(self, Py_UNICODE* characters, size_t length):
@@ -310,7 +313,6 @@ cdef class Lexicon:
 
 
 cdef void string_from_unicode(String* s, unicode uni):
-    Py_INCREF(uni)
     cdef Py_UNICODE* c_uni = uni
     string_from_slice(s, c_uni, 0, len(uni))
 