Mirror of https://github.com/explosion/spaCy.git
* Refactoring tokenizer
This commit is contained in:
parent 213a440ffc
commit a225ca5b0d
@@ -57,7 +57,7 @@ cpdef Tokens tokenize(unicode string):


 cpdef Lexeme_addr lookup(unicode string) except 0:
-    return EN.lookup(-1, string, len(string))
+    return EN.lookup_chunk(string)


 cpdef unicode unhash(StringHash hash_value):
@@ -53,7 +53,7 @@ cpdef Tokens tokenize(unicode string):


 cpdef Lexeme_addr lookup(unicode string) except 0:
-    return EN_PTB.lookup(-1, string, len(string))
+    return EN_PTB.lookup_chunk(string)


 cpdef unicode unhash(StringHash hash_value):
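Both the EN and EN_PTB module-level wrappers drop the old (split, string, length) calling convention; deciding where a chunk splits now belongs to Language.lookup_chunk. A rough pure-Python sketch of the new shape (hypothetical stand-ins, not the Cython signatures):

# Before: callers passed a split hint (-1 meaning "decide for me") plus a
# redundant length, e.g. EN.lookup(-1, string, len(string)).
# After: callers pass only the string; the Language object owns splitting.

class Language:  # minimal stand-in for spacy.spacy.Language
    def __init__(self):
        self.chunks = {}

    def lookup_chunk(self, string):
        # get-or-create; the real method also splits off punctuation/clitics
        return self.chunks.setdefault(string, {"lex": string, "tail": None})

EN = Language()

def lookup(string):
    # new calling convention: no split hint, no redundant len(string)
    return EN.lookup_chunk(string)

assert lookup("don't")["lex"] == "don't"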
@@ -9,9 +9,6 @@ ctypedef char Bits8
 ctypedef uint64_t Bits64


-from spacy.spacy cimport Language
-
-
 cdef struct Orthography:
     StringHash last3
     StringHash shape
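The Orthography struct caches hashed string features such as a word's last three characters and its shape. A sketch of what those features look like in plain Python; the real get_normalized and get_word_shape live elsewhere in the package and may use a different scheme:

def last3(word):
    # hashed into Orthography.last3 in the real code
    return word[-3:]

def word_shape(word):
    # hashed into Orthography.shape; this mapping is illustrative
    shape = []
    for c in word:
        if c.isupper():
            shape.append("X")
        elif c.islower():
            shape.append("x")
        elif c.isdigit():
            shape.append("d")
        else:
            shape.append(c)
    return "".join(shape)

assert last3("tokenizing") == "ing"
assert word_shape("Spacy-2") == "Xxxxx-d"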
@@ -30,14 +30,19 @@ cdef class Language:
     cdef Vocab* distri
     cdef Vocab* ortho
     cdef dict bacov
-    cdef int find_split(self, unicode word, size_t length)

-    cdef Lexeme_addr lookup(self, int split, Py_UNICODE* string, size_t length) except 0
-    cdef StringHash hash_string(self, Py_UNICODE* string, size_t length) except 0
-    cpdef Tokens tokenize(self, unicode text)
+    cdef Lexeme_addr lookup(self, unicode string) except 0
+    cdef Lexeme_addr lookup_chunk(self, unicode string) except 0
+    cdef Orthography* lookup_orth(self, StringHash key, unicode lex) except NULL
+    cdef Distribution* lookup_dist(self, StringHash key) except NULL
+
+    cdef Lexeme* new_lexeme(self, StringHash key, unicode lex) except NULL
+    cdef Orthography* new_orth(self, StringHash hashed, unicode lex) except NULL
+    cdef Distribution* new_dist(self, StringHash key) except NULL
+
+    cdef StringHash hash_string(self, unicode string) except 0
+    cdef unicode unhash(self, StringHash hashed)
+
+    cpdef Tokens tokenize(self, unicode text)
-    cdef Lexeme* _add(self, StringHash hashed, unicode string, int split, size_t length)
-    cdef Lexeme* init_lexeme(self, unicode string, StringHash hashed,
-                             int split, size_t length)
-    cdef Orthography* init_orth(self, StringHash hashed, unicode lex)
+    cdef int find_split(self, unicode word, size_t length)
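The new header splits the old monolithic lookup/_add/init_* surface into paired lookup_*/new_* methods, one pair per table (vocab, ortho, distri). The shape of that get-or-create pattern as a small Python sketch, with a dict standing in for the C hash map:

class Language:
    def __init__(self):
        self.ortho = {}  # stands in for the Vocab* ortho dense_hash_map

    def lookup_orth(self, key, lex):
        # fetch the cached entry, creating it on first sight
        orth = self.ortho.get(key)
        if orth is None:
            orth = self.new_orth(key, lex)
        return orth

    def new_orth(self, key, lex):
        orth = {"first": lex[0], "length": len(lex)}
        self.ortho[key] = orth
        return orth

lang = Language()
assert lang.lookup_orth(1, "word") is lang.lookup_orth(1, "word")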
spacy/spacy.pyx (263 changed lines)
@@ -10,6 +10,8 @@ from spacy.lexeme cimport Lexeme
 from spacy.lexeme cimport BLANK_WORD
 from spacy.string_tools cimport substr
+from spacy.string_tools cimport to_bytes
+from spacy.string_tools cimport from_bytes


 from . import util
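to_bytes and from_bytes (their bodies are not in this diff) presumably wrap UTF-8 encode/decode; the old code inlined .encode('utf8')/.decode('utf8') and hashed raw Py_UNICODE buffers, whose element width varies by platform. A minimal Python equivalent of the assumed helpers:

def to_bytes(string):
    # assumed behavior of spacy.string_tools.to_bytes
    return string.encode("utf8")

def from_bytes(byte_string):
    # assumed behavior of spacy.string_tools.from_bytes
    return byte_string.decode("utf8")

assert from_bytes(to_bytes("naïve")) == "naïve"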
@@ -69,17 +71,146 @@ cdef class Language:
         self.ortho[0].set_deleted_key(1)
         self.load_tokenization(util.read_tokenization(name))

+    cpdef Tokens tokenize(self, unicode string):
+        cdef size_t length = len(string)
+        cdef Py_UNICODE* characters = <Py_UNICODE*>string
+
+        cdef size_t i
+        cdef Py_UNICODE c
+
+        cdef Tokens tokens = Tokens(self)
+        cdef Py_UNICODE* current = <Py_UNICODE*>calloc(len(string), sizeof(Py_UNICODE))
+        cdef size_t word_len = 0
+        cdef Lexeme* token
+        for i in range(length):
+            c = characters[i]
+            if _is_whitespace(c):
+                if word_len != 0:
+                    token = <Lexeme*>self.lookup_chunk(current)
+                    while token != NULL:
+                        tokens.append(<Lexeme_addr>token)
+                        token = token.tail
+                    for j in range(word_len+1):
+                        current[j] = 0
+                    word_len = 0
+            else:
+                current[word_len] = c
+                word_len += 1
+        if word_len != 0:
+            token = <Lexeme*>self.lookup_chunk(current)
+            while token != NULL:
+                tokens.append(<Lexeme_addr>token)
+                token = token.tail
+        free(current)
+        return tokens
+
+    cdef Lexeme_addr lookup(self, unicode string) except 0:
+        cdef size_t length = len(string)
+        if length == 0:
+            return <Lexeme_addr>&BLANK_WORD
+        cdef bytes b = to_bytes(string)
+        cdef StringHash hashed = mrmr.hash32(<char*>b, len(b) * sizeof(char), 0)
+        # First, check words seen 2+ times
+        cdef Lexeme* word_ptr = <Lexeme*>self.vocab[0][hashed]
+        if word_ptr == NULL:
+            word_ptr = self.new_lexeme(hashed, string)
+        self.bacov[hashed] = b
+        return <Lexeme_addr>word_ptr
+
+    cdef Lexeme_addr lookup_chunk(self, unicode string) except 0:
+        '''Fetch a Lexeme representing a word string. If the word has not been seen,
+        construct one, splitting off any attached punctuation or clitics. A
+        reference to BLANK_WORD is returned for the empty string.
+        '''
+        cdef size_t length = len(string)
+        if length == 0:
+            return <Lexeme_addr>&BLANK_WORD
+        cdef StringHash hashed = self.hash_string(string)
+        # First, check words seen 2+ times
+        cdef Lexeme* word_ptr = <Lexeme*>self.vocab[0][hashed]
+        cdef int split
+        if word_ptr == NULL:
+            split = self.find_split(string, length)
+            if split != 0 and split != -1 and split < length:
+                word_ptr = self.new_lexeme(hashed, string[:split])
+                word_ptr.tail = <Lexeme*>self.lookup_chunk(string[split:])
+            else:
+                word_ptr = self.new_lexeme(hashed, string)
+        return <Lexeme_addr>word_ptr
+
+    cdef Orthography* lookup_orth(self, StringHash hashed, unicode lex):
+        cdef Orthography* orth = <Orthography*>self.ortho[0][hashed]
+        if orth == NULL:
+            orth = self.new_orth(hashed, lex)
+        return orth
+
+    cdef Distribution* lookup_dist(self, StringHash hashed):
+        cdef Distribution* dist = <Distribution*>self.distri[0][hashed]
+        if dist == NULL:
+            dist = self.new_dist(hashed)
+        return dist
+
+    cdef Lexeme* new_lexeme(self, StringHash key, unicode string) except NULL:
+        cdef Lexeme* word = <Lexeme*>calloc(1, sizeof(Lexeme))
+        word.sic = key
+        word.lex = self.hash_string(string)
+        word.orth = self.lookup_orth(word.lex, string)
+        word.dist = self.lookup_dist(word.lex)
+        self.vocab[0][key] = <size_t>word
+        return word
+
+    cdef Orthography* new_orth(self, StringHash hashed, unicode lex) except NULL:
+        cdef unicode last3
+        cdef unicode norm
+        cdef unicode shape
+        cdef int length
+
+        length = len(lex)
+        orth = <Orthography*>calloc(1, sizeof(Orthography))
+        orth.first = <Py_UNICODE>lex[0]
+
+        orth.length = length
+        orth.flags = set_orth_flags(lex, orth.length)
+
+        orth.last3 = self.hash_string(substr(lex, length - 3, length, length))
+        orth.norm = self.hash_string(get_normalized(lex, length))
+        orth.shape = self.hash_string(get_word_shape(lex, length))
+
+        self.ortho[0][hashed] = <size_t>orth
+        return orth
+
+    cdef Distribution* new_dist(self, StringHash hashed) except NULL:
+        dist = <Distribution*>calloc(1, sizeof(Distribution))
+        self.distri[0][hashed] = <size_t>dist
+        return dist
+
+    cdef StringHash hash_string(self, unicode s) except 0:
+        '''Hash unicode with MurmurHash64A'''
+        cdef bytes byte_string = to_bytes(s)
+        cdef StringHash hashed = mrmr.hash32(<char*>byte_string, len(byte_string) * sizeof(char), 0)
+        self.bacov[hashed] = byte_string
+        return hashed
+
+    cdef unicode unhash(self, StringHash hash_value):
+        '''Fetch a string from the reverse index, given its hash value.'''
+        return from_bytes(self.bacov[hash_value])
+
+    cdef int find_split(self, unicode word, size_t length):
+        return -1
+
     def load_tokenization(self, token_rules=None):
         cdef Lexeme* word
         cdef StringHash hashed
         for chunk, lex, tokens in token_rules:
-            hashed = self.hash_string(chunk, len(chunk))
-            word = self._add(hashed, lex, len(lex), len(lex))
+            hashed = self.hash_string(chunk)
+            word = <Lexeme*>self.new_lexeme(hashed, lex)
             for i, lex in enumerate(tokens):
                 token_string = '%s:@:%d:@:%s' % (chunk, i, lex)
-                length = len(token_string)
-                hashed = self.hash_string(token_string, length)
-                word.tail = self._add(hashed, lex, 0, len(lex))
+                hashed = self.hash_string(token_string)
+                word.tail = <Lexeme*>self.new_lexeme(hashed, lex)
                 word = word.tail

     def load_clusters(self):
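The key new method is lookup_chunk: when a chunk is unseen, find_split names a boundary, the head Lexeme is cached under the whole chunk's hash, and the remainder hangs off word.tail as a linked list (which tokenize then walks). In this commit find_split still returns -1, so no splits fire yet; a pure-Python sketch with a toy find_split shows the intended recursion:

class Lexeme:
    def __init__(self, lex):
        self.lex = lex
        self.tail = None

class Language:
    def __init__(self):
        self.vocab = {}

    def find_split(self, word):
        # toy rule: split off one trailing punctuation character
        if len(word) > 1 and not word[-1].isalnum():
            return len(word) - 1
        return -1

    def lookup_chunk(self, string):
        word = self.vocab.get(string)
        if word is None:
            word = self.new_chunk(string)
        return word

    def new_chunk(self, string):
        split = self.find_split(string)
        if 0 < split < len(string):
            word = Lexeme(string[:split])
            word.tail = self.lookup_chunk(string[split:])
        else:
            word = Lexeme(string)
        self.vocab[string] = word  # cached under the whole chunk
        return word

chunk = Language().lookup_chunk("Hello!")
assert (chunk.lex, chunk.tail.lex) == ("Hello", "!")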
@@ -96,128 +227,8 @@ cdef class Language:
             # the first 4 bits. See redshift._parse_features.pyx
             cluster = int(cluster_str[::-1], 2)
             upper_pc, title_pc = case_stats.get(token_string.lower(), (0.0, 0.0))
-            hashed = self.hash_string(token_string, len(token_string))
-            word = self._add(hashed, token_string,
-                             len(token_string), len(token_string))
+            hashed = self.hash_string(token_string)
+            word = self.init_lexeme(hashed, token_string)
-
-    cdef StringHash hash_string(self, Py_UNICODE* s, size_t length) except 0:
-        '''Hash unicode with MurmurHash64A'''
-        return mrmr.hash32(<Py_UNICODE*>s, length * sizeof(Py_UNICODE), 0)
-
-    cdef unicode unhash(self, StringHash hash_value):
-        '''Fetch a string from the reverse index, given its hash value.'''
-        return self.bacov[hash_value].decode('utf8')
-
-    cdef Lexeme_addr lookup(self, int start, Py_UNICODE* string, size_t length) except 0:
-        '''Fetch a Lexeme representing a word string. If the word has not been seen,
-        construct one, splitting off any attached punctuation or clitics. A
-        reference to BLANK_WORD is returned for the empty string.
-
-        To specify the boundaries of the word if it has not been seen, use lookup_chunk.
-        '''
-        if length == 0:
-            return <Lexeme_addr>&BLANK_WORD
-        cdef StringHash hashed = self.hash_string(string, length)
-        # First, check words seen 2+ times
-        cdef Lexeme* word_ptr = <Lexeme*>self.vocab[0][hashed]
-        if word_ptr == NULL:
-            start = self.find_split(string, length) if start == -1 else start
-            word_ptr = self._add(hashed, string, start, length)
-        return <Lexeme_addr>word_ptr
-
-    cdef Lexeme* _add(self, StringHash hashed, unicode string, int split, size_t length):
-        cdef size_t i
-        word = self.init_lexeme(string, hashed, split, length)
-        self.vocab[0][hashed] = <size_t>word
-        self.bacov[hashed] = string.encode('utf8')
-        return word
-
-    cpdef Tokens tokenize(self, unicode string):
-        cdef size_t length = len(string)
-        cdef Py_UNICODE* characters = <Py_UNICODE*>string
-
-        cdef size_t i
-        cdef Py_UNICODE c
-
-        cdef Tokens tokens = Tokens(self)
-        cdef Py_UNICODE* current = <Py_UNICODE*>calloc(len(string), sizeof(Py_UNICODE))
-        cdef size_t word_len = 0
-        cdef Lexeme* token
-        for i in range(length):
-            c = characters[i]
-            if _is_whitespace(c):
-                if word_len != 0:
-                    token = <Lexeme*>self.lookup(-1, current, word_len)
-                    while token != NULL:
-                        tokens.append(<Lexeme_addr>token)
-                        token = token.tail
-                    for j in range(word_len+1):
-                        current[j] = 0
-                    word_len = 0
-            else:
-                current[word_len] = c
-                word_len += 1
-        if word_len != 0:
-            token = <Lexeme*>self.lookup(-1, current, word_len)
-            while token != NULL:
-                tokens.append(<Lexeme_addr>token)
-                token = token.tail
-        free(current)
-        return tokens
-
-    cdef int find_split(self, unicode word, size_t length):
-        return -1
-
-    cdef Lexeme* init_lexeme(self, unicode string, StringHash hashed,
-                             int split, size_t length):
-        cdef Lexeme* word = <Lexeme*>calloc(1, sizeof(Lexeme))
-
-        word.sic = hashed
-
-        cdef unicode tail_string
-        cdef unicode lex
-        if split != 0 and split < length:
-            lex = substr(string, 0, split, length)
-            tail_string = substr(string, split, length, length)
-        else:
-            lex = string
-            tail_string = ''
-
-        word.lex = self.hash_string(lex, len(lex))
-        self.bacov[word.lex] = lex.encode('utf8')
-        word.orth = <Orthography*>self.ortho[0][word.lex]
-        if word.orth == NULL:
-            word.orth = self.init_orth(word.lex, lex)
-        word.dist = <Distribution*>self.distri[0][word.lex]
-
-        # Now recurse, and deal with the tail
-        if tail_string:
-            word.tail = <Lexeme*>self.lookup(-1, tail_string, len(tail_string))
-        return word
-
-    cdef Orthography* init_orth(self, StringHash hashed, unicode lex):
-        cdef Orthography* orth = <Orthography*>calloc(1, sizeof(Orthography))
-        orth.first = <Py_UNICODE>lex[0]
-
-        cdef int length = len(lex)
-
-        orth.length = length
-        orth.flags = set_orth_flags(lex, length)
-
-        cdef unicode last3 = substr(lex, length - 3, length, length)
-        cdef unicode norm = get_normalized(lex, length)
-        cdef unicode shape = get_word_shape(lex, length)
-
-        orth.last3 = self.hash_string(last3, len(last3))
-        orth.shape = self.hash_string(shape, len(shape))
-        orth.norm = self.hash_string(norm, len(norm))
-
-        self.bacov[orth.last3] = last3.encode('utf8')
-        self.bacov[orth.shape] = shape.encode('utf8')
-        self.bacov[orth.norm] = norm.encode('utf8')
-
-        self.ortho[0][hashed] = <size_t>orth
-        return orth


 cdef inline bint _is_whitespace(Py_UNICODE c) nogil:
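The deleted hash_string hashed the raw Py_UNICODE buffer, so hash values depended on the platform's Py_UNICODE width; the replacement hashes a stable UTF-8 byte string, and the bacov dict (vocab spelled backwards) maps each hash back to those bytes for unhash. Two quirks visible in the diff itself: both docstrings say MurmurHash64A while both call mrmr.hash32, and the new load_clusters line still calls init_lexeme, which this commit otherwise removes (new_lexeme is presumably intended). A Python rendering of the new scheme, using the separate mmh3 PyPI package as a stand-in for the repo's murmurhash wrapper (values need not match mrmr.hash32 exactly):

import mmh3  # assumed installed: pip install mmh3

def hash_string(s, seed=0):
    byte_string = s.encode("utf8")        # hash stable UTF-8 bytes,
    return mmh3.hash(byte_string, seed)   # not the in-memory buffer

bacov = {}  # reverse index: hash -> bytes, as in Language.bacov
h = hash_string("tokenizer")
bacov[h] = "tokenizer".encode("utf8")

def unhash(hash_value):
    return bacov[hash_value].decode("utf8")

assert unhash(h) == "tokenizer"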
@@ -1,4 +1,5 @@
 # cython: profile=True
+from murmurhash cimport mrmr


 cpdef bytes to_bytes(unicode string):
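With # cython: profile=True at the top of a module, its compiled functions become visible to Python's standard profiler. A runnable sketch of how the tokenizer might then be profiled, with a stand-in function (the real call would import the compiled spacy module):

import cProfile
import pstats

def tokenize(text):
    # stand-in for the compiled spacy tokenize()
    return text.split()

cProfile.run("tokenize('Hello world! ' * 1000)", "tokenize.prof")
pstats.Stats("tokenize.prof").sort_stats("cumulative").print_stats(5)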