2014-07-07 09:36:43 +04:00
|
|
|
# cython: profile=True
|
2014-07-05 22:51:42 +04:00
|
|
|
from __future__ import unicode_literals
|
2014-07-07 06:21:06 +04:00
|
|
|
|
2014-07-07 10:05:29 +04:00
|
|
|
from libc.stdlib cimport calloc, free
|
|
|
|
|
2014-07-07 06:21:06 +04:00
|
|
|
from ext.murmurhash cimport MurmurHash64A
|
|
|
|
from ext.murmurhash cimport MurmurHash64B
|
|
|
|
|
|
|
|
from spacy.lexeme cimport init_lexeme
|
|
|
|
from spacy.lexeme cimport BLANK_WORD
|
|
|
|
|
|
|
|
from spacy.string_tools cimport is_whitespace
|
|
|
|
|
|
|
|
from . import util
|
2014-07-07 09:36:43 +04:00
|
|
|
from os import path
|
|
|
|
cimport cython
|
2014-07-07 06:21:06 +04:00
|
|
|
|
|
|
|
|
2014-07-07 09:36:43 +04:00
|
|
|
cdef load_tokenization(Vocab* vocab, dict bacov, token_rules):
    '''Pre-load special-case tokenization rules into the vocab.

    Each rule in token_rules is a (chunk, lex, tokens) triple: the raw chunk
    string, the form of its first token, and the remaining token strings.
    The tokens are linked together via Lexeme.tail, so looking up the chunk
    later yields the whole chain.

    Mutates vocab (hash -> Lexeme address) and bacov (hash -> string) in place
    via _add.
    '''
    cdef Lexeme* word
    cdef StringHash hashed
    for chunk, lex, tokens in token_rules:
        hashed = hash_string(chunk, len(chunk))
        # Rules must not collide with an already-interned chunk.
        assert vocab[0][hashed] == 0, chunk
        # Head of the chain is stored under the full chunk's hash.  NULL
        # splitter: the split point is given explicitly (len(lex)).
        word = _add(vocab, bacov, <Splitter>NULL, hashed, lex, len(lex), len(lex))
        for i, lex in enumerate(tokens):
            # Key the i-th tail token under a synthetic string unique to
            # (chunk, position, token), so tails don't clash with real words.
            token_string = '%s:@:%d:@:%s' % (chunk, i, lex)
            length = len(token_string)
            hashed = hash_string(token_string, length)
            word.tail = _add(vocab, bacov, <Splitter>NULL, hashed, lex, 0, len(lex))
            word = word.tail
|
|
|
|
|
|
|
|
|
2014-07-07 09:36:43 +04:00
|
|
|
cdef load_browns(Vocab* vocab, dict bacov, Splitter find_split):
    '''Load the Brown-cluster word list from data/en/clusters and intern every
    token into the vocab.

    Mutates vocab and bacov in place via _add.  Each line of the clusters file
    is "<cluster_bits> <token> <freq>".
    '''
    cdef Lexeme* w
    data_dir = path.join(path.dirname(__file__), '..', 'data', 'en')
    case_stats = util.load_case_stats(data_dir)
    brown_loc = path.join(data_dir, 'clusters')
    cdef size_t start
    cdef int end
    with util.utf8open(brown_loc) as browns_file:
        for i, line in enumerate(browns_file):
            cluster_str, token_string, freq_str = line.split()
            # Decode as a little-endian string, so that we can do & 15 to get
            # the first 4 bits. See redshift._parse_features.pyx
            cluster = int(cluster_str[::-1], 2)
            upper_pc, title_pc = case_stats.get(token_string.lower(), (0.0, 0.0))
            start = 0
            end = -1
            # NOTE(review): cluster, upper_pc/title_pc, start and end are
            # computed but never passed to _add below — presumably meant to be
            # attached to the Lexeme; confirm against init_lexeme.
            hashed = hash_string(token_string, len(token_string))

            # Whole token interned as a single word: split == length.
            word = _add(vocab, bacov, find_split, hashed, token_string,
                        len(token_string), len(token_string))
|
|
|
|
|
|
|
|
|
|
|
|
cdef vector[Lexeme_addr] tokenize(Vocab* vocab, dict bacov, Splitter splitter,
                                  unicode string) except *:
    '''Split string on whitespace (space, newline, tab — see _is_whitespace)
    and return the addresses of the Lexeme structs for each word.  A word that
    was pre-chunked (e.g. by load_tokenization) contributes its whole tail
    chain, one address per token.

    Unknown words are interned on the fly by lookup(), using splitter to
    decide where to split off clitics/punctuation.
    '''
    cdef size_t length = len(string)
    cdef Py_UNICODE* characters = <Py_UNICODE*>string

    cdef size_t i
    cdef Py_UNICODE c
    cdef size_t word_len = 0
    cdef Lexeme* token

    cdef vector[Lexeme_addr] tokens = vector[Lexeme_addr]()
    if length == 0:
        # Nothing to do — also avoids calloc(0), whose return value is
        # implementation-defined and could trigger a spurious MemoryError.
        return tokens
    # Scratch buffer for the word currently being accumulated.  A word can
    # never be longer than the input string.
    cdef Py_UNICODE* current = <Py_UNICODE*>calloc(length, sizeof(Py_UNICODE))
    if current == NULL:
        raise MemoryError()
    # lookup() is declared `except 0` and may raise; the try/finally ensures
    # the scratch buffer is freed on every exit path (the original leaked it
    # if lookup raised).
    try:
        for i in range(length):
            c = characters[i]
            if _is_whitespace(c):
                if word_len != 0:
                    # -1: let lookup() ask the splitter for the split point.
                    token = <Lexeme*>lookup(vocab, bacov, splitter, -1, current,
                                            word_len)
                    while token != NULL:
                        tokens.push_back(<Lexeme_addr>token)
                        token = token.tail
                    # Zero the used portion of the buffer for the next word.
                    for j in range(word_len + 1):
                        current[j] = 0
                    word_len = 0
            else:
                current[word_len] = c
                word_len += 1
        # Flush the final word if the string did not end in whitespace.
        if word_len != 0:
            token = <Lexeme*>lookup(vocab, bacov, splitter, -1, current, word_len)
            while token != NULL:
                tokens.push_back(<Lexeme_addr>token)
                token = token.tail
    finally:
        free(current)
    return tokens
|
|
|
|
|
2014-07-07 09:36:43 +04:00
|
|
|
cdef inline bint _is_whitespace(Py_UNICODE c) nogil:
    # Space, newline and tab are the only separators tokenize() recognises.
    return c == ' ' or c == '\n' or c == '\t'
|
|
|
|
|
|
|
|
cdef Lexeme_addr lookup(Vocab* vocab, dict bacov, Splitter find_split, int start,
                        Py_UNICODE* string, size_t length) except 0:
    '''Fetch a Lexeme representing a word string. If the word has not been seen,
    construct one, splitting off any attached punctuation or clitics. A
    reference to BLANK_WORD is returned for the empty string.

    To specify the boundaries of the word if it has not been seen, use lookup_chunk.

    start == -1 means "ask find_split for the split point"; any other value is
    used as the split directly.  Declared `except 0`: a zero return signals an
    exception, so callers must not treat 0 as a valid address.
    '''
    if length == 0:
        # Shared sentinel — callers must not mutate it.
        return <Lexeme_addr>&BLANK_WORD
    cdef StringHash hashed = hash_string(string, length)
    # vocab maps hash -> Lexeme address; 0/NULL means unseen.
    cdef Lexeme* word_ptr = <Lexeme*>vocab[0][hashed]
    if word_ptr == NULL:
        start = find_split(string, length) if start == -1 else start
        word_ptr = _add(vocab, bacov, find_split, hashed, string, start, length)
    return <Lexeme_addr>word_ptr
|
2014-07-05 22:51:42 +04:00
|
|
|
|
|
|
|
|
|
|
|
cpdef vector[size_t] expand_chunk(size_t addr) except *:
    '''Walk the tail chain starting at the Lexeme whose address is `addr` and
    return the address of every node, head first.  A NULL (zero) address
    yields an empty result.'''
    cdef vector[size_t] addresses = vector[size_t]()
    cdef Lexeme* node = <Lexeme*>addr
    while node != NULL:
        addresses.push_back(<size_t>node)
        node = node.tail
    return addresses
|
|
|
|
|
|
|
|
|
2014-07-07 09:36:43 +04:00
|
|
|
cdef StringHash hash_string(Py_UNICODE* s, size_t length) nogil:
    '''Hash unicode with MurmurHash64A.

    length is in Py_UNICODE code units; the byte count passed to the hash is
    scaled by sizeof(Py_UNICODE).  Seed is fixed at 0 so hashes are stable
    across runs.
    '''
    return MurmurHash64A(<Py_UNICODE*>s, length * sizeof(Py_UNICODE), 0)
|
|
|
|
|
|
|
|
|
|
|
|
cdef unicode unhash(dict bacov, StringHash hash_value):
    '''Fetch a string from the reverse index, given its hash value.

    Raises KeyError if the hash was never interned via _add.
    '''
    return bacov[hash_value]
|
|
|
|
|
|
|
|
|
2014-07-07 09:36:43 +04:00
|
|
|
@cython.nonecheck(False)
cdef Lexeme* _add(Vocab* vocab, dict bacov, Splitter find_split, StringHash hashed,
                  unicode string, int split, size_t length):
    '''Construct a Lexeme for string and register it in both directions:
    vocab[hashed] -> Lexeme address, and bacov[hashed] -> string ("bacov" is
    the backwards vocab, used by unhash).

    Callers are expected to have checked that hashed is not already present;
    an existing entry is silently overwritten.  Returns the new Lexeme.
    '''
    word = init_lexeme(vocab, bacov, find_split, string, hashed, split, length)
    vocab[0][hashed] = <Lexeme_addr>word
    bacov[hashed] = string
    return word
|
2014-07-07 09:36:43 +04:00
|
|
|
|
|
|
|
|