mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-10 19:57:17 +03:00
82277f63a3
This patch improves tokenizer speed by about 10%, and reduces memory usage in the `Vocab` by removing a redundant index. The `vocab._by_orth` and `vocab._by_hash` indexed on different data in v1, but in v2 the orth and the hash are identical. The patch also fixes an uninitialized variable in the tokenizer, the `has_special` flag. This checks whether a chunk we're tokenizing triggers a special-case rule. If it does, then we avoid caching within the chunk. This check led to incorrectly rejecting some chunks from the cache. With the `en_core_web_md` model, we now tokenize the IMDB train data at 503,104k words per second. Prior to this patch, we had 465,764k words per second. Before switching to the regex library and supporting more languages, we had 1.3m words per second for the tokenizer. In order to recover the missing speed, we need to: * Fix the variable-length lookarounds in the suffix, infix and `token_match` rules * Improve the performance of the `token_match` regex * Switch back from the `regex` library to the `re` library. ## Checklist <!--- Before you submit the PR, go over this checklist and make sure you can tick off all the boxes. [] -> [x] --> - [x] I have submitted the spaCy Contributor Agreement. - [x] I ran the tests, and all new and existing tests passed. - [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
46 lines · 1.2 KiB · Cython
from libcpp.vector cimport vector
from preshed.maps cimport PreshMap
from cymem.cymem cimport Pool
from murmurhash.mrmr cimport hash64
from .structs cimport LexemeC, TokenC
from .typedefs cimport utf8_t, attr_t, hash_t
from .strings cimport StringStore
from .morphology cimport Morphology
# Module-level sentinel lexeme — presumably used for padding / empty slots;
# defined in the corresponding .pyx (TODO confirm against vocab.pyx).
cdef LexemeC EMPTY_LEXEME
cdef union LexemesOrTokens:
    # A cache payload is stored as exactly one of these two views:
    # an array of borrowed LexemeC pointers, or an array of TokenC structs.
    # Which member is live is recorded by the owning _Cached.is_lex flag.
    const LexemeC* const* lexemes
    const TokenC* tokens
cdef struct _Cached:
    # One cache entry: `data` holds the payload, `is_lex` selects which
    # union member of `data` is live (lexemes when true, tokens otherwise),
    # and `length` is the number of items in the payload array.
    LexemesOrTokens data
    bint is_lex
    int length
cdef class Vocab:
    # Storage map of LexemeC structs, keyed by orth. In v2 the orth value and
    # the hash are identical, so a single index (_by_orth) suffices.
    cdef Pool mem                         # memory pool owning the LexemeC structs
    cpdef readonly StringStore strings    # string <-> hash interning table
    cpdef public Morphology morphology
    cpdef public object vectors
    cdef readonly int length              # number of lexemes stored
    cdef public object data_dir
    cdef public object lex_attr_getters   # attr_id -> callable, computes lexeme attrs
    cdef public object cfg

    # Look up (or create) the lexeme for a string / an orth id.
    cdef const LexemeC* get(self, Pool mem, unicode string) except NULL
    cdef const LexemeC* get_by_orth(self, Pool mem, attr_t orth) except NULL
    cdef const TokenC* make_fused_token(self, substrings) except NULL

    # NOTE(review): _new_lexeme was declared twice in the original file
    # (a redundant duplicate declaration); only one declaration is kept.
    cdef const LexemeC* _new_lexeme(self, Pool mem, unicode string) except NULL
    cdef int _add_lex_to_vocab(self, hash_t key, const LexemeC* lex) except -1

    cdef PreshMap _by_orth                # orth/hash -> LexemeC* index