Commit abf8b16d71
This patch takes a step towards #1487 by introducing the doc.retokenize() context manager, which handles merging spans and will soon handle splitting tokens. The idea is to do merging and splitting like this:

    with doc.retokenize() as retokenizer:
        for start, end, label in matches:
            retokenizer.merge(doc[start : end], attrs={'ent_type': label})

The retokenizer accumulates the merge requests and applies them together at the end of the block. This makes retokenization more efficient and much less error-prone. A retokenizer.split() method will then be added to handle splitting a single token into multiple tokens. These methods take `Span` and `Token` objects; users who want to work directly from offsets can instead append to the .merges and .splits lists on the retokenizer.

The doc.merge() method's behaviour remains unchanged, so this patch should be 100% backwards compatible (modulo bugs). Internally, doc.merge() fixes up the arguments (to handle the various deprecated call styles), opens the retokenizer, and makes the single merge. We can later add deprecation warnings on direct calls to doc.merge(), to migrate people to the retokenize context manager.
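To make the intended workflow concrete, here is a short runnable sketch of the pattern described above, using the Matcher to find spans and merging them in one pass. This is illustrative only: it is written against the modern spaCy API (v3-style Matcher.add and uppercase attrs keys), which differs in detail from the API at this commit.

    # Hedged sketch: modern spaCy API, not necessarily this commit's exact API.
    import spacy
    from spacy.matcher import Matcher

    nlp = spacy.blank("en")
    doc = nlp("I visited New York last week")

    matcher = Matcher(nlp.vocab)
    matcher.add("CITY", [[{"ORTH": "New"}, {"ORTH": "York"}]])

    # All merges are accumulated first, then applied together when the
    # context manager exits, so earlier merges cannot invalidate the
    # token indices used by later ones.
    with doc.retokenize() as retokenizer:
        for match_id, start, end in matcher(doc):
            retokenizer.merge(doc[start:end], attrs={"ENT_TYPE": "GPE"})

    print([t.text for t in doc])  # ['I', 'visited', 'New York', 'last', 'week']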
66 lines
1.5 KiB
Cython
from cymem.cymem cimport Pool
cimport numpy as np
from preshed.counter cimport PreshCounter

from ..vocab cimport Vocab
from ..structs cimport TokenC, LexemeC
from ..typedefs cimport attr_t
from ..attrs cimport attr_id_t


# Read a single attribute value from a token struct, without the GIL.
cdef attr_t get_token_attr(const TokenC* token, attr_id_t feat_name) nogil


ctypedef const LexemeC* const_Lexeme_ptr
ctypedef const TokenC* const_TokenC_ptr


# Fused type: functions taking LexemeOrToken compile to accept either
# a lexeme pointer or a token pointer.
ctypedef fused LexemeOrToken:
    const_Lexeme_ptr
    const_TokenC_ptr


# Map character offsets to token indices. The `except -2` clause reserves
# -2 as the error sentinel, leaving -1 free to mean "no token here".
cdef int token_by_start(const TokenC* tokens, int length, int start_char) except -2

cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2


# Fill in the left/right children counts and edges from the head offsets.
cdef int set_children_from_heads(TokenC* tokens, int length) except -1


cdef class Doc:
    cdef readonly Pool mem
    cdef readonly Vocab vocab

    cdef public object _vector
    cdef public object _vector_norm

    cdef public object tensor
    cdef public object cats
    cdef public object user_data

    # The document's tokens, stored as a C array of structs.
    cdef TokenC* c

    cdef public bint is_tagged
    cdef public bint is_parsed

    cdef public float sentiment

    # User-registered hooks that override Doc, Token and Span behaviour.
    cdef public dict user_hooks
    cdef public dict user_token_hooks
    cdef public dict user_span_hooks

    cdef public list _py_tokens

    # Number of tokens in `c`, and the allocated capacity.
    cdef int length
    cdef int max_length

    cdef public object noun_chunks_iterator

    cdef object __weakref__

    # Append a lexeme or token to the document.
    cdef int push_back(self, LexemeOrToken lex_or_tok, bint has_space) except -1

    # Export token attributes to a numpy array, one row per token.
    cpdef np.ndarray to_array(self, object features)

    cdef void set_parse(self, const TokenC* parsed) nogil