mirror of
https://github.com/explosion/spaCy.git
synced 2024-11-11 12:18:04 +03:00
a119667a36
* Clean up spacy.tokens * Update `set_children_from_heads`: * Don't check `dep` when setting lr_* or sentence starts * Set all non-sentence starts to `False` * Use `set_children_from_heads` in `Token.head` setter * Reduce similar/duplicated code (admittedly adds a bit of overhead) * Update sentence starts consistently * Remove unused `Doc.set_parse` * Minor changes: * Declare cython variables (to avoid cython warnings) * Clean up imports * Modify set_children_from_heads to set token range Modify `set_children_from_heads` so that it adjusts tokens within a specified range rather than the whole document. Modify the `Token.head` setter to adjust only the tokens affected by the new head assignment.
74 lines
1.7 KiB
Cython
74 lines
1.7 KiB
Cython
from cymem.cymem cimport Pool
|
|
cimport numpy as np
|
|
|
|
from ..vocab cimport Vocab
|
|
from ..structs cimport TokenC, LexemeC
|
|
from ..typedefs cimport attr_t
|
|
from ..attrs cimport attr_id_t
|
|
|
|
|
|
cdef attr_t get_token_attr(const TokenC* token, attr_id_t feat_name) nogil
|
|
cdef attr_t get_token_attr_for_matcher(const TokenC* token, attr_id_t feat_name) nogil
|
|
|
|
|
|
ctypedef const LexemeC* const_Lexeme_ptr
|
|
ctypedef const TokenC* const_TokenC_ptr
|
|
|
|
ctypedef fused LexemeOrToken:
|
|
const_Lexeme_ptr
|
|
const_TokenC_ptr
|
|
|
|
|
|
cdef int set_children_from_heads(TokenC* tokens, int start, int end) except -1
|
|
|
|
|
|
cdef int _set_lr_kids_and_edges(TokenC* tokens, int start, int end, int loop_count) except -1
|
|
|
|
|
|
cdef int token_by_start(const TokenC* tokens, int length, int start_char) except -2
|
|
|
|
|
|
cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2
|
|
|
|
|
|
cdef int [:,:] _get_lca_matrix(Doc, int start, int end)
|
|
|
|
cdef class Doc:
|
|
cdef readonly Pool mem
|
|
cdef readonly Vocab vocab
|
|
|
|
cdef public object _vector
|
|
cdef public object _vector_norm
|
|
|
|
cdef public object tensor
|
|
cdef public object cats
|
|
cdef public object user_data
|
|
|
|
cdef TokenC* c
|
|
|
|
cdef public bint is_tagged
|
|
cdef public bint is_parsed
|
|
cdef public bint is_morphed
|
|
|
|
cdef public float sentiment
|
|
|
|
cdef public dict user_hooks
|
|
cdef public dict user_token_hooks
|
|
cdef public dict user_span_hooks
|
|
|
|
cdef public bint has_unknown_spaces
|
|
|
|
cdef public list _py_tokens
|
|
|
|
cdef int length
|
|
cdef int max_length
|
|
|
|
|
|
cdef public object noun_chunks_iterator
|
|
|
|
cdef object __weakref__
|
|
|
|
cdef int push_back(self, LexemeOrToken lex_or_tok, bint has_space) except -1
|
|
|
|
cpdef np.ndarray to_array(self, object features)
|