Mirror of https://github.com/explosion/spaCy.git (synced 2025-10-31 16:07:41 +03:00)
* Tell convert CLI to store user data for Doc
* Remove assert
* Add has_unknown_spaces flag on Doc
* Do not tokenize docs with unknown spaces in Corpus
* Handle conversion of unknown spaces in Example
* Fixes
* Fixes
* Draft has_unknown_spaces support in DocBin
* Add test for serialize has_unknown_spaces
* Fix DocBin serialization when has_unknown_spaces
* Use serialization in test
79 lines · 1.8 KiB · Cython
from cymem.cymem cimport Pool
cimport numpy as np

from ..vocab cimport Vocab
from ..structs cimport TokenC, LexemeC
from ..typedefs cimport attr_t
from ..attrs cimport attr_id_t


cdef attr_t get_token_attr(const TokenC* token, attr_id_t feat_name) nogil
cdef attr_t get_token_attr_for_matcher(const TokenC* token, attr_id_t feat_name) nogil


ctypedef const LexemeC* const_Lexeme_ptr
ctypedef const TokenC* const_TokenC_ptr

ctypedef fused LexemeOrToken:
    const_Lexeme_ptr
    const_TokenC_ptr


cdef int set_children_from_heads(TokenC* tokens, int length) except -1


cdef int _set_lr_kids_and_edges(TokenC* tokens, int length, int loop_count) except -1


cdef int token_by_start(const TokenC* tokens, int length, int start_char) except -2


cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2


cdef int set_children_from_heads(TokenC* tokens, int length) except -1


cdef int [:,:] _get_lca_matrix(Doc, int start, int end)

cdef class Doc:
    cdef readonly Pool mem
    cdef readonly Vocab vocab

    cdef public object _vector
    cdef public object _vector_norm

    cdef public object tensor
    cdef public object cats
    cdef public object user_data

    cdef TokenC* c

    cdef public bint is_tagged
    cdef public bint is_parsed
    cdef public bint is_morphed

    cdef public float sentiment

    cdef public dict user_hooks
    cdef public dict user_token_hooks
    cdef public dict user_span_hooks

    cdef public bint has_unknown_spaces

    cdef public list _py_tokens

    cdef int length
    cdef int max_length


    cdef public object noun_chunks_iterator

    cdef object __weakref__

    cdef int push_back(self, LexemeOrToken lex_or_tok, bint has_space) except -1

    cpdef np.ndarray to_array(self, object features)

    cdef void set_parse(self, const TokenC* parsed) nogil
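The has_unknown_spaces field declared above is the flag the commits introduce, and the commit messages describe serializing it (plus user_data) through DocBin. A minimal sketch of how that might look from Python, assuming spaCy v3-style Doc and DocBin APIs and that a Doc built from words without explicit spaces is marked as having unknown spacing; the exact round-trip behavior is an assumption based on the commits, not code from this file:

# Sketch (not from this repo): round-trip a Doc with unknown spacing through DocBin.
import spacy
from spacy.tokens import Doc, DocBin

nlp = spacy.blank("en")

# Converted training data may only provide words, not whitespace information;
# such a Doc is assumed to carry has_unknown_spaces == True.
doc = Doc(nlp.vocab, words=["Hello", "world", "!"])
doc.user_data["source"] = "converted-example"  # arbitrary user data to preserve

doc_bin = DocBin(store_user_data=True)  # also serialize Doc.user_data
doc_bin.add(doc)
data = doc_bin.to_bytes()

# Deserialize and check that user data and the unknown-spaces flag survive,
# which is what the "Add test for serialize has_unknown_spaces" commit covers.
restored = list(DocBin(store_user_data=True).from_bytes(data).get_docs(nlp.vocab))[0]
print(restored.user_data.get("source"))
print(restored.has_unknown_spaces)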