Mirror of https://github.com/explosion/spaCy.git, synced 2025-10-31 16:07:41 +03:00
* chore: add cython-linter dev dependency
* fix: lexeme.pyx
* fix: morphology.pxd
* fix: tokenizer.pxd
* fix: vocab.pxd
* fix: morphology.pxd (line length)
* ci: add cython-lint
* ci: fix cython-lint call
* Fix kb/candidate.pyx.
* Fix kb/kb.pyx.
* Fix kb/kb_in_memory.pyx.
* Fix kb.
* Fix training/ partially.
* Fix training/. Ignore trailing whitespaces and too long lines.
* Fix ml/.
* Fix matcher/.
* Fix pipeline/.
* Fix tokens/.
* Fix build errors. Fix vocab.pyx.
* Fix cython-lint install and run.
* Fix lexeme.pyx, parts_of_speech.pxd, vectors.pyx. Temporarily disable cython-lint execution.
* Fix attrs.pyx, lexeme.pyx, symbols.pxd, isort issues.
* Make cython-lint install conditional. Fix tokenizer.pyx.
* Fix remaining files. Reenable cython-lint check.
* Readded parentheses.
* Fix test_build_dependencies().
* Add explanatory comment to cython-lint execution.

Co-authored-by: Raphael Mitsch <r.mitsch@outlook.com>
		
			
				
	
	
		
71 lines · 1.6 KiB · Cython
cimport numpy as np
from cymem.cymem cimport Pool

from ..attrs cimport attr_id_t
from ..structs cimport LexemeC, SpanC, TokenC
from ..typedefs cimport attr_t
from ..vocab cimport Vocab


cdef attr_t get_token_attr(const TokenC* token, attr_id_t feat_name) nogil
cdef attr_t get_token_attr_for_matcher(const TokenC* token, attr_id_t feat_name) nogil


ctypedef const LexemeC* const_Lexeme_ptr
ctypedef const TokenC* const_TokenC_ptr

ctypedef fused LexemeOrToken:
    const_Lexeme_ptr
    const_TokenC_ptr


cdef int set_children_from_heads(TokenC* tokens, int start, int end) except -1


cdef int _set_lr_kids_and_edges(TokenC* tokens, int start, int end, int loop_count) except -1


cdef int token_by_start(const TokenC* tokens, int length, int start_char) except -2


cdef int token_by_end(const TokenC* tokens, int length, int end_char) except -2


cdef int [:, :] _get_lca_matrix(Doc, int start, int end)


cdef class Doc:
    cdef readonly Pool mem
    cdef readonly Vocab vocab

    cdef public object _vector
    cdef public object _vector_norm

    cdef public object tensor
    cdef public object cats
    cdef public object user_data
    cdef readonly object spans

    cdef TokenC* c

    cdef public float sentiment

    cdef public dict user_hooks
    cdef public dict user_token_hooks
    cdef public dict user_span_hooks

    cdef public bint has_unknown_spaces

    cdef public object _context

    cdef int length
    cdef int max_length

    cdef public object noun_chunks_iterator

    cdef object __weakref__

    cdef int push_back(self, LexemeOrToken lex_or_tok, bint has_space) except -1

    cpdef np.ndarray to_array(self, object features)