mirror of https://github.com/explosion/spaCy.git
synced 2025-11-04 01:48:04 +03:00
* Upd Tokens to use vector, with bounds checking.

This commit is contained in:
parent 0f6bf2a2ee
commit f77b7098c0
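The diff below swaps the hand-managed LexemeC** array inside Tokens for a C++ vector[LexemeC*], so the vector owns the grow-on-append bookkeeping that was previously done with calloc/realloc and the length/size counters. A minimal sketch of that pattern, using a made-up Item struct rather than spaCy's types:

    # distutils: language = c++
    # vector_sketch.pyx -- illustrative only; Item and ItemList are hypothetical names.
    from libcpp.vector cimport vector

    cdef struct Item:
        int value

    cdef class ItemList:
        # One vector replaces the old trio of Item** array, size_t length, size_t size.
        cdef vector[Item*] v

        cdef void push_back(self, Item* item):
            # The vector grows itself; no manual realloc or capacity doubling.
            self.v.push_back(item)

        def __len__(self):
            return self.v.size()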
@@ -18,6 +18,8 @@ from .util import read_lang_data
 from spacy.tokens import Tokens
 from spacy.lexeme cimport LexemeC, lexeme_init
 from murmurhash.mrmr cimport hash64
+from cpython.ref cimport Py_INCREF
+

 from spacy._hashing cimport PointerHash
 from spacy import orth
@@ -194,11 +196,11 @@ cdef class Language:
         if lexemes != NULL:
             i = 0
             while lexemes[i] != NULL:
-                tokens.push_back(lexemes[i])
+                tokens.v.push_back(lexemes[i])
                 i += 1
             return 0
         cdef uint64_t key = string.key
-        cdef size_t first_token = tokens.length
+        cdef size_t first_token = len(tokens)
         cdef int split
         cdef int remaining = string.n
         cdef String prefix
@@ -210,14 +212,14 @@ cdef class Language:
             if lexemes != NULL:
                 i = 0
                 while lexemes[i] != NULL:
-                    tokens.push_back(lexemes[i])
+                    tokens.v.push_back(lexemes[i])
                     i += 1
             else:
-                tokens.push_back(<LexemeC*>self.lexicon.get(&prefix))
-        lexemes = <LexemeC**>calloc(tokens.length - first_token, sizeof(LexemeC*))
+                tokens.v.push_back(<LexemeC*>self.lexicon.get(&prefix))
+        lexemes = <LexemeC**>calloc(len(tokens) - first_token, sizeof(LexemeC*))
         cdef size_t j
-        for i, j in enumerate(range(first_token, tokens.length)):
-            lexemes[i] = tokens.lexemes[j]
+        for i, j in enumerate(range(first_token, tokens.v.size())):
+            lexemes[i] = tokens.v[j]
         self.cache.set(key, lexemes)

     cdef int _split_one(self, Py_UNICODE* characters, size_t length):
@@ -307,10 +309,8 @@ cdef class Lexicon:
         return Lexeme(<size_t>lexeme)


-_unicodes = set()
 cdef void string_from_unicode(String* s, unicode uni):
-    global _unicodes
-    _unicodes.add(uni)
+    Py_INCREF(uni)
     cdef Py_UNICODE* c_uni = <Py_UNICODE*>uni
     string_from_slice(s, c_uni, 0, len(uni))

@@ -7,9 +7,8 @@ cdef struct LexemeC:
     size_t cluster

     char* string
-
     char** views

     flag_t flags

@@ -1,4 +1,6 @@
 from libc.stdlib cimport calloc, free
+from cpython.ref cimport Py_INCREF
+

 cdef LexemeC* lexeme_init(unicode string, double prob, size_t cluster,
                      list views, set flags):
@@ -21,13 +23,11 @@ cdef int lexeme_free(LexemeC* lexeme) except -1:
     free(lexeme)


-cdef set _strings = set()
 cdef char* intern_and_encode(unicode string, size_t* length):
-    global _strings
-    cdef bytes decoded = string.encode('utf8')
-    cdef bytes utf8_string = intern(decoded)
+    cdef bytes utf8_string = intern(string.encode('utf8'))
+    Py_INCREF(utf8_string)
     length[0] = len(utf8_string)
-    _strings.add(utf8_string)
     return <char*>utf8_string

@@ -1,50 +1,46 @@
 from spacy.lexeme cimport LexemeC
+from libcpp.vector cimport vector


 cdef class Tokens:
-    cdef size_t length
-    cdef size_t size
+    cdef vector[LexemeC*] v

-    cdef LexemeC** lexemes
-    cdef int push_back(self, LexemeC* lexeme) except -1

-    cpdef size_t id(self, size_t i)
+    cpdef size_t id(self, size_t i) except 0
     cpdef unicode string(self, size_t i)
-    cpdef double prob(self, size_t i)
-    cpdef size_t cluster(self, size_t i)
-    cpdef bint check_flag(self, size_t i, size_t flag_id)
+    cpdef double prob(self, size_t i) except 1
+    cpdef size_t cluster(self, size_t i) except *
+    cpdef bint check_flag(self, size_t i, size_t flag_id) except *
     cpdef unicode string_view(self, size_t i, size_t view_id)

-    cpdef size_t canon(self, size_t i)
-    cpdef size_t shape(self, size_t i)
-    cpdef size_t non_sparse(self, size_t i)
-    cpdef size_t asciied(self, size_t i)
+    cpdef size_t canon(self, size_t i) except 0
+    cpdef size_t shape(self, size_t i) except 0
+    cpdef size_t non_sparse(self, size_t i) except 0
+    cpdef size_t asciied(self, size_t i) except 0
     cpdef unicode canon_string(self, size_t i)
     cpdef unicode shape_string(self, size_t i)
     cpdef unicode non_sparse_string(self, size_t i)
     cpdef unicode asciied_string(self, size_t i)
-    cpdef bint is_alpha(self, size_t i)
-    cpdef bint is_ascii(self, size_t i)
-    cpdef bint is_digit(self, size_t i)
-    cpdef bint is_lower(self, size_t i)
-    cpdef bint is_punct(self, size_t i)
-    cpdef bint is_space(self, size_t i)
-    cpdef bint is_title(self, size_t i)
-    cpdef bint is_upper(self, size_t i)
-    cpdef bint can_adj(self, size_t i)
-    cpdef bint can_adp(self, size_t i)
-    cpdef bint can_adv(self, size_t i)
-    cpdef bint can_conj(self, size_t i)
-    cpdef bint can_det(self, size_t i)
-    cpdef bint can_noun(self, size_t i)
-    cpdef bint can_num(self, size_t i)
-    cpdef bint can_pdt(self, size_t i)
-    cpdef bint can_pos(self, size_t i)
-    cpdef bint can_pron(self, size_t i)
-    cpdef bint can_prt(self, size_t i)
-    cpdef bint can_punct(self, size_t i)
-    cpdef bint can_verb(self, size_t i)
-    cpdef bint oft_lower(self, size_t i)
-    cpdef bint oft_title(self, size_t i)
-    cpdef bint oft_upper(self, size_t i)
+    cpdef bint is_alpha(self, size_t i) except *
+    cpdef bint is_ascii(self, size_t i) except *
+    cpdef bint is_digit(self, size_t i) except *
+    cpdef bint is_lower(self, size_t i) except *
+    cpdef bint is_punct(self, size_t i) except *
+    cpdef bint is_space(self, size_t i) except *
+    cpdef bint is_title(self, size_t i) except *
+    cpdef bint is_upper(self, size_t i) except *
+    cpdef bint can_adj(self, size_t i) except *
+    cpdef bint can_adp(self, size_t i) except *
+    cpdef bint can_adv(self, size_t i) except *
+    cpdef bint can_conj(self, size_t i) except *
+    cpdef bint can_det(self, size_t i) except *
+    cpdef bint can_noun(self, size_t i) except *
+    cpdef bint can_num(self, size_t i) except *
+    cpdef bint can_pdt(self, size_t i) except *
+    cpdef bint can_pos(self, size_t i) except *
+    cpdef bint can_pron(self, size_t i) except *
+    cpdef bint can_prt(self, size_t i) except *
+    cpdef bint can_punct(self, size_t i) except *
+    cpdef bint can_verb(self, size_t i) except *
+    cpdef bint oft_lower(self, size_t i) except *
+    cpdef bint oft_title(self, size_t i) except *
+    cpdef bint oft_upper(self, size_t i) except *
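The tokens.pxd hunk above also adds `except` clauses (`except 0`, `except 1`, `except *`) to the cpdef accessors, reserving those return values as error signals. In the Cython of this era, a cdef/cpdef function with a C return type and no `except` clause cannot propagate a Python exception to a C-level caller; the exception is printed and swallowed. A small sketch of the difference, with hypothetical names:

    # except_sketch.pyx -- illustrative only.
    cdef class Scores:
        cdef double data[8]

        # No `except` clause: an IndexError raised here is merely printed as
        # a warning when called at C level, and 0.0 is returned to the caller.
        cpdef double get_unchecked(self, size_t i):
            if i >= 8:
                raise IndexError(i)
            return self.data[i]

        # `except -1` reserves -1.0 as an error signal; `except *` instead
        # checks the error indicator after every call, which works when no
        # return value can be reserved.
        cpdef double get(self, size_t i) except -1:
            if i >= 8:
                raise IndexError(i)
            return self.data[i]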
spacy/tokens.pyx (164 lines changed)
@@ -65,148 +65,132 @@ cdef class Tokens:
     """
     def __cinit__(self, string_length=0):
         size = int(string_length / 3) if string_length >= 3 else 1
-        self.lexemes = <LexemeC**>calloc(size, sizeof(LexemeC*))
-        self.size = size
-        self.length = 0
-
-    def __dealloc__(self):
-        free(self.lexemes)
+        self.v.reserve(size)

     def __getitem__(self, i):
-        if i >= self.length:
-            raise IndexError
-        return Lexeme(<size_t>self.lexemes[i])
+        return Lexeme(<size_t>self.v.at(i))

     def __len__(self):
-        return self.length
+        return self.v.size()

     def append(self, Lexeme lexeme):
-        self.push_back(lexeme._c)
-
-    cdef int push_back(self, LexemeC* lexeme) except -1:
-        if (self.length + 1) == self.size:
-            self.size *= 2
-            self.lexemes = <LexemeC**>realloc(self.lexemes, self.size * sizeof(LexemeC*))
-        self.lexemes[self.length] = lexeme
-        self.length += 1
+        self.v.push_back(lexeme._c)

     cpdef unicode string(self, size_t i):
-        cdef bytes utf8_string = self.lexemes[i].string[:self.lexemes[i].length]
+        cdef bytes utf8_string = self.v[i].string[:self.v[i].length]
         cdef unicode string = utf8_string.decode('utf8')
         return string

-    cpdef size_t id(self, size_t i):
-        return id(self.lexemes[i].string)
+    cpdef size_t id(self, size_t i) except 0:
+        return <size_t>&self.v.at(i).string

-    cpdef double prob(self, size_t i):
-        return self.lexemes[i].prob
+    cpdef double prob(self, size_t i) except 1:
+        return self.v.at(i).prob

-    cpdef size_t cluster(self, size_t i):
-        return self.lexemes[i].cluster
+    cpdef size_t cluster(self, size_t i) except *:
+        return self.v.at(i).cluster

-    cpdef bint check_flag(self, size_t i, size_t flag_id):
-        return lexeme_check_flag(self.lexemes[i], flag_id)
+    cpdef bint check_flag(self, size_t i, size_t flag_id) except *:
+        return lexeme_check_flag(self.v[i], flag_id)

     cpdef unicode string_view(self, size_t i, size_t view_id):
-        return lexeme_string_view(self.lexemes[i], view_id)
+        return lexeme_string_view(self.v.at(i), view_id)

     # Provide accessor methods for the features supported by the language.
     # Without these, clients have to use the underlying string_view and check_flag
     # methods, which requires them to know the IDs.
     cpdef unicode canon_string(self, size_t i):
-        return lexeme_string_view(self.lexemes[i], View_CanonForm)
+        return lexeme_string_view(self.v.at(i), View_CanonForm)

     cpdef unicode shape_string(self, size_t i):
-        return lexeme_string_view(self.lexemes[i], View_WordShape)
+        return lexeme_string_view(self.v.at(i), View_WordShape)

     cpdef unicode non_sparse_string(self, size_t i):
-        return lexeme_string_view(self.lexemes[i], View_NonSparse)
+        return lexeme_string_view(self.v.at(i), View_NonSparse)

     cpdef unicode asciied_string(self, size_t i):
-        return lexeme_string_view(self.lexemes[i], View_Asciied)
+        return lexeme_string_view(self.v.at(i), View_Asciied)

-    cpdef size_t canon(self, size_t i):
-        return id(self.lexemes[i].views[<size_t>View_CanonForm])
+    cpdef size_t canon(self, size_t i) except *:
+        return id(self.v.at(i).views[<size_t>View_CanonForm])

-    cpdef size_t shape(self, size_t i):
-        return id(self.lexemes[i].views[<size_t>View_WordShape])
+    cpdef size_t shape(self, size_t i) except *:
+        return id(self.v.at(i).views[<size_t>View_WordShape])

-    cpdef size_t non_sparse(self, size_t i):
-        return id(self.lexemes[i].views[<size_t>View_NonSparse])
+    cpdef size_t non_sparse(self, size_t i) except *:
+        return id(self.v.at(i).views[<size_t>View_NonSparse])

-    cpdef size_t asciied(self, size_t i):
-        return id(self.lexemes[i].views[<size_t>View_Asciied])
+    cpdef size_t asciied(self, size_t i) except *:
+        return id(self.v.at(i).views[<size_t>View_Asciied])

-    cpdef bint is_alpha(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsAlpha)
+    cpdef bint is_alpha(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsAlpha)

-    cpdef bint is_ascii(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsAscii)
+    cpdef bint is_ascii(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsAscii)

-    cpdef bint is_digit(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsDigit)
+    cpdef bint is_digit(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsDigit)

-    cpdef bint is_lower(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsLower)
+    cpdef bint is_lower(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsLower)

-    cpdef bint is_punct(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsPunct)
+    cpdef bint is_punct(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsPunct)

-    cpdef bint is_space(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsSpace)
+    cpdef bint is_space(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsSpace)

-    cpdef bint is_title(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsTitle)
+    cpdef bint is_title(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsTitle)

-    cpdef bint is_upper(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_IsUpper)
+    cpdef bint is_upper(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_IsUpper)

-    cpdef bint can_adj(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanAdj)
+    cpdef bint can_adj(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanAdj)

-    cpdef bint can_adp(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanAdp)
+    cpdef bint can_adp(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanAdp)

-    cpdef bint can_adv(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanAdv)
+    cpdef bint can_adv(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanAdv)

-    cpdef bint can_conj(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanConj)
+    cpdef bint can_conj(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanConj)

-    cpdef bint can_det(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanDet)
+    cpdef bint can_det(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanDet)

-    cpdef bint can_noun(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanNoun)
+    cpdef bint can_noun(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanNoun)

-    cpdef bint can_num(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanNum)
+    cpdef bint can_num(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanNum)

-    cpdef bint can_pdt(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanPdt)
+    cpdef bint can_pdt(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanPdt)

-    cpdef bint can_pos(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanPos)
+    cpdef bint can_pos(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanPos)

-    cpdef bint can_pron(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanPron)
+    cpdef bint can_pron(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanPron)

-    cpdef bint can_prt(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanPrt)
+    cpdef bint can_prt(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanPrt)

-    cpdef bint can_punct(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanPunct)
+    cpdef bint can_punct(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanPunct)

-    cpdef bint can_verb(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_CanVerb)
+    cpdef bint can_verb(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_CanVerb)

-    cpdef bint oft_lower(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_OftLower)
-
-    cpdef bint oft_title(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_OftTitle)
-
-    cpdef bint oft_upper(self, size_t i):
-        return lexeme_check_flag(self.lexemes[i], Flag_OftUpper)
+    cpdef bint oft_lower(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_OftLower)
+
+    cpdef bint oft_title(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_OftTitle)
+
+    cpdef bint oft_upper(self, size_t i) except *:
+        return lexeme_check_flag(self.v.at(i), Flag_OftUpper)
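With the explicit `if i >= self.length: raise IndexError` check gone from __getitem__, bounds checking now comes from vector.at(): Cython's libcpp.vector declares at() with `except +`, so an out-of-range access raises a C++ std::out_of_range that surfaces in Python as an IndexError. A tiny self-contained demonstration of that translation (not spaCy code):

    # distutils: language = c++
    # at_sketch.pyx -- illustrative only.
    from libcpp.vector cimport vector

    def nth(values, size_t i):
        cdef vector[int] v
        for x in values:
            v.push_back(x)
        # at() bounds-checks; operator[] would not. Out of range raises IndexError.
        return v.at(i)

So nth([1, 2, 3], 1) returns 2, while nth([1, 2, 3], 5) raises IndexError instead of reading past the buffer.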