From b64cbe511326e0d0916e8d97d96773bc2e262579 Mon Sep 17 00:00:00 2001
From: Paul O'Leary McCann
Date: Mon, 23 Jan 2023 13:41:21 +0900
Subject: [PATCH] Remove more unused mem args

---
 spacy/tokenizer.pxd | 2 +-
 spacy/tokenizer.pyx | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/spacy/tokenizer.pxd b/spacy/tokenizer.pxd
index 6f9dfc90f..ba268eaeb 100644
--- a/spacy/tokenizer.pxd
+++ b/spacy/tokenizer.pxd
@@ -37,7 +37,7 @@ cdef class Tokenizer:
                                      bint with_special_cases) except -1
     cdef int _tokenize(self, Doc tokens, str span, hash_t key,
                        int* has_special, bint with_special_cases) except -1
-    cdef str _split_affixes(self, Pool mem, str string,
+    cdef str _split_affixes(self, str string,
                             vector[LexemeC*] *prefixes,
                             vector[LexemeC*] *suffixes, int* has_special,
                             bint with_special_cases)
diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index 8840b181c..0466b041a 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -389,14 +389,14 @@ cdef class Tokenizer:
         cdef vector[LexemeC*] suffixes
         cdef int orig_size
         orig_size = tokens.length
-        span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes,
+        span = self._split_affixes(span, &prefixes, &suffixes,
                                    has_special, with_special_cases)
         self._attach_tokens(tokens, span, &prefixes, &suffixes,
                             has_special, with_special_cases)
         self._save_cached(&tokens.c[orig_size], orig_key, has_special,
                           tokens.length - orig_size)
 
-    cdef str _split_affixes(self, Pool mem, str string,
+    cdef str _split_affixes(self, str string,
                             vector[const LexemeC*] *prefixes,
                             vector[const LexemeC*] *suffixes, int* has_special,