2014-12-19 23:54:49 +03:00
|
|
|
# cython: embedsignature=True
|
2017-11-15 15:55:46 +03:00
|
|
|
# cython: profile=True
|
2017-04-15 13:05:47 +03:00
|
|
|
# coding: utf8
|
2014-12-19 23:54:49 +03:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
|
|
|
from cython.operator cimport dereference as deref
|
|
|
|
from cython.operator cimport preincrement as preinc
|
|
|
|
from cymem.cymem cimport Pool
|
|
|
|
from preshed.maps cimport PreshMap
|
2015-07-22 05:49:39 +03:00
|
|
|
cimport cython
|
2014-12-19 23:54:49 +03:00
|
|
|
|
2019-03-08 13:42:26 +03:00
|
|
|
from collections import OrderedDict
|
|
|
|
import re
|
2020-04-28 14:37:37 +03:00
|
|
|
import warnings
|
2019-03-08 13:42:26 +03:00
|
|
|
|
2015-07-13 21:20:58 +03:00
|
|
|
from .tokens.doc cimport Doc
|
2017-10-27 22:07:59 +03:00
|
|
|
from .strings cimport hash_string
|
2020-03-02 13:55:02 +03:00
|
|
|
from .compat import unescape_unicode, basestring_
|
2019-11-20 15:07:25 +03:00
|
|
|
from .attrs import intify_attrs
|
|
|
|
from .symbols import ORTH
|
2019-03-08 13:42:26 +03:00
|
|
|
|
2020-04-28 14:37:37 +03:00
|
|
|
from .errors import Errors, Warnings
|
2017-10-27 22:07:59 +03:00
|
|
|
from . import util
|
2014-12-19 23:54:49 +03:00
|
|
|
|
|
|
|
|
|
|
|
cdef class Tokenizer:
    """Segment text, and create Doc objects with the discovered segment
    boundaries.

    DOCS: https://spacy.io/api/tokenizer
    """
    def __init__(self, Vocab vocab, rules=None, prefix_search=None,
                 suffix_search=None, infix_finditer=None, token_match=None,
                 url_match=None):
        """Create a `Tokenizer`, to create `Doc` objects given unicode text.

        vocab (Vocab): A storage container for lexical types.
        rules (dict): Exceptions and special-cases for the tokenizer.
        prefix_search (callable): A function matching the signature of
            `re.compile(string).search` to match prefixes.
        suffix_search (callable): A function matching the signature of
            `re.compile(string).search` to match suffixes.
        infix_finditer (callable): A function matching the signature of
            `re.compile(string).finditer` to find infixes.
        token_match (callable): A boolean function matching strings to be
            recognised as tokens.
        url_match (callable): A boolean function matching strings to be
            recognised as tokens after considering prefixes and suffixes.
        RETURNS (Tokenizer): The newly constructed object.

        EXAMPLE:
            >>> tokenizer = Tokenizer(nlp.vocab)
            >>> tokenizer = English().Defaults.create_tokenizer(nlp)

        DOCS: https://spacy.io/api/tokenizer#init
        """
        self.mem = Pool()
        # _cache maps chunk hashes to cached tokenizations; _specials holds
        # the special-case entries (also mirrored into _cache).
        self._cache = PreshMap()
        self._specials = PreshMap()
        # These assignments go through the property setters below, each of
        # which flushes the non-special cache entries.
        self.token_match = token_match
        self.url_match = url_match
        self.prefix_search = prefix_search
        self.suffix_search = suffix_search
        self.infix_finditer = infix_finditer
        self.vocab = vocab
        self._rules = {}
        # Populates _rules/_specials/_cache from the `rules` dict (if any).
        self._load_special_tokenization(rules)
|
2015-10-24 08:18:47 +03:00
|
|
|
|
2019-09-08 21:52:46 +03:00
|
|
|
    property token_match:
        # Callable matching whole strings that should be kept as single
        # tokens; consulted in _split_affixes and _attach_tokens.
        def __get__(self):
            return self._token_match

        def __set__(self, token_match):
            self._token_match = token_match
            # A new matcher can change how chunks tokenize, so drop the
            # cached (non-special) tokenizations.
            self._flush_cache()
|
|
|
|
|
2020-05-22 13:41:03 +03:00
|
|
|
    property url_match:
        # Callable matching strings to keep whole, applied after prefix and
        # suffix stripping (see _attach_tokens).
        def __get__(self):
            return self._url_match

        def __set__(self, url_match):
            self._url_match = url_match
            # Invalidate cached tokenizations that may depend on the old matcher.
            self._flush_cache()
|
|
|
|
|
2019-09-08 21:52:46 +03:00
|
|
|
    property prefix_search:
        # Callable with the signature of `re.compile(...).search`, used by
        # find_prefix() to detect leading affixes.
        def __get__(self):
            return self._prefix_search

        def __set__(self, prefix_search):
            self._prefix_search = prefix_search
            # Invalidate cached tokenizations that may depend on the old pattern.
            self._flush_cache()
|
|
|
|
|
|
|
|
    property suffix_search:
        # Callable with the signature of `re.compile(...).search`, used by
        # find_suffix() to detect trailing affixes.
        def __get__(self):
            return self._suffix_search

        def __set__(self, suffix_search):
            self._suffix_search = suffix_search
            # Invalidate cached tokenizations that may depend on the old pattern.
            self._flush_cache()
|
|
|
|
|
|
|
|
    property infix_finditer:
        # Callable with the signature of `re.compile(...).finditer`, used by
        # find_infix() to locate internal split points.
        def __get__(self):
            return self._infix_finditer

        def __set__(self, infix_finditer):
            self._infix_finditer = infix_finditer
            # Invalidate cached tokenizations that may depend on the old pattern.
            self._flush_cache()
|
|
|
|
|
2019-11-20 15:07:25 +03:00
|
|
|
    property rules:
        # The special-case exception table, keyed by the literal chunk string.
        def __get__(self):
            return self._rules

        def __set__(self, rules):
            self._rules = {}
            # Tear down in a fixed order: evict every cache entry first
            # (entries shared with _specials are skipped by _reset_cache's
            # ownership check), then free the specials themselves, and only
            # then replace both maps with fresh ones.
            self._reset_cache([key for key in self._cache])
            self._reset_specials()
            self._cache = PreshMap()
            self._specials = PreshMap()
            # Re-populate from the new rules dict.
            self._load_special_tokenization(rules)
|
|
|
|
|
2015-10-24 08:18:47 +03:00
|
|
|
    def __reduce__(self):
        """Support pickling: reconstruct via the constructor.

        The caches are not pickled; `__init__` rebuilds the special-case
        entries from `self.rules`.
        """
        args = (self.vocab,
                self.rules,
                self.prefix_search,
                self.suffix_search,
                self.infix_finditer,
                self.token_match,
                self.url_match)
        return (self.__class__, args, None, None)
|
2017-04-15 13:05:47 +03:00
|
|
|
|
2015-07-08 19:53:00 +03:00
|
|
|
    cpdef Doc tokens_from_list(self, list strings):
        """Deprecated (warns W002): use `Doc(vocab, words=strings)` instead."""
        warnings.warn(Warnings.W002, DeprecationWarning)
        return Doc(self.vocab, words=strings)
|
2014-12-19 23:54:49 +03:00
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
    @cython.boundscheck(False)
    def __call__(self, unicode string):
        """Tokenize a string.

        string (unicode): The string to tokenize.
        RETURNS (Doc): A container for linguistic annotations.

        DOCS: https://spacy.io/api/tokenizer#call
        """
        # Guard against pathological input; lengths are held in C ints below.
        if len(string) >= (2 ** 30):
            raise ValueError(Errors.E025.format(length=len(string)))
        cdef int length = len(string)
        cdef Doc doc = Doc(self.vocab)
        if length == 0:
            return doc
        cdef int i = 0
        cdef int start = 0
        cdef bint cache_hit
        cdef bint in_ws = string[0].isspace()
        cdef unicode span
        # The task here is much like string.split, but not quite
        # We find spans of whitespace and non-space characters, and ignore
        # spans that are exactly ' '. So, our sequences will all be separated
        # by either ' ' or nothing.
        for uc in string:
            if uc.isspace() != in_ws:
                # Transition between whitespace and non-space: close the
                # current span [start, i) and tokenize it.
                if start < i:
                    # When we want to make this fast, get the data buffer once
                    # with PyUnicode_AS_DATA, and then maintain a start_byte
                    # and end_byte, so we can call hash64 directly. That way
                    # we don't have to create the slice when we hit the cache.
                    span = string[start:i]
                    key = hash_string(span)
                    cache_hit = self._try_cache(key, doc)
                    if not cache_hit:
                        self._tokenize(doc, span, key)
                if uc == ' ':
                    # A single ASCII space becomes the trailing-space flag of
                    # the previous token rather than a token of its own.
                    doc.c[doc.length - 1].spacy = True
                    start = i + 1
                else:
                    start = i
                in_ws = not in_ws
            i += 1
        # Flush the final span, if any.
        if start < i:
            span = string[start:]
            key = hash_string(span)
            cache_hit = self._try_cache(key, doc)
            if not cache_hit:
                self._tokenize(doc, span, key)
            doc.c[doc.length - 1].spacy = string[-1] == " " and not in_ws
        return doc
|
2014-12-19 23:54:49 +03:00
|
|
|
|
2019-03-15 18:24:26 +03:00
|
|
|
def pipe(self, texts, batch_size=1000, n_threads=-1):
|
2017-05-21 14:18:14 +03:00
|
|
|
"""Tokenize a stream of texts.
|
|
|
|
|
|
|
|
texts: A sequence of unicode texts.
|
2017-10-27 22:07:59 +03:00
|
|
|
batch_size (int): Number of texts to accumulate in an internal buffer.
|
2019-03-28 14:48:02 +03:00
|
|
|
Defaults to 1000.
|
2017-05-21 14:18:14 +03:00
|
|
|
YIELDS (Doc): A sequence of Doc objects, in order.
|
2019-03-08 13:42:26 +03:00
|
|
|
|
|
|
|
DOCS: https://spacy.io/api/tokenizer#pipe
|
2016-11-03 01:15:39 +03:00
|
|
|
"""
|
2019-03-15 18:38:44 +03:00
|
|
|
if n_threads != -1:
|
2020-04-28 14:37:37 +03:00
|
|
|
warnings.warn(Warnings.W016, DeprecationWarning)
|
2016-02-03 04:32:37 +03:00
|
|
|
for text in texts:
|
|
|
|
yield self(text)
|
|
|
|
|
2019-09-08 21:52:46 +03:00
|
|
|
def _flush_cache(self):
|
|
|
|
self._reset_cache([key for key in self._cache if not key in self._specials])
|
|
|
|
|
2017-11-15 19:11:12 +03:00
|
|
|
def _reset_cache(self, keys):
|
|
|
|
for k in keys:
|
|
|
|
del self._cache[k]
|
2019-09-08 21:52:46 +03:00
|
|
|
if not k in self._specials:
|
|
|
|
cached = <_Cached*>self._cache.get(k)
|
|
|
|
if cached is not NULL:
|
|
|
|
self.mem.free(cached)
|
|
|
|
|
|
|
|
    def _reset_specials(self):
        """Delete all special-case entries and free their cached data."""
        # NOTE(review): this deletes keys from self._specials while iterating
        # over it — presumably safe for PreshMap (deletion only NULLs the
        # value), but worth confirming against the preshed implementation.
        for k in self._specials:
            # Fetch the pointer before deleting the key, so it can be freed.
            cached = <_Cached*>self._specials.get(k)
            del self._specials[k]
            if cached is not NULL:
                self.mem.free(cached)
|
2017-11-14 21:15:04 +03:00
|
|
|
|
2015-07-13 22:46:02 +03:00
|
|
|
    cdef int _try_cache(self, hash_t key, Doc tokens) except -1:
        """Append a previously cached tokenization for `key` onto `tokens`.

        RETURNS (bint): True on a cache hit, False if `key` is not cached.
        """
        cached = <_Cached*>self._cache.get(key)
        if cached == NULL:
            return False
        cdef int i
        # The cached payload is a union: either an array of lexeme pointers
        # (is_lex, written by _save_cached) or an array of fully-specified
        # tokens (special cases, written by add_special_case).
        if cached.is_lex:
            for i in range(cached.length):
                tokens.push_back(cached.data.lexemes[i], False)
        else:
            for i in range(cached.length):
                tokens.push_back(&cached.data.tokens[i], False)
        return True
|
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
    cdef int _tokenize(self, Doc tokens, unicode span, hash_t orig_key) except -1:
        """Tokenize a single whitespace-delimited chunk and append the
        resulting tokens to `tokens`, caching the result under `orig_key`.
        """
        cdef vector[LexemeC*] prefixes
        cdef vector[LexemeC*] suffixes
        cdef int orig_size
        # Whether a special-case rule fired while splitting affixes; such
        # chunks are not cached (see _save_cached).
        cdef int has_special = 0
        orig_size = tokens.length
        # Peel recognised prefixes/suffixes off the chunk...
        span = self._split_affixes(tokens.mem, span, &prefixes, &suffixes,
                                   &has_special)
        # ...then emit prefixes, the core span, and suffixes as tokens.
        self._attach_tokens(tokens, span, &prefixes, &suffixes)
        self._save_cached(&tokens.c[orig_size], orig_key, has_special,
                          tokens.length - orig_size)
|
2014-12-19 23:54:49 +03:00
|
|
|
|
2016-04-13 11:38:26 +03:00
|
|
|
    cdef unicode _split_affixes(self, Pool mem, unicode string,
                                vector[const LexemeC*] *prefixes,
                                vector[const LexemeC*] *suffixes,
                                int* has_special):
        """Repeatedly strip recognised prefixes and suffixes off `string`,
        accumulating the stripped lexemes into `prefixes`/`suffixes` in the
        order they were removed. Stops early when the remainder matches
        `token_match` or a special-case entry; sets `has_special[0]` when a
        special case was seen.

        RETURNS (unicode): The remaining core of the string.
        """
        cdef size_t i
        cdef unicode prefix
        cdef unicode suffix
        cdef unicode minus_pre
        cdef unicode minus_suf
        cdef size_t last_size = 0
        # Loop until nothing is left, or an iteration makes no progress
        # (len(string) unchanged), or one of the early-break cases fires.
        while string and len(string) != last_size:
            if self.token_match and self.token_match(string):
                break
            if self._specials.get(hash_string(string)) != NULL:
                has_special[0] = 1
                break
            last_size = len(string)
            pre_len = self.find_prefix(string)
            if pre_len != 0:
                prefix = string[:pre_len]
                minus_pre = string[pre_len:]
                # Check whether we've hit a special-case
                if minus_pre and self._specials.get(hash_string(minus_pre)) != NULL:
                    string = minus_pre
                    prefixes.push_back(self.vocab.get(mem, prefix))
                    has_special[0] = 1
                    break
            suf_len = self.find_suffix(string)
            if suf_len != 0:
                suffix = string[-suf_len:]
                minus_suf = string[:-suf_len]
                # Check whether we've hit a special-case
                if minus_suf and (self._specials.get(hash_string(minus_suf)) != NULL):
                    string = minus_suf
                    suffixes.push_back(self.vocab.get(mem, suffix))
                    has_special[0] = 1
                    break
            # The length check guards against the prefix and suffix matches
            # overlapping each other within the string.
            if pre_len and suf_len and (pre_len + suf_len) <= len(string):
                string = string[pre_len:-suf_len]
                prefixes.push_back(self.vocab.get(mem, prefix))
                suffixes.push_back(self.vocab.get(mem, suffix))
            elif pre_len:
                string = minus_pre
                prefixes.push_back(self.vocab.get(mem, prefix))
            elif suf_len:
                string = minus_suf
                suffixes.push_back(self.vocab.get(mem, suffix))
            if string and (self._specials.get(hash_string(string)) != NULL):
                has_special[0] = 1
                break
        return string
|
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
    cdef int _attach_tokens(self, Doc tokens, unicode string,
                            vector[const LexemeC*] *prefixes,
                            vector[const LexemeC*] *suffixes) except -1:
        """Append the stripped prefixes, the tokenization of the remaining
        core `string` (via cache, token/url match, or infix splitting), and
        finally the stripped suffixes in their original (reversed) order.
        """
        cdef bint cache_hit
        cdef int split, end
        cdef const LexemeC* const* lexemes
        cdef const LexemeC* lexeme
        cdef unicode span
        cdef int i
        if prefixes.size():
            for i in range(prefixes.size()):
                tokens.push_back(prefixes[0][i], False)
        if string:
            cache_hit = self._try_cache(hash_string(string), tokens)
            if cache_hit:
                pass
            elif (self.token_match and self.token_match(string)) or \
                    (self.url_match and \
                    self.url_match(string)):
                # We're always saying 'no' to spaces here -- the caller will
                # fix up the outermost one, with reference to the original.
                # See Issue #859
                tokens.push_back(self.vocab.get(tokens.mem, string), False)
            else:
                matches = self.find_infix(string)
                if not matches:
                    # No internal split points: the whole core is one token.
                    tokens.push_back(self.vocab.get(tokens.mem, string), False)
                else:
                    # Let's say we have dyn-o-mite-dave - the regex finds the
                    # start and end positions of the hyphens
                    start = 0
                    start_before_infixes = start
                    for match in matches:
                        infix_start = match.start()
                        infix_end = match.end()

                        # Skip an infix match right at the start of the core.
                        if infix_start == start_before_infixes:
                            continue

                        if infix_start != start:
                            span = string[start:infix_start]
                            tokens.push_back(self.vocab.get(tokens.mem, span), False)

                        if infix_start != infix_end:
                            # If infix_start != infix_end, it means the infix
                            # token is non-empty. Empty infix tokens are useful
                            # for tokenization in some languages (see
                            # https://github.com/explosion/spaCy/issues/768)
                            infix_span = string[infix_start:infix_end]
                            tokens.push_back(self.vocab.get(tokens.mem, infix_span), False)
                        start = infix_end
                    # Trailing text after the last infix match.
                    span = string[start:]
                    if span:
                        tokens.push_back(self.vocab.get(tokens.mem, span), False)
        # Suffixes were collected innermost-first, so emit them in reverse
        # to restore surface order.
        cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
        while it != suffixes.rend():
            lexeme = deref(it)
            preinc(it)
            tokens.push_back(lexeme, False)
|
2014-12-19 23:54:49 +03:00
|
|
|
|
2017-10-24 17:07:44 +03:00
|
|
|
    cdef int _save_cached(self, const TokenC* tokens, hash_t key,
                          int has_special, int n) except -1:
        """Cache the tokenization of a chunk as an array of lexeme pointers.

        Skips caching when the chunk is empty, when any lexeme is not held
        in the vocab's orth index (presumably so cached pointers remain
        valid — confirm), or when a special-case rule fired (see #1250).
        """
        cdef int i
        if n <= 0:
            # avoid mem alloc of zero length
            return 0
        for i in range(n):
            if self.vocab._by_orth.get(tokens[i].lex.orth) == NULL:
                return 0
        # See #1250
        if has_special:
            return 0
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = n
        cached.is_lex = True
        lexemes = <const LexemeC**>self.mem.alloc(n, sizeof(LexemeC**))
        for i in range(n):
            lexemes[i] = tokens[i].lex
        cached.data.lexemes = <const LexemeC* const*>lexemes
        self._cache.set(key, cached)
|
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
    def find_infix(self, unicode string):
        """Find internal split points of the string, such as hyphens.

        string (unicode): The string to segment.
        RETURNS (list): A list of `re.MatchObject` objects that have `.start()`
            and `.end()` methods, denoting the placement of internal segment
            separators, e.g. hyphens. Returns 0 (falsy) when no
            `infix_finditer` is set.

        DOCS: https://spacy.io/api/tokenizer#find_infix
        """
        if self.infix_finditer is None:
            # Falsy sentinel kept for backward compatibility; callers in this
            # file only truth-test the result (see _attach_tokens).
            return 0
        return list(self.infix_finditer(string))
|
2015-04-19 11:31:31 +03:00
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
def find_prefix(self, unicode string):
|
2017-10-27 22:07:59 +03:00
|
|
|
"""Find the length of a prefix that should be segmented from the
|
|
|
|
string, or None if no prefix rules match.
|
2016-11-03 01:15:39 +03:00
|
|
|
|
2017-05-21 14:18:14 +03:00
|
|
|
string (unicode): The string to segment.
|
|
|
|
RETURNS (int): The length of the prefix if present, otherwise `None`.
|
2019-03-08 13:42:26 +03:00
|
|
|
|
|
|
|
DOCS: https://spacy.io/api/tokenizer#find_prefix
|
2016-11-03 01:15:39 +03:00
|
|
|
"""
|
2016-11-02 22:35:48 +03:00
|
|
|
if self.prefix_search is None:
|
|
|
|
return 0
|
2016-09-24 16:42:01 +03:00
|
|
|
match = self.prefix_search(string)
|
2014-12-19 23:54:49 +03:00
|
|
|
return (match.end() - match.start()) if match is not None else 0
|
|
|
|
|
2015-07-22 05:49:39 +03:00
|
|
|
def find_suffix(self, unicode string):
|
2017-10-27 22:07:59 +03:00
|
|
|
"""Find the length of a suffix that should be segmented from the
|
|
|
|
string, or None if no suffix rules match.
|
2016-11-03 01:15:39 +03:00
|
|
|
|
2017-05-21 14:18:14 +03:00
|
|
|
string (unicode): The string to segment.
|
|
|
|
Returns (int): The length of the suffix if present, otherwise `None`.
|
2019-03-08 13:42:26 +03:00
|
|
|
|
|
|
|
DOCS: https://spacy.io/api/tokenizer#find_suffix
|
2016-11-03 01:15:39 +03:00
|
|
|
"""
|
2016-11-02 22:35:48 +03:00
|
|
|
if self.suffix_search is None:
|
|
|
|
return 0
|
2016-09-24 16:42:01 +03:00
|
|
|
match = self.suffix_search(string)
|
2014-12-19 23:54:49 +03:00
|
|
|
return (match.end() - match.start()) if match is not None else 0
|
|
|
|
|
2015-08-26 20:20:11 +03:00
|
|
|
def _load_special_tokenization(self, special_cases):
|
2017-05-21 14:18:14 +03:00
|
|
|
"""Add special-case tokenization rules."""
|
2019-11-20 15:07:25 +03:00
|
|
|
if special_cases is not None:
|
|
|
|
for chunk, substrings in sorted(special_cases.items()):
|
|
|
|
self.add_special_case(chunk, substrings)
|
2017-04-15 13:05:47 +03:00
|
|
|
|
2016-11-03 01:15:39 +03:00
|
|
|
    def add_special_case(self, unicode string, substrings):
        """Add a special-case tokenization rule.

        string (unicode): The string to specially tokenize.
        substrings (iterable): A sequence of dicts, where each dict describes
            a token and its attributes. The `ORTH` fields of the attributes
            must exactly match the string when they are concatenated.

        DOCS: https://spacy.io/api/tokenizer#add_special_case
        """
        substrings = list(substrings)
        cached = <_Cached*>self.mem.alloc(1, sizeof(_Cached))
        cached.length = len(substrings)
        cached.is_lex = False
        cached.data.tokens = self.vocab.make_fused_token(substrings)
        key = hash_string(string)
        # Remember any existing entries for this key so they can be freed
        # after the new entry is installed. The same pointer may live in
        # both maps, hence the inequality check below to avoid a double free.
        stale_special = <_Cached*>self._specials.get(key)
        stale_cached = <_Cached*>self._cache.get(key)
        # Drop non-special cache entries: the new rule may change how chunks
        # containing this string tokenize.
        self._flush_cache()
        self._specials.set(key, cached)
        self._cache.set(key, cached)
        if stale_special is not NULL:
            self.mem.free(stale_special)
        if stale_special != stale_cached and stale_cached is not NULL:
            self.mem.free(stale_cached)
        self._rules[string] = substrings
|
2017-05-21 14:18:14 +03:00
|
|
|
|
2019-11-20 15:07:25 +03:00
|
|
|
def explain(self, text):
|
|
|
|
"""A debugging tokenizer that provides information about which
|
|
|
|
tokenizer rule or pattern was matched for each token. The tokens
|
|
|
|
produced are identical to `nlp.tokenizer()` except for whitespace
|
|
|
|
tokens.
|
|
|
|
|
|
|
|
string (unicode): The string to tokenize.
|
|
|
|
RETURNS (list): A list of (pattern_string, token_string) tuples
|
|
|
|
|
|
|
|
DOCS: https://spacy.io/api/tokenizer#explain
|
|
|
|
"""
|
|
|
|
prefix_search = self.prefix_search
|
|
|
|
suffix_search = self.suffix_search
|
|
|
|
infix_finditer = self.infix_finditer
|
|
|
|
token_match = self.token_match
|
2020-05-05 11:35:33 +03:00
|
|
|
if token_match is None:
|
|
|
|
token_match = re.compile("a^").match
|
2020-05-22 13:41:03 +03:00
|
|
|
url_match = self.url_match
|
|
|
|
if url_match is None:
|
|
|
|
url_match = re.compile("a^").match
|
2019-11-20 15:07:25 +03:00
|
|
|
special_cases = {}
|
|
|
|
for orth, special_tokens in self.rules.items():
|
|
|
|
special_cases[orth] = [intify_attrs(special_token, strings_map=self.vocab.strings, _do_deprecated=True) for special_token in special_tokens]
|
|
|
|
tokens = []
|
|
|
|
for substring in text.split():
|
|
|
|
suffixes = []
|
|
|
|
while substring:
|
|
|
|
while prefix_search(substring) or suffix_search(substring):
|
2020-03-09 14:09:41 +03:00
|
|
|
if token_match(substring):
|
|
|
|
tokens.append(("TOKEN_MATCH", substring))
|
|
|
|
substring = ''
|
|
|
|
break
|
2019-11-20 15:07:25 +03:00
|
|
|
if substring in special_cases:
|
|
|
|
tokens.extend(("SPECIAL-" + str(i + 1), self.vocab.strings[e[ORTH]]) for i, e in enumerate(special_cases[substring]))
|
|
|
|
substring = ''
|
|
|
|
break
|
|
|
|
if prefix_search(substring):
|
|
|
|
split = prefix_search(substring).end()
|
2019-11-20 18:31:29 +03:00
|
|
|
# break if pattern matches the empty string
|
|
|
|
if split == 0:
|
|
|
|
break
|
2019-11-20 15:07:25 +03:00
|
|
|
tokens.append(("PREFIX", substring[:split]))
|
|
|
|
substring = substring[split:]
|
|
|
|
if substring in special_cases:
|
|
|
|
continue
|
|
|
|
if suffix_search(substring):
|
|
|
|
split = suffix_search(substring).start()
|
|
|
|
# break if pattern matches the empty string
|
|
|
|
if split == len(substring):
|
|
|
|
break
|
2019-11-20 18:31:29 +03:00
|
|
|
suffixes.append(("SUFFIX", substring[split:]))
|
|
|
|
substring = substring[:split]
|
2020-03-09 14:09:41 +03:00
|
|
|
if token_match(substring):
|
2019-11-20 15:07:25 +03:00
|
|
|
tokens.append(("TOKEN_MATCH", substring))
|
|
|
|
substring = ''
|
2020-05-22 13:41:03 +03:00
|
|
|
elif url_match(substring):
|
|
|
|
tokens.append(("URL_MATCH", substring))
|
2020-05-05 11:35:33 +03:00
|
|
|
substring = ''
|
2020-03-09 14:09:41 +03:00
|
|
|
elif substring in special_cases:
|
|
|
|
tokens.extend(("SPECIAL-" + str(i + 1), self.vocab.strings[e[ORTH]]) for i, e in enumerate(special_cases[substring]))
|
|
|
|
substring = ''
|
2019-11-20 15:07:25 +03:00
|
|
|
elif list(infix_finditer(substring)):
|
|
|
|
infixes = infix_finditer(substring)
|
|
|
|
offset = 0
|
|
|
|
for match in infixes:
|
2019-11-20 18:31:29 +03:00
|
|
|
if substring[offset : match.start()]:
|
|
|
|
tokens.append(("TOKEN", substring[offset : match.start()]))
|
|
|
|
if substring[match.start() : match.end()]:
|
|
|
|
tokens.append(("INFIX", substring[match.start() : match.end()]))
|
2019-11-20 15:07:25 +03:00
|
|
|
offset = match.end()
|
|
|
|
if substring[offset:]:
|
|
|
|
tokens.append(("TOKEN", substring[offset:]))
|
|
|
|
substring = ''
|
|
|
|
elif substring:
|
|
|
|
tokens.append(("TOKEN", substring))
|
|
|
|
substring = ''
|
|
|
|
tokens.extend(reversed(suffixes))
|
|
|
|
return tokens
|
|
|
|
|
2019-03-10 21:16:45 +03:00
|
|
|
    def to_disk(self, path, **kwargs):
        """Save the current state to a directory.

        path (unicode or Path): A path to a directory, which will be created if
            it doesn't exist.
        exclude (list): String names of serialization fields to exclude.

        DOCS: https://spacy.io/api/tokenizer#to_disk
        """
        path = util.ensure_path(path)
        # Serialization is a single binary blob; just delegate to to_bytes().
        with path.open("wb") as file_:
            file_.write(self.to_bytes(**kwargs))
|
2017-05-21 14:18:14 +03:00
|
|
|
|
2019-03-10 21:16:45 +03:00
|
|
|
    def from_disk(self, path, **kwargs):
        """Loads state from a directory. Modifies the object in place and
        returns it.

        path (unicode or Path): A path to a directory.
        exclude (list): String names of serialization fields to exclude.
        RETURNS (Tokenizer): The modified `Tokenizer` object.

        DOCS: https://spacy.io/api/tokenizer#from_disk
        """
        path = util.ensure_path(path)
        # Read the whole blob and delegate to from_bytes().
        with path.open("rb") as file_:
            bytes_data = file_.read()
        self.from_bytes(bytes_data, **kwargs)
        return self
|
2017-05-21 14:18:14 +03:00
|
|
|
|
2019-03-10 21:16:45 +03:00
|
|
|
def to_bytes(self, exclude=tuple(), **kwargs):
    """Serialize the current state to a binary string.

    exclude (list): String names of serialization fields to exclude.
    RETURNS (bytes): The serialized form of the `Tokenizer` object.

    DOCS: https://spacy.io/api/tokenizer#to_bytes
    """
    # Insertion order matters: util.to_bytes writes fields in this order,
    # and from_bytes expects to read them back the same way.
    serializers = OrderedDict()
    serializers["vocab"] = lambda: self.vocab.to_bytes()
    serializers["prefix_search"] = lambda: _get_regex_pattern(self.prefix_search)
    serializers["suffix_search"] = lambda: _get_regex_pattern(self.suffix_search)
    serializers["infix_finditer"] = lambda: _get_regex_pattern(self.infix_finditer)
    serializers["token_match"] = lambda: _get_regex_pattern(self.token_match)
    serializers["url_match"] = lambda: _get_regex_pattern(self.url_match)
    # Sort exceptions so serialization is deterministic across runs.
    serializers["exceptions"] = lambda: OrderedDict(sorted(self._rules.items()))
    exclude = util.get_serialization_exclude(serializers, exclude, kwargs)
    return util.to_bytes(serializers, exclude)
|
2017-05-21 14:18:14 +03:00
|
|
|
|
2019-03-10 21:16:45 +03:00
|
|
|
def from_bytes(self, bytes_data, exclude=tuple(), **kwargs):
    """Load state from a binary string.

    bytes_data (bytes): The data to load from.
    exclude (list): String names of serialization fields to exclude.
    RETURNS (Tokenizer): The `Tokenizer` object.

    DOCS: https://spacy.io/api/tokenizer#from_bytes
    """
    # Deserializers stash the raw values into `data`; the actual regexes
    # and rules are only applied after everything has been read.
    data = OrderedDict()
    deserializers = OrderedDict((
        ("vocab", lambda b: self.vocab.from_bytes(b)),
        ("prefix_search", lambda b: data.setdefault("prefix_search", b)),
        ("suffix_search", lambda b: data.setdefault("suffix_search", b)),
        ("infix_finditer", lambda b: data.setdefault("infix_finditer", b)),
        ("token_match", lambda b: data.setdefault("token_match", b)),
        ("url_match", lambda b: data.setdefault("url_match", b)),
        ("exceptions", lambda b: data.setdefault("rules", b))
    ))
    exclude = util.get_serialization_exclude(deserializers, exclude, kwargs)
    msg = util.from_bytes(bytes_data, deserializers, exclude)
    # Unescape \\uXXXX sequences in the stored regex patterns so that
    # re.compile sees unicode literals rather than escape sequences
    # (works around a Python 2.7 unicode-database issue; see unescape_unicode).
    for key in ["prefix_search", "suffix_search", "infix_finditer", "token_match", "url_match"]:
        if key in data:
            data[key] = unescape_unicode(data[key])
    # Recompile each stored pattern string and rebind the corresponding
    # bound-method attribute (.search / .finditer / .match).
    if "prefix_search" in data and isinstance(data["prefix_search"], basestring_):
        self.prefix_search = re.compile(data["prefix_search"]).search
    if "suffix_search" in data and isinstance(data["suffix_search"], basestring_):
        self.suffix_search = re.compile(data["suffix_search"]).search
    if "infix_finditer" in data and isinstance(data["infix_finditer"], basestring_):
        self.infix_finditer = re.compile(data["infix_finditer"]).finditer
    # for token_match and url_match, set to None to override the language
    # defaults if no regex is provided
    if "token_match" in data and isinstance(data["token_match"], basestring_):
        self.token_match = re.compile(data["token_match"]).match
    else:
        self.token_match = None
    if "url_match" in data and isinstance(data["url_match"], basestring_):
        self.url_match = re.compile(data["url_match"]).match
    else:
        self.url_match = None
    if "rules" in data and isinstance(data["rules"], dict):
        # make sure to hard reset the cache to remove data from the default exceptions
        self._rules = {}
        self._reset_cache([key for key in self._cache])
        self._reset_specials()
        self._cache = PreshMap()
        self._specials = PreshMap()
        self._load_special_tokenization(data["rules"])
    return self
|
2018-07-06 13:23:04 +03:00
|
|
|
|
2018-07-06 13:33:42 +03:00
|
|
|
|
2018-07-06 13:23:04 +03:00
|
|
|
def _get_regex_pattern(regex):
|
2018-07-06 13:33:42 +03:00
|
|
|
"""Get a pattern string for a regex, or None if the pattern is None."""
|
2018-07-06 13:23:04 +03:00
|
|
|
return None if regex is None else regex.__self__.pattern
|