From 26446aa7285981ec4a09f9217ca275bcade987a6 Mon Sep 17 00:00:00 2001
From: Matthew Honnibal
Date: Sat, 25 Feb 2017 11:55:00 +0100
Subject: [PATCH] Avoid loading all French exceptions on import

Move exceptions loading behind a get_tokenizer_exceptions() function for
French, instead of loading into the top-level namespace. This cuts import
times from 0.6s to 0.2s, at the expense of making the French data a little
different from the others (there's no top-level TOKENIZER_EXCEPTIONS
variable). The current solution feels somewhat unsatisfying.
---
 spacy/fr/__init__.py             | 2 +-
 spacy/fr/language_data.py        | 4 ++--
 spacy/fr/tokenizer_exceptions.py | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/spacy/fr/__init__.py b/spacy/fr/__init__.py
index 9e4735a1f..cf371d3c2 100644
--- a/spacy/fr/__init__.py
+++ b/spacy/fr/__init__.py
@@ -19,7 +19,7 @@ class FrenchDefaults(BaseDefaults):
 
     @classmethod
     def create_tokenizer(cls, nlp=None):
-        cls.tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+        cls.tokenizer_exceptions = get_tokenizer_exceptions()
         return super(FrenchDefaults, cls).create_tokenizer(nlp)
 
 
diff --git a/spacy/fr/language_data.py b/spacy/fr/language_data.py
index 9d25644b7..7b7674168 100644
--- a/spacy/fr/language_data.py
+++ b/spacy/fr/language_data.py
@@ -2,10 +2,10 @@
 from __future__ import unicode_literals
 
 from .stop_words import STOP_WORDS
-from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
+from .tokenizer_exceptions import get_tokenizer_exceptions, TOKEN_MATCH
 
 
 STOP_WORDS = set(STOP_WORDS)
 
 
-__all__ = ["STOP_WORDS", "TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
+__all__ = ["STOP_WORDS", "get_tokenizer_exceptions", "TOKEN_MATCH"]
diff --git a/spacy/fr/tokenizer_exceptions.py b/spacy/fr/tokenizer_exceptions.py
index 1aeb73232..8f8dcf0b0 100644
--- a/spacy/fr/tokenizer_exceptions.py
+++ b/spacy/fr/tokenizer_exceptions.py
@@ -217,6 +217,6 @@ REGULAR_EXP.append(_URL_PATTERN)
 
 TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in REGULAR_EXP), re.IGNORECASE).match
 
-TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()
+#TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()
 
-__all__ = ["TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
+__all__ = ["get_tokenizer_exceptions", "TOKEN_MATCH"]
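
For context, below is a minimal, self-contained sketch of the load-on-demand pattern the patch switches to. The names FrenchDefaults and get_tokenizer_exceptions() come from the diff; the function body and placeholder exception data here are purely illustrative, not the real spaCy French tables or base classes.

def get_tokenizer_exceptions():
    """Build the exception dict only when asked for, not at import time."""
    exc = {}
    # Stand-in for the real work: expanding elisions ("l'", "d'", ...),
    # hyphenated forms and verb inversions into per-token dicts.
    for word in ("aujourd'hui", "c.-a-d."):
        exc[word] = [{"ORTH": word}]
    return exc


class FrenchDefaults(object):
    """Mirrors the shape of the diff: exceptions are fetched lazily."""

    @classmethod
    def create_tokenizer(cls, nlp=None):
        # Importing the module no longer pays for building the table;
        # only creating a tokenizer does.
        cls.tokenizer_exceptions = get_tokenizer_exceptions()
        # A real Defaults class would now delegate to the base implementation.
        return cls.tokenizer_exceptions

One consequence of this shape, which may be part of why the commit message calls it unsatisfying: the table is rebuilt on every create_tokenizer() call rather than cached, and downstream code can no longer import a TOKENIZER_EXCEPTIONS constant as it can for the other languages.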