Mirror of https://github.com/explosion/spaCy.git
Use consistent imports and exports
Bundle everything in language_data to keep it consistent with other languages and make TOKENIZER_EXCEPTIONS importable from there.
Parent: 21f09d10d7
Commit: fa3b8512da
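Before the diff itself, here is the consumer-side effect the commit message describes, as a hedged sketch: the import path spacy/fr/ and the shape of TOKENIZER_EXCEPTIONS are assumptions based on the surrounding code, not shown on this page.

# Hypothetical usage sketch: with the exceptions bundled into language_data,
# they can be imported from the same place as the stop words.
from spacy.fr.language_data import STOP_WORDS, TOKENIZER_EXCEPTIONS, TOKEN_MATCH

print(len(STOP_WORDS))                    # French stop word set
print(len(TOKENIZER_EXCEPTIONS))          # dict of per-string tokenizer exceptions (assumed shape)
print(TOKEN_MATCH("http://example.com") is not None)   # URL-like token matcher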
@@ -6,7 +6,6 @@ from ..attrs import LANG
 from .language_data import *
 from .punctuation import TOKENIZER_INFIXES, TOKENIZER_SUFFIXES
-from .tokenizer_exceptions import get_tokenizer_exceptions, TOKEN_MATCH


 class FrenchDefaults(BaseDefaults):
@@ -20,7 +19,7 @@ class FrenchDefaults(BaseDefaults):
     @classmethod
     def create_tokenizer(cls, nlp=None):
-        cls.tokenizer_exceptions = get_tokenizer_exceptions()
+        cls.tokenizer_exceptions = TOKENIZER_EXCEPTIONS
         return super(FrenchDefaults, cls).create_tokenizer(nlp)
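Taken together, the two hunks above (presumably spacy/fr/__init__.py) mean the exceptions dict is a module-level constant built once at import time instead of being rebuilt by get_tokenizer_exceptions() inside create_tokenizer(). A self-contained toy of that pattern change follows; BaseDefaults and the builder here are stand-ins, not the real spaCy objects.

def get_tokenizer_exceptions():
    # Stand-in for the real builder in the French tokenizer_exceptions module.
    print("building exceptions dict")
    return {"aujourd'hui": [{"ORTH": "aujourd'hui"}]}

TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()   # runs exactly once, at import time


class BaseDefaults(object):
    tokenizer_exceptions = {}

    @classmethod
    def create_tokenizer(cls, nlp=None):
        return cls.tokenizer_exceptions


class FrenchDefaults(BaseDefaults):
    @classmethod
    def create_tokenizer(cls, nlp=None):
        # was: cls.tokenizer_exceptions = get_tokenizer_exceptions()
        cls.tokenizer_exceptions = TOKENIZER_EXCEPTIONS
        return super(FrenchDefaults, cls).create_tokenizer(nlp)


print(FrenchDefaults.create_tokenizer())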
@@ -2,9 +2,10 @@
 from __future__ import unicode_literals

 from .stop_words import STOP_WORDS
+from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH


 STOP_WORDS = set(STOP_WORDS)


-__all__ = ["STOP_WORDS"]
+__all__ = ["STOP_WORDS", "TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
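Extending __all__ is what makes the new names visible through the star import in the defaults class: `from .language_data import *` only copies the names listed in __all__. A minimal, runnable illustration of that mechanism, using a hypothetical in-memory module with toy data rather than the real French language data:

import types

language_data = types.ModuleType("language_data")
language_data.STOP_WORDS = {"le", "la", "les"}
language_data.TOKENIZER_EXCEPTIONS = {"aujourd'hui": [{"ORTH": "aujourd'hui"}]}
language_data.TOKEN_MATCH = None
language_data.__all__ = ["STOP_WORDS", "TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]

# `from language_data import *` binds exactly the names listed in __all__:
namespace = {name: getattr(language_data, name) for name in language_data.__all__}
print(sorted(namespace))   # ['STOP_WORDS', 'TOKENIZER_EXCEPTIONS', 'TOKEN_MATCH']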
@@ -214,4 +214,6 @@ REGULAR_EXP.append(_URL_PATTERN)

 TOKEN_MATCH = re.compile('|'.join('({})'.format(m) for m in REGULAR_EXP), re.IGNORECASE).match

-__all__ = ("get_tokenizer_exceptions", "TOKEN_MATCH")
+TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()
+
+__all__ = ["TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
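The last hunk precomputes the exceptions dict and exports it next to TOKEN_MATCH. The sketch below mirrors that tail of the file with stand-in data: the real get_tokenizer_exceptions() builds a much larger dict of French exceptions, and REGULAR_EXP holds more patterns than the single URL regex assumed here.

import re

_URL_PATTERN = r"https?://\S+"
REGULAR_EXP = [_URL_PATTERN]


def get_tokenizer_exceptions():
    # Stand-in builder: maps an exception string to its token attribute dicts.
    return {"aujourd'hui": [{"ORTH": "aujourd'hui"}]}


# Same construction as the TOKEN_MATCH line in the diff, over the toy pattern list.
TOKEN_MATCH = re.compile('|'.join('({})'.format(m) for m in REGULAR_EXP), re.IGNORECASE).match

TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()

__all__ = ["TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]

print(bool(TOKEN_MATCH("https://spacy.io")))   # True
print("aujourd'hui" in TOKENIZER_EXCEPTIONS)   # True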