Mirror of https://github.com/explosion/spaCy.git (synced 2025-02-03 21:24:11 +03:00)
Merge branch 'french-tokenizer-exceptions'
This commit is contained in:
commit 34bcc8706d
@@ -19,7 +19,7 @@ class FrenchDefaults(BaseDefaults):
     @classmethod
     def create_tokenizer(cls, nlp=None):
-        cls.tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+        cls.tokenizer_exceptions = get_tokenizer_exceptions()
         return super(FrenchDefaults, cls).create_tokenizer(nlp)

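Note on the value being assigned above: in spaCy 1.x, tokenizer exceptions map a surface form to a list of per-token attribute dicts, so get_tokenizer_exceptions() returns a dict of that shape. A minimal sketch with invented entries, not the real French data:

# Illustrative shape only -- the entries below are invented, not the real French data.
from spacy.symbols import ORTH, LEMMA

EXAMPLE_EXCEPTIONS = {
    # a form kept as a single token, with an explicit lemma
    "aujourd'hui": [{ORTH: "aujourd'hui", LEMMA: "aujourd'hui"}],
    # an elided form split into two tokens, each with its own attributes
    "j'ai": [{ORTH: "j'", LEMMA: "je"}, {ORTH: "ai", LEMMA: "avoir"}],
}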
spacy/fr/_tokenizer_exceptions_list.py (new file, 26303 lines)
File diff suppressed because it is too large
spacy/fr/language_data.py
@@ -2,10 +2,10 @@
 from __future__ import unicode_literals

 from .stop_words import STOP_WORDS
-from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS, TOKEN_MATCH
+from .tokenizer_exceptions import get_tokenizer_exceptions, TOKEN_MATCH


 STOP_WORDS = set(STOP_WORDS)


-__all__ = ["STOP_WORDS", "TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
+__all__ = ["STOP_WORDS", "get_tokenizer_exceptions", "TOKEN_MATCH"]
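A short usage sketch of the reworked module, assuming spaCy 1.x with this commit applied; the point of the change is that the exceptions dict is built when the function is called rather than as a side effect of importing spacy.fr.language_data:

# Hedged usage sketch -- assumes spaCy 1.x with this commit applied.
from spacy.fr.language_data import get_tokenizer_exceptions, STOP_WORDS, TOKEN_MATCH

exceptions = get_tokenizer_exceptions()   # built on call, not at import time
print(len(exceptions), "tokenizer exceptions,", len(STOP_WORDS), "stop words")
print(TOKEN_MATCH)                        # the compiled regex's bound .match method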
File diff suppressed because it is too large
spacy/fr/tokenizer_exceptions.py
@@ -8,6 +8,7 @@ from ..language_data.tokenizer_exceptions import _URL_PATTERN
 from ..language_data.punctuation import ALPHA_LOWER

 from .punctuation import ELISION, HYPHENS
+from ._tokenizer_exceptions_list import BASE_EXCEPTIONS

 from ..symbols import *
@@ -16,11 +17,13 @@ import io
 import re


-def iter_exceptions():
-    with io.open(os.path.join(os.path.dirname(__file__), 'resources/tokenizer_exceptions'),
-                 'rt', encoding='utf8') as f:
-        for line in f:
-            yield line.strip('\n')
+def get_exceptions():
+    return BASE_EXCEPTIONS
+
+# with io.open(os.path.join(os.path.dirname(__file__), 'resources/tokenizer_exceptions'),
+#              'rt', encoding='utf8') as f:
+#     for line in f:
+#         yield line.strip('\n')


 def upper_first_letter(text):
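The new spacy/fr/_tokenizer_exceptions_list.py is too large to display above, but from this hunk it plainly holds the contents of the old resources/tokenizer_exceptions word list as an importable BASE_EXCEPTIONS Python list. A hypothetical one-off conversion script, not part of this commit (file names are taken from the diff), could look like this:

# Hypothetical conversion helper, not part of this commit: read the old plain-text
# exceptions file (one entry per line) and emit an importable module defining
# BASE_EXCEPTIONS, i.e. roughly what _tokenizer_exceptions_list.py contains.
from __future__ import unicode_literals
import io

SRC = 'resources/tokenizer_exceptions'    # old resource file, path as used in the diff
DST = '_tokenizer_exceptions_list.py'     # new module, name as listed in the commit

with io.open(SRC, 'rt', encoding='utf8') as f:
    entries = [line.strip('\n') for line in f if line.strip()]

with io.open(DST, 'wt', encoding='utf8') as out:
    out.write('# coding: utf8\n')
    out.write('from __future__ import unicode_literals\n\n')
    out.write('BASE_EXCEPTIONS = [\n')
    for entry in entries:
        # escape backslashes and quotes so the generated string literals stay valid
        safe = entry.replace('\\', '\\\\').replace('"', '\\"')
        out.write('    "%s",\n' % safe)
    out.write(']\n')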
@@ -142,7 +145,7 @@ def get_tokenizer_exceptions():

     HYPHEN = ['-', '‐']

-    base_exceptions = list(iter_exceptions())
+    base_exceptions = get_exceptions()
     infixes_exceptions = []

     for elision_char in ELISION:
@@ -214,6 +217,6 @@ REGULAR_EXP.append(_URL_PATTERN)

 TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in REGULAR_EXP), re.IGNORECASE).match

-TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()
+#TOKENIZER_EXCEPTIONS = get_tokenizer_exceptions()

-__all__ = ["TOKENIZER_EXCEPTIONS", "TOKEN_MATCH"]
+__all__ = ["get_tokenizer_exceptions", "TOKEN_MATCH"]
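To make the TOKEN_MATCH construction above concrete: it ORs a list of whole-token patterns into one case-insensitive regex and keeps only the compiled pattern's .match method, giving the tokenizer a cheap predicate. A self-contained illustration with invented patterns, not the real REGULAR_EXP contents:

# Self-contained illustration of the TOKEN_MATCH construction; both patterns are
# invented examples, not the actual contents of REGULAR_EXP.
from __future__ import unicode_literals
import re

REGULAR_EXP = [
    r"\d{1,2}h\d{2}",                           # times like "12h30"
    r"[a-zàâçéèêëîïôûù]+-[a-zàâçéèêëîïôûù]+",   # a simple hyphenated pair
]
TOKEN_MATCH = re.compile('|'.join('(?:{})'.format(m) for m in REGULAR_EXP),
                         re.IGNORECASE).match

print(bool(TOKEN_MATCH("12h30")))       # True
print(bool(TOKEN_MATCH("peut-être")))   # True
print(bool(TOKEN_MATCH("bonjour")))     # False: no pattern matches at the start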
@@ -3,7 +3,7 @@ from __future__ import unicode_literals

 from ...language import Language
 from ...attrs import LANG
-from ...fr.language_data import TOKENIZER_EXCEPTIONS, STOP_WORDS
+from ...fr.language_data import get_tokenizer_exceptions, STOP_WORDS
 from ...language_data.punctuation import TOKENIZER_INFIXES, ALPHA

 import pytest
@@ -20,7 +20,7 @@ def fr_tokenizer_w_infix():
     class Defaults(Language.Defaults):
         lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
         lex_attr_getters[LANG] = lambda text: 'fr'
-        tokenizer_exceptions = TOKENIZER_EXCEPTIONS
+        tokenizer_exceptions = get_tokenizer_exceptions()
         stop_words = STOP_WORDS
         infixes = TOKENIZER_INFIXES + [SPLIT_INFIX]

spacy/tests/regression/test_issue852.py (new file, 12 lines)
@@ -0,0 +1,12 @@
+# encoding: utf8
+from __future__ import unicode_literals
+
+import pytest
+
+
+@pytest.mark.parametrize('text', ["au-delàs", "pair-programmâmes",
+                                  "terra-formées", "σ-compacts"])
+def test_issue852(fr_tokenizer, text):
+    """Test that French tokenizer exceptions are imported correctly."""
+    tokens = fr_tokenizer(text)
+    assert len(tokens) == 1
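The test relies on a fr_tokenizer fixture defined elsewhere in the test suite; its conftest is not part of this diff. A minimal sketch of what such a fixture could look like against the spaCy 1.x code base, assuming spacy.util.get_lang_class is available there:

# Hedged sketch of a fr_tokenizer fixture -- the real one lives in the suite's
# conftest and is not shown in this commit.
import pytest
from spacy.util import get_lang_class


@pytest.fixture
def fr_tokenizer():
    # build the French defaults tokenizer without loading a statistical model
    return get_lang_class('fr').Defaults.create_tokenizer()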