mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-29 11:26:28 +03:00
Update test to make it slightly more direct
The `nlp` container should be unnecessary here. If so, we can test the tokenizer class just a little more directly.
This commit is contained in:
parent
a6d9fb5bb6
commit
9bffcaa73d
|
@ -1,46 +1,40 @@
|
||||||
# coding: utf-8
|
# coding: utf-8
|
||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
from ... import load
|
from ...lang.en import English
|
||||||
from ...tokenizer import Tokenizer
|
from ...tokenizer import Tokenizer
|
||||||
from ... import util
|
from ... import util
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
|
@pytest.fixture
def tokenizer(en_vocab):
    """Build a Tokenizer with custom infix rules on top of the English defaults.

    Uses the shared ``en_vocab`` fixture directly, so no full ``nlp``
    pipeline (and no model loading) is needed to exercise the tokenizer.
    """
    # NOTE(review): the diff's fixture body still referenced ``nlp_model``,
    # which is out of scope once the signature is ``tokenizer(en_vocab)``;
    # ``English.Defaults`` (imported above, and already used for
    # tokenizer_exceptions below) is the intended source of the defaults.
    prefix_re = util.compile_prefix_regex(English.Defaults.prefixes)
    suffix_re = util.compile_suffix_regex(English.Defaults.suffixes)
    # Custom infixes: ellipses, digit-hyphen-digit, comma-grouped numbers,
    # and assorted punctuation. Raw strings avoid invalid escape sequences
    # (the original '\.' etc. only worked by accident).
    custom_infixes = [r'\.\.\.+',
                      r'(?<=[0-9])-(?=[0-9])',
                      # r'(?<=[0-9]+),(?=[0-9]+)',
                      r'[0-9]+(,[0-9]+)+',
                      r'[\[\]!&:,()\*—–\/-]']
    infix_re = util.compile_infix_regex(custom_infixes)
    return Tokenizer(en_vocab,
                     English.Defaults.tokenizer_exceptions,
                     prefix_re.search,
                     suffix_re.search,
                     infix_re.finditer,
                     token_match=None)
|
def test_customized_tokenizer_handles_infixes(tokenizer):
    """The digit-hyphen-digit infix rule should split '10-county' into
    three tokens; a free-standing '8-' must also tokenize cleanly.
    """
    sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
    context = [word.text for word in tokenizer(sentence)]
    assert context == [u'The', u'8', u'and', u'10', u'-', u'county', u'definitions', u'are', u'not', u'used',
                       u'for',
                       u'the', u'greater', u'Southern', u'California', u'Megaregion', u'.']

    # the trailing '-' may cause Assertion Error
    sentence = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
    context = [word.text for word in tokenizer(sentence)]
    assert context == [u'The', u'8', u'-', u'and', u'10', u'-', u'county', u'definitions', u'are', u'not', u'used',
                       u'for',
                       u'the', u'greater', u'Southern', u'California', u'Megaregion', u'.']
Loading…
Reference in New Issue
Block a user