From cd6a29dce7af3edc00de988d6976a11b91b43682 Mon Sep 17 00:00:00 2001
From: ines
Date: Sat, 14 Oct 2017 13:28:46 +0200
Subject: [PATCH] Port over changes from #1294

---
 .../lang/en/test_customized_tokenizer.py | 42 +++++++++++++++++++
 spacy/tokenizer.pyx                      |  3 +-
 2 files changed, 44 insertions(+), 1 deletion(-)
 create mode 100644 spacy/tests/lang/en/test_customized_tokenizer.py

diff --git a/spacy/tests/lang/en/test_customized_tokenizer.py b/spacy/tests/lang/en/test_customized_tokenizer.py
new file mode 100644
index 000000000..1d35fb128
--- /dev/null
+++ b/spacy/tests/lang/en/test_customized_tokenizer.py
@@ -0,0 +1,42 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import pytest
+
+from ....lang.en import English
+from ....tokenizer import Tokenizer
+from .... import util
+
+
+@pytest.fixture
+def custom_en_tokenizer(en_vocab):
+    prefix_re = util.compile_prefix_regex(English.Defaults.prefixes)
+    suffix_re = util.compile_suffix_regex(English.Defaults.suffixes)
+    custom_infixes = [r'\.\.\.+',
+                      r'(?<=[0-9])-(?=[0-9])',
+                      # '(?<=[0-9]+),(?=[0-9]+)',
+                      r'[0-9]+(,[0-9]+)+',
+                      r'[\[\]!&:,()\*—–\/-]']
+
+    infix_re = util.compile_infix_regex(custom_infixes)
+    return Tokenizer(en_vocab,
+                     English.Defaults.tokenizer_exceptions,
+                     prefix_re.search,
+                     suffix_re.search,
+                     infix_re.finditer,
+                     token_match=None)
+
+
+def test_customized_tokenizer_handles_infixes(custom_en_tokenizer):
+    sentence = "The 8 and 10-county definitions are not used for the greater Southern California Megaregion."
+    context = [word.text for word in custom_en_tokenizer(sentence)]
+    assert context == ['The', '8', 'and', '10', '-', 'county', 'definitions',
+                       'are', 'not', 'used', 'for', 'the', 'greater',
+                       'Southern', 'California', 'Megaregion', '.']
+
+    # a trailing '-' (as in '8-') used to trip an assertion in the tokenizer
+    sentence = "The 8- and 10-county definitions are not used for the greater Southern California Megaregion."
+    context = [word.text for word in custom_en_tokenizer(sentence)]
+    assert context == ['The', '8', '-', 'and', '10', '-', 'county',
+                       'definitions', 'are', 'not', 'used', 'for', 'the',
+                       'greater', 'Southern', 'California', 'Megaregion', '.']
diff --git a/spacy/tokenizer.pyx b/spacy/tokenizer.pyx
index de184baba..f2d21de44 100644
--- a/spacy/tokenizer.pyx
+++ b/spacy/tokenizer.pyx
@@ -248,7 +248,8 @@ cdef class Tokenizer:
                     start = infix_end
             span = string[start:]
-            tokens.push_back(self.vocab.get(tokens.mem, span), False)
+            if span:
+                tokens.push_back(self.vocab.get(tokens.mem, span), False)
             cdef vector[const LexemeC*].reverse_iterator it = suffixes.rbegin()
             while it != suffixes.rend():
                 lexeme = deref(it)
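
Note, not part of the patch: the new guard in tokenizer.pyx matters whenever
an infix pattern matches at the very end of the remaining string, as in "8-"
with a '-' infix rule. In that case start == len(string), so string[start:]
is empty, and pushing an empty span into the Doc is what previously tripped
the assertion. Below is a minimal standalone sketch of the same setup as the
test fixture above; it assumes the 2017-era Tokenizer signature shown in the
test and uses English().vocab in place of the en_vocab pytest fixture. The
infix patterns and sample sentence are illustrative, not part of the commit.

    from spacy.lang.en import English
    from spacy.tokenizer import Tokenizer
    from spacy import util

    nlp = English()
    prefix_re = util.compile_prefix_regex(English.Defaults.prefixes)
    suffix_re = util.compile_suffix_regex(English.Defaults.suffixes)
    # '-' appears both between digits and as a plain infix character, so a
    # chunk like '8-' ends exactly at an infix match, leaving an empty
    # remainder for string[start:]
    infix_re = util.compile_infix_regex([r'(?<=[0-9])-(?=[0-9])',
                                         r'[\[\]!&:,()\*—–\/-]'])

    tokenizer = Tokenizer(nlp.vocab,
                          English.Defaults.tokenizer_exceptions,
                          prefix_re.search,
                          suffix_re.search,
                          infix_re.finditer,
                          token_match=None)

    doc = tokenizer("The 8- and 10-county definitions are not used.")
    print([t.text for t in doc])
    # with the guard in place, this should print:
    # ['The', '8', '-', 'and', '10', '-', 'county', 'definitions',
    #  'are', 'not', 'used', '.']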