Exclude "were" and "Were" from tokenizer exceptions and add regression test (resolves #744)

This commit is contained in:
Ines Montani 2017-01-16 13:10:38 +01:00
parent e053c7693b
commit 50878ef598
2 changed files with 14 additions and 1 deletions

View File

@@ -7,7 +7,7 @@ from ..language_data import PRON_LEMMA
# Accumulator for generated tokenizer exceptions (filled in below this hunk).
EXC = {}
# NOTE(review): the next two lines are the before/after of the diff — the
# second assignment is the one in effect after this commit.
# EXCLUDE_EXC lists surface forms that must NOT be treated as contractions
# (e.g. "well" is a word, not "we'll"); this commit adds "were"/"Were" so
# they are not expanded as "we're" (resolves #744).
EXCLUDE_EXC = ["Ill", "ill", "Its", "its", "Hell", "hell", "Well", "well", "Whore", "whore"]
EXCLUDE_EXC = ["Ill", "ill", "Its", "its", "Hell", "hell", "were", "Were", "Well", "well", "Whore", "whore"]
# Pronouns

View File

@@ -0,0 +1,13 @@
# coding: utf-8
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text', ["We were scared", "We Were Scared"])
def test_issue744(en_tokenizer, text):
    """Regression test for #744: 'were'/'Were' must not be expanded as a
    contraction ("we're") by the English tokenizer exceptions."""
    doc = en_tokenizer(text)
    # Three whitespace-separated words must yield exactly three tokens.
    assert len(doc) == 3
    second = doc[1]
    assert second.text.lower() == "were"