mirror of
https://github.com/explosion/spaCy.git
synced 2025-01-26 17:24:41 +03:00
Exclude "were" and "Were" from tokenizer exceptions and add regression test (resolves #744)
This commit is contained in:
parent
e053c7693b
commit
50878ef598
|
@ -7,7 +7,7 @@ from ..language_data import PRON_LEMMA
|
|||
|
||||
# Accumulator for the tokenizer exceptions generated below.
EXC = {}

# Contraction forms that collide with ordinary English words and therefore
# must NOT be emitted by the automatic exception generation: e.g. "were"
# would otherwise be treated as the contraction "we're" (see issue #744).
EXCLUDE_EXC = ["Ill", "ill", "Its", "its", "Hell", "hell", "were", "Were", "Well", "well", "Whore", "whore"]
|
||||
|
||||
|
||||
# Pronouns
|
||||
|
|
13
spacy/tests/regression/test_issue744.py
Normal file
13
spacy/tests/regression/test_issue744.py
Normal file
|
@ -0,0 +1,13 @@
|
|||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.mark.parametrize('text', ["We were scared", "We Were Scared"])
def test_issue744(en_tokenizer, text):
    """Test that 'were' and 'Were' are excluded from the contractions
    generated by the English tokenizer exceptions."""
    # Regression test for #744: "were"/"Were" must stay a single token,
    # not be split as the contraction "we're".
    doc = en_tokenizer(text)
    assert len(doc) == 3
    middle = doc[1].text
    assert middle.lower() == "were"
|
Loading…
Reference in New Issue
Block a user