From 7f6be41f212c2a6f65612beeccf170665e0ba106 Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Tue, 23 May 2017 12:18:00 +0200
Subject: [PATCH] Fix typo in English tokenizer exceptions (resolves #1071)

---
 spacy/en/tokenizer_exceptions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/spacy/en/tokenizer_exceptions.py b/spacy/en/tokenizer_exceptions.py
index 3d009241b..d9aa01734 100644
--- a/spacy/en/tokenizer_exceptions.py
+++ b/spacy/en/tokenizer_exceptions.py
@@ -178,7 +178,7 @@ for word in ["who", "what", "when", "where", "why", "how", "there", "that"]:
 
         EXC[orth + "ve"] = [
             {ORTH: orth, LEMMA: word},
-            {ORTH: "'ve", LEMMA: "have", TAG: "VB"}
+            {ORTH: "ve", LEMMA: "have", TAG: "VB"}
         ]
 
         EXC[orth + "'d"] = [
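
Note (not part of the patch): the key for this exception is orth + "ve" with no
apostrophe (e.g. "whove"), so the second sub-token's ORTH must also be the
apostrophe-free "ve". The sketch below illustrates the invariant the fix
restores, assuming the tokenizer expects the ORTH values of an exception's
sub-tokens to concatenate back to the exception key. It is a standalone
illustration, not code from the repo: the string constants stand in for spaCy's
attribute IDs, and the final assert is only there to demonstrate the invariant.

    # Stand-ins for spaCy's ORTH / LEMMA / TAG attribute IDs (illustrative only).
    ORTH, LEMMA, TAG = "orth", "lemma", "tag"

    EXC = {}
    for word in ["who", "what", "when", "where", "why", "how", "there", "that"]:
        for orth in [word, word.title()]:
            # Apostrophe-free variant: "whove", "Whove", "thatve", ...
            EXC[orth + "ve"] = [
                {ORTH: orth, LEMMA: word},
                {ORTH: "ve", LEMMA: "have", TAG: "VB"},  # fixed: "ve", not "'ve"
            ]

    # The sub-token ORTH values must join back into the key; with "'ve" the
    # joined string would be "who've" while the key is "whove".
    for key, substrings in EXC.items():
        assert "".join(s[ORTH] for s in substrings) == key, key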