From 615dba9d99df0cd25fbecd8d58b19371da59a719 Mon Sep 17 00:00:00 2001
From: Ines Montani
Date: Wed, 27 Jan 2021 22:11:42 +1100
Subject: [PATCH] Fix tokenizer exceptions

---
 spacy/lang/ky/tokenizer_exceptions.py | 51 +++++++++++++--------------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/spacy/lang/ky/tokenizer_exceptions.py b/spacy/lang/ky/tokenizer_exceptions.py
index eb367aeef..cd51c2714 100644
--- a/spacy/lang/ky/tokenizer_exceptions.py
+++ b/spacy/lang/ky/tokenizer_exceptions.py
@@ -1,39 +1,39 @@
 from ..tokenizer_exceptions import BASE_EXCEPTIONS
-from ...symbols import ORTH, LEMMA, NORM
+from ...symbols import ORTH, NORM
 from ...util import update_exc
 
 _exc = {}
 
 _abbrev_exc = [
     # Weekdays abbreviations
-    {ORTH: "дүй", LEMMA: "дүйшөмбү"},
-    {ORTH: "шей", LEMMA: "шейшемби"},
-    {ORTH: "шар", LEMMA: "шаршемби"},
-    {ORTH: "бей", LEMMA: "бейшемби"},
-    {ORTH: "жум", LEMMA: "жума"},
-    {ORTH: "ишм", LEMMA: "ишемби"},
-    {ORTH: "жек", LEMMA: "жекшемби"},
+    {ORTH: "дүй", NORM: "дүйшөмбү"},
+    {ORTH: "шей", NORM: "шейшемби"},
+    {ORTH: "шар", NORM: "шаршемби"},
+    {ORTH: "бей", NORM: "бейшемби"},
+    {ORTH: "жум", NORM: "жума"},
+    {ORTH: "ишм", NORM: "ишемби"},
+    {ORTH: "жек", NORM: "жекшемби"},
     # Months abbreviations
-    {ORTH: "янв", LEMMA: "январь"},
-    {ORTH: "фев", LEMMA: "февраль"},
-    {ORTH: "мар", LEMMA: "март"},
-    {ORTH: "апр", LEMMA: "апрель"},
-    {ORTH: "июн", LEMMA: "июнь"},
-    {ORTH: "июл", LEMMA: "июль"},
-    {ORTH: "авг", LEMMA: "август"},
-    {ORTH: "сен", LEMMA: "сентябрь"},
-    {ORTH: "окт", LEMMA: "октябрь"},
-    {ORTH: "ноя", LEMMA: "ноябрь"},
-    {ORTH: "дек", LEMMA: "декабрь"},
+    {ORTH: "янв", NORM: "январь"},
+    {ORTH: "фев", NORM: "февраль"},
+    {ORTH: "мар", NORM: "март"},
+    {ORTH: "апр", NORM: "апрель"},
+    {ORTH: "июн", NORM: "июнь"},
+    {ORTH: "июл", NORM: "июль"},
+    {ORTH: "авг", NORM: "август"},
+    {ORTH: "сен", NORM: "сентябрь"},
+    {ORTH: "окт", NORM: "октябрь"},
+    {ORTH: "ноя", NORM: "ноябрь"},
+    {ORTH: "дек", NORM: "декабрь"},
     # Number abbreviations
-    {ORTH: "млрд", LEMMA: "миллиард"},
-    {ORTH: "млн", LEMMA: "миллион"},
+    {ORTH: "млрд", NORM: "миллиард"},
+    {ORTH: "млн", NORM: "миллион"},
 ]
 
 for abbr in _abbrev_exc:
     for orth in (abbr[ORTH], abbr[ORTH].capitalize(), abbr[ORTH].upper()):
-        _exc[orth] = [{ORTH: orth, LEMMA: abbr[LEMMA], NORM: abbr[LEMMA]}]
-        _exc[orth + "."] = [{ORTH: orth + ".", LEMMA: abbr[LEMMA], NORM: abbr[LEMMA]}]
+        _exc[orth] = [{ORTH: orth, NORM: abbr[NORM]}]
+        _exc[orth + "."] = [{ORTH: orth + ".", NORM: abbr[NORM]}]
 
 for exc_data in [  # "etc." abbreviations
     {ORTH: "ж.б.у.с.", NORM: "жана башка ушул сыяктуу"},
@@ -47,8 +47,7 @@ for exc_data in [  # "etc." abbreviations
     {ORTH: "көч.", NORM: "көчөсү"},
     {ORTH: "м-н", NORM: "менен"},
     {ORTH: "б-ча", NORM: "боюнча"},
-]:
-    exc_data[LEMMA] = exc_data[NORM]
-    _exc[exc_data[ORTH]] = [exc_data]
+]:
+    _exc[exc_data[ORTH]] = [exc_data]
 
 TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
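
Note (not part of the patch): a minimal sketch of how the updated exceptions
behave, assuming spaCy v3+ with its bundled Kyrgyz ("ky") language data; the
sample text is illustrative, not taken from the change itself.

    # With LEMMA removed, an exception only pins the surface split and the
    # norm: "янв." stays a single token and exposes "январь" via token.norm_.
    import spacy

    nlp = spacy.blank("ky")   # blank Kyrgyz pipeline, tokenizer only
    doc = nlp("янв. айы")     # illustrative sample text
    for token in doc:
        print(token.text, token.norm_)
    # expected: "янв." -> "январь"; "айы" falls back to its lowercase form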