spaCy/spacy/lang/ky/tokenizer_exceptions.py
from ...symbols import NORM, ORTH
from ...util import update_exc
from ..tokenizer_exceptions import BASE_EXCEPTIONS

_exc = {}

_abbrev_exc = [
    # Weekday abbreviations
    {ORTH: "дүй", NORM: "дүйшөмбү"},
    {ORTH: "шей", NORM: "шейшемби"},
    {ORTH: "шар", NORM: "шаршемби"},
    {ORTH: "бей", NORM: "бейшемби"},
    {ORTH: "жум", NORM: "жума"},
    {ORTH: "ишм", NORM: "ишемби"},
    {ORTH: "жек", NORM: "жекшемби"},
    # Month abbreviations
    {ORTH: "янв", NORM: "январь"},
    {ORTH: "фев", NORM: "февраль"},
    {ORTH: "мар", NORM: "март"},
    {ORTH: "апр", NORM: "апрель"},
    {ORTH: "июн", NORM: "июнь"},
    {ORTH: "июл", NORM: "июль"},
    {ORTH: "авг", NORM: "август"},
    {ORTH: "сен", NORM: "сентябрь"},
    {ORTH: "окт", NORM: "октябрь"},
    {ORTH: "ноя", NORM: "ноябрь"},
    {ORTH: "дек", NORM: "декабрь"},
    # Number abbreviations
    {ORTH: "млрд", NORM: "миллиард"},
    {ORTH: "млн", NORM: "миллион"},
]

# Register each abbreviation in lowercase, capitalized and uppercase form,
# both with and without a trailing period.
for abbr in _abbrev_exc:
    for orth in (abbr[ORTH], abbr[ORTH].capitalize(), abbr[ORTH].upper()):
        _exc[orth] = [{ORTH: orth, NORM: abbr[NORM]}]
        _exc[orth + "."] = [{ORTH: orth + ".", NORM: abbr[NORM]}]
for exc_data in [ # "etc." abbreviations
{ORTH: "ж.б.у.с.", NORM: "жана башка ушул сыяктуу"},
{ORTH: "ж.б.", NORM: "жана башка"},
{ORTH: "ж.", NORM: "жыл"},
{ORTH: "б.з.ч.", NORM: "биздин заманга чейин"},
{ORTH: "б.з.", NORM: "биздин заман"},
{ORTH: "кк.", NORM: "кылымдар"},
{ORTH: "жж.", NORM: "жылдар"},
{ORTH: "к.", NORM: "кылым"},
{ORTH: "көч.", NORM: "көчөсү"},
{ORTH: "м-н", NORM: "менен"},
{ORTH: "б-ча", NORM: "боюнча"},
]:
_exc[exc_data[ORTH]] = [exc_data]
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
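
# Usage sketch (illustrative, not part of the upstream module): assuming spaCy
# is installed, a blank Kyrgyz pipeline picks up TOKENIZER_EXCEPTIONS, so
# abbreviations such as "янв." and "ж.б." stay single tokens and carry their
# expanded NORM values.
if __name__ == "__main__":
    import spacy

    nlp = spacy.blank("ky")
    doc = nlp("янв. ж.б. 5 млн сом")
    print([(t.text, t.norm_) for t in doc])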