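# Tokenizer exceptions for Irish (ga): contractions, elided forms, and
# abbreviations that override the default tokenization rules.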
from ..tokenizer_exceptions import BASE_EXCEPTIONS
from ...symbols import ORTH, NORM
from ...util import update_exc
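
# Contractions and fused forms that split into more than one token; NORM
# records the standard written form of each piece.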
_exc = {
    "'acha'n": [{ORTH: "'ach", NORM: "gach"}, {ORTH: "a'n", NORM: "aon"}],
    "dem'": [{ORTH: "de", NORM: "de"}, {ORTH: "m'", NORM: "mo"}],
    "ded'": [{ORTH: "de", NORM: "de"}, {ORTH: "d'", NORM: "do"}],
    "lem'": [{ORTH: "le", NORM: "le"}, {ORTH: "m'", NORM: "mo"}],
    "led'": [{ORTH: "le", NORM: "le"}, {ORTH: "d'", NORM: "do"}],
    "théis": [{ORTH: "th", NORM: "tar"}, {ORTH: "éis", NORM: "éis"}],
    "tréis": [{ORTH: "tr", NORM: "tar"}, {ORTH: "éis", NORM: "éis"}],
}
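
# Single-token exceptions: elided forms that keep their apostrophe and
# abbreviations that keep their full stops as one token.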
for exc_data in [
    {ORTH: "'gus", NORM: "agus"},
    {ORTH: "'ach", NORM: "gach"},
    {ORTH: "ao'", NORM: "aon"},
    {ORTH: "'niar", NORM: "aniar"},
    {ORTH: "'níos", NORM: "aníos"},
    {ORTH: "'ndiu", NORM: "inniu"},
    {ORTH: "'nocht", NORM: "anocht"},
    {ORTH: "m'"},
    {ORTH: "Aib."},
    {ORTH: "Ath."},
    {ORTH: "Beal."},
    {ORTH: "a.C.n."},
    {ORTH: "m.sh."},
    {ORTH: "M.F."},
    {ORTH: "M.Fómh."},
    {ORTH: "D.F."},
    {ORTH: "D.Fómh."},
    {ORTH: "r.C."},
    {ORTH: "R.C."},
    {ORTH: "r.Ch."},
    {ORTH: "r.Chr."},
    {ORTH: "R.Ch."},
    {ORTH: "R.Chr."},
    {ORTH: "⁊rl."},
    {ORTH: "srl."},
    {ORTH: "Co."},
    {ORTH: "Ean."},
    {ORTH: "Feab."},
    {ORTH: "gCo."},
    {ORTH: ".i."},
    {ORTH: "B'"},
    {ORTH: "b'"},
    {ORTH: "lch."},
    {ORTH: "Lch."},
    {ORTH: "lgh."},
    {ORTH: "Lgh."},
    {ORTH: "Lún."},
    {ORTH: "Már."},
    {ORTH: "Meith."},
    {ORTH: "Noll."},
    {ORTH: "Samh."},
    {ORTH: "tAth."},
    {ORTH: "tUas."},
    {ORTH: "teo."},
    {ORTH: "Teo."},
    {ORTH: "Uas."},
    {ORTH: "uimh."},
    {ORTH: "Uimh."},
]:
    _exc[exc_data[ORTH]] = [exc_data]
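
# The elided forms d' and D' likewise stay single tokens.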
for orth in ["d'", "D'"]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
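
# A minimal usage sketch (assumes spaCy with the Irish language data is
# installed; the expected tokens are an assumption based on the entries above):
#
#     import spacy
#     nlp = spacy.blank("ga")
#     doc = nlp("lem' mhac")
#     print([t.text for t in doc])   # expected: ["le", "m'", "mhac"]
#     print([t.norm_ for t in doc])  # expected: ["le", "mo", "mhac"]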