spaCy/spacy/es/tokenizer_exceptions.py


# coding: utf8
from __future__ import unicode_literals

from ..symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
from ..deprecated import PRON_LEMMA, DET_LEMMA


# Contractions and fused clitic pronouns, split back into their components.
_exc = {
    "al": [
        {ORTH: "a", LEMMA: "a", TAG: ADP},
        {ORTH: "l", LEMMA: "el", TAG: DET}],

    "consigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "sigo", LEMMA: PRON_LEMMA, NORM: "sí"}],

    "conmigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "migo", LEMMA: PRON_LEMMA, NORM: "mí"}],

    "contigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "tigo", LEMMA: PRON_LEMMA, NORM: "ti"}],

    "del": [
        {ORTH: "de", LEMMA: "de", TAG: ADP},
        {ORTH: "l", LEMMA: "el", TAG: DET}],

    "pel": [
        {ORTH: "pe", LEMMA: "per", TAG: ADP},
        {ORTH: "l", LEMMA: "el", TAG: DET}],

    "pal": [
        {ORTH: "pa", LEMMA: "para"},
        {ORTH: "l", LEMMA: DET_LEMMA, NORM: "el"}],

    "pala": [
        {ORTH: "pa", LEMMA: "para"},
        {ORTH: "la", LEMMA: DET_LEMMA}]
}
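
# Note: spaCy requires the ORTH values of an exception to concatenate back to
# the dictionary key, since exceptions may re-annotate but never rewrite the
# underlying text, e.g. "con" + "sigo" == "consigo".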


# One-token abbreviations that carry extra lemma/norm annotation.
for exc_data in [
    {ORTH: "aprox.", LEMMA: "aproximadamente"},
    {ORTH: "dna.", LEMMA: "docena"},
    {ORTH: "esq.", LEMMA: "esquina"},
    {ORTH: "pág.", LEMMA: "página"},
    {ORTH: "p.ej.", LEMMA: "por ejemplo"},
    {ORTH: "Ud.", LEMMA: PRON_LEMMA, NORM: "usted"},
    {ORTH: "Vd.", LEMMA: PRON_LEMMA, NORM: "usted"},
    {ORTH: "Uds.", LEMMA: PRON_LEMMA, NORM: "ustedes"},
    {ORTH: "Vds.", LEMMA: PRON_LEMMA, NORM: "ustedes"}]:
    _exc[exc_data[ORTH]] = [dict(exc_data)]


# Times: "12m." (noon) and hour + a.m./p.m. combinations.
_exc["12m."] = [
    {ORTH: "12"},
    {ORTH: "m.", LEMMA: "p.m."}]

for h in range(1, 12 + 1):
    for period in ["a.m.", "am"]:
        _exc["%d%s" % (h, period)] = [
            {ORTH: "%d" % h},
            {ORTH: period, LEMMA: "a.m."}]
    for period in ["p.m.", "pm"]:
        _exc["%d%s" % (h, period)] = [
            {ORTH: "%d" % h},
            {ORTH: period, LEMMA: "p.m."}]


# Abbreviations kept as single tokens so the trailing period isn't split off.
for orth in [
    "a.C.", "a.J.C.", "apdo.", "Av.", "Avda.", "Cía.", "etc.", "Gob.", "Gral.",
    "Ing.", "J.C.", "Lic.", "m.n.", "no.", "núm.", "P.D.", "Prof.", "Profa.",
    "q.e.p.d.", "S.A.", "S.L.", "s.s.s.", "Sr.", "Sra.", "Srta."]:
    _exc[orth] = [{ORTH: orth}]


TOKENIZER_EXCEPTIONS = dict(_exc)
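

# Illustrative check (a doctest-style sketch, grounded in the table above):
# each exception maps a surface string to the list of token attribute dicts
# it splits into.
#
#     >>> [t[ORTH] for t in TOKENIZER_EXCEPTIONS["al"]]
#     ['a', 'l']
#     >>> [t[ORTH] for t in TOKENIZER_EXCEPTIONS["3pm"]]
#     ['3', 'pm']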