spaCy/spacy/es/tokenizer_exceptions.py

# coding: utf8
from __future__ import unicode_literals
from ..symbols import *
from ..language_data import PRON_LEMMA, DET_LEMMA
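

# Tokenizer exceptions for Spanish: each key is a surface form (a contraction
# such as "al" or an abbreviation such as "p.ej."), and the value lists the
# tokens it is analysed as, given as dicts of token attributes (ORTH, LEMMA,
# TAG, NORM).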
TOKENIZER_EXCEPTIONS = {
    "al": [
        {ORTH: "a", LEMMA: "a", TAG: ADP},
        {ORTH: "el", LEMMA: "el", TAG: DET}
    ],
    "consigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "sigo", LEMMA: PRON_LEMMA, NORM: "sí"}
    ],
    "conmigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "migo", LEMMA: PRON_LEMMA, NORM: "mí"}
    ],
    "contigo": [
        {ORTH: "con", LEMMA: "con"},
        {ORTH: "tigo", LEMMA: PRON_LEMMA, NORM: "ti"}
    ],
    "del": [
        {ORTH: "de", LEMMA: "de", TAG: ADP},
        {ORTH: "l", LEMMA: "el", TAG: DET}
    ],
    "pel": [
        {ORTH: "pe", LEMMA: "per", TAG: ADP},
        {ORTH: "l", LEMMA: "el", TAG: DET}
    ],
    "pal": [
        {ORTH: "pa", LEMMA: "para"},
        {ORTH: "l", LEMMA: DET_LEMMA, NORM: "el"}
    ],
    "pala": [
        {ORTH: "pa", LEMMA: "para"},
        {ORTH: "la", LEMMA: DET_LEMMA}
    ],
    "aprox.": [
        {ORTH: "aprox.", LEMMA: "aproximadamente"}
    ],
    "dna.": [
        {ORTH: "dna.", LEMMA: "docena"}
    ],
    "esq.": [
        {ORTH: "esq.", LEMMA: "esquina"}
    ],
    "pág.": [
        {ORTH: "pág.", LEMMA: "página"}
    ],
    "p.ej.": [
        {ORTH: "p.ej.", LEMMA: "por ejemplo"}
    ],
    "Ud.": [
        {ORTH: "Ud.", LEMMA: PRON_LEMMA, NORM: "usted"}
    ],
    "Vd.": [
        {ORTH: "Vd.", LEMMA: PRON_LEMMA, NORM: "usted"}
    ],
    "Uds.": [
        {ORTH: "Uds.", LEMMA: PRON_LEMMA, NORM: "ustedes"}
    ],
    "Vds.": [
        {ORTH: "Vds.", LEMMA: PRON_LEMMA, NORM: "ustedes"}
    ]
}
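

# Abbreviations kept as single tokens: only the ORTH form is listed here, and
# each entry is expanded into a simple one-token exception elsewhere in the
# Spanish language data.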
ORTH_ONLY = [
"a.C.",
"a.J.C.",
"apdo.",
"Av.",
"Avda.",
"Cía.",
"etc.",
"Gob.",
"Gral.",
"Ing.",
"J.C.",
"Lic.",
"m.n.",
"no.",
"núm.",
"P.D.",
"Prof.",
"Profa.",
"q.e.p.d."
"S.A.",
"S.L.",
"s.s.s.",
"Sr.",
"Sra.",
"Srta."
]
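

# Usage sketch (a minimal example, assuming a spaCy 1.x-style Spanish pipeline
# that loads this language data):
#
#     import spacy
#     nlp = spacy.load("es")
#     doc = nlp(u"Voy al cine, p.ej. los sábados.")
#
# With these exceptions, "al" is tokenized as "a" (ADP) + "el" (DET) and
# "p.ej." stays a single token with the lemma "por ejemplo", while the
# abbreviations in ORTH_ONLY (e.g. "etc.", "Sr.") are not split at their
# trailing periods.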