spaCy/spacy/lang/pt/tokenizer_exceptions.py

from ...symbols import ORTH, NORM

# Each exception maps a contracted surface form to its component tokens;
# NORM records the underlying form of each piece.
_exc = {
    "às": [{ORTH: "à", NORM: "a"}, {ORTH: "s", NORM: "as"}],
    "ao": [{ORTH: "a"}, {ORTH: "o"}],
    "aos": [{ORTH: "a"}, {ORTH: "os"}],
    "àquele": [{ORTH: "à", NORM: "a"}, {ORTH: "quele", NORM: "aquele"}],
    "àquela": [{ORTH: "à", NORM: "a"}, {ORTH: "quela", NORM: "aquela"}],
    "àqueles": [{ORTH: "à", NORM: "a"}, {ORTH: "queles", NORM: "aqueles"}],
    "àquelas": [{ORTH: "à", NORM: "a"}, {ORTH: "quelas", NORM: "aquelas"}],
    "àquilo": [{ORTH: "à", NORM: "a"}, {ORTH: "quilo", NORM: "aquilo"}],
    "aonde": [{ORTH: "a"}, {ORTH: "onde"}],
}

# Contractions of the prepositions "de" and "em" with pronouns and adverbs
_per_pron = ["ele", "ela", "eles", "elas"]
_dem_pron = [
    "este",
    "esta",
    "estes",
    "estas",
    "isto",
    "esse",
    "essa",
    "esses",
    "essas",
    "isso",
    "aquele",
    "aquela",
    "aqueles",
    "aquelas",
    "aquilo",
]
_und_pron = ["outro", "outra", "outros", "outras"]
_adv = ["aqui", "aí", "ali", "além"]

# "de" + pronoun/adverb, e.g. "dele" -> "d" + "ele" (NORM "de" on the first token)
for orth in _per_pron + _dem_pron + _und_pron + _adv:
    _exc["d" + orth] = [{ORTH: "d", NORM: "de"}, {ORTH: orth}]

# "em" + pronoun, e.g. "nele" -> "n" + "ele" (NORM "em" on the first token)
for orth in _per_pron + _dem_pron + _und_pron:
    _exc["n" + orth] = [{ORTH: "n", NORM: "em"}, {ORTH: orth}]

# Abbreviations that should remain single tokens
for orth in [
    "Adm.",
    "Dr.",
    "e.g.",
    "E.g.",
    "E.G.",
    "Gen.",
    "Gov.",
    "i.e.",
    "I.e.",
    "I.E.",
    "Jr.",
    "Ltd.",
    "p.m.",
    "Ph.D.",
    "Rep.",
    "Rev.",
    "Sen.",
    "Sr.",
    "Sra.",
    "vs.",
    "tel.",
    "pág.",
    "pag.",
]:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = _exc
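
Usage sketch (illustrative, not part of the module): assuming a spaCy installation that bundles this Portuguese language data, the exceptions above determine how contracted forms are split and normalized when the blank "pt" pipeline tokenizes text. The sentence and the expected output below are assumptions for illustration.

import spacy

# A blank Portuguese pipeline picks up TOKENIZER_EXCEPTIONS via the language defaults.
nlp = spacy.blank("pt")
doc = nlp("Fui àquele restaurante naquela rua.")

# "àquele" -> "à" + "quele" (NORM "a" + "aquele"); "naquela" -> "n" + "aquela".
print([t.text for t in doc])
# expected: ['Fui', 'à', 'quele', 'restaurante', 'n', 'aquela', 'rua', '.']
print([t.norm_ for t in doc[1:3]])
# expected: ['a', 'aquele']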