# coding: utf8
from __future__ import unicode_literals

from ...symbols import ORTH, LEMMA, NORM, PRON_LEMMA
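

# Each key in `_exc` is a contracted surface form; the value lists the tokens
# it is split into, with NORM recording the expanded form used for
# normalisation (e.g. "às" is split into "à" + "s", normalised to "a" + "as").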
_exc = {
    "às": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "s", NORM: "as"}],

    "ao": [
        {ORTH: "a"},
        {ORTH: "o"}],

    "aos": [
        {ORTH: "a"},
        {ORTH: "os"}],

    "àquele": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "quele", NORM: "aquele"}],

    "àquela": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "quela", NORM: "aquela"}],

    "àqueles": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "queles", NORM: "aqueles"}],

    "àquelas": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "quelas", NORM: "aquelas"}],

    "àquilo": [
        {ORTH: "à", NORM: "a"},
        {ORTH: "quilo", NORM: "aquilo"}],

    "aonde": [
        {ORTH: "a"},
        {ORTH: "onde"}]
}


# Contractions

_per_pron = ["ele", "ela", "eles", "elas"]
_dem_pron = ["este", "esta", "estes", "estas", "isto", "esse", "essa", "esses",
             "essas", "isso", "aquele", "aquela", "aqueles", "aquelas", "aquilo"]
_und_pron = ["outro", "outra", "outros", "outras"]
_adv = ["aqui", "aí", "ali", "além"]
for orth in _per_pron + _dem_pron + _und_pron + _adv:
    _exc["d" + orth] = [
        {ORTH: "d", NORM: "de"},
        {ORTH: orth}]
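
# Contractions of "em" with pronouns (nele, nesta, nisso, noutro, ...):
# the single-letter "n" token is normalised to "em".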
for orth in _per_pron + _dem_pron + _und_pron:
    _exc["n" + orth] = [
        {ORTH: "n", NORM: "em"},
        {ORTH: orth}]
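

# Abbreviations that contain a period and must be kept as single tokens.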
for orth in [
        "Adm.", "Dr.", "e.g.", "E.g.", "E.G.", "Gen.", "Gov.", "i.e.", "I.e.",
        "I.E.", "Jr.", "Ltd.", "p.m.", "Ph.D.", "Rep.", "Rev.", "Sen.", "Sr.",
        "Sra.", "vs."]:
    _exc[orth] = [{ORTH: orth}]
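

# Exported for the language defaults, where it is typically merged with the
# shared base exceptions.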
TOKENIZER_EXCEPTIONS = _exc