# Mirror of https://github.com/explosion/spaCy.git
# Synced 2024-11-14 13:47:13 +03:00, commit fe5f5d6ac6
# (69 lines, 1.9 KiB, Python, executable file)
#
# Commit log for fe5f5d6ac6:
#   * Update Makefile for a more recent Python version
#   * Updated for BSC changes: new tokenization changes
#   * Update test_text.py; updating tests and requirements
#   * Changed failed test in tests/lang/ca
#   * Update .gitignore: deleted stashed-changes line
#   * Back to Python 3.6 and removed transformer requirements, as per request
#   * Update test_exception.py: changed the test; removed test print;
#     added final punctuation to ensure consistency
#   * Update requirements.txt: removed spacy-transformers from requirements
#   * Format; update test to check all tokens
#   Co-authored-by: cayorodriguez <crodriguezp@gmail.com>
#   Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
|
from ...symbols import ORTH, NORM
|
|
from ...util import update_exc
|
|
|
|
|
|
_exc = {}
|
|
|
|
for exc_data in [
|
|
{ORTH: "aprox.", NORM: "aproximadament"},
|
|
{ORTH: "pàg.", NORM: "pàgina"},
|
|
{ORTH: "p.ex.", NORM: "per exemple"},
|
|
{ORTH: "gen.", NORM: "gener"},
|
|
{ORTH: "feb.", NORM: "febrer"},
|
|
{ORTH: "abr.", NORM: "abril"},
|
|
{ORTH: "jul.", NORM: "juliol"},
|
|
{ORTH: "set.", NORM: "setembre"},
|
|
{ORTH: "oct.", NORM: "octubre"},
|
|
{ORTH: "nov.", NORM: "novembre"},
|
|
{ORTH: "dec.", NORM: "desembre"},
|
|
{ORTH: "Dr.", NORM: "doctor"},
|
|
{ORTH: "Dra.", NORM: "doctora"},
|
|
{ORTH: "Sr.", NORM: "senyor"},
|
|
{ORTH: "Sra.", NORM: "senyora"},
|
|
{ORTH: "Srta.", NORM: "senyoreta"},
|
|
{ORTH: "núm", NORM: "número"},
|
|
{ORTH: "St.", NORM: "sant"},
|
|
{ORTH: "Sta.", NORM: "santa"},
|
|
{ORTH: "pl.", NORM: "plaça"},
|
|
{ORTH: "à."},
|
|
{ORTH: "è."},
|
|
{ORTH: "é."},
|
|
{ORTH: "í."},
|
|
{ORTH: "ò."},
|
|
{ORTH: "ó."},
|
|
{ORTH: "ú."},
|
|
{ORTH: "'l"},
|
|
{ORTH: "'ls"},
|
|
{ORTH: "'m"},
|
|
{ORTH: "'n"},
|
|
{ORTH: "'ns"},
|
|
{ORTH: "'s"},
|
|
{ORTH: "'t"},
|
|
]:
|
|
_exc[exc_data[ORTH]] = [exc_data]
|
|
|
|
_exc["del"] = [{ORTH: "d", NORM: "de"}, {ORTH: "el"}]
|
|
_exc["dels"] = [{ORTH: "d", NORM: "de"}, {ORTH: "els"}]
|
|
|
|
_exc["al"] = [{ORTH: "a"}, {ORTH: "l", NORM: "el"}]
|
|
_exc["als"] = [{ORTH: "a"}, {ORTH: "ls", NORM: "els"}]
|
|
|
|
_exc["pel"] = [{ORTH: "p", NORM: "per"}, {ORTH: "el"}]
|
|
_exc["pels"] = [{ORTH: "p", NORM: "per"}, {ORTH: "els"}]
|
|
|
|
_exc["holahola"] = [{ORTH: "holahola", NORM: "cocacola"}]
|
|
|
|
|
|
# Times
|
|
_exc["12m."] = [{ORTH: "12"}, {ORTH: "m.", NORM: "p.m."}]
|
|
|
|
for h in range(1, 12 + 1):
|
|
for period in ["a.m.", "am"]:
|
|
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "a.m."}]
|
|
for period in ["p.m.", "pm"]:
|
|
_exc[f"{h}{period}"] = [{ORTH: f"{h}"}, {ORTH: period, NORM: "p.m."}]
|
|
|
|
|
|
TOKENIZER_EXCEPTIONS = update_exc(BASE_EXCEPTIONS, _exc)
|