Source: spaCy — spacy/lang/pl/tokenizer_exceptions.py
# encoding: utf8
"""Tokenizer exceptions for Polish (pl).

Builds TOKENIZER_EXCEPTIONS, a dict mapping each exceptional orth string
to a list of token attribute dicts, in the shape spaCy's tokenizer expects.
"""
from __future__ import unicode_literals

from ._tokenizer_exceptions_list import PL_BASE_EXCEPTIONS
from ...symbols import POS, ADV, NOUN, ORTH, LEMMA, ADJ

_exc = {}

# Abbreviations that expand to a multi-word lemma, with a coarse POS tag.
for exc_data in [
    {ORTH: "m.in.", LEMMA: "między innymi", POS: ADV},
    {ORTH: "inż.", LEMMA: "inżynier", POS: NOUN},
    {ORTH: "mgr.", LEMMA: "magister", POS: NOUN},
    {ORTH: "tzn.", LEMMA: "to znaczy", POS: ADV},
    {ORTH: "tj.", LEMMA: "to jest", POS: ADV},
    {ORTH: "tzw.", LEMMA: "tak zwany", POS: ADJ},
]:
    # Each exception maps the surface form to a single-token analysis.
    _exc[exc_data[ORTH]] = [exc_data]

# Dotted abbreviations kept as single tokens, with no lemma/POS attached.
for orth in ["w.", "r."]:
    _exc[orth] = [{ORTH: orth}]

# Large generated list of base exceptions, likewise kept as single tokens.
for orth in PL_BASE_EXCEPTIONS:
    _exc[orth] = [{ORTH: orth}]

TOKENIZER_EXCEPTIONS = _exc