commit 73b577cb01
parent ae99990f63
Author: ines
Date:   2017-05-08 22:29:04 +02:00

    Fix relative imports

35 changed files with 108 additions and 108 deletions
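Every hunk below follows the same pattern. Each language module moved one package level deeper (the file paths are not shown in this view, but the imports imply a move from spacy/<lang>/ to spacy/lang/<lang>/), so relative imports that reach into the core package gain one leading dot, while shared language data is now pulled from sibling modules one level up (..tokenizer_exceptions, ..char_classes) instead of the old ..language_data package. A minimal sketch of how Python resolves the dots, assuming that inferred layout:

# Layout inferred from the import paths in this diff; the page itself
# does not show the file names, so treat the tree as an assumption.
#
#   spacy/
#       language.py                   # defines Language
#       attrs.py                      # defines LANG
#       lang/
#           tokenizer_exceptions.py   # defines BASE_EXCEPTIONS, URL_PATTERN
#           char_classes.py           # defines ALPHA, QUOTES, UNITS, ...
#           bn/
#               __init__.py           # one of the modules fixed below
#
# Inside spacy/lang/bn/__init__.py the dots resolve as:
#   .    -> spacy.lang.bn   (this package)
#   ..   -> spacy.lang      (shared language data)
#   ...  -> spacy           (the core package)

from ..tokenizer_exceptions import BASE_EXCEPTIONS  # spacy.lang.tokenizer_exceptions
from ...language import Language                    # spacy.language
from ...attrs import LANG                           # spacy.attrs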

@@ -7,10 +7,10 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LEMMA_RULES
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc
 class Bengali(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...deprecated import PRON_LEMMA
+from ...symbols import LEMMA
 MORPH_RULES = {

@@ -1,9 +1,6 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
-from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
-from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES
+from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, UNITS
+from ..char_classes import ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS, QUOTES
 CURRENCY_SYMBOLS = r"\$ ¢ £ € ¥ ฿ ৳"
@@ -44,3 +41,5 @@ TOKENIZER_INFIXES = (
     r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", "")),
 ]
 )
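The q=... argument in the infix rule above is string surgery on a space-separated list of quote characters: the plain apostrophe is dropped first (presumably so intra-word apostrophes are handled by other rules), the padding is stripped, and the separator spaces are removed, leaving the body of a regex character class. A self-contained sketch of the idiom, using a made-up _QUOTES value rather than the real one from char_classes:

import re

# Hypothetical stand-in for spaCy's _QUOTES: a space-separated string of
# quote characters. The real value lives in char_classes and is not
# shown in this diff.
_QUOTES = "\" ” “ ‘ ' ’"

# Same transformation as in the infix rule: drop plain apostrophes, trim
# the ends, then remove the separator spaces.
q = _QUOTES.replace("'", "").strip().replace(" ", "")  # -> '"”“‘’'

ALPHA = "a-zA-Z"  # simplified stand-in for the full Unicode alpha class
infix = r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=q)

print(re.findall(infix, 'tip”-top'))  # ['”']: quote between a letter and a hyphen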

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM
+from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM
 TAG_MAP = {

@@ -1,7 +1,7 @@
 # coding=utf-8
 from __future__ import unicode_literals
-from ..symbols import *
+from ...symbols import ORTH, LEMMA
 TOKENIZER_EXCEPTIONS = {}

@@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class German(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
+from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
 TAG_MAP = {

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA
 _exc = {

@@ -7,10 +7,10 @@ from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES
 from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc
 class English(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA
 MORPH_RULES = {

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
-from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
+from ...symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
+from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
 TAG_MAP = {

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA
 _exc = {}

@@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class Spanish(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
+from ...deprecated import PRON_LEMMA
 _exc = {

@@ -4,10 +4,10 @@ from __future__ import unicode_literals
 from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc
 class Finnish(Language):

@@ -1,7 +1,7 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA
+from ...symbols import ORTH, LEMMA
 _exc = {}

@@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class French(Language):

@@ -1,9 +1,9 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..language_data.punctuation import ALPHA, TOKENIZER_INFIXES, LIST_PUNCT
-from ..language_data.punctuation import LIST_ELLIPSES, LIST_QUOTES, CURRENCY
-from ..language_data.punctuation import UNITS, ALPHA_LOWER, QUOTES, ALPHA_UPPER
+from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
+from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
+from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
 ELISION = " ' ".strip().replace(' ', '').replace('\n', '')
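As an aside, the ELISION line above uses the same space-separated-string convention as the character classes: strip() removes the padding and replace(' ', '') removes the separators, so the expression as rendered here reduces to a single straight apostrophe (the full file plausibly lists further elision characters, such as the typographic apostrophe, that this view may have dropped):

# What the rendered expression evaluates to, step by step:
ELISION = " ' ".strip().replace(' ', '').replace('\n', '')
assert ELISION == "'"  # " ' " -> "'", and nothing is left to replace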

@@ -3,12 +3,12 @@ from __future__ import unicode_literals
 import regex as re
-from .punctuation import ELISION, HYPHENS
 from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
-from ..language_data.tokenizer_exceptions import _URL_PATTERN
-from ..language_data.punctuation import ALPHA_LOWER
+from .punctuation import ELISION, HYPHENS
+from ..tokenizer_exceptions import URL_PATTERN
+from ..char_classes import ALPHA_LOWER
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA
 def upper_first_letter(text):

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 from .stop_words import STOP_WORDS
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc
 class Hebrew(Language):

@@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class Hungarian(Language):

@@ -1,9 +1,6 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
-from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
-from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES
+from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
+from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
+from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
 _currency_symbols = r"\$ ¢ £ € ¥ ฿"
@@ -38,3 +35,6 @@ TOKENIZER_INFIXES = (
     r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
     r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
     r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", ""))])

@@ -3,9 +3,10 @@ from __future__ import unicode_literals
 import regex as re
-from ..symbols import ORTH
-from ..language_data.punctuation import ALPHA_LOWER, CURRENCY
-from ..language_data.tokenizer_exceptions import _URL_PATTERN
+from ..punctuation import ALPHA_LOWER, CURRENCY
+from ..tokenizer_exceptions import URL_PATTERN
+from ...symbols import ORTH
 _exc = {}

@@ -4,11 +4,11 @@ from __future__ import unicode_literals
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class Italian(Language):

@@ -1,9 +1,9 @@
 # encoding: utf8
 from __future__ import unicode_literals, print_function
-from ..language import Language
-from ..attrs import LANG
-from ..tokens import Doc
+from ...language import Language
+from ...attrs import LANG
+from ...tokens import Doc
 class Japanese(Language):

@@ -5,10 +5,10 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc
 class Norwegian(Language):

@@ -1,8 +1,8 @@
 # encoding: utf8
 from __future__ import unicode_literals
-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA
 # Used the table of pronouns at https://no.wiktionary.org/wiki/Tillegg:Pronomen_i_norsk

@@ -1,7 +1,7 @@
 # encoding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA
+from ...symbols import ORTH, LEMMA
 _exc = {}

@@ -3,10 +3,10 @@ from __future__ import unicode_literals
 from .stop_words import STOP_WORDS
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...attrs import LANG
+from ...util import update_exc

@@ -5,11 +5,11 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
 from .stop_words import STOP_WORDS
 from .lemmatizer import LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class Portuguese(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, NORM
+from ...deprecated import PRON_LEMMA
 _exc = {

@@ -6,11 +6,11 @@ from .stop_words import STOP_WORDS
 from .morph_rules import MORPH_RULES
 from .lemmatizer import LEMMA_RULES, LOOKUP
-from ..language_data import BASE_EXCEPTIONS
-from ..language import Language
-from ..lemmatizerlookup import Lemmatizer
-from ..attrs import LANG
-from ..util import update_exc
+from ..tokenizer_exceptions import BASE_EXCEPTIONS
+from ...language import Language
+from ...lemmatizerlookup import Lemmatizer
+from ...attrs import LANG
+from ...util import update_exc
 class Swedish(Language):

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import LEMMA
-from ..deprecated import PRON_LEMMA
+from ...symbols import LEMMA
+from ...deprecated import PRON_LEMMA
 # Used the table of pronouns at https://sv.wiktionary.org/wiki/deras

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..symbols import ORTH, LEMMA, TAG, NORM
-from ..deprecated import PRON_LEMMA
+from ...symbols import ORTH, LEMMA, TAG, NORM
+from ...deprecated import PRON_LEMMA
 _exc = {}

@@ -1,8 +1,8 @@
 # coding: utf8
 from __future__ import unicode_literals
-from ..language import Language
-from ..tokens import Doc
+from ...language import Language
+from ...tokens import Doc
 class Chinese(Language):
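A refactor that touches relative imports in 35 files is easy to smoke-test by importing every submodule and letting any leftover ..-vs-... mistake raise ImportError. A sketch, assuming the package is importable as spacy.lang (inferred from the imports in this diff, not stated on the page):

import importlib
import pkgutil

import spacy.lang

# Import every module under spacy.lang so that any relative import that
# still points one level too high (or too low) fails loudly.
for finder, name, ispkg in pkgutil.walk_packages(spacy.lang.__path__,
                                                 prefix='spacy.lang.'):
    importlib.import_module(name)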