mirror of
https://github.com/explosion/spaCy.git
synced 2024-12-24 00:46:28 +03:00
Fix relative imports
This commit is contained in:
parent
ae99990f63
commit
73b577cb01
|
@ -7,10 +7,10 @@ from .tag_map import TAG_MAP
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LEMMA_RULES
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Bengali(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import LEMMA
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...deprecated import PRON_LEMMA
|
||||
from ...symbols import LEMMA
|
||||
|
||||
|
||||
MORPH_RULES = {
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
|
||||
from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
|
||||
from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES
|
||||
|
||||
|
||||
CURRENCY_SYMBOLS = r"\$ ¢ £ € ¥ ฿ ৳"
|
||||
|
@ -44,3 +41,5 @@ TOKENIZER_INFIXES = (
|
|||
r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", "")),
|
||||
]
|
||||
)
|
||||
from ..char_classes import LIST_PUNCT, LIST_ELLIPSES, LIST_QUOTES, UNITS
|
||||
from ..char_classes import ALPHA_LOWER, ALPHA_UPPER, ALPHA, HYPHENS, QUOTES
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ..symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM
|
||||
from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ...symbols import CCONJ, NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX, SYM
|
||||
|
||||
|
||||
TAG_MAP = {
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# coding=utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import *
|
||||
from ...symbols import ORTH, LEMMA
|
||||
|
||||
TOKENIZER_EXCEPTIONS = {}
|
||||
|
||||
|
|
|
@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class German(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
|
||||
from ...symbols import POS, PUNCT, ADJ, CONJ, SCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON, AUX
|
||||
|
||||
|
||||
TAG_MAP = {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
_exc = {
|
||||
|
|
|
@ -7,10 +7,10 @@ from .stop_words import STOP_WORDS
|
|||
from .morph_rules import MORPH_RULES
|
||||
from .lemmatizer import LEMMA_RULES, LEMMA_INDEX, LEMMA_EXC
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class English(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import LEMMA
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import LEMMA
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
MORPH_RULES = {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ..symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
|
||||
from ...symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
|
||||
from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
|
||||
|
||||
|
||||
TAG_MAP = {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
|
|
@ -6,11 +6,11 @@ from .tag_map import TAG_MAP
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Spanish(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import ORTH, LEMMA, TAG, NORM, ADP, DET
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
_exc = {
|
||||
|
|
|
@ -4,10 +4,10 @@ from __future__ import unicode_literals
|
|||
from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
||||
from .stop_words import STOP_WORDS
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Finnish(Language):
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA
|
||||
from ...symbols import ORTH, LEMMA
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
|
|
@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_SUFFIXES, TOKENIZER_INFIXES
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class French(Language):
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..language_data.punctuation import ALPHA, TOKENIZER_INFIXES, LIST_PUNCT
|
||||
from ..language_data.punctuation import LIST_ELLIPSES, LIST_QUOTES, CURRENCY
|
||||
from ..language_data.punctuation import UNITS, ALPHA_LOWER, QUOTES, ALPHA_UPPER
|
||||
from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
|
||||
from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
|
||||
from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
|
||||
|
||||
|
||||
ELISION = " ' ’ ".strip().replace(' ', '').replace('\n', '')
|
||||
|
|
|
@ -3,12 +3,12 @@ from __future__ import unicode_literals
|
|||
|
||||
import regex as re
|
||||
|
||||
from .punctuation import ELISION, HYPHENS
|
||||
from ._tokenizer_exceptions_list import FR_BASE_EXCEPTIONS
|
||||
from ..symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ..language_data.tokenizer_exceptions import _URL_PATTERN
|
||||
from ..language_data.punctuation import ALPHA_LOWER
|
||||
from .punctuation import ELISION, HYPHENS
|
||||
from ..tokenizer_exceptions import URL_PATTERN
|
||||
from ..char_classes import ALPHA_LOWER
|
||||
from ...symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
def upper_first_letter(text):
|
||||
|
|
|
@ -3,10 +3,10 @@ from __future__ import unicode_literals
|
|||
|
||||
from .stop_words import STOP_WORDS
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Hebrew(Language):
|
||||
|
|
|
@ -6,11 +6,11 @@ from .punctuation import TOKENIZER_PREFIXES, TOKENIZER_SUFFIXES, TOKENIZER_INFIX
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Hungarian(Language):
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..language_data.punctuation import ALPHA_LOWER, LIST_ELLIPSES, QUOTES
|
||||
from ..language_data.punctuation import ALPHA_UPPER, LIST_QUOTES, UNITS
|
||||
from ..language_data.punctuation import CURRENCY, LIST_PUNCT, ALPHA, _QUOTES
|
||||
|
||||
|
||||
_currency_symbols = r"\$ ¢ £ € ¥ ฿"
|
||||
|
@ -38,3 +35,6 @@ TOKENIZER_INFIXES = (
|
|||
r'(?<=[{a}])--(?=[{a}])'.format(a=ALPHA),
|
||||
r'(?<=[{a}]),(?=[{a}])'.format(a=ALPHA),
|
||||
r'(?<=[{a}])([{q}\)\]\(\[])(?=[\-{a}])'.format(a=ALPHA, q=_QUOTES.replace("'", "").strip().replace(" ", ""))])
|
||||
from ..char_classes import TOKENIZER_INFIXES, LIST_PUNCT, LIST_ELLIPSES
|
||||
from ..char_classes import LIST_QUOTES, CURRENCY, QUOTES, UNITS
|
||||
from ..char_classes import ALPHA, ALPHA_LOWER, ALPHA_UPPER
|
||||
|
|
|
@ -3,9 +3,10 @@ from __future__ import unicode_literals
|
|||
|
||||
import regex as re
|
||||
|
||||
from ..symbols import ORTH
|
||||
from ..language_data.punctuation import ALPHA_LOWER, CURRENCY
|
||||
from ..language_data.tokenizer_exceptions import _URL_PATTERN
|
||||
from ..punctuation import ALPHA_LOWER, CURRENCY
|
||||
from ..tokenizer_exceptions import URL_PATTERN
|
||||
from ...symbols import ORTH
|
||||
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
|
|
@ -4,11 +4,11 @@ from __future__ import unicode_literals
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Italian(Language):
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# encoding: utf8
|
||||
from __future__ import unicode_literals, print_function
|
||||
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..tokens import Doc
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...tokens import Doc
|
||||
|
||||
|
||||
class Japanese(Language):
|
||||
|
|
|
@ -5,10 +5,10 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
|||
from .stop_words import STOP_WORDS
|
||||
from .morph_rules import MORPH_RULES
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Norwegian(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# encoding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import LEMMA
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import LEMMA
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
# Used the table of pronouns at https://no.wiktionary.org/wiki/Tillegg:Pronomen_i_norsk
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
# encoding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA
|
||||
from ...symbols import ORTH, LEMMA
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
|
|
@ -3,10 +3,10 @@ from __future__ import unicode_literals
|
|||
|
||||
from .stop_words import STOP_WORDS
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -5,11 +5,11 @@ from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS
|
|||
from .stop_words import STOP_WORDS
|
||||
from .lemmatizer import LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Portuguese(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA, NORM
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import ORTH, LEMMA, NORM
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
_exc = {
|
||||
|
|
|
@ -6,11 +6,11 @@ from .stop_words import STOP_WORDS
|
|||
from .morph_rules import MORPH_RULES
|
||||
from .lemmatizer import LEMMA_RULES, LOOKUP
|
||||
|
||||
from ..language_data import BASE_EXCEPTIONS
|
||||
from ..language import Language
|
||||
from ..lemmatizerlookup import Lemmatizer
|
||||
from ..attrs import LANG
|
||||
from ..util import update_exc
|
||||
from ..tokenizer_exceptions import BASE_EXCEPTIONS
|
||||
from ...language import Language
|
||||
from ...lemmatizerlookup import Lemmatizer
|
||||
from ...attrs import LANG
|
||||
from ...util import update_exc
|
||||
|
||||
|
||||
class Swedish(Language):
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import LEMMA
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import LEMMA
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
# Used the table of pronouns at https://sv.wiktionary.org/wiki/deras
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ..deprecated import PRON_LEMMA
|
||||
from ...symbols import ORTH, LEMMA, TAG, NORM
|
||||
from ...deprecated import PRON_LEMMA
|
||||
|
||||
|
||||
_exc = {}
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
# coding: utf8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from ..language import Language
|
||||
from ..tokens import Doc
|
||||
from ...language import Language
|
||||
from ...tokens import Doc
|
||||
|
||||
|
||||
class Chinese(Language):
|
||||
|
|
Loading…
Reference in New Issue
Block a user