Update conftest to lazy load languages

ines 2017-05-09 00:02:21 +02:00
parent 9f0fd5963f
commit bd57b611cc

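The diff below replaces eager imports of every language class in the test conftest with on-demand lookups through `util.load_lang_class`, so collecting the test suite no longer imports all thirteen languages up front. As a rough illustration of the pattern (not spaCy's actual implementation), such a loader can be built on `importlib`; the module path and the `__all__` convention are assumptions here:

    import importlib

    def load_lang_class(lang):
        # Hypothetical sketch: import e.g. the 'en' submodule only when
        # it is first requested, then return the Language subclass it
        # exports. Assumes each language module lists it in __all__.
        module = importlib.import_module('.%s' % lang, 'spacy')
        return getattr(module, module.__all__[0])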

@@ -1,98 +1,85 @@
 # coding: utf-8
 from __future__ import unicode_literals

-from ..en import English
-from ..de import German
-from ..es import Spanish
-from ..it import Italian
-from ..fr import French
-from ..pt import Portuguese
-from ..nl import Dutch
-from ..sv import Swedish
-from ..hu import Hungarian
-from ..fi import Finnish
-from ..bn import Bengali
-from ..he import Hebrew
-from ..nb import Norwegian
 from ..tokens import Doc
 from ..strings import StringStore
 from ..lemmatizer import Lemmatizer
 from ..attrs import ORTH, TAG, HEAD, DEP
+from .. import util

 from io import StringIO, BytesIO
 from pathlib import Path
 import os
 import pytest

-LANGUAGES = [English, German, Spanish, Italian, French, Portuguese, Dutch,
-             Swedish, Hungarian, Finnish, Bengali, Norwegian]
+_languages = ['bn', 'de', 'en', 'es', 'fi', 'fr', 'he', 'hu', 'it', 'nb', 'nl',
+              'pt', 'sv']


-@pytest.fixture(params=LANGUAGES)
+@pytest.fixture(params=_languages)
 def tokenizer(request):
-    lang = request.param
+    lang = util.load_lang_class(request.param)
     return lang.Defaults.create_tokenizer()


 @pytest.fixture
 def en_tokenizer():
-    return English.Defaults.create_tokenizer()
+    return util.load_lang_class('en').Defaults.create_tokenizer()


 @pytest.fixture
 def en_vocab():
-    return English.Defaults.create_vocab()
+    return util.load_lang_class('en').Defaults.create_vocab()


 @pytest.fixture
 def en_parser():
-    return English.Defaults.create_parser()
+    return util.load_lang_class('en').Defaults.create_parser()


 @pytest.fixture
 def es_tokenizer():
-    return Spanish.Defaults.create_tokenizer()
+    return util.load_lang_class('es').Defaults.create_tokenizer()


 @pytest.fixture
 def de_tokenizer():
-    return German.Defaults.create_tokenizer()
+    return util.load_lang_class('de').Defaults.create_tokenizer()


 @pytest.fixture(scope='module')
 def fr_tokenizer():
-    return French.Defaults.create_tokenizer()
+    return util.load_lang_class('fr').Defaults.create_tokenizer()


 @pytest.fixture
 def hu_tokenizer():
-    return Hungarian.Defaults.create_tokenizer()
+    return util.load_lang_class('hu').Defaults.create_tokenizer()


 @pytest.fixture
 def fi_tokenizer():
-    return Finnish.Defaults.create_tokenizer()
+    return util.load_lang_class('fi').Defaults.create_tokenizer()


 @pytest.fixture
 def sv_tokenizer():
-    return Swedish.Defaults.create_tokenizer()
+    return util.load_lang_class('sv').Defaults.create_tokenizer()


 @pytest.fixture
 def bn_tokenizer():
-    return Bengali.Defaults.create_tokenizer()
+    return util.load_lang_class('bn').Defaults.create_tokenizer()


 @pytest.fixture
 def he_tokenizer():
-    return Hebrew.Defaults.create_tokenizer()
+    return util.load_lang_class('he').Defaults.create_tokenizer()


 @pytest.fixture
 def nb_tokenizer():
-    return Norwegian.Defaults.create_tokenizer()
+    return util.load_lang_class('nb').Defaults.create_tokenizer()


 @pytest.fixture
 def stringstore():
@@ -101,12 +88,12 @@ def stringstore():
 @pytest.fixture
 def en_entityrecognizer():
-    return English.Defaults.create_entity()
+    return util.load_lang_class('en').Defaults.create_entity()


 @pytest.fixture
 def lemmatizer():
-    return English.Defaults.create_lemmatizer()
+    return util.load_lang_class('en').Defaults.create_lemmatizer()


 @pytest.fixture
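Downstream tests consume these fixtures as usual; for the parametrized `tokenizer` fixture, pytest runs the test once per code in `_languages`, and each run imports only the language under test. A minimal illustrative test (name and assertion are hypothetical, not from this commit):

    def test_tokenizer_runs(tokenizer):
        # Executed once per language code in _languages; the fixture's
        # util.load_lang_class call imports just that one language.
        doc = tokenizer("This is a sentence.")
        assert len(doc) > 0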