Mirror of https://github.com/explosion/spaCy.git (synced 2025-07-08 13:53:13 +03:00)

commit 5d62499266
parent 178760855f

    Fix tests

@@ -127,7 +127,7 @@ def he_tokenizer():
 
 @pytest.fixture(scope="session")
 def hi_tokenizer():
-    return get_lang_class("hi").Defaults.create_tokenizer()
+    return get_lang_class("hi")().tokenizer
 
 
 @pytest.fixture(scope="session")
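
The hi_tokenizer change moves from the v2-era Defaults.create_tokenizer() call to the v3 pattern: instantiate the language class and read its tokenizer attribute. A minimal sketch of that pattern using the same get_lang_class helper (the sample sentence is illustrative):

from spacy.util import get_lang_class

# v3 pattern: calling the Language subclass builds a blank pipeline,
# and its tokenizer is exposed as an attribute
nlp = get_lang_class("hi")()
tokens = nlp.tokenizer("यह एक वाक्य है")  # tokenizing returns a Doc
print([t.text for t in tokens])
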
@@ -245,14 +245,6 @@ def tr_tokenizer():
     return get_lang_class("tr")().tokenizer
 
 
-@pytest.fixture(scope="session")
-def tr_vocab():
-    return get_lang_class("tr").Defaults.create_vocab()
-
-
-@pytest.fixture(scope="session")
-def tr_vocab():
-    return get_lang_class("tr").Defaults.create_vocab()
 @pytest.fixture(scope="session")
 def tt_tokenizer():
     return get_lang_class("tt")().tokenizer
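
The removed tr_vocab fixture appeared twice in a row. Because both definitions share one module, the second def statement silently rebinds the name and the first becomes unreachable, so deleting both is safe once no test requests the fixture. A small illustration of that shadowing behaviour (function names here are hypothetical):

def greet():
    return "first"

def greet():  # rebinds the module-level name; the first body is now unreachable
    return "second"

assert greet() == "second"
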
@@ -305,11 +297,7 @@ def zh_tokenizer_pkuseg():
                 "segmenter": "pkuseg",
             }
         },
-        "initialize": {
-            "tokenizer": {
-                "pkuseg_model": "web",
-            }
-        },
+        "initialize": {"tokenizer": {"pkuseg_model": "web",}},
     }
     nlp = get_lang_class("zh").from_config(config)
     nlp.initialize()
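
The zh hunk is formatting-only: the multi-line initialize block collapses onto a single line, but both literals build the same dict. A quick sketch confirming the equivalence (trailing commas and line breaks do not change the data):

nested = {
    "initialize": {
        "tokenizer": {
            "pkuseg_model": "web",
        }
    },
}
inline = {"initialize": {"tokenizer": {"pkuseg_model": "web",}}}
assert nested == inline
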
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 import pytest
 from spacy.lang.hi.lex_attrs import norm, like_num
 
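
The deleted header lines only matter on Python 2: since Python 3, UTF-8 is the default source encoding (PEP 3120) and string literals are unicode without the __future__ import, so the hi test module can drop both. A two-line check of those defaults:

s = "टोकन"  # non-ASCII source text needs no coding declaration on Python 3
assert isinstance(s, str)  # literals are unicode str without unicode_literals
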
@@ -1,35 +0,0 @@
-# coding: utf8
-from __future__ import unicode_literals
-
-from spacy.lang.en import English
-from spacy.util import fix_random_seed
-
-
-def test_issue6177():
-    """Test that after fixing the random seed, the results of the pipeline are truly identical"""
-
-    # NOTE: no need to transform this code to v3 when 'master' is merged into 'develop'.
-    # A similar test exists already for v3: test_issue5551
-    # This is just a backport
-
-    results = []
-    for i in range(3):
-        fix_random_seed(0)
-        nlp = English()
-        example = (
-            "Once hot, form ping-pong-ball-sized balls of the mixture, each weighing roughly 25 g.",
-            {"cats": {"Label1": 1.0, "Label2": 0.0, "Label3": 0.0}},
-        )
-        textcat = nlp.create_pipe("textcat")
-        nlp.add_pipe(textcat)
-        for label in set(example[1]["cats"]):
-            textcat.add_label(label)
-        nlp.begin_training()
-        # Store the result of each iteration
-        result = textcat.model.predict([nlp.make_doc(example[0])])
-        results.append(list(result[0]))
-
-    # All results should be the same because of the fixed seed
-    assert len(results) == 3
-    assert results[0] == results[1]
-    assert results[0] == results[2]
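
The deleted file was a v2 backport; its own comments point to test_issue5551 as the v3 equivalent. A hedged sketch of the same fixed-seed check written against the v3 API (add_pipe by factory name, initialize instead of begin_training); treat it as illustrative, not as the test that replaced this file:

from spacy.lang.en import English
from spacy.util import fix_random_seed

def check_fixed_seed_reproducibility():
    results = []
    for _ in range(3):
        fix_random_seed(0)
        nlp = English()
        textcat = nlp.add_pipe("textcat")  # v3: add_pipe takes the factory name
        for label in ("Label1", "Label2", "Label3"):
            textcat.add_label(label)
        nlp.initialize()  # v3 replacement for begin_training
        doc = nlp.make_doc("Once hot, form ping-pong-ball-sized balls of the mixture.")
        scores = textcat.model.predict([doc])
        results.append(list(scores[0]))
    # identical seeds must yield identical predictions
    assert results[0] == results[1] == results[2]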