Mirror of https://github.com/explosion/spaCy.git (synced 2024-12-30 20:06:30 +03:00)

Commit 43b960c01b
* Update with WIP
* Update with WIP
* Update with pipeline serialization
* Update types and pipe factories
* Add deep merge, tidy up and add tests
* Fix pipe creation from config
* Don't validate default configs on load
* Update spacy/language.py
  Co-authored-by: Ines Montani <ines@ines.io>
* Adjust factory/component meta error
* Clean up factory args and remove defaults
* Add test for failing empty dict defaults
* Update pipeline handling and methods
* provide KB as registry function instead of as object
* small change in test to make functionality more clear
* update example script for EL configuration
* Fix typo
* Simplify test
* Simplify test
* splitting pipes.pyx into separate files
* moving default configs to each component file
* fix batch_size type
* removing default values from component constructors where possible (TODO: test 4725)
* skip instead of xfail
* Add test for config -> nlp with multiple instances
* pipeline.pipes -> pipeline.pipe
* Tidy up, document, remove kwargs
* small cleanup/generalization for Tok2VecListener
* use DEFAULT_UPSTREAM field
* revert to avoid circular imports
* Fix tests
* Replace deprecated arg
* Make model dirs require config
* fix pickling of keyword-only arguments in constructor
* WIP: clean up and integrate full config
* Add helper to handle function args more reliably
  Now also includes keyword-only args
* Fix config composition and serialization
* Improve config debugging and add visual diff
* Remove unused defaults and fix type
* Remove pipeline and factories from meta
* Update spacy/default_config.cfg
  Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
* Update spacy/default_config.cfg
* small UX edits
* avoid printing stack trace for debug CLI commands
* Add support for language-specific factories
* specify the section of the config which holds the model to debug
* WIP: add Language.from_config
* Update with language data refactor WIP
* Auto-format
* Add backwards-compat handling for Language.factories
* Update morphologizer.pyx
* Fix morphologizer
* Update and simplify lemmatizers
* Fix Japanese tests
* Port over tagger changes
* Fix Chinese and tests
* Update to latest Thinc
* WIP: xfail first Russian lemmatizer test
* Fix component-specific overrides
* fix nO for output layers in debug_model
* Fix default value
* Fix tests and don't pass objects in config
* Fix deep merging
* Fix lemma lookup data registry
  Only load the lookups if an entry is available in the registry (and if spacy-lookups-data is installed)
* Add types
* Add Vocab.from_config
* Fix typo
* Fix tests
* Make config copying more elegant
* Fix pipe analysis
* Fix lemmatizers and is_base_form
* WIP: move language defaults to config
* Fix morphology type
* Fix vocab
* Remove comment
* Update to latest Thinc
* Add morph rules to config
* Tidy up
* Remove set_morphology option from tagger factory
* Hack use_gpu
* Move [pipeline] to top-level block and make [nlp.pipeline] list
  Allows separating component blocks from component order; otherwise, ordering the config would mean a changed component order, which is bad. Also allows initial config to define more components and not use all of them
* Fix use_gpu and resume in CLI
* Auto-format
* Remove resume from config
* Fix formatting and error
* [pipeline] -> [components]
* Fix types
* Fix tagger test: requires set_morphology?

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Co-authored-by: svlandeg <sofie.vanlandeghem@gmail.com>
Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
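To make the layout change in the last few items concrete: component settings move into their own blocks, while the pipeline order becomes a plain list, so reordering the pipeline never means reordering the config. A minimal sketch, shown as the equivalent Python dict rather than a .cfg file; the component names here are hypothetical, not taken from this commit:

# Illustrative sketch only: the config layout described by the commit,
# written as the equivalent Python dict.
config = {
    "nlp": {
        "lang": "en",
        # The component order is a plain list under [nlp] ...
        "pipeline": ["tagger", "parser"],
    },
    "components": {
        # ... while each component's settings live in their own block, so
        # blocks can appear in any order, and a config may define
        # components that the pipeline does not use.
        "tagger": {"factory": "tagger"},
        "parser": {"factory": "parser"},
    },
}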
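# Tests that spaCy's built-in model architectures (Tok2Vec and the text
# classifiers) construct, initialize, predict and update deterministically
# when the random seed is fixed.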
import pytest
from thinc.api import fix_random_seed, Adam, set_dropout_rate
from numpy.testing import assert_array_equal
import numpy

from spacy.ml.models import build_Tok2Vec_model
from spacy.ml.models import build_text_classifier, build_simple_cnn_text_classifier
from spacy.lang.en import English
from spacy.lang.en.examples import sentences as EN_SENTENCES


def get_all_params(model):
    # Flatten every parameter of every node in the model into one array so
    # that two models can be compared element-wise.
    params = []
    for node in model.walk():
        for name in node.param_names:
            params.append(node.get_param(name).ravel())
    return model.ops.xp.concatenate(params)


def get_docs():
    nlp = English()
    return list(nlp.pipe(EN_SENTENCES + [" ".join(EN_SENTENCES)]))


def get_gradient(model, Y):
    # Build a random gradient matching the structure of the model output
    # (a single array, or a list of arrays per doc).
    if isinstance(Y, model.ops.xp.ndarray):
        dY = model.ops.alloc(Y.shape, dtype=Y.dtype)
        dY += model.ops.xp.random.uniform(-1.0, 1.0, Y.shape)
        return dY
    elif isinstance(Y, list):
        return [get_gradient(model, y) for y in Y]
    else:
        raise ValueError(f"Could not get gradient for type {type(Y)}")


def get_tok2vec():
    # Helper, not a test: builds the Tok2Vec layer used by the CNN text
    # classifier below. TOK2VEC_KWARGS is defined after this function but
    # is available by the time it is called.
    return build_Tok2Vec_model(**TOK2VEC_KWARGS)


TOK2VEC_KWARGS = {
    "width": 96,
    "embed_size": 2000,
    "subword_features": True,
    "char_embed": False,
    "conv_depth": 4,
    "bilstm_depth": 0,
    "maxout_pieces": 4,
    "window_size": 1,
    "dropout": 0.1,
    "nM": 0,
    "nC": 0,
    "pretrained_vectors": None,
}

TEXTCAT_KWARGS = {
    "width": 64,
    "embed_size": 2000,
    "pretrained_vectors": None,
    "exclusive_classes": False,
    "ngram_size": 1,
    "window_size": 1,
    "conv_depth": 2,
    "dropout": None,
    "nO": 7,
}

TEXTCAT_CNN_KWARGS = {
    "tok2vec": get_tok2vec(),
    "exclusive_classes": False,
    "nO": 13,
}


@pytest.mark.parametrize(
    "seed,model_func,kwargs",
    [
        (0, build_Tok2Vec_model, TOK2VEC_KWARGS),
        (0, build_text_classifier, TEXTCAT_KWARGS),
        (0, build_simple_cnn_text_classifier, TEXTCAT_CNN_KWARGS),
    ],
)
def test_models_initialize_consistently(seed, model_func, kwargs):
    # Two models built and initialized under the same seed should end up
    # with identical parameters.
    fix_random_seed(seed)
    model1 = model_func(**kwargs)
    model1.initialize()
    fix_random_seed(seed)
    model2 = model_func(**kwargs)
    model2.initialize()
    params1 = get_all_params(model1)
    params2 = get_all_params(model2)
    assert_array_equal(params1, params2)


@pytest.mark.parametrize(
    "seed,model_func,kwargs,get_X",
    [
        (0, build_Tok2Vec_model, TOK2VEC_KWARGS, get_docs),
        (0, build_text_classifier, TEXTCAT_KWARGS, get_docs),
        (0, build_simple_cnn_text_classifier, TEXTCAT_CNN_KWARGS, get_docs),
    ],
)
def test_models_predict_consistently(seed, model_func, kwargs, get_X):
    # Two models built and initialized under the same seed should produce
    # identical predictions, both for the final output and for the tok2vec
    # sublayer (if the model exposes one).
    fix_random_seed(seed)
    model1 = model_func(**kwargs).initialize()
    Y1 = model1.predict(get_X())
    fix_random_seed(seed)
    model2 = model_func(**kwargs).initialize()
    Y2 = model2.predict(get_X())

    if model1.has_ref("tok2vec"):
        tok2vec1 = model1.get_ref("tok2vec").predict(get_X())
        tok2vec2 = model2.get_ref("tok2vec").predict(get_X())
        for i in range(len(tok2vec1)):
            for j in range(len(tok2vec1[i])):
                assert_array_equal(
                    numpy.asarray(tok2vec1[i][j]), numpy.asarray(tok2vec2[i][j])
                )

    if isinstance(Y1, numpy.ndarray):
        assert_array_equal(Y1, Y2)
    elif isinstance(Y1, list):
        assert len(Y1) == len(Y2)
        for y1, y2 in zip(Y1, Y2):
            assert_array_equal(y1, y2)
    else:
        raise ValueError(f"Could not compare type {type(Y1)}")


@pytest.mark.parametrize(
    "seed,dropout,model_func,kwargs,get_X",
    [
        (0, 0.2, build_Tok2Vec_model, TOK2VEC_KWARGS, get_docs),
        (0, 0.2, build_text_classifier, TEXTCAT_KWARGS, get_docs),
        (0, 0.2, build_simple_cnn_text_classifier, TEXTCAT_CNN_KWARGS, get_docs),
    ],
)
def test_models_update_consistently(seed, dropout, model_func, kwargs, get_X):
    def get_updated_model():
        fix_random_seed(seed)
        optimizer = Adam(0.001)
        model = model_func(**kwargs).initialize()
        initial_params = get_all_params(model)
        set_dropout_rate(model, dropout)
        for _ in range(5):
            Y, get_dX = model.begin_update(get_X())
            dY = get_gradient(model, Y)
            get_dX(dY)
            model.finish_update(optimizer)
        updated_params = get_all_params(model)
        # Sanity check: the updates must actually have changed the
        # parameters, so assert_array_equal is expected to fail here.
        with pytest.raises(AssertionError):
            assert_array_equal(initial_params, updated_params)
        return model

    model1 = get_updated_model()
    model2 = get_updated_model()
    assert_array_equal(get_all_params(model1), get_all_params(model2))