[training]
use_gpu = -1
limit = 0
dropout = 0.2
patience = 10000
eval_frequency = 200
scores = ["ents_f"]
score_weights = {"ents_f": 1}
orth_variant_level = 0.0
gold_preproc = true
max_length = 0
batch_size = 25
seed = 0
accumulate_gradient = 2

[optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999

[nlp]
lang = "en"
vectors = null

[nlp.pipeline.tok2vec]
factory = "tok2vec"

[nlp.pipeline.tok2vec.model]
@architectures = "spacy.Tok2Vec.v1"

[nlp.pipeline.tok2vec.model.extract]
@architectures = "spacy.CharacterEmbed.v1"
width = 96
nM = 64
nC = 8
rows = 2000
columns = ["ID", "NORM", "PREFIX", "SUFFIX", "SHAPE", "ORTH"]

[nlp.pipeline.tok2vec.model.extract.features]
@architectures = "spacy.Doc2Feats.v1"
columns = ${nlp.pipeline.tok2vec.model.extract:columns}

[nlp.pipeline.tok2vec.model.embed]
@architectures = "spacy.LayerNormalizedMaxout.v1"
width = ${nlp.pipeline.tok2vec.model.extract:width}
maxout_pieces = 4

[nlp.pipeline.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v1"
width = ${nlp.pipeline.tok2vec.model.extract:width}
window_size = 1
maxout_pieces = 2
depth = 2

[nlp.pipeline.ner]
factory = "ner"

[nlp.pipeline.ner.model]
@architectures = "spacy.TransitionBasedParser.v1"
nr_feature_tokens = 6
hidden_width = 64
maxout_pieces = 2

[nlp.pipeline.ner.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${nlp.pipeline.tok2vec.model.extract:width}
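
This config uses the extensible config format backed by Thinc's config system: sections like [nlp.pipeline.ner.model] become nested blocks, and ${section:key} references reuse values such as the shared tok2vec width. As a minimal sketch only, assuming the config is saved as "config.cfg" (a hypothetical file name) and a spaCy nightly / Thinc v8 install whose interpolation syntax matches the references above, it can be loaded and inspected like this before training:

# Sketch, not the canonical training entry point: load the config with
# Thinc's Config class and read a few values. File name "config.cfg" and
# the Thinc version are assumptions, not stated in the config itself.
from thinc.api import Config

config = Config().from_disk("config.cfg")

# Sections are parsed into nested dicts keyed by the dotted section names.
print(config["training"]["dropout"])                                # 0.2
print(config["nlp"]["pipeline"]["ner"]["model"]["@architectures"])  # "spacy.TransitionBasedParser.v1"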