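# Training config for an English pipeline in which a shared tok2vec component
# feeds both a tagger and a transition-based parser (see [nlp] and [components]).
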
[training]
max_steps = 0
patience = 10000
eval_frequency = 200
dropout = 0.2
init_tok2vec = null
vectors = null
max_epochs = 100
orth_variant_level = 0.0
gold_preproc = true
max_length = 0
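# The "scores" metrics are reported during training; "score_weights" combines
# them into the single value used to compare checkpoints, here
# 0.8 * dep_las + 0.2 * tag_acc.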
scores = ["tag_acc", "dep_uas", "dep_las"]
|
|
score_weights = {"dep_las": 0.8, "tag_acc": 0.2}
|
|
limit = 0
|
|
seed = 0
|
|
accumulate_gradient = 2
|
|
discard_oversize = false
|
|
raw_text = null
|
|
tag_map = null
|
|
morph_rules = null
|
|
base_model = null
|
|
|
|
eval_batch_size = 128
|
|
use_pytorch_for_gpu_memory = false
|
|
batch_by = "padded"
|
|
|
|
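# thinc's compounding.v1 schedule multiplies the batch size by "compound" after
# each step, clipped at "stop": 100, 100.1, 100.2001, ... up to 1000. At this
# rate the cap is reached after roughly 2,300 steps (1.001^n = 10 at n ~= 2303).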
[training.batch_size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001

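# Standard Adam with the conventional beta values; Adam.v1 settings not listed
# here (e.g. eps, grad_clip) fall back to thinc's defaults.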
[training.optimizer]
@optimizers = "Adam.v1"
learn_rate = 0.001
beta1 = 0.9
beta2 = 0.999

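# Pipeline order matters: tok2vec runs first so that the tagger and parser can
# reuse its output through the Tok2VecTensors listener layers defined below.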
[nlp]
lang = "en"
pipeline = ["tok2vec", "tagger", "parser"]
load_vocab_data = false

[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"

[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[components]

[components.tok2vec]
factory = "tok2vec"

[components.tagger]
factory = "tagger"

[components.parser]
factory = "parser"
learn_tokens = false
min_action_freq = 1

[components.tagger.model]
@architectures = "spacy.Tagger.v1"

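# ${components.tok2vec.model:width} is config-variable interpolation: the value
# is read from the "width" key of [components.tok2vec.model] (96, below), so the
# shared tok2vec layer and its consumers always agree on the embedding width.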
[components.tagger.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${components.tok2vec.model:width}

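# Parser head: each transition state is represented by 8 feature tokens fed
# into a single maxout hidden layer (width 64, 3 pieces).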
[components.parser.model]
@architectures = "spacy.TransitionBasedParser.v1"
nr_feature_tokens = 8
hidden_width = 64
maxout_pieces = 3

[components.parser.model.tok2vec]
@architectures = "spacy.Tok2VecTensors.v1"
width = ${components.tok2vec.model:width}

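# Shared embedding layer: hash-embedded token (and subword) features passed
# through a 4-layer CNN with a window of 1 token on each side.
# ${training:vectors} interpolates the "vectors" setting from [training],
# which is null here, i.e. no pretrained vectors are loaded.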
[components.tok2vec.model]
@architectures = "spacy.HashEmbedCNN.v1"
pretrained_vectors = ${training:vectors}
width = 96
depth = 4
window_size = 1
embed_size = 2000
maxout_pieces = 3
subword_features = true
dropout = null