[nlp]
lang = null
pipeline = []
load_vocab_data = true
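# "lang" and "pipeline" are left unset here and are expected to be filled in
# by a concrete config, e.g. lang = "en" and pipeline = ["tagger","parser"]
# (illustrative values, not defaults from this file).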

[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"
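# Blocks with an "@" key are registered functions: "@tokenizers" asks the
# tokenizers registry for the function named "spacy.Tokenizer.v1", and any
# further keys in the block are passed to it as arguments. The same pattern
# is used for "@lemmatizers", "@optimizers" and "@schedules" below.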

[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[components]
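# Empty by default; concrete configs add one block per pipeline component.
# A hypothetical sketch (names and settings are illustrative only):
# [components.tagger]
# factory = "tagger"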

# Training hyper-parameters and additional features.
[training]
# Whether to train on sequences with 'gold standard' sentence boundaries
# and tokens. If you set this to true, take care to ensure your run-time
# data is passed in sentence-by-sentence via some prior preprocessing.
gold_preproc = false
# Limitations on training document length or number of examples.
max_length = 5000
limit = 0
# Data augmentation
orth_variant_level = 0.0
dropout = 0.1
# Controls early-stopping. 0 or -1 mean unlimited.
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
eval_batch_size = 128
# Other settings
seed = 0
accumulate_gradient = 1
use_pytorch_for_gpu_memory = false
# Control how scores are printed and checkpoints are evaluated.
score_weights = {}
# These settings are invalid for the transformer models.
init_tok2vec = null
discard_oversize = false
raw_text = null
tag_map = null
morph_rules = null
base_model = null
vectors = null
batch_by = "words"
batch_size = 1000
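# With batch_by = "words", batch_size is measured in words rather than in
# documents (an interpretation of this setting, not stated in this file).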

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 1e-8
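# With L2_is_weight_decay = true, the L2 term is applied as decoupled weight
# decay in the AdamW style rather than as a classic L2 penalty (per Thinc's
# Adam.v1, as understood here).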

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.001
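# warmup_linear.v1 ramps the learning rate linearly from 0 to initial_rate
# over warmup_steps, then decays it linearly back towards 0 until total_steps
# is reached.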

[pretraining]
max_epochs = 1000
min_length = 5
max_length = 500
dropout = 0.2
n_save_every = null
batch_size = 3000
seed = ${training:seed}
use_pytorch_for_gpu_memory = ${training:use_pytorch_for_gpu_memory}
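# ${section:key} interpolates a value from another section, so these two
# settings always mirror their counterparts under [training].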
tok2vec_model = "components.tok2vec.model"

[pretraining.objective]
type = "characters"
n_characters = 4
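# The "characters" objective trains the tok2vec layer to predict the first
# and last n_characters of each word (a rough gloss of the objective, not a
# full description).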

[pretraining.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = true
eps = 1e-8
learn_rate = 0.001
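# Mirrors [training.optimizer], except that use_averages is enabled and the
# learning rate is a fixed value instead of a schedule.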