Mirror of https://github.com/explosion/spaCy.git (synced 2025-01-26 01:04:34 +03:00)
Commit 43b960c01b
* Update with WIP
* Update with WIP
* Update with pipeline serialization
* Update types and pipe factories
* Add deep merge, tidy up and add tests
* Fix pipe creation from config
* Don't validate default configs on load
* Update spacy/language.py
  Co-authored-by: Ines Montani <ines@ines.io>
* Adjust factory/component meta error
* Clean up factory args and remove defaults
* Add test for failing empty dict defaults
* Update pipeline handling and methods
* provide KB as registry function instead of as object
* small change in test to make functionality more clear
* update example script for EL configuration
* Fix typo
* Simplify test
* Simplify test
* splitting pipes.pyx into separate files
* moving default configs to each component file
* fix batch_size type
* removing default values from component constructors where possible (TODO: test 4725)
* skip instead of xfail
* Add test for config -> nlp with multiple instances
* pipeline.pipes -> pipeline.pipe
* Tidy up, document, remove kwargs
* small cleanup/generalization for Tok2VecListener
* use DEFAULT_UPSTREAM field
* revert to avoid circular imports
* Fix tests
* Replace deprecated arg
* Make model dirs require config
* fix pickling of keyword-only arguments in constructor
* WIP: clean up and integrate full config
* Add helper to handle function args more reliably
  Now also includes keyword-only args
* Fix config composition and serialization
* Improve config debugging and add visual diff
* Remove unused defaults and fix type
* Remove pipeline and factories from meta
* Update spacy/default_config.cfg
  Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
* Update spacy/default_config.cfg
* small UX edits
* avoid printing stack trace for debug CLI commands
* Add support for language-specific factories
* specify the section of the config which holds the model to debug
* WIP: add Language.from_config
* Update with language data refactor WIP
* Auto-format
* Add backwards-compat handling for Language.factories
* Update morphologizer.pyx
* Fix morphologizer
* Update and simplify lemmatizers
* Fix Japanese tests
* Port over tagger changes
* Fix Chinese and tests
* Update to latest Thinc
* WIP: xfail first Russian lemmatizer test
* Fix component-specific overrides
* fix nO for output layers in debug_model
* Fix default value
* Fix tests and don't pass objects in config
* Fix deep merging
* Fix lemma lookup data registry
  Only load the lookups if an entry is available in the registry (and if spacy-lookups-data is installed)
* Add types
* Add Vocab.from_config
* Fix typo
* Fix tests
* Make config copying more elegant
* Fix pipe analysis
* Fix lemmatizers and is_base_form
* WIP: move language defaults to config
* Fix morphology type
* Fix vocab
* Remove comment
* Update to latest Thinc
* Add morph rules to config
* Tidy up
* Remove set_morphology option from tagger factory
* Hack use_gpu
* Move [pipeline] to top-level block and make [nlp.pipeline] list
  Allows separating component blocks from component order – otherwise, ordering the config would mean a changed component order, which is bad. Also allows initial config to define more components and not use all of them
* Fix use_gpu and resume in CLI
* Auto-format
* Remove resume from config
* Fix formatting and error
* [pipeline] -> [components]
* Fix types
* Fix tagger test: requires set_morphology?

Co-authored-by: Sofie Van Landeghem <svlandeg@users.noreply.github.com>
Co-authored-by: svlandeg <sofie.vanlandeghem@gmail.com>
Co-authored-by: Matthew Honnibal <honnibal+gh@gmail.com>
103 lines · 2.2 KiB · INI
[nlp]
lang = null
stop_words = []
lex_attr_getters = {}
pipeline = []
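
# pipeline holds only the ordered component names; the component blocks live
# under [components] below. Keeping the order separate from the blocks means
# reordering the config file cannot silently change the pipeline, and an
# initial config may define more components than it actually uses.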

[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"
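
# The @tokenizers value is resolved against spaCy's function registry when
# the config is loaded. As a sketch (assuming a function has been registered
# under the hypothetical name "my_tokenizer.v1"), a user config could swap in
# a custom tokenizer by overriding this block:
#
#   [nlp.tokenizer]
#   @tokenizers = "my_tokenizer.v1"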

[nlp.lemmatizer]
@lemmatizers = "spacy.Lemmatizer.v1"

[nlp.writing_system]
direction = "ltr"
has_case = true
has_letters = true

[components]
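
# [components] is intentionally empty in the defaults. A concrete config adds
# one block per component named in nlp.pipeline, each pointing at a registered
# factory, for example (hypothetical pipeline with a single tagger):
#
#   [components.tagger]
#   factory = "tagger"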

# Training hyper-parameters and additional features.
[training]
# Whether to train on sequences with 'gold standard' sentence boundaries
# and tokens. If you set this to true, take care to ensure your run-time
# data is passed in sentence-by-sentence via some prior preprocessing.
gold_preproc = false
# Limitations on training document length or number of examples.
max_length = 5000
limit = 0
# Data augmentation
orth_variant_level = 0.0
dropout = 0.1
# Controls early-stopping. 0 or -1 mean unlimited.
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
eval_batch_size = 128
# Other settings
seed = 0
accumulate_gradient = 1
use_pytorch_for_gpu_memory = false
# Control how scores are printed and checkpoints are evaluated.
scores = ["speed", "tags_acc", "uas", "las", "ents_f"]
score_weights = {"tags_acc": 0.2, "las": 0.4, "ents_f": 0.4}
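# The weighted scores decide which checkpoint counts as best: the overall
# score is 0.2 * tags_acc + 0.4 * las + 0.4 * ents_f (the weights sum to
# 1.0), so "speed" and "uas" are reported but do not affect model selection.
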
# These settings are invalid for the transformer models.
init_tok2vec = null
discard_oversize = false
omit_extra_lookups = false
batch_by = "sequences"
raw_text = null
tag_map = null
morph_rules = null
base_model = null
vectors = null

[training.batch_size]
@schedules = "compounding.v1"
start = 1000
stop = 1000
compound = 1.001
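
# compounding.v1 yields batch sizes that begin at `start` and are multiplied
# by `compound` each step until they reach `stop`; e.g. start = 100,
# stop = 1000, compound = 1.001 would give 100, 100.1, 100.2001, ... capped
# at 1000. With start = stop = 1000 as above, the batch size stays constant.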

[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 1e-8
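
# With L2_is_weight_decay = true, the L2 value is applied as decoupled weight
# decay (AdamW-style) rather than as a classic L2 penalty added to the
# gradient.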

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.001
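
# warmup_linear.v1 increases the learning rate linearly from 0 to
# initial_rate over the first warmup_steps steps, then decays it linearly
# back toward 0 by total_steps; e.g. at step 125 (halfway through warmup)
# the rate is about 0.0005, and at step 250 it peaks at 0.001.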

[pretraining]
max_epochs = 1000
min_length = 5
max_length = 500
dropout = 0.2
n_save_every = null
batch_size = 3000
seed = ${training:seed}
use_pytorch_for_gpu_memory = ${training:use_pytorch_for_gpu_memory}
tok2vec_model = "components.tok2vec.model"
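
# ${training:seed} is variable interpolation: the value is copied from the
# seed setting in the [training] block, so the two can never drift apart.
# tok2vec_model is a dotted path into this same config, pointing at the
# model section to pretrain.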

[pretraining.objective]
type = "characters"
n_characters = 4
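
# The "characters" objective trains the tok2vec layer to predict character
# details of each word form; with n_characters = 4 that is presumably the
# first and last four characters. The main alternative objective type
# predicts each word's pretrained vector instead.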

[pretraining.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = true
eps = 1e-8
learn_rate = 0.001
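
# Usage sketch: the [nlp] and [components] blocks are what
# Language.from_config (introduced in this commit) consumes to build a
# pipeline, while the [training] and [pretraining] blocks drive the train
# and pretrain commands (CLI details were still in flux at this WIP stage).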