mirror of https://github.com/explosion/spaCy.git, synced 2024-12-26 18:06:29 +03:00
commit e962784531
* Add Lemmatizer and simplify related components

  * Add `Lemmatizer` pipe with `lookup` and `rule` modes using the `Lookups` tables.
  * Reduce `Tagger` to a simple tagger that sets `Token.tag` (no pos or lemma)
  * Reduce `Morphology` to only keep track of morph tags (no tag map, lemmatizer, or morph rules)
  * Remove lemmatizer from `Vocab`
  * Adjust many many tests

  Differences:

  * No default lookup lemmas
  * No special treatment of TAG in `from_array` and similar required
  * Easier to modify labels in a `Tagger`
  * No extra strings added from morphology / tag map

* Fix test
* Initial fix for Lemmatizer config/serialization
* Adjust init test to be more generic
* Adjust init test to force empty Lookups
* Add simple cache to rule-based lemmatizer
* Convert language-specific lemmatizers to component lemmatizers; remove the previous lemmatizer class
* Fix French and Polish lemmatizers
* Remove outdated UPOS conversions
* Update Russian lemmatizer init in tests
* Add minimal init/run tests for custom lemmatizers
* Add option to overwrite existing lemmas
* Update mode setting, lookup loading, and caching:
  * Make `mode` an immutable property
  * Only enforce strict `load_lookups` for known supported modes
  * Move caching into individual `_lemmatize` methods
  * Implement strict mode when lang is not found in lookups
  * Fix tables/lookups in make_lemmatizer
  * Reallow provided lookups and allow for stricter checks
* Add lookups asset to all Lemmatizer pipe tests
* Rename lookups in lemmatizer init test
* Clean up merge
* Refactor lookup table loading: add a `load_lemmatizer_lookups` helper that loads required and optional lookups tables based on settings provided by a config. Additional slight refactor of lookups:
  * Add `Lookups.set_table` to set a table from a provided `Table`
  * Reorder class definitions to be able to specify type as `Table`
* Move registry assets into test methods
* Refactor lookups tables config: use class methods within `Lemmatizer` to provide the config for particular modes and to load the lookups from a config
* Add pipe and score to lemmatizer
* Simplify Tagger.score
* Add missing import
* Clean up imports and auto-format
* Remove unused kwarg
* Tidy up and auto-format
* Update docstrings for Lemmatizer; additionally modify the `is_base_form` API to take `Token` instead of individual features
* Update docstrings
* Remove tag map values from Tagger.add_label
* Update API docs
* Fix relative link in Lemmatizer API docs
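The headline change is that lemmatization is now a standalone pipeline component rather than part of the `Tagger`, `Morphology`, or `Vocab`. A minimal sketch of using the new component, assuming spaCy v3's released `add_pipe` API and that the `spacy-lookups-data` package is installed to supply the `Lookups` tables:

```python
import spacy

nlp = spacy.blank("en")
# "lookup" mode reads lemmas straight from the "lemma_lookup" table;
# "rule" mode additionally needs POS tags from a tagger earlier in the
# pipeline, since is_base_form now operates on the Token itself.
nlp.add_pipe("lemmatizer", config={"mode": "lookup"})
nlp.initialize()  # loads the lookups tables required by the chosen mode

doc = nlp("She was reading papers")
print([(t.text, t.lemma_) for t in doc])
```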
119 lines · 2.6 KiB · INI
[paths]
train = ""
dev = ""
raw = null
init_tok2vec = null

[system]
seed = 0
use_pytorch_for_gpu_memory = false

[nlp]
lang = null
pipeline = []
load_vocab_data = true
before_creation = null
after_creation = null
after_pipeline_creation = null

[nlp.tokenizer]
@tokenizers = "spacy.Tokenizer.v1"

[components]

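Before the training block, a note on how a file like this is consumed. The sketch below assumes the `Config` class from `thinc` that spaCy v3's config system builds on; note that the `${section:key}` variable syntax used in this historical file changed to `${section.key}` in later releases.

```python
from thinc.api import Config

# Registry references like `@tokenizers = "spacy.Tokenizer.v1"` name
# registered factory functions; `${paths:train}`-style values are
# interpolated from other sections when the config is loaded.
config = Config().from_disk("config.cfg")
print(config["nlp"]["pipeline"])      # []
print(config["training"]["dropout"])  # 0.1
```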
# Training hyper-parameters and additional features.
[training]
seed = ${system:seed}
dropout = 0.1
accumulate_gradient = 1
# Extra resources for transfer-learning or pseudo-rehearsal
init_tok2vec = ${paths:init_tok2vec}
raw_text = ${paths:raw}
vectors = null
# Controls early-stopping. 0 or -1 mean unlimited.
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
# Control how scores are printed and checkpoints are evaluated.
score_weights = {}
# Names of pipeline components that shouldn't be updated during training
frozen_components = []

[training.train_corpus]
@readers = "spacy.Corpus.v1"
path = ${paths:train}
# Whether to train on sequences with 'gold standard' sentence boundaries
# and tokens. If you set this to true, take care to ensure your run-time
# data is passed in sentence-by-sentence via some prior preprocessing.
gold_preproc = false
# Limitations on training document length
max_length = 2000
# Limitation on number of training examples
limit = 0

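For illustration, a sketch of what this corpus block resolves to, assuming `"spacy.Corpus.v1"` maps to `spacy.training.Corpus` as in released v3; the `train.spacy` path here is a placeholder for whatever `${paths:train}` points to.

```python
import spacy
from spacy.training import Corpus

nlp = spacy.blank("en")
# The reader is a callable that yields Example objects (predicted doc
# paired with reference doc) from a binary .spacy (DocBin) file.
corpus = Corpus("train.spacy", gold_preproc=False, max_length=2000, limit=0)
examples = list(corpus(nlp))
```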
[training.dev_corpus]
@readers = "spacy.Corpus.v1"
path = ${paths:dev}
# Whether to train on sequences with 'gold standard' sentence boundaries
# and tokens. If you set this to true, take care to ensure your run-time
# data is passed in sentence-by-sentence via some prior preprocessing.
gold_preproc = false
# Limitations on training document length
max_length = 2000
# Limitation on number of training examples
limit = 0

[training.batcher]
@batchers = "batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
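`batch_by_words.v1` groups examples by total word count rather than by document count, with `tolerance` allowing a batch to overshoot its size budget by 20%. A rough, illustrative sketch of these semantics; spaCy's actual batcher takes a size schedule and differs in detail:

```python
def batch_by_words(docs, size, tolerance=0.2, discard_oversize=False):
    budget = size * (1 + tolerance)
    batch, words = [], 0
    for doc in docs:
        n = len(doc)
        if n > budget:
            if not discard_oversize:
                yield [doc]  # oversized docs go out as singleton batches
            continue
        if batch and words + n > budget:
            yield batch
            batch, words = [], 0
        batch.append(doc)
        words += n
    if batch:
        yield batch

docs = [["w"] * n for n in (900, 300, 150, 5000, 80)]
print([sum(map(len, b)) for b in batch_by_words(docs, size=1000)])
# [1200, 5000, 230]
```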
[training.batcher.size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
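The batch size is not fixed: it follows a compounding schedule, starting at 100 words and multiplying by 1.001 each step until it caps at 1000. A minimal sketch of that behavior, illustrative rather than thinc's exact implementation:

```python
def compounding(start, stop, compound):
    # Infinite series: start, start*compound, start*compound**2, ...
    # clipped at stop, so batch sizes grow slowly and then plateau.
    value = start
    while True:
        yield min(value, stop)
        value *= compound

sizes = compounding(start=100, stop=1000, compound=1.001)
print([round(next(sizes), 1) for _ in range(5)])
# [100, 100.1, 100.2, 100.3, 100.4]
```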
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 1e-8

[training.optimizer.learn_rate]
@schedules = "warmup_linear.v1"
warmup_steps = 250
total_steps = 20000
initial_rate = 0.001
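Assuming the usual definition of a warm-up linear schedule, the learning rate ramps linearly from 0 to `initial_rate` over the first `warmup_steps`, then decays linearly back to 0 at `total_steps`. A sketch:

```python
def warmup_linear(initial_rate, warmup_steps, total_steps):
    for step in range(total_steps + 1):
        if step < warmup_steps:
            # linear ramp up to the peak rate
            factor = step / max(1, warmup_steps)
        else:
            # linear decay from the peak back to zero
            factor = max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))
        yield initial_rate * factor

rates = list(warmup_linear(0.001, warmup_steps=250, total_steps=20000))
print(rates[250])    # 0.001 (peak, reached after warmup)
print(rates[20000])  # 0.0   (fully decayed)
```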
[pretraining]
max_epochs = 1000
min_length = 5
max_length = 500
dropout = 0.2
n_save_every = null
batch_size = 3000
seed = ${system:seed}
use_pytorch_for_gpu_memory = ${system:use_pytorch_for_gpu_memory}
tok2vec_model = "components.tok2vec.model"

[pretraining.objective]
type = "characters"
n_characters = 4
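With `type = "characters"`, the tok2vec weights are pretrained to reconstruct character substrings of each token, which as far as I understand means the first and last `n_characters` of each word. An illustrative helper showing the target for one token (hypothetical, not spaCy's API):

```python
def character_targets(word: str, n_characters: int = 4) -> str:
    # Target string the model must predict for this token:
    # its first and last n_characters, concatenated.
    return word[:n_characters] + word[-n_characters:]

print(character_targets("lemmatizer"))  # 'lemmizer' (first 4 + last 4 chars)
```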
[pretraining.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = true
eps = 1e-8
learn_rate = 0.001