Mirror of https://github.com/explosion/spaCy.git (synced 2024-11-13 13:17:06 +03:00), at commit 569cc98982.
336 lines, 14 KiB, Python
import random
import numpy
import time
import re
from collections import Counter
from pathlib import Path
from thinc.layers import Linear, Maxout
from thinc.util import prefer_gpu
from wasabi import msg
import srsly
from thinc.layers import chain, list2array
from thinc.loss import CosineDistance, L2Distance

from spacy.gold import Example
from ..errors import Errors
from ..tokens import Doc
from ..attrs import ID, HEAD
from ..ml.component_models import Tok2Vec
from ..ml.component_models import masked_language_model
from .. import util
from ..util import create_default_optimizer
from .train import _load_pretrained_tok2vec


def pretrain(
    # fmt: off
    texts_loc: ("Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the key 'tokens'", "positional", None, str),
    vectors_model: ("Name or path to spaCy model with vectors to learn from", "positional", None, str),
    output_dir: ("Directory to write models to on each epoch", "positional", None, str),
    width: ("Width of CNN layers", "option", "cw", int) = 96,
    depth: ("Depth of CNN layers", "option", "cd", int) = 4,
    bilstm_depth: ("Depth of BiLSTM layers (requires PyTorch)", "option", "lstm", int) = 0,
    cnn_pieces: ("Maxout size for CNN layers. 1 for Mish", "option", "cP", int) = 3,
    sa_depth: ("Depth of self-attention layers", "option", "sa", int) = 0,
    use_chars: ("Whether to use character-based embedding", "flag", "chr", bool) = False,
    cnn_window: ("Window size for CNN layers", "option", "cW", int) = 1,
    embed_rows: ("Number of embedding rows", "option", "er", int) = 2000,
    loss_func: ("Loss function to use for the objective. Either 'L2' or 'cosine'", "option", "L", str) = "cosine",
    use_vectors: ("Whether to use the static vectors as input features", "flag", "uv") = False,
    dropout: ("Dropout rate", "option", "d", float) = 0.2,
    n_iter: ("Number of iterations to pretrain", "option", "i", int) = 1000,
    batch_size: ("Number of words per training batch", "option", "bs", int) = 3000,
    max_length: ("Max words per example. Longer examples are discarded", "option", "xw", int) = 500,
    min_length: ("Min words per example. Shorter examples are discarded", "option", "nw", int) = 5,
    seed: ("Seed for random number generators", "option", "s", int) = 0,
    n_save_every: ("Save model every X batches.", "option", "se", int) = None,
    init_tok2vec: ("Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.", "option", "t2v", Path) = None,
    epoch_start: ("The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been renamed. Prevents unintended overwriting of existing weight files.", "option", "es", int) = None,
    # fmt: on
):
    """
    Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,
    using an approximate language-modelling objective. Specifically, we load
    pretrained vectors, and train a component like a CNN, BiLSTM, etc to predict
    vectors which match the pretrained ones. The weights are saved to a directory
    after each epoch. You can then pass a path to one of these pretrained weights
    files to the 'spacy train' command.

    This technique may be especially helpful if you have little labelled data.
    However, it's still quite experimental, so your mileage may vary.

    To load the weights back in during 'spacy train', you need to ensure
    all settings are the same between pretraining and training. The API and
    errors around this need some improvement.
    """
    config = dict(locals())
    for key in config:
        if isinstance(config[key], Path):
            config[key] = str(config[key])
    util.fix_random_seed(seed)

    has_gpu = prefer_gpu()
    if has_gpu:
        import torch

        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    msg.info("Using GPU" if has_gpu else "Not using GPU")

    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir()
        msg.good("Created output directory")
    srsly.write_json(output_dir / "config.json", config)
    msg.good("Saved settings to config.json")

    # Load texts from file or stdin
    if texts_loc != "-":  # reading from a file
        texts_loc = Path(texts_loc)
        if not texts_loc.exists():
            msg.fail("Input text file doesn't exist", texts_loc, exits=1)
        with msg.loading("Loading input texts..."):
            texts = list(srsly.read_jsonl(texts_loc))
        if not texts:
            msg.fail("Input file is empty", texts_loc, exits=1)
        msg.good("Loaded input texts")
        random.shuffle(texts)
    else:  # reading from stdin
        msg.text("Reading input text from stdin...")
        texts = srsly.read_jsonl("-")

    with msg.loading(f"Loading model '{vectors_model}'..."):
        nlp = util.load_model(vectors_model)
    msg.good(f"Loaded model '{vectors_model}'")
    pretrained_vectors = None if not use_vectors else nlp.vocab.vectors
    model = create_pretraining_model(
        nlp,
        Tok2Vec(
            width,
            embed_rows,
            conv_depth=depth,
            pretrained_vectors=pretrained_vectors,
            bilstm_depth=bilstm_depth,  # Requires PyTorch. Experimental.
            subword_features=not use_chars,  # Set to False for Chinese etc
            cnn_maxout_pieces=cnn_pieces,  # If set to 1, use Mish activation.
        ),
    )
    # Load in pretrained weights
    if init_tok2vec is not None:
        components = _load_pretrained_tok2vec(nlp, init_tok2vec)
        msg.text(f"Loaded pretrained tok2vec for: {components}")
        # Parse the epoch number from the given weight file
        model_name = re.search(r"model\d+\.bin", str(init_tok2vec))
        if model_name:
            # Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'
            epoch_start = int(model_name.group(0)[5:][:-4]) + 1
        else:
            if not epoch_start:
                msg.fail(
                    "You have to use the --epoch-start argument when using a renamed weight file for --init-tok2vec",
                    exits=True,
                )
            elif epoch_start < 0:
                msg.fail(
                    f"The argument --epoch-start has to be greater or equal to 0. {epoch_start} is invalid",
                    exits=True,
                )
    else:
        # Without '--init-tok2vec' the '--epoch-start' argument is ignored
        epoch_start = 0

    optimizer = create_default_optimizer()
    tracker = ProgressTracker(frequency=10000)
    msg.divider(f"Pre-training tok2vec layer - starting at epoch {epoch_start}")
    row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")}
    msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings)

    def _save_model(epoch, is_temp=False):
        is_temp_str = ".temp" if is_temp else ""
        with model.use_params(optimizer.averages):
            with (output_dir / f"model{epoch}{is_temp_str}.bin").open("wb") as file_:
                file_.write(model.tok2vec.to_bytes())
            log = {
                "nr_word": tracker.nr_word,
                "loss": tracker.loss,
                "epoch_loss": tracker.epoch_loss,
                "epoch": epoch,
            }
            with (output_dir / "log.jsonl").open("a") as file_:
                file_.write(srsly.json_dumps(log) + "\n")

    skip_counter = 0
    for epoch in range(epoch_start, n_iter + epoch_start):
        for batch_id, batch in enumerate(
            util.minibatch_by_words(
                (Example(doc=text) for text in texts), size=batch_size
            )
        ):
            docs, count = make_docs(
                nlp,
                [text for (text, _) in batch],
                max_length=max_length,
                min_length=min_length,
            )
            skip_counter += count
            loss = make_update(
                model, docs, optimizer, objective=loss_func, drop=dropout
            )
            progress = tracker.update(epoch, loss, docs)
            if progress:
                msg.row(progress, **row_settings)
                if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7:
                    break
            if n_save_every and (batch_id % n_save_every == 0):
                _save_model(epoch, is_temp=True)
        _save_model(epoch)
        tracker.epoch_loss = 0.0
        if texts_loc != "-":
            # Reshuffle the texts if texts were loaded from a file
            random.shuffle(texts)
    if skip_counter > 0:
        msg.warn(f"Skipped {skip_counter} empty values")
    msg.good("Successfully finished pretrain")


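# Usage sketch (illustrative; file names and flags below are hypothetical):
# assuming this function is registered as the "pretrain" CLI command, as its
# plac-style argument annotations suggest, an invocation would look roughly like:
#
#   python -m spacy pretrain texts.jsonl en_core_web_md ./pretrain_output
#
# Each line of texts.jsonl is a JSON object carrying either raw "text" or
# pre-tokenized "tokens" (see make_docs() below). After every epoch the weights
# are written to model{epoch}.bin in the output directory, and per-epoch loss
# statistics are appended to log.jsonl.

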
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
    """Perform an update over a single batch of documents.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    optimizer (callable): An optimizer.
    objective (str): The vector loss to use, either 'L2' or 'cosine'.
    RETURNS loss: A float for the loss.
    """
    predictions, backprop = model.begin_update(docs, drop=drop)
    loss, gradients = get_vectors_loss(model.ops, docs, predictions, objective)
    backprop(gradients, sgd=optimizer)
    # Don't want to return a cupy object here. The gradients are modified
    # in-place by the BERT MLM, so we get an accurate loss.
    return float(loss)


def make_docs(nlp, batch, min_length, max_length):
    docs = []
    skip_count = 0
    for record in batch:
        if not isinstance(record, dict):
            raise TypeError(Errors.E137.format(type=type(record), line=record))
        if "tokens" in record:
            words = record["tokens"]
            if not words:
                skip_count += 1
                continue
            doc = Doc(nlp.vocab, words=words)
        elif "text" in record:
            text = record["text"]
            if not text:
                skip_count += 1
                continue
            doc = nlp.make_doc(text)
        else:
            raise ValueError(Errors.E138.format(text=record))
        if "heads" in record:
            heads = record["heads"]
            heads = numpy.asarray(heads, dtype="uint64")
            heads = heads.reshape((len(doc), 1))
            doc = doc.from_array([HEAD], heads)
        if len(doc) >= min_length and len(doc) < max_length:
            docs.append(doc)
    return docs, skip_count


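# The records handled by make_docs() come straight from the JSONL input. A
# minimal sketch of the accepted shapes, using hypothetical example data (the
# helper below is illustrative only and is not used by the training loop):
#
#   {"text": "Berlin is the capital of Germany."}
#   {"tokens": ["Berlin", "is", "the", "capital", "of", "Germany", "."]}
#
# A record may also carry a "heads" list with one value per token, which is
# written into the Doc via Doc.from_array([HEAD], ...).
def _example_make_docs(nlp):
    records = [
        {"text": "Berlin is the capital of Germany."},
        {"tokens": ["Berlin", "is", "the", "capital", "of", "Germany", "."]},
    ]
    # Both records are non-empty, at least min_length tokens long and shorter
    # than max_length, so nothing gets skipped here.
    docs, n_skipped = make_docs(nlp, records, min_length=1, max_length=500)
    assert n_skipped == 0
    return docs

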
def get_vectors_loss(ops, docs, prediction, objective="L2"):
    """Compute a loss (squared L2 or cosine distance) between the documents'
    vectors and the prediction.

    Note that this is ripe for customization! We could compute the vectors
    in some other way, e.g. with an LSTM language model, or use some other
    type of objective.
    """
    # The simplest way to implement this would be to vstack the
    # token.vector values, but that's a bit inefficient, especially on GPU.
    # Instead we fetch the index into the vectors table for each of our tokens,
    # and look them up all at once. This prevents data copying.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    target = docs[0].vocab.vectors.data[ids]
    # TODO: this code originally didn't normalize, but shouldn't it be normalize=True?
    if objective == "L2":
        distance = L2Distance(normalize=False)
    elif objective == "cosine":
        distance = CosineDistance(normalize=False)
    else:
        raise ValueError(Errors.E142.format(loss_func=objective))
    d_target, loss = distance(prediction, target)
    return loss, d_target


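# For intuition, a minimal numpy-only sketch of the "L2" branch above, assuming
# prediction and target are float arrays of shape (n_tokens, width). The real
# code delegates to thinc's L2Distance / CosineDistance, which also cover the
# cosine objective and GPU arrays; this helper is illustrative only.
def _example_l2_vectors_loss(prediction, target):
    d_target = prediction - target  # gradient of 0.5 * ||prediction - target||^2
    loss = (d_target ** 2).sum()  # summed squared error over all tokens
    return loss, d_target

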
def create_pretraining_model(nlp, tok2vec):
    """Define a network for the pretraining. We simply add an output layer onto
    the tok2vec input model. The tok2vec input model needs to be a model that
    takes a batch of Doc objects (as a list), and returns a list of arrays.
    Each array in the output needs to have one row per token in the doc.
    """
    output_size = nlp.vocab.vectors.data.shape[1]
    output_layer = chain(
        Maxout(300, pieces=3, normalize=True, dropout=0.0), Linear(output_size)
    )
    # This is annoying, but the parser etc have the flatten step after
    # the tok2vec. To load the weights in cleanly, we need to match
    # the shape of the models' components exactly. So what we call
    # "tok2vec" has to be the same set of processes as what the components do.
    tok2vec = chain(tok2vec, list2array())
    model = chain(tok2vec, output_layer)
    model = masked_language_model(nlp.vocab, model)
    model.set_ref("tok2vec", tok2vec)
    model.set_ref("output_layer", output_layer)
    model.initialize(X=[nlp.make_doc("Give it a doc to infer shapes")])
    return model


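# Rough shape walk-through of the model built above (illustrative), for a batch
# of Docs with N tokens in total and vector width V = nlp.vocab.vectors.data.shape[1]:
#
#   List[Doc] --tok2vec--> List[Floats2d] --list2array--> (N, width)
#             --Maxout(300)--> (N, 300) --Linear(V)--> (N, V)
#
# masked_language_model() then wraps the whole chain so that, roughly, a random
# subset of the input words is masked or replaced and the gradient is restricted
# to those positions, giving a BERT-style masked language-modelling objective.

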
class ProgressTracker(object):
    def __init__(self, frequency=1000000):
        self.loss = 0.0
        self.prev_loss = 0.0
        self.nr_word = 0
        self.words_per_epoch = Counter()
        self.frequency = frequency
        self.last_time = time.time()
        self.last_update = 0
        self.epoch_loss = 0.0

    def update(self, epoch, loss, docs):
        self.loss += loss
        self.epoch_loss += loss
        words_in_batch = sum(len(doc) for doc in docs)
        self.words_per_epoch[epoch] += words_in_batch
        self.nr_word += words_in_batch
        words_since_update = self.nr_word - self.last_update
        if words_since_update >= self.frequency:
            wps = words_since_update / (time.time() - self.last_time)
            self.last_update = self.nr_word
            self.last_time = time.time()
            loss_per_word = self.loss - self.prev_loss
            status = (
                epoch,
                self.nr_word,
                _smart_round(self.loss, width=10),
                _smart_round(loss_per_word, width=6),
                int(wps),
            )
            self.prev_loss = float(self.loss)
            return status
        else:
            return None


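# Illustrative reading of the tracker above: with frequency=10000 (the value
# used in pretrain()), update() returns None until at least 10,000 words have
# been processed since the last status row; it then returns a tuple of
# (epoch, nr_word, total_loss, loss_since_last_row, words_per_second),
# which pretrain() passes straight to msg.row().

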
def _smart_round(figure, width=10, max_decimal=4):
    """Round large numbers as integers, smaller numbers as decimals."""
    n_digits = len(str(int(figure)))
    n_decimal = width - (n_digits + 1)
    if n_decimal <= 1:
        return str(int(figure))
    else:
        n_decimal = min(n_decimal, max_decimal)
        format_str = "%." + str(n_decimal) + "f"
        return format_str % figure
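

# Worked examples with the defaults (width=10, max_decimal=4):
#   _smart_round(0.1234567)    -> "0.1235"      (small value: 4 decimals)
#   _smart_round(1234567.89)   -> "1234567.89"  (width budget leaves 2 decimals)
#   _smart_round(9876543210.0) -> "9876543210"  (no room for decimals: integer)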