mirror of https://github.com/explosion/spaCy.git
synced 2024-12-27 10:26:35 +03:00
83ac227bd3
The new spacy pretrain command implemented BERT/ULMFit/etc.-like transfer learning, using our Language Modelling with Approximate Outputs version of BERT's cloze task. Pretraining is convenient, but in some ways it's a bit of a strange solution: all we're doing is initialising the weights. At the same time, we're putting a lot of work into our optimisation so that it's less sensitive to initial conditions, and more likely to find good optima. I discuss this a bit in the pseudo-rehearsal blog post: https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting

Support semi-supervised learning in spacy train

One obvious way to improve these pretraining methods is to do multi-task learning, instead of just transfer learning. This has been shown to work very well: https://arxiv.org/pdf/1809.08370.pdf. This patch makes it easy to do this sort of thing:

- Add a new argument to spacy train, --raw-text. This takes a jsonl file with unlabelled data that can be used in arbitrary ways to do semi-supervised learning.
- Add a new method to the Language class and to pipeline components, .rehearse(). This is like .update(), but doesn't expect GoldParse objects. It takes a batch of Doc objects, and performs an update on some semi-supervised objective.
- Move the BERT-LMAO objective out from spacy/cli/pretrain.py into spacy/_ml.py, so we can create a new pipeline component, ClozeMultitask. This can be specified as a parser or NER multitask in the spacy train command.

Example usage:

    python -m spacy train en ./tmp ~/data/en-core-web/train/nw.json ~/data/en-core-web/dev/nw.json --pipeline parser --raw-text ~/data/unlabelled/reddit-100k.jsonl --vectors en_vectors_web_lg --parser-multitasks cloze

Implement rehearsal methods for pipeline components

The new --raw-text argument and nlp.rehearse() method also give us a good place to implement the idea from the pseudo-rehearsal blog post in the parser. This works as follows:

- Add a new nlp.resume_training() method. This allocates copies of pre-trained models in the pipeline, setting things up for the rehearsal updates. It also returns an optimizer object. This also greatly reduces confusion around the nlp.begin_training() method, which randomises the weights, making it unsuitable for adding new labels or otherwise fine-tuning a pre-trained model.
- Implement rehearsal updates on the Parser class, making them available for the dependency parser and NER. During rehearsal, the initial model is used to supervise the model being trained. The current model is asked to match the predictions of the initial model on some data. This minimises catastrophic forgetting, by keeping the model's predictions close to the original. See the blog post for details.
- Implement rehearsal updates for the tagger.
- Implement rehearsal updates for the text categorizer.
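To make the rehearsal objective concrete, here is a minimal, runnable numpy sketch of the idea. The toy linear-softmax model, all the names, and the learning rate are invented for illustration; in spaCy itself the equivalent logic lives behind nlp.rehearse():

    import numpy

    rng = numpy.random.default_rng(0)

    def softmax(scores):
        e = numpy.exp(scores - scores.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)

    # Toy stand-ins: a frozen copy of the "pre-trained" weights (made when
    # training is resumed) and the copy we keep fine-tuning.
    W_initial = rng.normal(size=(50, 3))
    W_current = W_initial.copy()

    def rehearsal_update(X_raw, W_current, learn_rate=0.001):
        # The current model is asked to match the initial model's predictions
        # on unlabelled text, keeping it close to the original and so
        # minimising catastrophic forgetting.
        targets = softmax(X_raw @ W_initial)
        guesses = softmax(X_raw @ W_current)
        d_scores = guesses - targets  # cross-entropy gradient w.r.t. scores
        W_current -= learn_rate * (X_raw.T @ d_scores)  # in-place update
        return -(targets * numpy.log(guesses + 1e-8)).sum()

    loss = rehearsal_update(rng.normal(size=(4, 50)), W_current)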
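For reference, the file passed to --raw-text (and to the example script below) is newline-delimited JSON with one object per line, each carrying a "text" field; these two records are invented sample data:

    {"text": "This is one line of raw, unlabelled text."}
    {"text": "Each line is a separate record."}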
78 lines
2.4 KiB
Python
"""Prevent catastrophic forgetting with rehearsal updates."""
|
|
import plac
|
|
import random
|
|
import srsly
|
|
import spacy
|
|
from spacy.gold import GoldParse
|
|
from spacy.util import minibatch
|
|
|
|
|
|
LABEL = "ANIMAL"
TRAIN_DATA = [
    (
        "Horses are too tall and they pretend to care about your feelings",
        {"entities": [(0, 6, "ANIMAL")]},
    ),
    ("Do they bite?", {"entities": []}),
    (
        "horses are too tall and they pretend to care about your feelings",
        {"entities": [(0, 6, "ANIMAL")]},
    ),
    ("horses pretend to care about your feelings", {"entities": [(0, 6, "ANIMAL")]}),
    (
        "they pretend to care about your feelings, those horses",
        {"entities": [(48, 54, "ANIMAL")]},
    ),
    ("horses?", {"entities": [(0, 6, "ANIMAL")]}),
]

def read_raw_data(nlp, jsonl_loc):
    # Each line of the unlabelled .jsonl file is a JSON object with a
    # "text" field; blank texts are skipped.
    for json_obj in srsly.read_jsonl(jsonl_loc):
        if json_obj["text"].strip():
            doc = nlp.make_doc(json_obj["text"])
            yield doc


def read_gold_data(nlp, gold_loc):
    # Annotated .jsonl records are expected to carry a "text" field and a
    # "spans" field with character-offset entity annotations.
    docs = []
    golds = []
    for json_obj in srsly.read_jsonl(gold_loc):
        doc = nlp.make_doc(json_obj["text"])
        ents = [(ent["start"], ent["end"], ent["label"]) for ent in json_obj["spans"]]
        gold = GoldParse(doc, entities=ents)
        docs.append(doc)
        golds.append(gold)
    return list(zip(docs, golds))


def main(model_name, unlabelled_loc):
    n_iter = 10
    dropout = 0.2
    batch_size = 4
    nlp = spacy.load(model_name)
    nlp.get_pipe("ner").add_label(LABEL)
    raw_docs = list(read_raw_data(nlp, unlabelled_loc))
    # resume_training() keeps internal copies of the pre-trained weights, so
    # the rehearsal updates below have an "initial" model to match, and it
    # returns the optimizer.
    optimizer = nlp.resume_training()

    # get names of other pipes to disable them during training
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
    with nlp.disable_pipes(*other_pipes):
        for itn in range(n_iter):
            random.shuffle(TRAIN_DATA)
            random.shuffle(raw_docs)
            losses = {}
            r_losses = {}
            # batch up the examples using spaCy's minibatch
            raw_batches = minibatch(raw_docs, size=batch_size)
            for doc, gold in TRAIN_DATA:
                # Alternate one supervised update with one rehearsal update
                # on raw text. Note that next() assumes the unlabelled file
                # yields at least one batch per labelled example; it raises
                # StopIteration otherwise.
                nlp.update([doc], [gold], sgd=optimizer, drop=dropout, losses=losses)
                raw_batch = list(next(raw_batches))
                nlp.rehearse(raw_batch, sgd=optimizer, losses=r_losses)
            print("Losses", losses)
            print("R. Losses", r_losses)


if __name__ == "__main__":
    plac.call(main)
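Example invocation, assuming the script is saved as rehearsal.py (plac maps the two positional arguments to main's model_name and unlabelled_loc; the model name and path here are illustrative):

    python rehearsal.py en_core_web_sm /path/to/raw_text.jsonl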