2018-11-30 22:16:14 +03:00
|
|
|
# coding: utf8
|
2018-11-16 00:17:16 +03:00
|
|
|
from __future__ import print_function, unicode_literals
|
2018-11-30 22:16:14 +03:00
|
|
|
|
2018-11-16 00:17:16 +03:00
|
|
|
import plac
|
|
|
|
import random
|
|
|
|
import numpy
|
|
|
|
import time
|
2019-07-09 22:48:30 +03:00
|
|
|
import re
|
2018-11-16 01:45:36 +03:00
|
|
|
from collections import Counter
|
2018-11-30 22:16:14 +03:00
|
|
|
from pathlib import Path
|
2018-11-29 15:36:43 +03:00
|
|
|
from thinc.v2v import Affine, Maxout
|
|
|
|
from thinc.misc import LayerNorm as LN
|
2019-08-20 16:08:59 +03:00
|
|
|
from thinc.neural.util import prefer_gpu
|
2019-11-04 04:38:45 +03:00
|
|
|
from wasabi import msg
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintenance harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
import srsly
|
2018-11-16 00:17:16 +03:00
|
|
|
|
2019-06-16 14:22:57 +03:00
|
|
|
from ..errors import Errors
|
2018-11-30 22:16:14 +03:00
|
|
|
from ..tokens import Doc
|
|
|
|
from ..attrs import ID, HEAD
|
2019-02-08 16:14:49 +03:00
|
|
|
from .._ml import Tok2Vec, flatten, chain, create_default_optimizer
|
2019-08-20 16:08:59 +03:00
|
|
|
from .._ml import masked_language_model, get_cossim_loss
|
2018-11-30 22:16:14 +03:00
|
|
|
from .. import util
|
2019-06-12 14:29:23 +03:00
|
|
|
from .train import _load_pretrained_tok2vec
|
2018-11-16 00:17:16 +03:00
|
|
|
|
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
@plac.annotations(
|
2019-06-20 11:36:38 +03:00
|
|
|
texts_loc=(
|
|
|
|
"Path to JSONL file with raw texts to learn from, with text provided as the key 'text' or tokens as the "
|
|
|
|
"key 'tokens'",
|
|
|
|
"positional",
|
|
|
|
None,
|
|
|
|
str,
|
|
|
|
),
|
2019-06-20 11:30:44 +03:00
|
|
|
vectors_model=("Name or path to spaCy model with vectors to learn from"),
|
|
|
|
output_dir=("Directory to write models to on each epoch", "positional", None, str),
|
2018-11-30 22:16:14 +03:00
|
|
|
width=("Width of CNN layers", "option", "cw", int),
|
2020-02-16 19:16:41 +03:00
|
|
|
conv_depth=("Depth of CNN layers", "option", "cd", int),
|
2019-10-28 17:16:33 +03:00
|
|
|
cnn_window=("Window size for CNN layers", "option", "cW", int),
|
|
|
|
cnn_pieces=("Maxout size for CNN layers. 1 for Mish", "option", "cP", int),
|
|
|
|
use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
|
|
|
|
sa_depth=("Depth of self-attention layers", "option", "sa", int),
|
2019-10-08 00:34:58 +03:00
|
|
|
bilstm_depth=("Depth of BiLSTM layers (requires PyTorch)", "option", "lstm", int),
|
2019-06-20 11:30:44 +03:00
|
|
|
embed_rows=("Number of embedding rows", "option", "er", int),
|
2019-06-20 11:36:38 +03:00
|
|
|
loss_func=(
|
|
|
|
"Loss function to use for the objective. Either 'L2' or 'cosine'",
|
|
|
|
"option",
|
|
|
|
"L",
|
|
|
|
str,
|
|
|
|
),
|
2018-11-30 22:16:14 +03:00
|
|
|
use_vectors=("Whether to use the static vectors as input features", "flag", "uv"),
|
2019-06-20 11:30:44 +03:00
|
|
|
dropout=("Dropout rate", "option", "d", float),
|
2019-03-16 23:38:45 +03:00
|
|
|
batch_size=("Number of words per training batch", "option", "bs", int),
|
2019-06-20 11:36:38 +03:00
|
|
|
max_length=(
|
|
|
|
"Max words per example. Longer examples are discarded",
|
|
|
|
"option",
|
|
|
|
"xw",
|
|
|
|
int,
|
|
|
|
),
|
|
|
|
min_length=(
|
|
|
|
"Min words per example. Shorter examples are discarded",
|
|
|
|
"option",
|
|
|
|
"nw",
|
|
|
|
int,
|
|
|
|
),
|
2019-06-03 19:32:47 +03:00
|
|
|
seed=("Seed for random number generators", "option", "s", int),
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
n_iter=("Number of iterations to pretrain", "option", "i", int),
|
|
|
|
n_save_every=("Save model every X batches.", "option", "se", int),
|
2019-06-12 14:29:23 +03:00
|
|
|
init_tok2vec=(
|
|
|
|
"Path to pretrained weights for the token-to-vector parts of the models. See 'spacy pretrain'. Experimental.",
|
|
|
|
"option",
|
|
|
|
"t2v",
|
|
|
|
Path,
|
|
|
|
),
|
2019-07-09 22:48:30 +03:00
|
|
|
epoch_start=(
|
|
|
|
"The epoch to start counting at. Only relevant when using '--init-tok2vec' and the given weight file has been "
|
|
|
|
"renamed. Prevents unintended overwriting of existing weight files.",
|
|
|
|
"option",
|
|
|
|
"es",
|
2019-08-20 16:06:31 +03:00
|
|
|
int,
|
2019-07-09 22:48:30 +03:00
|
|
|
),
|
2018-11-30 22:16:14 +03:00
|
|
|
)
|
|
|
|
def pretrain(
|
|
|
|
texts_loc,
|
|
|
|
vectors_model,
|
|
|
|
output_dir,
|
|
|
|
width=96,
|
2020-02-16 19:16:41 +03:00
|
|
|
conv_depth=4,
|
2019-10-28 17:16:33 +03:00
|
|
|
bilstm_depth=0,
|
|
|
|
cnn_pieces=3,
|
|
|
|
sa_depth=0,
|
|
|
|
use_chars=False,
|
|
|
|
cnn_window=1,
|
2018-11-30 22:16:14 +03:00
|
|
|
embed_rows=2000,
|
2019-03-20 14:06:35 +03:00
|
|
|
loss_func="cosine",
|
2018-11-30 22:16:14 +03:00
|
|
|
use_vectors=False,
|
|
|
|
dropout=0.2,
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
n_iter=1000,
|
2019-03-16 23:38:45 +03:00
|
|
|
batch_size=3000,
|
|
|
|
max_length=500,
|
|
|
|
min_length=5,
|
2018-11-30 22:16:14 +03:00
|
|
|
seed=0,
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
n_save_every=None,
|
2019-06-12 14:29:23 +03:00
|
|
|
init_tok2vec=None,
|
2019-07-09 22:48:30 +03:00
|
|
|
epoch_start=None,
|
2018-11-30 22:16:14 +03:00
|
|
|
):
|
|
|
|
"""
|
|
|
|
Pre-train the 'token-to-vector' (tok2vec) layer of pipeline components,
|
|
|
|
using an approximate language-modelling objective. Specifically, we load
|
2019-10-02 11:37:39 +03:00
|
|
|
pretrained vectors, and train a component like a CNN, BiLSTM, etc to predict
|
|
|
|
vectors which match the pretrained ones. The weights are saved to a directory
|
|
|
|
after each epoch. You can then pass a path to one of these pretrained weights
|
2018-11-30 22:16:14 +03:00
|
|
|
files to the 'spacy train' command.
|
|
|
|
|
|
|
|
This technique may be especially helpful if you have little labelled data.
|
|
|
|
However, it's still quite experimental, so your mileage may vary.
|
2018-11-16 00:17:16 +03:00
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
To load the weights back in during 'spacy train', you need to ensure
|
|
|
|
all settings are the same between pretraining and training. The API and
|
|
|
|
errors around this need some improvement.
|
|
|
|
"""
|
|
|
|
config = dict(locals())
|
2019-06-12 14:29:23 +03:00
|
|
|
for key in config:
|
|
|
|
if isinstance(config[key], Path):
|
|
|
|
config[key] = str(config[key])
|
2018-11-30 22:16:14 +03:00
|
|
|
util.fix_random_seed(seed)
|
|
|
|
|
|
|
|
has_gpu = prefer_gpu()
|
2019-10-08 00:34:58 +03:00
|
|
|
if has_gpu:
|
|
|
|
import torch
|
|
|
|
|
|
|
|
torch.set_default_tensor_type("torch.cuda.FloatTensor")
|
2018-11-30 22:16:14 +03:00
|
|
|
msg.info("Using GPU" if has_gpu else "Not using GPU")
|
|
|
|
|
|
|
|
output_dir = Path(output_dir)
|
2020-02-16 19:16:41 +03:00
|
|
|
if output_dir.exists() and [p for p in output_dir.iterdir()]:
|
|
|
|
msg.warn(
|
|
|
|
"Output directory is not empty",
|
|
|
|
"It is better to use an empty directory or refer to a new output path, "
|
|
|
|
"then the new directory will be created for you.",
|
|
|
|
)
|
2018-11-30 22:16:14 +03:00
|
|
|
if not output_dir.exists():
|
|
|
|
output_dir.mkdir()
|
2020-02-16 19:16:41 +03:00
|
|
|
msg.good("Created output directory: {}".format(output_dir))
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintenance harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
srsly.write_json(output_dir / "config.json", config)
|
2018-11-30 22:16:14 +03:00
|
|
|
msg.good("Saved settings to config.json")
|
|
|
|
|
|
|
|
# Load texts from file or stdin
|
|
|
|
if texts_loc != "-": # reading from a file
|
|
|
|
texts_loc = Path(texts_loc)
|
|
|
|
if not texts_loc.exists():
|
|
|
|
msg.fail("Input text file doesn't exist", texts_loc, exits=1)
|
|
|
|
with msg.loading("Loading input texts..."):
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintenance harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
texts = list(srsly.read_jsonl(texts_loc))
|
2019-06-16 14:22:57 +03:00
|
|
|
if not texts:
|
|
|
|
msg.fail("Input file is empty", texts_loc, exits=1)
|
2018-11-30 22:16:14 +03:00
|
|
|
msg.good("Loaded input texts")
|
|
|
|
random.shuffle(texts)
|
|
|
|
else: # reading from stdin
|
|
|
|
msg.text("Reading input text from stdin...")
|
💫 Replace ujson, msgpack and dill/pickle/cloudpickle with srsly (#3003)
Remove hacks and wrappers, keep code in sync across our libraries and move spaCy a few steps closer to only depending on packages with binary wheels 🎉
See here: https://github.com/explosion/srsly
Serialization is hard, especially across Python versions and multiple platforms. After dealing with many subtle bugs over the years (encodings, locales, large files) our libraries like spaCy and Prodigy have steadily grown a number of utility functions to wrap the multiple serialization formats we need to support (especially json, msgpack and pickle). These wrapping functions ended up duplicated across our codebases, so we wanted to put them in one place.
At the same time, we noticed that having a lot of small dependencies was making maintenance harder, and making installation slower. To solve this, we've made srsly standalone, by including the component packages directly within it. This way we can provide all the serialization utilities we need in a single binary wheel.
srsly currently includes forks of the following packages:
ujson
msgpack
msgpack-numpy
cloudpickle
* WIP: replace json/ujson with srsly
* Replace ujson in examples
Use regular json instead of srsly to make code easier to read and follow
* Update requirements
* Fix imports
* Fix typos
* Replace msgpack with srsly
* Fix warning
2018-12-03 03:28:22 +03:00
|
|
|
texts = srsly.read_jsonl("-")
|
2018-11-30 22:16:14 +03:00
|
|
|
|
|
|
|
with msg.loading("Loading model '{}'...".format(vectors_model)):
|
|
|
|
nlp = util.load_model(vectors_model)
|
|
|
|
msg.good("Loaded model '{}'".format(vectors_model))
|
|
|
|
pretrained_vectors = None if not use_vectors else nlp.vocab.vectors.name
|
|
|
|
model = create_pretraining_model(
|
|
|
|
nlp,
|
|
|
|
Tok2Vec(
|
|
|
|
width,
|
|
|
|
embed_rows,
|
2020-02-16 19:16:41 +03:00
|
|
|
conv_depth=conv_depth,
|
2018-11-30 22:16:14 +03:00
|
|
|
pretrained_vectors=pretrained_vectors,
|
2019-10-08 00:34:58 +03:00
|
|
|
bilstm_depth=bilstm_depth, # Requires PyTorch. Experimental.
|
2019-10-28 17:16:33 +03:00
|
|
|
subword_features=not use_chars, # Set to False for Chinese etc
|
|
|
|
cnn_maxout_pieces=cnn_pieces, # If set to 1, use Mish activation.
|
2018-11-30 22:16:14 +03:00
|
|
|
),
|
2018-12-18 21:19:26 +03:00
|
|
|
)
|
2019-10-02 11:37:39 +03:00
|
|
|
# Load in pretrained weights
|
2019-06-12 14:29:23 +03:00
|
|
|
if init_tok2vec is not None:
|
|
|
|
components = _load_pretrained_tok2vec(nlp, init_tok2vec)
|
|
|
|
msg.text("Loaded pretrained tok2vec for: {}".format(components))
|
2019-07-09 22:48:30 +03:00
|
|
|
# Parse the epoch number from the given weight file
|
|
|
|
model_name = re.search(r"model\d+\.bin", str(init_tok2vec))
|
|
|
|
if model_name:
|
|
|
|
# Default weight file name so read epoch_start from it by cutting off 'model' and '.bin'
|
|
|
|
epoch_start = int(model_name.group(0)[5:][:-4]) + 1
|
|
|
|
else:
|
|
|
|
if not epoch_start:
|
|
|
|
msg.fail(
|
|
|
|
"You have to use the '--epoch-start' argument when using a renamed weight file for "
|
2019-08-20 16:06:31 +03:00
|
|
|
"'--init-tok2vec'",
|
|
|
|
exits=True,
|
2019-07-09 22:48:30 +03:00
|
|
|
)
|
|
|
|
elif epoch_start < 0:
|
|
|
|
msg.fail(
|
2019-08-20 16:06:31 +03:00
|
|
|
"The argument '--epoch-start' has to be greater or equal to 0. '%d' is invalid"
|
|
|
|
% epoch_start,
|
|
|
|
exits=True,
|
2019-07-09 22:48:30 +03:00
|
|
|
)
|
|
|
|
else:
|
|
|
|
# Without '--init-tok2vec' the '--epoch-start' argument is ignored
|
|
|
|
epoch_start = 0
|
|
|
|
|
2018-11-30 22:16:14 +03:00
|
|
|
optimizer = create_default_optimizer(model.ops)
|
2018-12-18 21:19:26 +03:00
|
|
|
tracker = ProgressTracker(frequency=10000)
|
2019-07-09 22:48:30 +03:00
|
|
|
msg.divider("Pre-training tok2vec layer - starting at epoch %d" % epoch_start)
|
2018-11-30 22:16:14 +03:00
|
|
|
row_settings = {"widths": (3, 10, 10, 6, 4), "aligns": ("r", "r", "r", "r", "r")}
|
|
|
|
msg.row(("#", "# Words", "Total Loss", "Loss", "w/s"), **row_settings)
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
|
|
|
|
def _save_model(epoch, is_temp=False):
    """Checkpoint the tok2vec weights and append a progress record.

    Mid-epoch saves (is_temp=True) get a ".temp" suffix in the file name
    so they can be told apart from the end-of-epoch model files.
    """
    is_temp_str = ".temp" if is_temp else ""
    model_name = "model%d%s.bin" % (epoch, is_temp_str)
    # Save with the averaged parameters, which usually generalise better.
    with model.use_params(optimizer.averages):
        with (output_dir / model_name).open("wb") as file_:
            file_.write(model.tok2vec.to_bytes())
        log = {
            "nr_word": tracker.nr_word,
            "loss": tracker.loss,
            "epoch_loss": tracker.epoch_loss,
            "epoch": epoch,
        }
        with (output_dir / "log.jsonl").open("a") as file_:
            file_.write(srsly.json_dumps(log) + "\n")
|
|
|
|
|
2019-06-16 14:22:57 +03:00
|
|
|
skip_counter = 0
|
2019-07-09 22:48:30 +03:00
|
|
|
for epoch in range(epoch_start, n_iter + epoch_start):
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
for batch_id, batch in enumerate(
|
|
|
|
util.minibatch_by_words(((text, None) for text in texts), size=batch_size)
|
2018-11-30 22:16:14 +03:00
|
|
|
):
|
2019-06-16 14:22:57 +03:00
|
|
|
docs, count = make_docs(
|
2019-03-16 23:38:45 +03:00
|
|
|
nlp,
|
|
|
|
[text for (text, _) in batch],
|
|
|
|
max_length=max_length,
|
|
|
|
min_length=min_length,
|
|
|
|
)
|
2019-06-16 14:22:57 +03:00
|
|
|
skip_counter += count
|
2019-04-01 13:11:27 +03:00
|
|
|
loss = make_update(
|
|
|
|
model, docs, optimizer, objective=loss_func, drop=dropout
|
|
|
|
)
|
2018-11-30 22:16:14 +03:00
|
|
|
progress = tracker.update(epoch, loss, docs)
|
|
|
|
if progress:
|
|
|
|
msg.row(progress, **row_settings)
|
|
|
|
if texts_loc == "-" and tracker.words_per_epoch[epoch] >= 10 ** 7:
|
|
|
|
break
|
Add save after `--save-every` batches for `spacy pretrain` (#3510)
<!--- Provide a general summary of your changes in the title. -->
When using `spacy pretrain`, the model is saved only after every epoch. But each epoch can be very big since `pretrain` is used for language modeling tasks. So I added a `--save-every` option in the CLI to save after every `--save-every` batches.
## Description
<!--- Use this section to describe your changes. If your changes required
testing, include information about the testing environment and the tests you
ran. If your test fixes a bug reported in an issue, don't forget to include the
issue number. If your PR is still a work in progress, that's totally fine – just
include a note to let us know. -->
To test...
Save this file to `sample_sents.jsonl`
```
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
{"text": "hello there."}
```
Then run `--save-every 2` when pretraining.
```bash
spacy pretrain sample_sents.jsonl en_core_web_md here -nw 1 -bs 1 -i 10 --save-every 2
```
And it should save the model to the `here/` folder after every 2 batches. The models that are saved during an epoch will have a `.temp` appended to the save name.
At the end of the training, you should see these files (`ls here/`):
```bash
config.json model2.bin model5.bin model8.bin
log.jsonl model2.temp.bin model5.temp.bin model8.temp.bin
model0.bin model3.bin model6.bin model9.bin
model0.temp.bin model3.temp.bin model6.temp.bin model9.temp.bin
model1.bin model4.bin model7.bin
model1.temp.bin model4.temp.bin model7.temp.bin
```
### Types of change
<!-- What type of change does your PR cover? Is it a bug fix, an enhancement
or new feature, or a change to the documentation? -->
This is a new feature to `spacy pretrain`.
🌵 **Unfortunately, I haven't been able to test this because compiling from source is not working (cythonize error).**
```
Processing matcher.pyx
[Errno 2] No such file or directory: '/Users/mwu/github/spaCy/spacy/matcher.pyx'
Traceback (most recent call last):
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 169, in <module>
run(args.root)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 158, in run
process(base, filename, db)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 124, in process
preserve_cwd(base, process_pyx, root + ".pyx", root + ".cpp")
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 87, in preserve_cwd
func(*args)
File "/Users/mwu/github/spaCy/bin/cythonize.py", line 63, in process_pyx
raise Exception("Cython failed")
Exception: Cython failed
Traceback (most recent call last):
File "setup.py", line 276, in <module>
setup_package()
File "setup.py", line 209, in setup_package
generate_cython(root, "spacy")
File "setup.py", line 132, in generate_cython
raise RuntimeError("Running cythonize failed")
RuntimeError: Running cythonize failed
```
Edit: Fixed! after deleting all `.cpp` files: `find spacy -name "*.cpp" | xargs rm`
## Checklist
<!--- Before you submit the PR, go over this checklist and make sure you can
tick off all the boxes. [] -> [x] -->
- [x] I have submitted the spaCy Contributor Agreement.
- [x] I ran the tests, and all new and existing tests passed.
- [x] My changes don't require a change to the documentation, or if they do, I've added all required information.
2019-04-22 15:10:16 +03:00
|
|
|
if n_save_every and (batch_id % n_save_every == 0):
|
|
|
|
_save_model(epoch, is_temp=True)
|
|
|
|
_save_model(epoch)
|
2018-11-30 22:16:14 +03:00
|
|
|
tracker.epoch_loss = 0.0
|
|
|
|
if texts_loc != "-":
|
|
|
|
# Reshuffle the texts if texts were loaded from a file
|
|
|
|
random.shuffle(texts)
|
2019-06-16 14:22:57 +03:00
|
|
|
if skip_counter > 0:
|
|
|
|
msg.warn("Skipped {count} empty values".format(count=str(skip_counter)))
|
|
|
|
msg.good("Successfully finished pretrain")
|
2018-11-16 01:44:07 +03:00
|
|
|
|
2018-11-28 20:04:58 +03:00
|
|
|
|
2019-02-08 16:14:49 +03:00
|
|
|
def make_update(model, docs, optimizer, drop=0.0, objective="L2"):
    """Run a single optimization step over one batch of documents.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    optimizer (callable): An optimizer.
    objective (unicode): The loss objective, passed to `get_vectors_loss`.
    RETURNS loss: A float for the loss.
    """
    predictions, finish_update = model.begin_update(docs, drop=drop)
    loss, d_predictions = get_vectors_loss(model.ops, docs, predictions, objective)
    finish_update(d_predictions, sgd=optimizer)
    # The gradients are modified in-place by the BERT MLM, so the loss we
    # computed above is accurate. Cast to float so we don't hand a cupy
    # scalar back to the caller.
    return float(loss)
|
2018-11-16 00:17:16 +03:00
|
|
|
|
|
|
|
|
2019-03-16 23:38:45 +03:00
|
|
|
def make_docs(nlp, batch, min_length, max_length):
    """Create `Doc` objects from a batch of JSONL records.

    Records with a "tokens" key become pre-tokenized docs; records with a
    "text" key are tokenized by the pipeline. Records with an empty value
    are skipped and counted. Docs whose length falls outside the
    [min_length, max_length) window are dropped.

    RETURNS (list, int): The docs, and the number of skipped records.
    """
    docs = []
    skip_count = 0
    for record in batch:
        if not isinstance(record, dict):
            raise TypeError(Errors.E137.format(type=type(record), line=record))
        if "tokens" in record:
            tokens = record["tokens"]
            if not tokens:
                skip_count += 1
                continue
            doc = Doc(nlp.vocab, words=tokens)
        elif "text" in record:
            text = record["text"]
            if not text:
                skip_count += 1
                continue
            doc = nlp.make_doc(text)
        else:
            raise ValueError(Errors.E138.format(text=record))
        if "heads" in record:
            # Optional gold heads: one uint64 per token, as a column vector.
            head_array = numpy.asarray(record["heads"], dtype="uint64")
            doc = doc.from_array([HEAD], head_array.reshape((len(doc), 1)))
        if min_length <= len(doc) < max_length:
            docs.append(doc)
    return docs, skip_count
|
2018-11-28 20:04:58 +03:00
|
|
|
|
|
|
|
|
2019-02-08 16:14:49 +03:00
|
|
|
def get_vectors_loss(ops, docs, prediction, objective="L2"):
    """Compute the loss between the documents' pretrained vectors and
    the prediction.

    ops: The backend ops (e.g. numpy or cupy) used for array work.
    docs (iterable): The batch of `Doc` objects.
    prediction: The predicted vectors, one row per token.
    objective (unicode): "L2" or "cosine"; anything else raises E142.
    RETURNS (loss, d_target): The loss value and the gradient of the
        prediction.

    Note that this is ripe for customization! We could compute the vectors
    in some other way, e.g. with an LSTM language model, or use some other
    type of objective.
    """
    # The simplest way to implement this would be to vstack the
    # token.vector values, but that's a bit inefficient, especially on GPU.
    # Instead we fetch the index into the vectors table for each of our tokens,
    # and look them up all at once. This prevents data copying.
    ids = ops.flatten([doc.to_array(ID).ravel() for doc in docs])
    # One target row per token, taken straight from the static vectors table.
    target = docs[0].vocab.vectors.data[ids]
    if objective == "L2":
        d_target = prediction - target
        loss = (d_target ** 2).sum()
    elif objective == "cosine":
        loss, d_target = get_cossim_loss(prediction, target)
    else:
        raise ValueError(Errors.E142.format(loss_func=objective))
    return loss, d_target
|
|
|
|
|
|
|
|
|
2018-12-18 21:19:26 +03:00
|
|
|
def create_pretraining_model(nlp, tok2vec):
    """Define a network for the pretraining. We simply add an output layer onto
    the tok2vec input model. The tok2vec input model needs to be a model that
    takes a batch of Doc objects (as a list), and returns a list of arrays.
    Each array in the output needs to have one row per token in the doc.
    """
    # Predict rows of the static vectors table, so the output width must
    # match the vector dimensionality.
    output_size = nlp.vocab.vectors.data.shape[1]
    output_layer = chain(
        LN(Maxout(300, pieces=3)), Affine(output_size, drop_factor=0.0)
    )
    # This is annoying, but the parser etc have the flatten step after
    # the tok2vec. To load the weights in cleanly, we need to match
    # the shape of the models' components exactly. So what we call
    # "tok2vec" has to be the same set of processes as what the components do.
    tok2vec = chain(tok2vec, flatten)
    model = chain(tok2vec, output_layer)
    # Wrap the network in the masked-language-model training objective.
    model = masked_language_model(nlp.vocab, model)
    # Expose the pieces as attributes so callers can serialize just the
    # tok2vec weights (see _save_model).
    model.tok2vec = tok2vec
    model.output_layer = output_layer
    # Run a dummy doc through so thinc can infer the remaining shapes.
    model.begin_training([nlp.make_doc("Give it a doc to infer shapes")])
    return model
|
|
|
|
|
|
|
|
|
|
|
|
class ProgressTracker(object):
    """Accumulate loss and word-count statistics during pretraining, and
    emit a status row at most once every `frequency` words."""

    def __init__(self, frequency=1000000):
        self.loss = 0.0
        self.prev_loss = 0.0
        self.nr_word = 0
        self.words_per_epoch = Counter()
        self.frequency = frequency
        self.last_time = time.time()
        self.last_update = 0
        self.epoch_loss = 0.0

    def update(self, epoch, loss, docs):
        """Record one batch. Returns a status tuple once at least
        `frequency` words have been seen since the last report, else None."""
        self.loss += loss
        self.epoch_loss += loss
        batch_words = sum(len(doc) for doc in docs)
        self.words_per_epoch[epoch] += batch_words
        self.nr_word += batch_words
        words_since_update = self.nr_word - self.last_update
        if words_since_update < self.frequency:
            return None
        # Words-per-second over the interval since the last report.
        wps = words_since_update / (time.time() - self.last_time)
        self.last_update = self.nr_word
        self.last_time = time.time()
        loss_per_word = self.loss - self.prev_loss
        status = (
            epoch,
            self.nr_word,
            _smart_round(self.loss, width=10),
            _smart_round(loss_per_word, width=6),
            int(wps),
        )
        self.prev_loss = float(self.loss)
        return status
|
2019-03-16 23:38:45 +03:00
|
|
|
|
|
|
|
|
|
|
|
def _smart_round(figure, width=10, max_decimal=4):
|
|
|
|
"""Round large numbers as integers, smaller numbers as decimals."""
|
|
|
|
n_digits = len(str(int(figure)))
|
|
|
|
n_decimal = width - (n_digits + 1)
|
|
|
|
if n_decimal <= 1:
|
|
|
|
return str(int(figure))
|
|
|
|
else:
|
|
|
|
n_decimal = min(n_decimal, max_decimal)
|
|
|
|
format_str = "%." + str(n_decimal) + "f"
|
|
|
|
return format_str % figure
|