Merge pull request #6523 from adrianeboyd/bugfix/remove-use-chars

Remove non-working --use-chars from train CLI
commit b87793a89a
Ines Montani, 2020-12-08 09:30:48 +01:00 (committed by GitHub)
2 changed files with 0 additions and 4 deletions


@@ -38,7 +38,6 @@ from .. import about
     conv_depth=("Depth of CNN layers of Tok2Vec component", "option", "cd", int),
     cnn_window=("Window size for CNN layers of Tok2Vec component", "option", "cW", int),
     cnn_pieces=("Maxout size for CNN layers of Tok2Vec component. 1 for Mish", "option", "cP", int),
-    use_chars=("Whether to use character-based embedding of Tok2Vec component", "flag", "chr", bool),
     bilstm_depth=("Depth of BiLSTM layers of Tok2Vec component (requires PyTorch)", "option", "lstm", int),
     embed_rows=("Number of embedding rows of Tok2Vec component", "option", "er", int),
     n_iter=("Number of iterations", "option", "n", int),
@@ -78,7 +77,6 @@ def train(
     conv_depth=4,
     cnn_window=1,
     cnn_pieces=3,
-    use_chars=False,
     bilstm_depth=0,
     embed_rows=2000,
     n_iter=30,
@@ -294,7 +292,6 @@ def train(
     cfg["cnn_maxout_pieces"] = cnn_pieces
     cfg["embed_size"] = embed_rows
     cfg["conv_window"] = cnn_window
-    cfg["subword_features"] = not use_chars
     optimizer = nlp.begin_training(lambda: corpus.train_tuples, **cfg)
     nlp._optimizer = None
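
For context, the removed option was declared with the plac-style annotations spaCy v2's CLI uses throughout: each parameter maps to a tuple of (help text, kind, abbreviation, type), where `"flag"` produces a boolean switch like the removed `--use-chars`/`-chr` and `"option"` takes a value. Below is a minimal, self-contained sketch of that pattern; the `demo` function and its body are illustrative only, not part of spaCy.

```python
# Minimal sketch of the plac annotation pattern used by spaCy v2's train CLI.
# The annotation tuple is (help text, kind, abbreviation, type); "flag"
# declares a boolean switch, "option" declares an argument that takes a value.
import plac


@plac.annotations(
    conv_depth=("Depth of CNN layers of Tok2Vec component", "option", "cd", int),
    use_chars=("Whether to use character-based embedding", "flag", "chr", bool),
)
def demo(conv_depth=4, use_chars=False):
    # Illustrative body only: echo what the command-line parser produced.
    print("conv_depth:", conv_depth, "use_chars:", use_chars)


if __name__ == "__main__":
    plac.call(demo)  # e.g. `python demo.py -cd 8 -chr`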


@@ -384,7 +384,6 @@ $ python -m spacy train [lang] [output_path] [train_path] [dev_path]
 | `--conv-depth`, `-cd` <Tag variant="new">2.2.4</Tag> | option | Depth of CNN layers of `Tok2Vec` component. |
 | `--cnn-window`, `-cW` <Tag variant="new">2.2.4</Tag> | option | Window size for CNN layers of `Tok2Vec` component. |
 | `--cnn-pieces`, `-cP` <Tag variant="new">2.2.4</Tag> | option | Maxout size for CNN layers of `Tok2Vec` component. |
-| `--use-chars`, `-chr` <Tag variant="new">2.2.4</Tag> | flag | Whether to use character-based embedding of `Tok2Vec` component. |
 | `--bilstm-depth`, `-lstm` <Tag variant="new">2.2.4</Tag> | option | Depth of BiLSTM layers of `Tok2Vec` component (requires PyTorch). |
 | `--embed-rows`, `-er` <Tag variant="new">2.2.4</Tag> | option | Number of embedding rows of `Tok2Vec` component. |
 | `--noise-level`, `-nl` | option | Float indicating the amount of corruption for data augmentation. |
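
For reference, a hedged sketch of an equivalent call after this change, using the v2.x train entry point programmatically rather than the shell command above. The language code and file paths are placeholders, and only keyword arguments visible in this diff are passed.

```python
# Sketch (spaCy v2.x): calling the train CLI's underlying function directly,
# now that --use-chars / use_chars is gone. All paths are placeholders.
from spacy.cli import train

train(
    "en",              # [lang]        (placeholder language code)
    "./model-output",  # [output_path] (placeholder)
    "./train.json",    # [train_path]  (placeholder)
    "./dev.json",      # [dev_path]    (placeholder)
    conv_depth=4,      # --conv-depth, -cd
    cnn_window=1,      # --cnn-window, -cW
    cnn_pieces=3,      # --cnn-pieces, -cP
    bilstm_depth=0,    # --bilstm-depth, -lstm
    embed_rows=2000,   # --embed-rows, -er
    n_iter=30,         # --n-iter, -n
    # use_chars=True   # no longer accepted; passing it now raises a TypeError
)
```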