mirror of https://github.com/explosion/spaCy.git
Support specifying which GPU
This commit is contained in:
parent 3f5c85d8de
commit 21eef90dbc
spacy/cli/train.py

@@ -27,14 +27,14 @@ from .. import displacy
     dev_data=("location of JSON-formatted development data (optional)", "positional", None, str),
     n_iter=("number of iterations", "option", "n", int),
     n_sents=("number of sentences", "option", "ns", int),
-    use_gpu=("Use GPU", "flag", "G", bool),
+    use_gpu=("Use GPU", "option", "g", int),
     resume=("Whether to resume training", "flag", "R", bool),
     no_tagger=("Don't train tagger", "flag", "T", bool),
     no_parser=("Don't train parser", "flag", "P", bool),
     no_entities=("Don't train NER", "flag", "N", bool)
 )
 def train(cmd, lang, output_dir, train_data, dev_data, n_iter=20, n_sents=0,
-          use_gpu=False, resume=False, no_tagger=False, no_parser=False, no_entities=False):
+          use_gpu=-1, resume=False, no_tagger=False, no_parser=False, no_entities=False):
     """
     Train a model. Expects data in spaCy's JSON format.
     """
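This hunk turns use_gpu from a boolean flag (-G) into an integer option (-g) whose value names a CUDA device, with the new default -1 meaning CPU. A minimal standalone sketch of how such a plac-style (help, kind, abbrev, type) annotation behaves; the train body below is a stand-in, not spaCy's:

    import plac

    @plac.annotations(
        # (help, kind, abbrev, type) -- mirrors the tuple in the hunk above
        use_gpu=("Use GPU", "option", "g", int),
    )
    def train(use_gpu=-1):
        # -1 keeps everything on CPU; 0, 1, ... pick a specific CUDA device.
        print("cpu" if use_gpu < 0 else "gpu:%d" % use_gpu)

    if __name__ == '__main__':
        plac.call(train)   # e.g. `python example.py -g 0` prints "gpu:0"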
@@ -76,7 +76,7 @@ def train(cmd, lang, output_dir, train_data, dev_data, n_iter=20, n_sents=0,
     corpus = GoldCorpus(train_path, dev_path, limit=n_sents)
     n_train_docs = corpus.count_train()
 
-    optimizer = nlp.begin_training(lambda: corpus.train_tuples, use_gpu=use_gpu)
+    optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=use_gpu)
 
     print("Itn.\tLoss\tUAS\tNER P.\tNER R.\tNER F.\tTag %\tToken %")
     try:
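The call site now forwards the device id through a device keyword instead of the old use_gpu boolean. A hedged sketch of the call after this change (the paths are illustrative, and it assumes the spaCy version this commit targets):

    from spacy.language import Language
    from spacy.gold import GoldCorpus

    nlp = Language()
    corpus = GoldCorpus('train.json', 'dev.json')

    # device is an int: -1 for CPU, otherwise a CUDA device id.
    optimizer = nlp.begin_training(lambda: corpus.train_tuples, device=0)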
spacy/language.py

@@ -279,9 +279,14 @@ class Language(object):
             for word in annots[1]:
                 _ = self.vocab[word]
         contexts = []
-        if cfg.get('use_gpu'):
+        if cfg.get('device', -1) >= 0:
+            import cupy.cuda.device
+            device = cupy.cuda.device.Device(cfg['device'])
+            device.use()
             Model.ops = CupyOps()
             Model.Ops = CupyOps
+        else:
+            device = None
         for proc in self.pipeline:
             if hasattr(proc, 'begin_training'):
                 context = proc.begin_training(get_gold_tuples(),
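Inside Language.begin_training, a non-negative 'device' entry in cfg now selects the CUDA device up front: cupy.cuda.Device(n).use() makes device n current for subsequent CuPy allocations, before Thinc's Model.ops is swapped over to CupyOps. A standalone sketch of that idiom, assuming a working CuPy install (the device id is illustrative):

    import cupy

    def select_device(device_id=-1):
        """Return the chosen CUDA device, or None to stay on CPU."""
        if device_id >= 0:
            device = cupy.cuda.Device(device_id)
            device.use()              # later CuPy allocations land on this GPU
            return device
        return None

    if select_device(0) is not None:
        x = cupy.zeros((2, 2))        # allocated on GPU 0
        print(x.device)               # <CUDA Device 0>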
@@ -296,6 +301,7 @@ class Language(object):
         optimizer = Adam(Model.ops, learn_rate, L2=L2, beta1=beta1,
                          beta2=beta2, eps=eps)
         optimizer.max_grad_norm = max_grad_norm
+        optimizer.device = device
         return optimizer
 
     def evaluate(self, docs_golds):
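The optimizer now carries the selected device, so later code can find the active GPU without re-reading cfg; None means CPU. A minimal sketch of a hypothetical consumer (run_update is not spaCy/Thinc API), relying on cupy.cuda.Device being a context manager:

    def run_update(optimizer, do_update):
        # optimizer.device is a cupy.cuda.Device, or None for CPU.
        if getattr(optimizer, 'device', None) is not None:
            with optimizer.device:    # temporarily make this device current
                do_update()
        else:
            do_update()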