Fix parser @ GPU (#5210)

* ensure self.bias is numpy array in parser model

* 2 more little bug fixes for parser on GPU

* removing testing GPU statement

* remove commented code
This commit is contained in:
Sofie Van Landeghem 2020-03-28 23:09:35 +01:00 committed by GitHub
parent 9b412516e7
commit 1f9852abc3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 6 deletions

View File

@@ -79,7 +79,7 @@ def _backprop_precomputable_affine_padding(model, dY, ids):
# for b in range(nB):
# for f in range(nF):
# if ids[b, f] < 0:
# d_padding[0, f] += dY[b]
# d_pad[0, f] += dY[b]
#
# Which can be rewritten as:
#
@@ -88,9 +88,13 @@ def _backprop_precomputable_affine_padding(model, dY, ids):
#
# I don't know how to avoid the loop without building a whole array :(.
# Cursed numpy.
#
# Note by Sofie: rewritten to longer loop because "CuPy only supports slices that consist of one boolean array."
d_pad = model.ops.alloc((1, nF, nO, nP))
for b in range(nB):
d_pad[0, ids[b] < 0] += dY[b]
for f in range(nF):
if ids[b, f] < 0:
d_pad[0, f] += dY[b]
return d_pad

View File

@@ -371,8 +371,6 @@ class ParserStepModel(Model):
self.ops.scatter_add(d_tokvecs, ids,
d_state_features)
# Padded -- see update()
if isinstance(self.ops, CupyOps):
d_tokvecs = self.ops.to_numpy(d_tokvecs)
self.bp_tokvecs(d_tokvecs[:-1])
return d_tokvecs
@@ -445,8 +443,7 @@ cdef class precompute_hiddens:
else:
cached = gpu_cached
if not isinstance(lower_model.get_param("b"), numpy.ndarray):
# self.bias = lower_model.get_param("b").get(stream=cuda_stream) ???
self.bias = lower_model.get_param("b")
self.bias = lower_model.get_param("b").get(stream=cuda_stream)
else:
self.bias = lower_model.get_param("b")
self.nF = cached.shape[1]