Example #1
0
 def forward_cpu(self, inputs):
     """Compute the mean sigmoid cross-entropy loss on CPU.

     Args:
         inputs: pair ``(x, t)`` of the raw scores and the targets
             (presumably 0/1 labels — confirm against callers).

     Returns:
         One-element tuple holding the scalar loss as a numpy array
         with ``x``'s dtype.
     """
     x, t = inputs
     # Cache sigmoid(x); backward presumably reuses self.y.
     self.y, = sigmoid.Sigmoid().forward_cpu((x,))
     # log1p(exp(-|x|)) is the overflow-safe form of the softplus
     # term, so large |x| never feeds exp() a huge positive argument.
     softplus = numpy.log1p(numpy.exp(-numpy.abs(x)))
     elementwise = x * (t - (x >= 0)) - softplus
     total = -numpy.sum(elementwise)
     # Average over the leading (batch) dimension.
     return numpy.array(total / t.shape[0], dtype=x.dtype),
 def forward_gpu(self, inputs):
     """Compute the mean sigmoid cross-entropy loss on GPU.

     Args:
         inputs: pair ``(x, t)`` of raw scores and integer targets
             (the kernel declares ``t`` as ``int*`` — t must be an
             int32 GPU array; verify against callers).

     Returns:
         One-element tuple holding the scalar loss.
     """
     x, t = inputs
     # Cache sigmoid(x) for reuse (presumably by backward).
     self.y, = sigmoid.Sigmoid(self.use_cudnn).forward_gpu((x, ))
     # Stable per-element cross entropy summed by a reduction kernel:
     # x*(t - (x>=0)) - log1p(exp(-|x|)) mirrors the CPU formula.
     # NOTE(review): the call arguments (t, x) must stay in the same
     # order as the 'int* t, float* x' parameter string.
     loss = -cuda.reduce(
         'int* t, float* x',
         'x[i] * (t[i] - (x[i] >= 0)) - log1pf(expf(-fabsf(x[i])))', 'a+b',
         '0', 'sigmoid_crossent_fwd', numpy.float32)(t, x)
     # Average over the leading (batch) dimension.
     return loss / t.shape[0],
Example #3
0
 def forward_gpu(self, inputs):
     """Compute the mean sigmoid cross-entropy loss on GPU.

     Args:
         inputs: pair ``(x, t)`` of raw scores (type ``T``) and
             targets (type ``S``) as GPU arrays.

     Returns:
         One-element tuple holding the scalar loss.
     """
     x, t = inputs
     # Cache sigmoid(x) for reuse (presumably by backward).
     self.y, = sigmoid.Sigmoid(self.use_cudnn).forward_gpu((x, ))
     # The map expression is the stable per-element cross entropy
     # (same formula as the CPU path). The post-reduction expression
     # 'out = a * inv_cnt' folds BOTH the negation and the batch
     # averaging into one multiply: inv_cnt is passed below as
     # -1.0 / t.shape[0].
     loss = cuda.reduce('T x, S t, T inv_cnt', 'T out',
                        'x * (t - (x >= 0)) - log1p(exp(-fabs(x)))',
                        'a + b', 'out = a * inv_cnt', 0,
                        'sigmoid_crossent_fwd')(x, t, -1.0 / t.shape[0])
     return loss,
 def backward(self, inputs, grad_outputs):
     """Propagate the gradient of the mean sigmoid cross-entropy loss.

     Args:
         inputs: pair ``(x, t)`` of raw scores and targets.
         grad_outputs: one-element sequence with the upstream gradient.

     Returns:
         ``(gx, None)`` — gradient w.r.t. ``x``; targets get none.
     """
     x, t = inputs
     grad_out = grad_outputs[0]
     # d(loss)/dx for sigmoid cross entropy is sigmoid(x) - t.
     y, = sigmoid.Sigmoid(self.use_cudnn).forward((x,))
     dtype = y.dtype
     diff = y - t.astype(dtype)
     # Mask ignored entries, scale by the upstream gradient, and
     # average over self.count; the divisor is cast via dtype.type so
     # the division does not promote the result dtype.
     gx = grad_out * self.ignore_mask * diff / dtype.type(self.count)
     return gx, None
Example #5
0
 def backward(self, inputs, grad_outputs):
     """Propagate the gradient of the mean sigmoid cross-entropy loss.

     Works on either CPU or GPU arrays: ``xp`` is numpy or cupy
     depending on where ``inputs`` live.

     Args:
         inputs: pair ``(x, t)`` of raw scores and targets.
         grad_outputs: one-element sequence with the upstream gradient.

     Returns:
         ``(gx, None)`` — gradient w.r.t. ``x``; targets get none.
     """
     xp = cuda.get_array_module(*inputs)
     x, t = inputs
     grad_out = grad_outputs[0]
     # d(loss)/dx for sigmoid cross entropy is sigmoid(x) - t.
     y, = sigmoid.Sigmoid(self.use_cudnn).forward((x,))
     numerator = grad_out * self.ignore_mask * (y - t)
     # Explicit dtype on xp.divide keeps the result in y's precision
     # even when t is an integer array.
     gx = xp.divide(numerator, self.count, dtype=y.dtype)
     return gx, None