def forward_gpu(self, inputs):
    """Compute the softmax cross entropy loss on GPU.

    Returns a one-element tuple holding the negative log-likelihood of
    the target classes, averaged over the minibatch.
    """
    x, t = inputs
    # Cache the softmax output; the backward pass reads self.y.
    self.y, = Softmax(self.use_cudnn).forward_gpu((x,))
    # Device-side reduction: sum -log(p) at each sample's target index.
    crossent = cuda.reduce(
        'int* t, float* y, int n_channel',
        '-log(y[i * n_channel + t[i]])',
        'a+b', '0', 'crossent_fwd', numpy.float32)
    loss = crossent(t, self.y, self.y.shape[1])
    # Mean over the minibatch.
    loss /= t.size
    return loss,
def forward_cpu(self, inputs):
    """Compute the softmax cross entropy loss on CPU.

    Returns a one-element tuple holding the negative log-likelihood of
    the target classes, averaged over the minibatch.
    """
    x, t = inputs
    # Cache the softmax output; the backward pass reads self.y.
    self.y, = Softmax().forward_cpu((x,))
    # Pick each row's predicted probability at its target label.
    rows = numpy.arange(len(t))
    log_p = numpy.log(self.y[rows, t])
    # keepdims=True keeps the result a 1-element array, not a scalar.
    loss = -log_p.sum(keepdims=True) / t.size
    return loss,