def backward_gpu(self, x, gy):
    """GPU backward for constant-base power: gx = log(value) * value**x * gy."""
    # Cast the constant base to the input dtype before launching the kernel.
    base = utils.force_type(x[0].dtype, self.value)
    kernel = cuda.elementwise(
        'T x, T gy, T value',
        'T gx',
        'gx = log(value) * pow(value, x) * gy',
        'pow_const_var_bwd')
    return kernel(x[0], gy[0], base),
def forward_cpu(self, inputs):
    """CPU forward computing value * x**(value - 1) * gy from (x, gy)."""
    self.retain_inputs((0, 1))
    x, gy = inputs
    # Cache the decremented exponent (cast/broadcast-checked) for later reuse.
    self.val_1 = _preprocess_const(x, self.value - 1)
    coeff = utils.force_type(x.dtype, self.value)
    gx = utils.force_array(coeff * (x ** self.val_1) * gy)
    return gx,
def forward_cpu(self, inputs):
    """Mean of squared positive parts plus beta-weighted squared negative parts of x1 - x0."""
    x0, x1 = inputs
    zero = utils.force_type(x0.dtype, 0)
    flat = (x1 - x0).ravel()
    pos = numpy.maximum(zero, flat)
    neg = numpy.minimum(zero, flat)
    loss = (pos.dot(pos) + self.beta * neg.dot(neg)) / flat.size
    return numpy.array(loss, numpy.float32),
def forward_cpu(self, inputs):
    """CPU forward pass: gradient-style product value * x**(value - 1) * gy."""
    self.retain_inputs((0, 1))
    x, gy = inputs
    # Precompute value - 1 once; it is stored on self for the backward pass.
    self.val_1 = _preprocess_const(x, self.value - 1)
    grad = utils.force_type(x.dtype, self.value) * (x ** self.val_1)
    return utils.force_array(grad * gy),
def forward_cpu(self, inputs):
    """Mean of squared positive parts of (x1 - x0) plus beta * mean of x0 squared."""
    x0, x1 = inputs
    zero = utils.force_type(x0.dtype, 0)
    delta = (x1 - x0).ravel()
    pos = numpy.maximum(zero, delta)
    flat_x0 = x0.ravel()
    loss = (pos.dot(pos) + self.beta * flat_x0.dot(flat_x0)) / delta.size
    return numpy.array(loss, numpy.float32),
def backward_cpu(self, inputs, gy):
    """Gradients w.r.t. x0 and x1; x0 also receives a beta-scaled x0 term."""
    x0, x1 = inputs
    zero = utils.force_type(x0.dtype, 0)
    diff = x1 - x0
    clipped = numpy.maximum(zero, diff)
    scale = 2. * gy[0] / diff.size
    gx1 = scale * clipped
    gx0 = self.beta * scale * x0 - gx1
    return gx0, gx1
def backward_cpu(self, inputs, gy):
    """Gradients for the two-sided squared loss; gx0 is exactly -gx1."""
    x0, x1 = inputs
    zero = utils.force_type(x0.dtype, 0)
    diff = x1 - x0
    above = numpy.maximum(zero, diff)
    below = numpy.minimum(zero, diff)
    factor = 2. * gy[0] / diff.size
    gx1 = factor * (above + self.beta * below)
    return -gx1, gx1
def _preprocess_const(x, value):
    """Cast ``value`` to ``x``'s dtype after validating module and broadcastability.

    Raises:
        TypeError: if ``value`` is an array living in a different array module
            (numpy vs. cupy) than ``x``.
        ValueError: if broadcasting ``value`` against ``x`` would change
            ``x``'s shape.
    """
    xp = cuda.get_array_module(x)
    if not numpy.isscalar(value) and cuda.get_array_module(value) != xp:
        # TODO(unno): We can transfer arrays automatically
        raise TypeError('Cannot mix cupy.ndarray and numpy.ndarray')
    if xp.broadcast(x, value).shape != x.shape:
        raise ValueError('Failed to broadcast arrays')
    return utils.force_type(x.dtype, value)
def backward_cpu(self, x, gy):
    """CPU backward using the cached forward output: log(value) * self.y * gy."""
    base = utils.force_type(gy[0].dtype, self.value)
    grad = numpy.log(base) * self.y * gy[0]
    return utils.force_array(grad),
def forward_cpu(self, x):
    """Elementwise max(x, 0) on CPU, with zero cast to the input dtype."""
    zero = utils.force_type(x[0].dtype, 0)
    y = numpy.maximum(zero, x[0])
    return utils.force_array(y),
def _preprocess_rhs(x, value):
    """Prepare the right-hand operand of a binary op against ``x``."""
    # Variables pass through untouched; constants are type-checked then cast.
    if not isinstance(value, chainer.Variable):
        _check_constant_type(value)
        value = utils.force_type(x.dtype, value)
    return value
def forward_cpu(self, x):
    """Elementwise max(x, 0) on CPU."""
    # numpy.maximum broadcasts the dtype-matched scalar zero over the input.
    y = numpy.maximum(utils.force_type(x[0].dtype, 0), x[0])
    return (utils.force_array(y),)
def backward_cpu(self, x, gy):
    """CPU backward: log(value) times the cached forward output times gy."""
    c = utils.force_type(x[0].dtype, self.value)
    return utils.force_array(numpy.log(c) * self.y * gy[0]),
def backward(self, x, gy):
    """Scale the upstream gradient by the stored constant (cast to gy's dtype)."""
    c = utils.force_type(gy[0].dtype, self.value)
    return utils.force_array(c * gy[0]),
def backward_gpu(self, x, gy):
    """GPU backward computing -value * gy / x**2 in a single elementwise kernel."""
    c = utils.force_type(x[0].dtype, self.value)
    kern = cuda.elementwise(
        'T x, T gy, T value',
        'T gx',
        'gx = -value * gy / (x * x)',
        'div_from_const_bwd')
    return kern(x[0], gy[0], c),
def backward_cpu(self, x, gy):
    """CPU backward of the constant-exponent power: value * x**(value-1) * gy."""
    exponent = utils.force_type(x[0].dtype, self.value - 1)
    coeff = utils.force_type(x[0].dtype, self.value)
    return utils.force_array(coeff * (x[0] ** exponent) * gy[0]),
def backward(self, x, gy):
    """Multiply the upstream gradient by the stored constant (cast to x's dtype)."""
    scaled = utils.force_type(x[0].dtype, self.value) * gy[0]
    return utils.force_array(scaled),
def backward_cpu(self, x, gy):
    """CPU gradient of the constant-exponent power function."""
    # _preprocess_const also validates broadcastability against x[0].
    exp_minus_1 = _preprocess_const(x[0], self.value - 1)
    gx = utils.force_type(x[0].dtype, self.value) * (x[0] ** exp_minus_1) * gy[0]
    return utils.force_array(gx),
def forward_cpu(self, x):
    """CPU forward of value ** x; the result is cached in self.y for backward."""
    base = utils.force_type(x[0].dtype, self.value)
    self.y = utils.force_array(base ** x[0])
    return self.y,
def forward(self, x):
    """Raise the input to the constant power, with the constant cast to x's dtype."""
    exponent = utils.force_type(x[0].dtype, self.value)
    return utils.force_array(x[0] ** exponent),
def backward_cpu(self, x, gy):
    """CPU backward computing -value * gy / x**2 (constant cast to gy's dtype)."""
    c = utils.force_type(gy[0].dtype, self.value)
    gx = -c * gy[0] / (x[0] ** 2)
    return utils.force_array(gx),
def test_force_type_scalar(self):
    """A numpy scalar is cast to the requested dtype."""
    converted = utils.force_type(numpy.dtype(numpy.float32), numpy.int32(1))
    self.assertEqual(converted.dtype, numpy.float32)
def test_force_type_array_no_change(self):
    """An array already of the target dtype keeps that dtype."""
    arr = numpy.array([1], dtype=numpy.float32)
    converted = utils.force_type(numpy.dtype(numpy.float32), arr)
    self.assertEqual(converted.dtype, numpy.float32)
def backward_cpu(self, x, gy):
    """CPU backward computing -value * gy / x**2 (constant cast to x's dtype)."""
    numer = utils.force_type(x[0].dtype, self.value)
    return utils.force_array(-numer * gy[0] / (x[0] ** 2)),
def forward(self, x):
    """Elementwise power with a constant exponent matched to the input dtype."""
    p = utils.force_type(x[0].dtype, self.value)
    y = x[0] ** p
    return utils.force_array(y),
def forward(self, x):
    """Constant-base exponentiation; the output is cached in self.y for backward."""
    b = utils.force_type(x[0].dtype, self.value)
    self.y = utils.force_array(b ** x[0])
    return self.y,
def backward_gpu(self, x, gy):
    """GPU gradient log(value) * value**x * gy via a fused elementwise kernel."""
    c = utils.force_type(x[0].dtype, self.value)
    gx = cuda.elementwise(
        'T x, T gy, T value', 'T gx',
        'gx = log(value) * pow(value, x) * gy',
        'pow_const_var_bwd',
    )(x[0], gy[0], c)
    return gx,