Example #1
0
 def backward_gpu(self, x, gy):
     """GPU backward of ``value ** x`` w.r.t. ``x``.

     Computes ``gy * log(value) * value ** x`` elementwise via a CUDA kernel.
     """
     # Cast the stored constant to the input dtype before the kernel call.
     const = utils.force_type(x[0].dtype, self.value)
     kernel = cuda.elementwise(
         'T x, T gy, T value', 'T gx',
         'gx = log(value) * pow(value, x) * gy',
         'pow_const_var_bwd')
     return kernel(x[0], gy[0], const),
Example #2
0
    def forward_cpu(self, inputs):
        """CPU forward computing ``value * x ** (value - 1) * gy``.

        Retains both inputs and caches the decremented exponent on
        ``self.val_1`` (coerced to the input dtype).
        """
        self.retain_inputs((0, 1))
        base, grad_out = inputs

        self.val_1 = _preprocess_const(base, self.value - 1)
        coeff = utils.force_type(base.dtype, self.value)
        return utils.force_array(coeff * base ** self.val_1 * grad_out),
Example #3
0
 def forward_cpu(self, inputs):
     """CPU forward of an asymmetric squared-difference loss.

     Positive parts of ``x1 - x0`` contribute with weight 1, negative parts
     with weight ``self.beta``; the sum is averaged over all elements and
     returned as a float32 scalar array.
     """
     x0, x1 = inputs
     zero = utils.force_type(x0.dtype, 0)
     flat = (x1 - x0).ravel()
     pos = numpy.maximum(zero, flat)
     neg = numpy.minimum(zero, flat)
     mean = (pos.dot(pos) + self.beta * neg.dot(neg)) / flat.size
     return numpy.array(mean, numpy.float32),
Example #4
0
    def forward_cpu(self, inputs):
        """CPU forward: derivative of ``x ** value``, i.e. value * x**(value-1) * gy."""
        self.retain_inputs((0, 1))
        x, gy = inputs

        # val_1 = value - 1, coerced to x's dtype; kept for later use.
        self.val_1 = _preprocess_const(x, self.value - 1)
        scale = utils.force_type(x.dtype, self.value)
        out = scale * (x ** self.val_1) * gy
        return utils.force_array(out),
Example #5
0
 def forward_cpu(self, inputs):
     """CPU forward of a one-sided squared loss plus a beta-weighted x0 penalty.

     Averages ``max(0, x1 - x0)**2 + beta * x0**2`` over all elements and
     returns the result as a float32 scalar array.
     """
     x0, x1 = inputs
     zero = utils.force_type(x0.dtype, 0)
     gap = (x1 - x0).ravel()
     # Only the positive part of the gap is penalized.
     pos = numpy.maximum(zero, gap)
     x0_flat = x0.ravel()
     total = pos.dot(pos) + self.beta * x0_flat.dot(x0_flat)
     return numpy.array(total / gap.size, numpy.float32),
Example #6
0
 def backward_cpu(self, inputs, gy):
     """CPU backward pairing the pos-part + beta * x0**2 loss.

     gx1 = (2 * gy / N) * max(0, x1 - x0); gx0 = -gx1 + beta * (2 * gy / N) * x0.
     """
     x0, x1 = inputs
     zero = utils.force_type(x0.dtype, 0)
     delta = x1 - x0
     clipped = numpy.maximum(zero, delta)
     scale = 2. * gy[0] / delta.size
     grad1 = scale * clipped
     grad0 = self.beta * scale * x0 - grad1
     return grad0, grad1
Example #7
0
 def backward_cpu(self, inputs, gy):
     """CPU backward of the asymmetric squared loss.

     gx1 = (2 * gy / N) * (pos(diff) + beta * neg(diff)) with diff = x1 - x0,
     and gx0 is its negation.
     """
     x0, x1 = inputs
     zero = utils.force_type(x0.dtype, 0)
     delta = x1 - x0
     pos_part = numpy.maximum(zero, delta)
     neg_part = numpy.minimum(zero, delta)
     scale = 2. * gy[0] / delta.size
     grad1 = scale * (pos_part + self.beta * neg_part)
     return -grad1, grad1
Example #8
0
def _preprocess_const(x, value):
    """Coerce ``value`` to ``x``'s dtype after validating backend and shape.

    Raises:
        TypeError: when mixing numpy and cupy arrays.
        ValueError: when broadcasting ``value`` against ``x`` would change
            ``x``'s shape.
    """
    xp = cuda.get_array_module(x)
    is_array = not numpy.isscalar(value)
    if is_array and cuda.get_array_module(value) != xp:
        # TODO(unno): We can transfer arrays automatically
        raise TypeError('Cannot mix cupy.ndarray and numpy.ndarray')

    # broadcast() also rejects outright-incompatible shapes.
    if xp.broadcast(x, value).shape != x.shape:
        raise ValueError('Failed to broadcast arrays')
    return utils.force_type(x.dtype, value)
Example #9
0
def _preprocess_const(x, value):
    """Return ``value`` cast to ``x.dtype``, checking backend and shape first."""
    xp = cuda.get_array_module(x)
    if not numpy.isscalar(value):
        if cuda.get_array_module(value) != xp:
            # TODO(unno): We can transfer arrays automatically
            raise TypeError('Cannot mix cupy.ndarray and numpy.ndarray')

    # Reject values whose broadcast against x would alter x's shape.
    shape = xp.broadcast(x, value).shape
    if shape != x.shape:
        raise ValueError('Failed to broadcast arrays')
    return utils.force_type(x.dtype, value)
Example #10
0
 def backward_cpu(self, x, gy):
     """CPU backward of ``value ** x``: gx = log(value) * self.y * gy."""
     # self.y is presumably the cached forward output — confirm against forward_cpu.
     const = utils.force_type(gy[0].dtype, self.value)
     grad = numpy.log(const) * self.y * gy[0]
     return utils.force_array(grad),
Example #11
0
 def forward_cpu(self, x):
     """CPU forward: elementwise ``max(0, x)``."""
     lo = utils.force_type(x[0].dtype, 0)
     out = numpy.maximum(lo, x[0])
     return utils.force_array(out),
Example #12
0
def _preprocess_rhs(x, value):
    """Pass Variables through; otherwise validate ``value`` and cast to ``x.dtype``."""
    if not isinstance(value, chainer.Variable):
        _check_constant_type(value)
        value = utils.force_type(x.dtype, value)
    return value
Example #13
0
 def forward_cpu(self, x):
     """CPU forward clipping the input at zero from below."""
     floor = utils.force_type(x[0].dtype, 0)
     clipped = numpy.maximum(floor, x[0])
     return utils.force_array(clipped),
 def backward_cpu(self, x, gy):
     """CPU backward of ``value ** x`` using the cached output ``self.y``."""
     base = utils.force_type(x[0].dtype, self.value)
     grad = numpy.log(base) * self.y * gy[0]
     return utils.force_array(grad),
Example #15
0
 def backward(self, x, gy):
     """Backward scaling the upstream gradient: gx = value * gy."""
     scale = utils.force_type(gy[0].dtype, self.value)
     return utils.force_array(scale * gy[0]),
 def backward_gpu(self, x, gy):
     """GPU backward of ``value / x``: gx = -value * gy / x**2 via CUDA kernel."""
     const = utils.force_type(x[0].dtype, self.value)
     kern = cuda.elementwise('T x, T gy, T value', 'T gx',
                             'gx = -value * gy / (x * x)',
                             'div_from_const_bwd')
     return kern(x[0], gy[0], const),
 def backward_cpu(self, x, gy):
     """CPU backward of ``x ** value``: gx = value * x**(value-1) * gy."""
     dec_exp = utils.force_type(x[0].dtype, self.value - 1)
     coeff = utils.force_type(x[0].dtype, self.value)
     return utils.force_array(coeff * (x[0] ** dec_exp) * gy[0]),
 def backward(self, x, gy):
     """Backward multiplying the upstream gradient by the stored constant."""
     c = utils.force_type(x[0].dtype, self.value)
     grad = c * gy[0]
     return utils.force_array(grad),
Example #19
0
 def backward_cpu(self, x, gy):
     """CPU backward of ``x ** value`` via the power rule."""
     # Exponent decremented by one, validated/cast against the input.
     dec_exp = _preprocess_const(x[0], self.value - 1)
     factor = utils.force_type(x[0].dtype, self.value)
     return utils.force_array(factor * (x[0] ** dec_exp) * gy[0]),
Example #20
0
 def forward_cpu(self, x):
     """CPU forward of ``value ** x``; caches the output on ``self.y``."""
     base = utils.force_type(x[0].dtype, self.value)
     self.y = utils.force_array(base ** x[0])
     return self.y,
Example #21
0
 def forward(self, x):
     """Forward of ``x ** value`` with the exponent cast to the input dtype."""
     exponent = utils.force_type(x[0].dtype, self.value)
     return utils.force_array(x[0] ** exponent),
Example #22
0
 def backward_cpu(self, x, gy):
     """CPU backward of ``value / x``: gx = -value * gy / x**2."""
     numer = utils.force_type(gy[0].dtype, self.value)
     grad = -numer * gy[0] / (x[0] ** 2)
     return utils.force_array(grad),
Example #23
0
 def test_force_type_scalar(self):
     """force_type should cast a numpy scalar to the requested dtype."""
     scalar = numpy.int32(1)
     cast = utils.force_type(numpy.dtype(numpy.float32), scalar)
     self.assertEqual(cast.dtype, numpy.float32)
Example #24
0
 def test_force_type_array_no_change(self):
     """force_type should keep an array already in the target dtype at that dtype."""
     arr = numpy.array([1], dtype=numpy.float32)
     result = utils.force_type(numpy.dtype(numpy.float32), arr)
     self.assertEqual(result.dtype, numpy.float32)
 def backward_cpu(self, x, gy):
     """CPU backward of a constant divided by x: gx = -value * gy / x**2."""
     c = utils.force_type(x[0].dtype, self.value)
     numerator = -c * gy[0]
     return utils.force_array(numerator / (x[0]**2)),
Example #26
0
 def test_force_type_scalar(self):
     """Casting an int32 scalar through force_type must yield float32."""
     value = numpy.int32(1)
     converted = utils.force_type(numpy.dtype(numpy.float32), value)
     self.assertEqual(converted.dtype, numpy.float32)
 def forward(self, x):
     """Elementwise power with a constant exponent cast to the input dtype."""
     p = utils.force_type(x[0].dtype, self.value)
     result = x[0]**p
     return utils.force_array(result),
Example #28
0
 def test_force_type_array_no_change(self):
     """An array already in the target dtype keeps that dtype after force_type."""
     src = numpy.array([1], dtype=numpy.float32)
     out = utils.force_type(numpy.dtype(numpy.float32), src)
     self.assertEqual(out.dtype, numpy.float32)
 def forward(self, x):
     """Forward of ``value ** x``; the result is cached on ``self.y``."""
     b = utils.force_type(x[0].dtype, self.value)
     self.y = utils.force_array(b**x[0])
     return self.y,
Example #30
0
def _preprocess_rhs(x, value):
    """Normalize the right-hand operand of a binary op.

    Variables pass through untouched; anything else is type-checked and
    cast to ``x.dtype``.
    """
    if not isinstance(value, chainer.Variable):
        _check_constant_type(value)
        value = utils.force_type(x.dtype, value)
    return value
 def backward_gpu(self, x, gy):
     """GPU backward of ``value ** x``: gx = log(value) * value**x * gy."""
     c = utils.force_type(x[0].dtype, self.value)
     out = cuda.elementwise('T x, T gy, T value', 'T gx',
                            'gx = log(value) * pow(value, x) * gy',
                            'pow_const_var_bwd')(x[0], gy[0], c)
     return out,
Example #32
0
 def backward_gpu(self, x, gy):
     """GPU backward of ``value / x`` computed by an elementwise CUDA kernel."""
     c = utils.force_type(x[0].dtype, self.value)
     f = cuda.elementwise('T x, T gy, T value', 'T gx',
                          'gx = -value * gy / (x * x)',
                          'div_from_const_bwd')
     return f(x[0], gy[0], c),