Example #1
 def check_backward(self, x_data, y_grad):
     def f(x):
         return functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
                                       cover_all=self.cover_all)
     gradient_check.check_backward(
         f, x_data, y_grad, dtype=numpy.float64,
         **self.check_backward_options)
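All of the examples follow the same calling pattern: a callable, its input array(s), and the upstream gradient are handed to gradient_check.check_backward, which compares the analytic backward pass against a finite-difference estimate. A minimal, self-contained sketch (not taken from any example here; functions.tanh is used purely for illustration):

import numpy
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

# Runs tanh forward, backpropagates gy, and compares the analytic gradient
# of x against a numerical (finite-difference) estimate.
gradient_check.check_backward(
    functions.tanh, x, gy, dtype=numpy.float64, atol=1e-4, rtol=1e-4)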
Example #2
    def check_backward(self, x_data, W_data, y_grad):
        def f(x, W):
            return chainer.functions.embed_id(x, W, self.ignore_label)

        gradient_check.check_backward(
            f, (x_data, W_data), y_grad, dtype=numpy.float64,
            **self.check_backward_options)
Example #3
    def check_backward(self, x_data, W_data, b_data, y_grad,
                       use_cudnn='never'):
        if not self.c_contiguous:
            xp = backend.get_array_module(x_data)
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args += (b_data,)

        def f(*args):
            return F.deconvolution_nd(*args, stride=self.stride, pad=self.pad,
                                      outsize=self.outsize, dilate=self.dilate,
                                      groups=self.groups)

        with chainer.using_config('use_cudnn', use_cudnn):
            with chainer.using_config('autotune', self.autotune):
                gradient_check.check_backward(
                    f, args, y_grad, **self.check_backward_options)
Example #4
    def check_backward(self, link, x_data, y_grad):
        params = [link.W]
        if not self.nobias:
            params.append(link.b)

        gradient_check.check_backward(
            link, x_data, y_grad, params, **self.check_backward_options)
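As Example #4 shows, the target can be a Link; passing its parameters as the fourth argument makes check_backward verify their gradients as well. A hedged sketch assuming a plain chainer.links.Linear:

import numpy
import chainer
from chainer import gradient_check

link = chainer.links.Linear(4, 3)
x = numpy.random.uniform(-1, 1, (2, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

# The parameter tuple lists the link parameters whose gradients are also checked.
gradient_check.check_backward(
    link, x, gy, (link.W, link.b), atol=1e-4, rtol=1e-4)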
Example #5
 def check_backward(self, x_data, roi_data, y_grad):
     gradient_check.check_backward(
         functions.ROIPooling2D(outh=self.outh,
                                outw=self.outw,
                                spatial_scale=self.spatial_scale),
         (x_data, roi_data), y_grad, no_grads=[False, True],
         **self.check_backward_options)
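The no_grads=[False, True] flag above skips numerical differentiation of the second input (the ROI coordinates). A hedged sketch of the same flag with functions.embed_id, whose first input is an integer ID array:

import numpy
from chainer import functions, gradient_check

ids = numpy.array([0, 2, 1], dtype=numpy.int32)
W = numpy.random.uniform(-1, 1, (5, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

# True entries mark inputs that must not be differentiated numerically.
gradient_check.check_backward(
    functions.embed_id, (ids, W), gy, no_grads=[True, False], atol=1e-4)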
Example #6
 def check_backward(self, c_prev1_data, c_prev2_data, x1_data, x2_data,
                    c_grad, h_grad):
     gradient_check.check_backward(
         functions.SLSTM(),
         (c_prev1_data, c_prev2_data, x1_data, x2_data),
         (c_grad, h_grad), dtype=numpy.float64,
         **self.check_backward_options)
Example #7
    def check_backward(self, x_data, t_data, w_data, y_grad, sampler):
        def f(x, w):
            return functions.negative_sampling(
                x, t_data, w, sampler, self.sample_size, reduce=self.reduce)

        gradient_check.check_backward(
            f, (x_data, w_data), y_grad, **self.check_backward_options)
Example #8
    def check_backward(self, x_data, y_data, gy_data, ggx_data):
        def f(y, gy):
            return tanh.TanhGrad(x_data).apply((y, gy))[0]

        gradient_check.check_backward(
            f, (y_data, gy_data), ggx_data, dtype=numpy.float64,
            **self.check_backward_options)
Example #9
    def check_backward(self, x_data, g_data):
        def func(x):
            return functions.cast(x, self.out_type)

        gradient_check.check_backward(
            func, x_data, g_data, dtype='d',
            eps=2.0 ** -2, atol=1e-2, rtol=1e-3)
Example #10
 def check_zero_det(self, x, gy, err):
     if self.batched:
         x[0, ...] = 0.0
     else:
         x[...] = 0.0
     with self.assertRaises(err):
         gradient_check.check_backward(self.det, x, gy)
Example #11
    def check_backward(
            self, h_data, xs_data, gh_data, gys_data):

        def fun(*args):
            if self.hidden_none:
                h = None
                xs = args
            else:
                h, = args[:1]
                xs = args[1:]
            hy, ys = self.rnn(h, xs)
            return tuple([hy, ] + list(ys))

        params = []
        for layer in self.rnn:
            for p in layer.params():
                params.append(p)

        if self.hidden_none:
            in_data = xs_data
        else:
            in_data = [h_data, ] + xs_data
        gradient_check.check_backward(
            fun, tuple(in_data),
            tuple([gh_data, ] + gys_data),
            tuple(params), eps=1e-2, rtol=1e-3, atol=1e-3)
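Callables that return a tuple of outputs, like the RNN above, take a matching tuple of output gradients. A hedged sketch using functions.split_axis for illustration:

import numpy
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
gys = tuple(numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
            for _ in range(2))

# split_axis returns two (3, 2) outputs, so y_grad is a tuple of two arrays.
gradient_check.check_backward(
    lambda x: functions.split_axis(x, 2, axis=1), x, gys, atol=1e-4)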
Example #12
    def check_backward(self, rx_data, ix_data, rg_data, ig_data):
        def f(real, imag):
            return getattr(chainer.functions, self.method)((real, imag))

        gradient_check.check_backward(
            f, (rx_data, ix_data),
            (rg_data, ig_data), dtype='d', eps=2.0 ** -2, atol=1e-2, rtol=1e-3)
Example #13
    def check_backward(self, x0_data, x1_data, t_data, gy_data):
        def f(x0, x1, t):
            return functions.contrastive(x0, x1, t, self.margin, self.reduce)

        gradient_check.check_backward(
            f, (x0_data, x1_data, t_data), gy_data, dtype='d',
            **self.check_backward_options)
Example #14
    def check_backward(self, inputs, grad_outputs, backend_config):

        xp = backend_config.xp
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
            grad_outputs = cuda.to_gpu(grad_outputs)

        x_data, W_data, b_data = inputs
        y_grad, = grad_outputs

        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            assert not x_data.flags.c_contiguous
            assert not W_data.flags.c_contiguous
            assert not y_grad.flags.c_contiguous
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
                b[::2] = b_data
                b_data = b[::2]
                assert not b_data.flags.c_contiguous

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        def f(*args):
            return F.deconvolution_2d(
                *args, stride=self.stride, pad=self.pad, outsize=self.outsize,
                dilate=self.dilate, groups=self.groups)

        with backend_config:
            gradient_check.check_backward(
                f, args, y_grad, **self.check_backward_options)
Example #15
    def check_backward(self, x_data, W_data, b_data, y_grad):
        xp = backend.get_array_module(x_data)
        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        def f(*args):
            return F.dilated_convolution_2d(*args, stride=self.stride,
                                            pad=self.pad, dilate=self.dilate,
                                            cover_all=self.cover_all)

        with chainer.using_config('use_cudnn', self.use_cudnn):
            gradient_check.check_backward(
                f, args, y_grad, dtype=numpy.float64,
                **self.check_backward_options)
Example #16
def test_backward_gpu(model, data):
    atom_data, adj_data, y_grad = [cuda.to_gpu(d) for d in data]
    model.to_gpu()
    params = tuple(model.params())
    gradient_check.check_backward(model, (atom_data, adj_data), y_grad,
                                  params=params, no_grads=[True, True],
                                  atol=1e3, rtol=1e3)
Example #17
    def check_backward(self, x_data, t_data, w_data, samples_data, gy_data):
        def _black_out(x, t, W, samples):
            return functions.black_out(x, t, W, samples, self.reduce)

        gradient_check.check_backward(
            _black_out, (x_data, t_data, w_data, samples_data),
            gy_data, atol=1.e-3)
Example #18
 def check_backward(self, x_data, t_data, class_weight, use_cudnn=True):
     func = functions.SoftmaxCrossEntropy(
         use_cudnn=use_cudnn, cache_score=self.cache_score,
         class_weight=class_weight)
     gradient_check.check_backward(
         func, (x_data, t_data), None, eps=0.02,
         **self.check_backward_options)
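Passing None as y_grad, as the loss examples here do, is valid when the checked callable returns a zero-dimensional array. A hedged sketch with functions.softmax_cross_entropy:

import numpy
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 5)).astype(numpy.float32)
t = numpy.array([0, 4, 2], dtype=numpy.int32)

# With y_grad=None the output must be a scalar; the integer labels t are not
# differentiated numerically because they are not a float array.
gradient_check.check_backward(
    functions.softmax_cross_entropy, (x, t), None, eps=0.02, atol=1e-3)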
Example #19
    def check_backward(self, x_data, W_data, b_data, y_grad):
        xp = cuda.get_array_module(x_data)

        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                gradient_check.check_backward(
                    convolution_2d.Convolution2DFunction(
                        self.stride, self.pad, self.cover_all),
                    args, y_grad, **self.check_backward_options)
Example #20
def test_backward_cpu(readouts, data):
    atom_data, y_grad = data
    for readout in readouts:
        if readout.mode == 'summax':
            y_grad = functions.concat((y_grad, y_grad), axis=1).data
        gradient_check.check_backward(
            readout, atom_data, y_grad, atol=1e-2, rtol=1e-2)
Example #21
 def check_backward(self, x_data, t_data, class_weight, use_cudnn='always'):
     with chainer.using_config('use_cudnn', use_cudnn):
         func = functions.SoftmaxCrossEntropy(
             cache_score=self.cache_score, class_weight=class_weight)
         gradient_check.check_backward(
             func, (x_data, t_data), None,
             **self.check_backward_options)
Example #22
def test_backward_cpu(model, model_processed, data):
    atom_data_processed, atom_data, adj_data, y_grad = data
    gradient_check.check_backward(model, (atom_data, adj_data), y_grad,
                                  atol=1e-1, rtol=1e-1)
    gradient_check.check_backward(model_processed, (atom_data_processed,
                                                    adj_data), y_grad,
                                  atol=1e-1, rtol=1e-1)
Example #23
 def check_backward(self, x_data, y_grad):
     if self.dtype == numpy.float16:
         tol = 0.1
     else:
         tol = 1e-4
     gradient_check.check_backward(
         lambda x: x ** 2, x_data, y_grad, atol=tol, rtol=tol)
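Instead of (or in addition to) loosening tolerances for float16 inputs as above, many of the examples pass dtype=numpy.float64 so that the numerical gradient is evaluated in double precision. A hedged sketch:

import numpy
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float16)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float16)

# dtype=numpy.float64 casts the data before finite differencing, keeping the
# reference gradient accurate even for low-precision test inputs.
gradient_check.check_backward(
    functions.sigmoid, x, gy, dtype=numpy.float64, atol=1e-3, rtol=1e-3)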
Example #24
    def check_backward(self, x_data, W_data, b_data, y_grad,
                       use_cudnn='never'):
        xp = cuda.get_array_module(x_data)
        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertTrue(x_data.flags.f_contiguous)
            self.assertTrue(W_data.flags.f_contiguous)
            self.assertTrue(y_grad.flags.f_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        ndim = len(self.dims)
        with chainer.using_config('use_cudnn', use_cudnn):
            gradient_check.check_backward(
                convolution_nd.ConvolutionND(
                    ndim, self.stride, self.pad, self.cover_all),
                args, y_grad, **self.check_backward_options)
Example #25
    def check_backward(self, x_data, y_grad):
        def f(x):
            return functions.elu(x, alpha=self.alpha)

        gradient_check.check_backward(
            f, x_data, y_grad, dtype=numpy.float64,
            **self.check_backward_options)
Example #26
    def check_backward(self, x_data, W_data, b_data, y_grad):
        xp = cuda.get_array_module(x_data)

        if not self.c_contiguous:
            x_data = xp.asfortranarray(x_data)
            W_data = xp.asfortranarray(W_data)
            y_grad = xp.asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)

        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)

        def f(*args):
            return F.convolution_2d(*args, stride=self.stride, pad=self.pad,
                                    cover_all=self.cover_all,
                                    dilate=self.dilate)

        with chainer.using_config('use_cudnn', self.use_cudnn):
            with chainer.using_config('cudnn_deterministic',
                                      self.cudnn_deterministic):
                with chainer.using_config('autotune', self.autotune):
                    gradient_check.check_backward(
                        f, args, y_grad, dtype='d', atol=5e-4, rtol=5e-3)
Example #27
    def check_backward(self, x_data, y_grad):
        def f(x):
            x_min, x_max = self.x_min_max
            return functions.clip(x, x_min, x_max)

        gradient_check.check_backward(
            f, x_data, y_grad, dtype=numpy.float64)
Example #28
def check_backward_ignore_nan_with_nonnan_value(inputs):
    x0_data, x1_data, _ = inputs

    def func(x0, x1):
        return chainer_chemistry.functions.mean_squared_error(x0, x1,
                                                              ignore_nan=True)
    gradient_check.check_backward(func, (x0_data, x1_data), None, eps=1e-2)
Example #29
    def check_backward(self, x_data, y_grad):
        params = [self.link.W]
        if not self.nobias:
            params.append(self.link.b)

        gradient_check.check_backward(
            self.link, x_data, y_grad, params, eps=1e-2)
Example #30
 def check_backward(self, op, x_data, y_grad):
     options = {}
     if self.dtype == numpy.float16:
         options = {'atol': 5e-3, 'rtol': 5e-2}
     gradient_check.check_backward(lambda x: op(x, self.value),
                                   x_data, y_grad,
                                   dtype=numpy.float64, **options)
Example #31
    def check_backward(self, x_data, y_grad):
        def f(x):
            return functions.get_item(x, self.slices)

        gradient_check.check_backward(f, (x_data, ), y_grad, dtype='d')
Example #32
 def check_backward(self, x_data, y_grad):
     params = [self.link.linear.W]
     if self.initial_bias is not None:
         params.append(self.link.linear.b)
     gradient_check.check_backward(
         self.link, x_data, y_grad, params, atol=1e-2)
Example #33
 def check_backward(self, x_data, y_grad, use_cudnn='always'):
     with chainer.using_config('use_cudnn', use_cudnn):
         gradient_check.check_backward(
             functions.AveragePooling2D(3, 2, 1, False), x_data, y_grad,
             **self.check_backward_options)
Example #34
 def check_backward(self, x_data, y_grad):
     gradient_check.check_backward(self.link, x_data, y_grad,
                                   (self.link.W, self.link.b),
                                   **self.check_backward_options)
Example #35
 def check_backward(self, x_data, y_grad, **kwargs):
     gradient_check.check_backward(functions.batch_inv, x_data, y_grad,
                                   **self.check_backward_options)
Example #36
 def check_backward(self, x_data, g_data):
     gradient_check.check_backward(
         lambda x: functions.squeeze(x, self.axis),
         x_data, g_data, **self.check_backward_options)
Example #37
 def check_backward(self, x_data, y_grad):
     gradient_check.check_backward(F.log1p, x_data, y_grad)
Example #38
 def check_backward(self, x_data, y_grad):
     gradient_check.check_backward(self.link,
                                   x_data,
                                   y_grad, (self.link.W, self.link.b),
                                   eps=1e-2)
Example #39
 def check_backward(self, x_data, y_grad, use_cudnn=True):
     gradient_check.check_backward(functions.AveragePooling2D(
         3, 2, 1, False, use_cudnn),
                                   x_data,
                                   y_grad,
                                   eps=1e-2)
Example #40
 def check_backward(self, x1_data, x2_data, axis, y_grad):
     x = (x1_data, x2_data)
     gradient_check.check_backward(lambda x, y: functions.scale(x, y, axis),
                                   x, y_grad, **self.check_backward_options)
Example #41
 def check_backward(self, args, y_grad, use_cudnn='always'):
     with chainer.using_config('use_cudnn', use_cudnn), \
             chainer.using_config('train', self.train):
         gradient_check.check_backward(self.batch_normalization, args,
                                       y_grad,
                                       **self.check_backward_options)
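The chainer.using_config blocks above scope cuDNN, autotune, and train-mode settings to the check itself. A minimal hedged sketch of the pattern (functions.exp is used purely for illustration):

import numpy
import chainer
from chainer import functions, gradient_check

x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)

# The configuration change applies only inside the with block.
with chainer.using_config('use_cudnn', 'never'):
    gradient_check.check_backward(functions.exp, x, gy, atol=1e-4)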
Example #42
 def check_backward(self, args, y_grad):
     with chainer.using_config('train', self.train):
         gradient_check.check_backward(
             batch_normalization.BatchNormalizationFunction(
                 mean=None, var=None, decay=self.decay, eps=self.eps), args,
             y_grad, **self.check_backward_options)
Example #43
 def check_backward(self, inputs_data, output_grad, atol, rtol):
     gradient_check.check_backward(
         self.op, inputs_data, output_grad, atol=atol, rtol=rtol,
         dtype=numpy.float64)
Example #44
 def check_backward(self, x_data, gy_data, use_cudnn='always'):
     with chainer.using_config('use_cudnn', use_cudnn):
         gradient_check.check_backward(functions.LogSoftmax(), x_data,
                                       gy_data,
                                       **self.check_backward_options)
Example #45
    def check_backward(self, x_data, y_grad):
        def f(x):
            return functions.space2depth(x, self.r)

        gradient_check.check_backward(f, x_data, y_grad, dtype=numpy.float64,
                                      **self.check_backward_options)
Example #46
 def check_backward(self, x_data, axis, y_grad):
     gradient_check.check_backward(
         lambda x: functions.cumsum(x, axis), x_data, y_grad,
         dtype=numpy.float64, **self.check_backward_options)
Example #47
 def check_backward(self, x_data, t_data, class_weight, use_cudnn='always'):
     with chainer.using_config('use_cudnn', use_cudnn):
         func = functions.SoftmaxCrossEntropy(cache_score=self.cache_score,
                                              class_weight=class_weight)
         gradient_check.check_backward(func, (x_data, t_data), None,
                                       **self.check_backward_options)
Example #48
# -*- coding: utf-8 -*-
import sys, os
from chainer import cuda, gradient_check, Variable
sys.path.append(os.path.split(os.getcwd())[0])
import model

xp = cuda.cupy
context = xp.random.uniform(-1, 1, (2, 3)).astype(xp.float32)
weight = xp.random.uniform(-1, 1, (2, 1)).astype(xp.float32)
z = xp.full((2, 1), 10.0).astype(xp.float32)
y = model.apply_attention(Variable(context), Variable(weight) / Variable(z))
print(y.data)
y_grad = xp.random.uniform(-1.0, 1.0, (2, 3)).astype(xp.float32)
gradient_check.check_backward(model.Attention(), (context, weight / z),
                              y_grad,
                              eps=1e-2)
Example #49
 def check_backward(self, x_data, axis, y_grad):
     gradient_check.check_backward(
         lambda x: functions.cumprod(x, axis), x_data, y_grad,
         atol=1e-3, dtype=numpy.float64)
Example #50
 def check_backward(self, x_data, t_data, g_data, class_weight):
     func = functions.SoftmaxCrossEntropy(cache_score=self.cache_score,
                                          class_weight=class_weight,
                                          reduce='no')
     gradient_check.check_backward(func, (x_data, t_data), g_data,
                                   **self.check_backward_options)
Example #51
 def check_backward(self, x_data, gy_data, use_cudnn=True):
     gradient_check.check_backward(functions.Softmax(use_cudnn), x_data,
                                   gy_data, **self.check_backward_options)
Example #52
 def check_backward(self, h_data, gloss_data):
     gradient_check.check_backward(functions.DeCov(self.reduce), (h_data, ),
                                   gloss_data,
                                   eps=0.02,
                                   atol=1e-3)
Example #53
 def check_backward(self, x_data, grad):
     gradient_check.check_backward(functions.HardSigmoid(), x_data, grad,
                                   **self.check_backward_options)
Example #54
 def check_backward(self, x_data, roi_data, y_grad):
     gradient_check.check_backward(functions.ROIPooling2D(
         outh=self.outh, outw=self.outw, spatial_scale=self.spatial_scale),
                                   (x_data, roi_data),
                                   y_grad,
                                   no_grads=[False, True])
Example #55
 def check_backward(self, x_data, y_grad):
     gradient_check.check_backward(chainerrl.functions.SumArrays(),
                                   x_data,
                                   y_grad,
                                   eps=1e-2,
                                   rtol=1e-2)
Example #56
 def check_backward(self, x1_data, x2_data, axis, y_grad):
     x = (x1_data, x2_data)
     gradient_check.check_backward(lambda x, y: functions.bias(x, y, axis),
                                   x, y_grad)
Example #57
    def check_backward(self, x_data, t_data, y_grad):
        def f(x, t):
            return functions.huber_loss(x, t, delta=1, reduce=self.reduce)

        gradient_check.check_backward(
            f, (x_data, t_data), y_grad, **self.backward_options)
Example #58
 def check_backward(self, x_data, y_grad, axis=None):
     gradient_check.check_backward(lambda x: functions.logsumexp(x, axis),
                                   x_data, y_grad,
                                   **self.check_backward_option)
Example #59
def test_backward_gpu(model, data):
    # type: (GIN, Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]) -> None
    atom_data, adj_data, y_grad = map(cuda.to_gpu, data)
    model.to_gpu()
    gradient_check.check_backward(
        model, (atom_data, adj_data), y_grad, atol=1e-5, rtol=1e-5)
Example #60
 def check_backward(self, x_data, g_data):
     gradient_check.check_backward(
         functions.Tile(self.reps), x_data, g_data,
         **self.check_backward_options)