Code Example #1
    def check_negative(self, xp, func_name, input, eps, nout):
        # Should be differentiable
        func = getattr(self, '_func_{}'.format(func_name))
        grad_outputs = [
            xp.random.uniform(-1, 1, input.shape).astype(input.dtype)
            for _ in range(nout)]

        def f():
            return func(input) * nout

        try:
            gradient_check.numerical_grad(
                f, (input,), grad_outputs, eps=eps,
                detect_nondifferentiable=True)
        except gradient_check.NondifferentiableError as e:
            raise AssertionError(
                'Function `{}` is expected to be differentiable, '
                'but determined to be non-differentiable.\n\n'
                'eps: {}\n'
                'input: {}\n'
                'xp: {}\n\n'
                '{}: {}'
                ''.format(
                    func_name, eps, input, xp.__name__,
                    e.__class__.__name__, e))
Code Example #2
    def check_backward(self, x_data, W_data, b_data, y_grad):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        b = None if b_data is None else chainer.Variable(b_data)
        y = functions.convolution_2d(x,
                                     W,
                                     b,
                                     stride=self.stride,
                                     pad=self.pad,
                                     use_cudnn=self.use_cudnn)

        y.grad = y_grad
        y.backward()

        func = y.creator
        if b is None:
            f = lambda: func.forward((x.data, W.data))
            gx, gW = gradient_check.numerical_grad(f, (x.data, W.data),
                                                   (y.grad, ),
                                                   eps=1e-2)
        else:
            f = lambda: func.forward((x.data, W.data, b.data))
            gx, gW, gb = gradient_check.numerical_grad(
                f, (x.data, W.data, b.data), (y.grad, ), eps=1e-2)

        gradient_check.assert_allclose(gx, x.grad)
        gradient_check.assert_allclose(gW, W.grad)
        if b is not None:
            gradient_check.assert_allclose(gb, b.grad)
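The methods in these examples are taken from test classes, so they rely on attributes such as self.stride, self.pad, or _func_* helpers that are not shown here. For orientation, the following is a minimal self-contained sketch of the same backward-check pattern, assuming functions.tanh as the function under test; the variable names, shapes, and tolerances are ours, not taken from any of the examples.

import numpy as np
import chainer
from chainer import functions, gradient_check, testing

# Analytic gradient via backward().
x_data = np.random.uniform(-1, 1, (3, 2)).astype(np.float32)
gy_data = np.random.uniform(-1, 1, (3, 2)).astype(np.float32)
x = chainer.Variable(x_data)
y = functions.tanh(x)
y.grad = gy_data
y.backward()

# Numerical gradient: numerical_grad perturbs x.data in place, so f must
# recompute the forward pass on every call.
def f():
    return functions.tanh(x).data,

gx, = gradient_check.numerical_grad(f, (x.data,), (gy_data,), eps=1e-2)
testing.assert_allclose(gx, x.grad, atol=1e-3, rtol=1e-3)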
Code Example #3
File: test_maxout.py  Project: hillbig/chainer
    def check_backward(self, x_data, W_data, b_data, y_grad):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        if b_data is None:
            y = functions.maxout(x, W)
        else:
            b = chainer.Variable(b_data)
            y = functions.maxout(x, W, b)

        y.grad = y_grad
        y.backward()
        func = y.creator

        if b_data is None:
            f = lambda: func.forward((x.data, W.data))
            gx, gW = gradient_check.numerical_grad(f, (x.data, W.data),
                                                   (y.grad, ),
                                                   eps=1e-2)
        else:
            f = lambda: func.forward((x.data, W.data, b.data))
            gx, gW, gb = gradient_check.numerical_grad(
                f, (x.data, W.data, b.data), (y.grad, ), eps=1e-2)

        gradient_check.assert_allclose(gx, x.grad, atol=1e-2)
        gradient_check.assert_allclose(gW, W.grad, atol=1e-2)
        if b_data is not None:
            gradient_check.assert_allclose(gb, b.grad, atol=1e-2)
Code Example #4
    def check_negative(self, xp, func_name, inputs, eps, nout):
        # Should be differentiable
        func = getattr(self, '_func_{}'.format(func_name))
        grad_outputs = [
            xp.random.uniform(-1, 1, _.shape).astype(_.dtype) for _ in inputs
        ]

        def f():
            return func(*inputs) * nout

        try:
            gradient_check.numerical_grad(f,
                                          inputs,
                                          grad_outputs,
                                          eps=eps,
                                          detect_nondifferentiable=True)
        except gradient_check.NondifferentiableError as e:
            raise AssertionError(
                'Function `{}` is expected to be differentiable, '
                'but determined to be non-differentiable.\n\n'
                'eps: {}\n'
                'inputs: {}\n'
                'xp: {}\n\n'
                '{}: {}'
                ''.format(func_name, eps, inputs, xp.__name__,
                          e.__class__.__name__, e))
Code Example #5
    def check_positive(self, xp, func_name, input, eps, nout):
        # Should be non-differentiable
        func = getattr(self, '_func_{}'.format(func_name))
        grad_outputs = [
            xp.random.uniform(-1, 1, input.shape).astype(input.dtype)
            for _ in range(nout)
        ]

        def f():
            return func(input) * nout

        try:
            gradient_check.numerical_grad(f, (input, ),
                                          grad_outputs,
                                          eps=eps,
                                          detect_nondifferentiable=True)
        except gradient_check.NondifferentiableError:
            pass
        else:
            raise AssertionError(
                'Function `{}` is expected to be non-differentiable, '
                'but determined to be differentiable.\n\n'
                'eps: {}\n'
                'input: {}\n'
                'xp: {}\n'
                ''.format(func_name, eps, input, xp.__name__))
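Examples #1, #4, and #5 exercise detect_nondifferentiable through _func_* helpers defined elsewhere in the test class. As a rough self-contained illustration of the flag itself (a toy step function of our own, not taken from the tests): numerical_grad samples the function at several points around the input and raises NondifferentiableError when those values do not behave like a smooth function.

import numpy as np
from chainer import gradient_check

x = np.zeros((1,), dtype=np.float64)   # np.sign has a jump at 0
gy = np.ones((1,), dtype=np.float64)

def f():
    return np.sign(x),

try:
    gradient_check.numerical_grad(
        f, (x,), (gy,), eps=1e-2, detect_nondifferentiable=True)
except gradient_check.NondifferentiableError:
    print('discontinuity detected at x = 0')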
Code Example #6
    def check_backward(self, x0_data, x1_data, t_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        t = chainer.Variable(t_data)
        loss = functions.contrastive(x0, x1, t, self.margin)
        loss.backward()
        self.assertEqual(None, t.grad)

        func = loss.creator
        f = lambda: func.forward((x0.data, x1.data, t.data))
        gx0, = gradient_check.numerical_grad(f, (x0.data, ), (1, ))
        gx1, = gradient_check.numerical_grad(f, (x1.data, ), (1, ))

        gradient_check.assert_allclose(gx0, x0.grad, rtol=1e-4, atol=1e-4)
        gradient_check.assert_allclose(gx1, x1.grad, rtol=1e-4, atol=1e-4)
Code Example #7
    def check_backward(self, x0_data, x1_data, t_data, use_cudnn=True):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        t = chainer.Variable(t_data)
        loss = contrastive(x0, x1, t, use_cudnn)
        loss.backward()
        self.assertEqual(None, t.grad)

        func = loss.creator
        f = lambda: func.forward((x0.data, x1.data, t.data))
        gx0, = gradient_check.numerical_grad(f, (x0.data, ), (1, ))
        gx1, = gradient_check.numerical_grad(f, (x1.data, ), (1, ))

        gradient_check.assert_allclose(gx0, x0.grad)
        gradient_check.assert_allclose(gx1, x1.grad)
Code Example #8
File: test_gru.py  Project: asi1024/chainer
    def check_backward(self, h_data, x_data, y_grad):
        h = chainer.Variable(h_data)
        x = chainer.Variable(x_data)
        y = self._forward(self.link, h, x)
        y.grad = y_grad
        y.backward()

        def f():
            return _gru(self.link, h_data, x_data),
        gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
        testing.assert_allclose(gx, x.grad, atol=1e-3)

        if isinstance(self.link, links.StatelessGRU):
            gh, = gradient_check.numerical_grad(f, (h.data,), (y_grad,))
            testing.assert_allclose(gh, h.grad, atol=1e-3)
Code Example #9
File: test_gru.py  Project: PhysicsTeacher13/CHAINER
    def check_backward(self, h_data, x_data, y_grad):
        h = chainer.Variable(h_data)
        x = chainer.Variable(x_data)
        y = self._forward(self.link, h, x)
        y.grad = y_grad
        y.backward()

        def f():
            return _gru(self.link, h_data, x_data),
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        testing.assert_allclose(gx, x.grad, atol=1e-3)

        if isinstance(self.link, links.GRU):
            gh, = gradient_check.numerical_grad(f, (h.data,), (y.grad,))
            testing.assert_allclose(gh, h.grad, atol=1e-3)
Code Example #10
    def check_different_eps(self, x, y):
        def f():
            if -1 < x < 1:
                return x.copy(),
            elif -2 < x < 2:
                return 2 * x,
            else:
                return 0,

        gx, = gradient_check.numerical_grad(f, (x, ), (y, ), eps=0.5)
        self.assertEqual(gx, 1.)
        gx, = gradient_check.numerical_grad(f, (x, ), (y, ), eps=1.5)
        self.assertEqual(gx, 2.)
        gx, = gradient_check.numerical_grad(f, (x, ), (y, ), eps=2.5)
        self.assertEqual(gx, 0.)
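These three assertions follow from the central difference that numerical_grad computes, (f(x + eps) - f(x - eps)) / (2 * eps), assuming x lies close to 0 so that both sample points fall in the same branch of f: with eps = 0.5 both points stay inside (-1, 1), giving (0.5 - (-0.5)) / 1 = 1; with eps = 1.5 both land in the 2 * x branch, giving (3 - (-3)) / 3 = 2; with eps = 2.5 both return 0, giving 0.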
Code Example #11
    def check_backward(self, x0_data, x1_data, t_data, use_cudnn=True):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        t = chainer.Variable(t_data)
        loss = contrastive(x0, x1, t, use_cudnn)
        loss.backward()
        self.assertEqual(None, t.grad)

        func = loss.creator
        f = lambda: func.forward((x0.data, x1.data, t.data))
        gx0, = gradient_check.numerical_grad(f, (x0.data,), (1,))
        gx1, = gradient_check.numerical_grad(f, (x1.data,), (1,))

        gradient_check.assert_allclose(gx0, x0.grad)
        gradient_check.assert_allclose(gx1, x1.grad)
Code Example #12
File: test_gradient_check.py  Project: RE-ID/chainer
    def check_different_eps(self, x, y):
        def f():
            if -1 < x < 1:
                return x.copy(),
            elif -2 < x < 2:
                return 2 * x,
            else:
                return 0,

        gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=0.5)
        self.assertEqual(gx, 1.)
        gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=1.5)
        self.assertEqual(gx, 2.)
        gx, = gradient_check.numerical_grad(f, (x,), (y,), eps=2.5)
        self.assertEqual(gx, 0.)
Code Example #13
File: test_contrastive.py  Project: BRETT71/chainer
    def check_backward(self, x0_data, x1_data, t_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        t = chainer.Variable(t_data)
        loss = functions.contrastive(x0, x1, t, self.margin)
        loss.backward()
        self.assertEqual(None, t.grad)

        func = loss.creator
        f = lambda: func.forward((x0.data, x1.data, t.data))
        gx0, = gradient_check.numerical_grad(f, (x0.data,), (1,))
        gx1, = gradient_check.numerical_grad(f, (x1.data,), (1,))

        gradient_check.assert_allclose(gx0, x0.grad, rtol=1e-4, atol=1e-4)
        gradient_check.assert_allclose(gx1, x1.grad, rtol=1e-4, atol=1e-4)
Code Example #14
def check_backward(update, atom_data, adj_data, y_grad):
    """Check gradient of GGNNUpdate.

    This function is different from other backward tests.
    Because of GRU, reset_state method has to be called explicitly
    before gradient calculation.

    Args:
        update (callable):
        atom_data (numpy.ndarray):
        adj_data (numpy.ndarray):
        y_grad (numpy.ndarray):
    """
    atom = chainer.Variable(atom_data)
    update.reset_state()
    y = update(atom, adj_data)
    y.grad = y_grad
    y.backward()

    def f():
        update.reset_state()
        return update(atom_data, adj_data).data,

    gx, = gradient_check.numerical_grad(f, (atom.data, ), (y.grad, ))
    numpy.testing.assert_allclose(cuda.to_cpu(gx),
                                  cuda.to_cpu(atom.grad),
                                  atol=1e-3,
                                  rtol=1e-3)
    return gx
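The docstring above spells out the key point: the link is stateful, so reset_state() must be called before every forward pass, including the ones numerical_grad performs inside f; otherwise the numerical and analytic gradients would be computed from different internal states. Below is a rough self-contained sketch of the same pattern using a plain chainer.links.LSTM in place of GGNNUpdate (our substitution, with illustrative shapes and tolerances).

import numpy as np
import chainer
from chainer import gradient_check, links, testing

link = links.LSTM(3, 3)
x_data = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)
y_grad = np.random.uniform(-1, 1, (2, 3)).astype(np.float32)

x = chainer.Variable(x_data)
link.reset_state()
y = link(x)
y.grad = y_grad
y.backward()

def f():
    # Reset the hidden state so every numerical evaluation starts fresh.
    link.reset_state()
    return link(x).data,

gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
testing.assert_allclose(gx, x.grad, atol=1e-3, rtol=1e-3)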
Code Example #15
def check_backward(update, atom_data, adj_data, y_grad):
    # type: (MPNNUpdate, numpy.ndarray, numpy.ndarray, numpy.ndarray) -> None
    """Check gradient of MPNNUpdate.

    This function is different from other backward tests.
    Because of GRU, reset_state method has to be called explicitly
    before gradient calculation.

    Args:
        update (callable):
        atom_data (numpy.ndarray):
        adj_data (numpy.ndarray):
        y_grad (numpy.ndarray):
    """
    atom = chainer.Variable(atom_data)
    adj = chainer.Variable(adj_data)
    update.reset_state()
    y = update(atom, adj)
    y.grad = y_grad
    y.backward()

    def f():
        # type: () -> numpy.ndarray
        update.reset_state()
        return update(atom_data, adj_data).data,

    gx, = gradient_check.numerical_grad(f, (atom.data, ), (y.grad, ))
    numpy.testing.assert_allclose(gx, atom.grad, atol=1e-3, rtol=1e-3)
Code Example #16
File: test_set2set.py  Project: ir5/chainer-chemistry
def check_backward(readout, atom_data, y_grad):
    # type: (Set2Set, numpy.ndarray, numpy.ndarray) -> None
    """Check gradient of Set2Set.

    This function is different from other backward tests.
    Because of LSTM, reset_state method has to be called explicitly
    before gradient calculation.

    Args:
        readout:
        atom_data:
        y_grad:
    """
    atom = chainer.Variable(atom_data)
    readout.reset_state()
    y = readout(atom)
    y.grad = y_grad
    y.backward()

    def f():
        readout.reset_state()
        return readout(atom_data).data,

    gx, = gradient_check.numerical_grad(f, (atom.data, ), (y.grad, ))
    numpy.testing.assert_allclose(gx, atom.grad, atol=1e-2, rtol=1e-2)
Code Example #17
File: test_ctc.py  Project: heiterwelt/chainer
    def check_backward(self, t_data, xs_data, l_length, x_length, grad, gx):
        xs = tuple(chainer.Variable(x_data) for x_data in xs_data)
        t = chainer.Variable(t_data)

        loss = functions.connectionist_temporal_classification(
            xs,
            t,
            2,
            input_length=chainer.Variable(x_length),
            label_length=chainer.Variable(l_length))

        loss.grad = grad
        loss.backward()

        func = loss.creator
        xs_data = tuple(x.data for x in xs)
        f = lambda: func.forward((
            x_length,
            l_length,
            t.data,
        ) + xs_data)
        gx_0, gx_1, gx_2, gx_3 = gradient_check.numerical_grad(
            f, (xs_data), (gx, ))
        gradient_check.assert_allclose(xs[0].grad, gx_0, atol=1e-04)
        gradient_check.assert_allclose(xs[1].grad, gx_1, atol=1e-04)
        gradient_check.assert_allclose(xs[2].grad, gx_2, atol=1e-04)
        gradient_check.assert_allclose(xs[3].grad, gx_3, atol=1e-04)
Code Example #18
File: test_gradient_check.py  Project: zwcdp/chainer
    def check_reference(self, x):
        # The returned value and the input refer to the same memory.
        # See issue #488
        def func():
            return x,
        gx, = gradient_check.numerical_grad(func, (x,), (1,))
        testing.assert_allclose(cuda.to_cpu(gx), 1)
Code Example #19
def check_backward(readout, atom_data, y_grad):
    # type: (Set2Set, numpy.ndarray, numpy.ndarray) -> None
    """Check gradient of Set2Set.

    This function is different from other backward tests.
    Because of LSTM, reset_state method has to be called explicitly
    before gradient calculation.

    Args:
        readout:
        atom_data:
        y_grad:
    """
    atom = chainer.Variable(atom_data)
    readout.reset_state()
    y = readout(atom)
    y.grad = y_grad
    y.backward()

    def f():
        readout.reset_state()
        return readout(atom_data).data,

    gx, = gradient_check.numerical_grad(f, (atom.data, ), (y.grad, ))
    numpy.testing.assert_allclose(cuda.to_cpu(gx),
                                  cuda.to_cpu(atom.grad),
                                  atol=1e-2,
                                  rtol=1e-2)
Code Example #20
    def check_backward(self, x_data, t_data, y_grad):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        W = self.link.W

        y = self.link(x, t)
        y.grad = y_grad
        y.backward()

        # fix samples
        negative_sampling.NegativeSamplingFunction.samples = y.creator.samples

        def f():
            return self.link(x, t).data,

        gx, gW = gradient_check.numerical_grad(f, (x.data, W.data), (y.grad, ),
                                               eps=1e-2)
        del negative_sampling.NegativeSamplingFunction.samples  # clean up

        gradient_check.assert_allclose(cuda.to_cpu(gx),
                                       cuda.to_cpu(x.grad),
                                       atol=1.e-4)
        gradient_check.assert_allclose(cuda.to_cpu(gW),
                                       cuda.to_cpu(W.grad),
                                       atol=1.e-4)
Code Example #21
File: test_gradient_check.py  Project: RE-ID/chainer
    def check_reference(self, x):
        # The returned value and the input refer to the same memory.
        # See issue #488
        def func():
            return x,
        gx, = gradient_check.numerical_grad(func, (x,), (1,))
        testing.assert_allclose(cuda.to_cpu(gx), 1)
Code Example #22
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y.grad = y_grad
        y.backward()

        f = lambda: (self.link(x).data,)
        gx, gW = gradient_check.numerical_grad(
            f, (x.data, self.link.W.data), (y.grad,), eps=1e-2)
        if not self.nobias:
            gb, = gradient_check.numerical_grad(
                f, (self.link.b.data,), (y.grad,), eps=1e-2)

        gradient_check.assert_allclose(gx, x.grad)
        gradient_check.assert_allclose(gW, self.link.W.grad)
        if not self.nobias:
            gradient_check.assert_allclose(gb, self.link.b.grad)
Code Example #23
File: test_embed_id.py  Project: BRETT71/chainer
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y.grad = y_grad
        y.backward()

        f = lambda: (self.link(x).data,)
        gW, = gradient_check.numerical_grad(f, (self.link.W.data,), (y.grad,))
        gradient_check.assert_allclose(gW, self.link.W.grad)
Code Example #24
File: test_tanh.py  Project: ALEXGUOQ/chainer
    def check_backward(self, x_data, gy_data, use_cudnn=True):
        x = Variable(x_data)
        y = tanh(x, use_cudnn=use_cudnn)
        y.grad = gy_data
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,))
        assert_allclose(gx, x.grad)
Code Example #25
    def check_backward(self, x_data, gy_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.tanh(x, use_cudnn=use_cudnn)
        y.grad = gy_data
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = gradient_check.numerical_grad(f, (x.data, ), (y.grad, ))
        gradient_check.assert_allclose(gx, x.grad)
Code Example #26
File: test_tanh.py  Project: BRETT71/chainer
    def check_backward(self, x_data, gy_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.tanh(x, use_cudnn=use_cudnn)
        y.grad = gy_data
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad)
Code Example #27
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y.grad = y_grad
        y.backward()

        f = lambda: (self.link(x).data, )
        gW, = gradient_check.numerical_grad(f, (self.link.W.data, ),
                                            (y.grad, ))
        gradient_check.assert_allclose(gW, self.link.W.grad)
Code Example #28
    def check_backward(self, x_data, gy_data, use_cudnn=True):
        x = Variable(x_data)
        y = tanh(x, use_cudnn=use_cudnn)
        y.grad = gy_data
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,))
        assert_allclose(gx, x.grad)
Code Example #29
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y.grad = y_grad
        y.backward()

        f = lambda: (self.link(x).data, )
        gx, gW = gradient_check.numerical_grad(f, (x.data, self.link.W.data),
                                               (y.grad, ),
                                               eps=1e-2)
        if not self.nobias:
            gb, = gradient_check.numerical_grad(f, (self.link.b.data, ),
                                                (y.grad, ),
                                                eps=1e-2)

        gradient_check.assert_allclose(gx, x.grad)
        gradient_check.assert_allclose(gW, self.link.W.grad)
        if not self.nobias:
            gradient_check.assert_allclose(gb, self.link.b.grad)
Code Example #30
File: test_embed_id.py  Project: Hiroshi123/chainer
    def check_backward(self, x_data, y_grad):
        x = Variable(x_data)
        y = self.func(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gW, = numerical_grad(f, (func.W,), (y.grad,))
        assert_allclose(gW, func.gW)
Code Example #31
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.func(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gW, = gradient_check.numerical_grad(f, (func.W, ), (y.grad, ))
        gradient_check.assert_allclose(gW, func.gW)
Code Example #32
File: test_expand_dims.py  Project: tsumuz/chainer
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.expand_dims(x, self.axis)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x_data,))
        gx, = gradient_check.numerical_grad(f, (x_data,), (y_grad,))
        gradient_check.assert_allclose(cuda.to_cpu(x.grad),
                                       cuda.to_cpu(gx))
Code Example #33
File: test_leaky_relu.py  Project: nihohi0428/chainer
    def check_backward(self, x_data, y_grad):
        x = Variable(x_data)
        y = leaky_relu(x, slope=self.slope)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = numerical_grad(f, (x.data, ), (y.grad, ))

        assert_allclose(gx, x.grad)
Code Example #34
File: test_leaky_relu.py  Project: nozawat/chainer
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.leaky_relu(x, slope=self.slope)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(gx, x.grad)
Code Example #35
File: test_pooling_2d.py  Project: umitanuki/chainer
    def check_backward(self, x_data, y_grad, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.max_pooling_2d(x, 3, stride=2, pad=1, cover_all=self.cover_all, use_cudnn=use_cudnn)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad))
Code Example #36
    def check_backward(self, op, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = gradient_check.numerical_grad(f, (x.data, ), (y.grad, ))

        gradient_check.assert_allclose(gx, x.grad)
Code Example #37
File: test_minmax.py  Project: BRETT71/chainer
    def check_backward(self, x_data, y_grad, axis=None, keepdims=False):
        x = chainer.Variable(x_data)
        y = functions.max(x, axis=axis, keepdims=keepdims)

        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data.copy(),))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,), eps=1e-5)
        gradient_check.assert_allclose(gx, x.grad, rtol=1e-3, atol=1e-3)
Code Example #38
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self.link(x)
        y.grad = y_grad
        y.backward()

        f = lambda: (self.link(x).data, )
        if self.initial_bias is None:
            gx, gW = gradient_check.numerical_grad(
                f, (x.data, self.link.W.data),
                (y.grad, ), eps=1e-4)
        else:
            gx, gW, gb = gradient_check.numerical_grad(
                f, (x.data, self.link.W.data, self.link.b.data),
                (y.grad, ), eps=1e-4)

        gradient_check.assert_allclose(gx, x.grad, atol=1e-2)
        gradient_check.assert_allclose(gW, self.link.W.grad, atol=1e-2)
        if self.initial_bias is not None:
            gradient_check.assert_allclose(gb, self.link.b.grad, atol=1e-2)
Code Example #39
File: test_basic_math.py  Project: nihohi0428/chainer
    def check_backward(self, op, x_data, y_grad):
        x = Variable(x_data)
        y = op(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = numerical_grad(f, (x.data, ), (y.grad, ))

        assert_allclose(gx, x.grad)
Code Example #40
    def check_backward(self, random_array, random_grad_array):
        x = chainer.Variable(random_array)
        y = functions.depth2space(x, 2)
        y.grad = random_grad_array
        y.backward()

        def func():
            return (functions.depth2space(x, 2).data,)
        gx, = gradient_check.numerical_grad(func, (x.data,), (y.grad,))

        testing.assert_allclose(x.grad, gx, rtol=0.0001)
Code Example #41
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.local_response_normalization(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,), eps=1)

        gradient_check.assert_allclose(gx, x.grad, atol=1e-3)
Code Example #42
    def check_backward(self, x_data, y_grad, use_cudnn=True):
        x = Variable(x_data)
        y = average_pooling_2d(x, 3, stride=2, pad=1, use_cudnn=use_cudnn)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = numerical_grad(f, (x.data, ), (y.grad, ), eps=1e-2)

        assert_allclose(to_cpu(gx), to_cpu(x.grad))
Code Example #43
    def check_backward(self, x_data, y_grad):
        x = Variable(x_data)
        y = local_response_normalization(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,), eps=1)

        assert_allclose(gx, x.grad, atol=1e-3)
Code Example #44
File: test_broadcast.py  Project: nel215/chainer
    def check_backward(self, data, grad):
        x = chainer.Variable(data)
        bx = functions.broadcast_to(x, self.out_shape)

        func = bx.creator
        f = lambda: func.forward((data,))

        bx.grad = grad
        bx.backward()
        gx, = gradient_check.numerical_grad(f, (data,), (bx.grad,))
        gradient_check.assert_allclose(gx, x.grad)
Code Example #45
File: test_basic_math.py  Project: ALEXGUOQ/chainer
    def check_backward(self, op, x_data, y_grad):
        x = Variable(x_data)
        y = op(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,))

        assert_allclose(gx, x.grad)
Code Example #46
File: test_basic_math.py  Project: chemouda/chainer
    def check_backward(self, op, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(gx, x.grad)
Code Example #47
    def test_backward_cpu(self):
        x = Variable(np.random.randn(3, 2).astype(np.float32))
        y = F.relu(x)
        y.grad = np.random.randn(3, 2).astype(np.float32)
        y.backward()

        def f():
            return F.relu(x).data,

        gx, = gradient_check.numerical_grad(f, (x.data, ), (y.grad, ))
        testing.assert_allclose(gx, x.grad)
Code Example #48
File: test_linear.py  Project: ryuuji5/chainer
    def check_backward(self, x_data, W_data, b_data, y_grad):
        x = chainer.Variable(x_data)
        W = chainer.Variable(W_data)
        b = None if b_data is None else chainer.Variable(b_data)
        y = functions.linear(x, W, b)
        y.grad = y_grad
        y.backward()

        func = y.creator
        if b_data is None:
            f = lambda: func.forward((x.data, W.data))
            gx, gW = gradient_check.numerical_grad(f, (x.data, W.data), (y.grad,), eps=1e-2)
        else:
            f = lambda: func.forward((x.data, W.data, b.data))
            gx, gW, gb = gradient_check.numerical_grad(f, (x.data, W.data, b.data), (y.grad,), eps=1e-2)

        gradient_check.assert_allclose(gx, x.grad)
        gradient_check.assert_allclose(gW, W.grad)
        if b_data is not None:
            gradient_check.assert_allclose(gb, b.grad)
Code Example #49
File: test_pooling_2d.py  Project: wakamori/chainer
    def check_backward(self, x_data, y_grad, use_cudnn=True):
        x = Variable(x_data)
        y = average_pooling_2d(x, 3, stride=2, pad=1, use_cudnn=use_cudnn)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,), eps=1e-2)

        assert_allclose(to_cpu(gx), to_cpu(x.grad))
Code Example #50
File: test_swapaxes.py  Project: BRETT71/chainer
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.swapaxes(x, self.axis1, self.axis2)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data.copy(),))

        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,), eps=1e-5)
        gradient_check.assert_allclose(gx, x.grad, rtol=1e-5)
Code Example #51
File: test_peephole.py  Project: RuixueLiu/chainer
    def check_backward(self, c_data, h_data, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self._forward(self.link, x)
        y.grad = y_grad
        y.backward()

        def f():
            c, y = _peephole(self.link, c_data, h_data, x_data)
            return y,
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad, atol=1e-3)
Code Example #52
    def check_backward(self, c_data, h_data, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = self._forward(self.link, x)
        y.grad = y_grad
        y.backward()

        def f():
            c, y = _peephole(self.link, c_data, h_data, x_data)
            return y,
        gx, = gradient_check.numerical_grad(f, (x.data,), (y_grad,))
        testing.assert_allclose(gx, x.grad, atol=1e-3)
Code Example #53
def _check_backward(e1, e2, y_grad, link, bias):
    e1 = chainer.Variable(e1)
    e2 = chainer.Variable(e2)
    y = link(e1, e2)
    y.grad = y_grad
    y.backward()
    f = lambda: (link(e1, e2).data, )

    ge1, ge2, gW = gradient_check.numerical_grad(
        f, (e1.data, e2.data, link.W.data), (y.grad, ), eps=1e-2)
    gradient_check.assert_allclose(ge1, e1.grad, rtol=1e-3)
    gradient_check.assert_allclose(ge2, e2.grad, rtol=1e-3)
    gradient_check.assert_allclose(gW, link.W.grad, rtol=1e-3)

    if bias:
        gV1, gV2, gb = gradient_check.numerical_grad(
            f, (link.V1.data, link.V2.data, link.b.data), (y.grad, ), eps=1e-2)
        gradient_check.assert_allclose(gV1, link.V1.grad, rtol=1e-3)
        gradient_check.assert_allclose(gV2, link.V2.grad, rtol=1e-3)
        gradient_check.assert_allclose(gb, link.b.grad, rtol=1e-3)
Code Example #54
File: test_leaky_relu.py  Project: umitanuki/chainer
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.leaky_relu(x, slope=self.slope)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data, ))
        gx, = gradient_check.numerical_grad(f, (x.data, ), (y.grad, ))

        gradient_check.assert_allclose(gx, x.grad)
Code Example #55
File: test_leaky_relu.py  Project: ALEXGUOQ/chainer
    def check_backward(self, x_data, y_grad):
        x = Variable(x_data)
        y = leaky_relu(x, slope=self.slope)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,))

        assert_allclose(gx, x.grad)
Code Example #56
    def check_backward(self, x_data, y_grad, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.spatial_pyramid_pooling_2d(x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))

        gradient_check.assert_allclose(cuda.to_cpu(gx), cuda.to_cpu(x.grad), atol=1e-04)
Code Example #57
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.dropout(x, self.ratio)
        creator = y.creator
        y.grad = y_grad
        y.backward()

        def f():
            y = _dropout(x_data, creator)
            return y,
        gx, = gradient_check.numerical_grad(f, (x_data, ), (y.grad, ), eps=0.1)
        testing.assert_allclose(gx, x.grad, **self.check_backward_options)