Example #1
 def check_forward(self, x_data, ys_data, indices_or_sections, axis):
     x = chainer.Variable(x_data)
     ys = functions.split_axis(x, indices_or_sections, axis)
     for yd, y in zip(ys_data, ys):
         self.assertEqual(y.data.dtype, self.dtype)
         self.assertIsInstance(y.data.shape, tuple)
         gradient_check.assert_allclose(yd, y.data, atol=0, rtol=0)
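For reference, functions.split_axis is expected to agree with numpy.split for the same indices_or_sections and axis. A minimal self-contained sketch (assuming Chainer and NumPy are importable):

    import numpy
    import chainer
    from chainer import functions

    x = numpy.arange(12, dtype=numpy.float32).reshape(3, 4)
    ys = functions.split_axis(chainer.Variable(x), 2, axis=1)
    for y, expected in zip(ys, numpy.split(x, 2, axis=1)):
        numpy.testing.assert_allclose(y.data, expected)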
Example #2
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = self.function(x, axis=self.axis)
     self.assertEqual(y.data.dtype, numpy.int32)
     y_expect = self.expect(self.x, axis=self.axis)
     self.assertEqual(y.data.shape, y_expect.shape)
     gradient_check.assert_allclose(y_expect, y.data)
Example #3
    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.binary_accuracy(x, t)

        expected = 0.0
        gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
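The hard-coded expectation of 0.0 comes from the test fixture, which is not shown. As a rough sketch of what binary_accuracy computes (assuming its documented behavior: predictions are x >= 0, and targets equal to -1 are ignored):

    import numpy

    def expected_binary_accuracy(x, t, ignore_label=-1):
        # Hypothetical reference implementation, for illustration only.
        mask = t != ignore_label
        pred = (x >= 0).astype(t.dtype)
        return float((pred[mask] == t[mask]).mean())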
Example #4
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.unpooling_2d(x, self.ksize, outsize=self.outsize,
                                   cover_all=self.cover_all)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for i in six.moves.range(self.N):
            for c in six.moves.range(self.n_channels):
                outsize = self.outsize or self.expected_outsize
                assert y_data.shape[2:] == outsize
                if outsize == (5, 2):
                    expect = numpy.zeros(outsize, dtype=self.dtype)
                    expect[:2, :] = self.x[i, c, 0, 0]
                    expect[2:4, :] = self.x[i, c, 1, 0]
                elif outsize == (4, 2):
                    expect = numpy.array([
                        [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
                        [self.x[i, c, 0, 0], self.x[i, c, 0, 0]],
                        [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
                        [self.x[i, c, 1, 0], self.x[i, c, 1, 0]],
                    ])
                elif outsize == (3, 1):
                    expect = numpy.array([
                        [self.x[i, c, 0, 0]],
                        [self.x[i, c, 0, 0]],
                        [self.x[i, c, 1, 0]],
                    ])
                else:
                    raise ValueError('Unsupported outsize: {}'.format(outsize))
                gradient_check.assert_allclose(expect, y_data[i, c])
Example #5
    def check_forward_ones(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.spatial_pyramid_pooling_2d(
            x, self.pyramid_height, self.pooling_class, use_cudnn=use_cudnn)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual((self.n, self.output_dim, 1, 1), y_data.shape)
        gradient_check.assert_allclose(y_data, numpy.ones_like(y_data))
Example #6
    def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
                                     cover_all=self.cover_all,
                                     use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                if self.cover_all:
                    expect = numpy.array([
                        [self.x[k, c, 0:2, 0:2].max(),
                         self.x[k, c, 0:2, 1:3].max()],
                        [self.x[k, c, 1:4, 0:2].max(),
                         self.x[k, c, 1:4, 1:3].max()],
                        [self.x[k, c, 3:4, 0:2].max(),
                         self.x[k, c, 3:4, 1:3].max()]])
                else:
                    expect = numpy.array([
                        [self.x[k, c, 0:2, 0:2].max(),
                         self.x[k, c, 0:2, 1:3].max()],
                        [self.x[k, c, 1:4, 0:2].max(),
                         self.x[k, c, 1:4, 1:3].max()]])
                gradient_check.assert_allclose(expect, y_data[k, c])
Example #7
    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.accuracy(x, t, self.ignore_label)
        self.assertEqual(y.data.dtype, numpy.float32)
        self.assertEqual((), y.data.shape)

        if self.ignore_label is not None:
            count = 0
            for i in six.moves.range(self.t.size):
                pred = self.x[i].argmax()
                if self.t[i] != self.ignore_label and pred == self.t[i]:
                    count += 1
            total = (self.t != self.ignore_label).sum()
        else:
            count = 0
            for i in six.moves.range(self.t.size):
                pred = self.x[i].argmax()
                if pred == self.t[i]:
                    count += 1
            total = self.t.size

        if total == 0:
            expected = 0.0
        else:
            expected = float(count) / total
        gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
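The same expectation can be computed without an explicit loop; a vectorized NumPy equivalent (a sketch, not part of the original test):

    import numpy

    def expected_accuracy(x, t, ignore_label=None):
        pred = x.argmax(axis=1)  # predicted class per row
        if ignore_label is None:
            return float((pred == t).mean()) if t.size else 0.0
        mask = t != ignore_label
        total = int(mask.sum())
        return float((pred[mask] == t[mask]).sum()) / total if total else 0.0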
Example #8
 def check_forward(self, op, x_data, gpu):
     value = self.value
     if gpu:
         value = cuda.to_gpu(value)
     x = Variable(x_data)
     y = op(x, value)
     assert_allclose(op(self.x, self.value), y.data, atol=1e-6, rtol=1e-6)
Example #9
 def check_reference(self, x):
     # The returned value and the input refer to the same memory.
     # See issue #488.
     def func():
         return x,
     gx, = gradient_check.numerical_grad(func, (x,), (1,))
     gradient_check.assert_allclose(cuda.to_cpu(gx), 1)
Example #10
 def check_forward(self, op, x1_data, x2_data):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = op(x1, x2)
     if isinstance(y.data, cuda.GPUArray):
         self.assertTrue(hasattr(y.data.gpudata, "device"))
     gradient_check.assert_allclose(op(self.x1, self.x2), y.data)
Example #11
    def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.sigmoid(x, use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_expect = functions.sigmoid(chainer.Variable(self.x))

        gradient_check.assert_allclose(y_expect.data, y.data)
Example #12
    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)

        self.link.sample_data = self.link.sampler.sample(
            (self.batch_size, self.n_samples))
        y = self.link(x, t)

        expect_y = numpy.empty((self.batch_size), dtype=numpy.float32)
        samples = cuda.to_cpu(self.link.sample_data)
        for b in range(self.batch_size):
            z = 0
            for i in range(self.n_samples):
                w = samples[b, i]
                z += numpy.exp(self.w[w].dot(self.x[b]))
            y0 = self.w[self.t[b]].dot(self.x[b])
            z += numpy.exp(y0)
            l = y0 - numpy.log(z)
            for i in range(self.n_samples):
                w = samples[b, i]
                l += numpy.log(1 - numpy.exp(self.w[w].dot(self.x[b])) / z)

            expect_y[b] = l

        loss = -numpy.sum(expect_y) / self.batch_size
        gradient_check.assert_allclose(y.data, loss, atol=1.e-4)
Example #13
 def check_atol(self, x, y):
     x_cpu = cuda.to_cpu(x)
     y_cpu = cuda.to_cpu(y)
     max_abs_diff = numpy.max(numpy.abs(x_cpu - y_cpu))
     with self.assertRaises(AssertionError):
         gradient_check.assert_allclose(x, y, atol=max_abs_diff - 1, rtol=0)
     gradient_check.assert_allclose(x, y, atol=max_abs_diff + 1, rtol=0)
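This test pins down the tolerance rule, presumably the same one numpy.testing.assert_allclose uses: the check passes iff abs(x - y) <= atol + rtol * abs(y) elementwise. With rtol=0, an atol just below the maximum absolute difference must fail and one just above it must pass; a self-contained illustration:

    import numpy

    x = numpy.array([1.0, 2.0])
    y = numpy.array([1.5, 2.0])
    diff = numpy.max(numpy.abs(x - y))  # 0.5
    numpy.testing.assert_allclose(x, y, rtol=0, atol=diff + 1e-6)  # passes
    try:
        numpy.testing.assert_allclose(x, y, rtol=0, atol=diff - 1e-6)
    except AssertionError:
        pass  # fails, as expected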
Example #14
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.hard_sigmoid(x)
     self.assertIs(y.data.dtype, x_data.dtype)
     expect = numpy.minimum(1.0, numpy.maximum(0.0, self.x * 0.2 + 0.5))
     gradient_check.assert_allclose(
         y.data, expect, **self.check_forward_option)
Example #15
 def check_rtol(self, x, y):
     x_cpu = cuda.to_cpu(x)
     y_cpu = cuda.to_cpu(y)
     max_ratio = numpy.max(numpy.abs(x_cpu - y_cpu) / y_cpu)
     with self.assertRaises(AssertionError):
         gradient_check.assert_allclose(x, y, atol=0, rtol=max_ratio - 1)
     gradient_check.assert_allclose(x, y, atol=0, rtol=max_ratio + 1)
Example #16
    def check_forward(self, c_prev1_data, c_prev2_data, x1_data, x2_data):
        c_prev1 = chainer.Variable(c_prev1_data)
        c_prev2 = chainer.Variable(c_prev2_data)
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        c, h = functions.slstm(c_prev1, c_prev2, x1, x2)
        self.assertEqual(c.data.dtype, numpy.float32)
        self.assertEqual(h.data.dtype, numpy.float32)

        # Compute expected out
        a1_in = self.x1[:, [0, 4]]
        i1_in = self.x1[:, [1, 5]]
        f1_in = self.x1[:, [2, 6]]
        o1_in = self.x1[:, [3, 7]]
        a2_in = self.x2[:, [0, 4]]
        i2_in = self.x2[:, [1, 5]]
        f2_in = self.x2[:, [2, 6]]
        o2_in = self.x2[:, [3, 7]]

        c_expect = _sigmoid(i1_in) * numpy.tanh(a1_in) + \
            _sigmoid(i2_in) * numpy.tanh(a2_in) + \
            _sigmoid(f1_in) * self.c_prev1 + \
            _sigmoid(f2_in) * self.c_prev2
        h_expect = _sigmoid(o1_in + o2_in) * numpy.tanh(c_expect)

        gradient_check.assert_allclose(c_expect, c.data)
        gradient_check.assert_allclose(h_expect, h.data)
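_sigmoid is a module-level helper not shown in this excerpt; presumably the standard logistic function:

    import numpy

    def _sigmoid(x):
        # Assumed definition of the helper used above.
        return 1.0 / (1.0 + numpy.exp(-x))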
Example #17
 def check_forward(self, op, x1_data, x2_data):
     x1 = Variable(x1_data)
     x2 = Variable(x2_data)
     y = op(x1, x2)
     if isinstance(y.data, cuda.GPUArray):
         self.assertTrue(hasattr(y.data.gpudata, 'device'))
     assert_allclose(op(self.x1, self.x2), y.data)
Example #18
    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn=use_cudnn, normalize=self.normalize,
            cache_score=self.cache_score)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, self.dtype)
        self.assertEqual(hasattr(loss.creator, 'y'), self.cache_score)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value
        loss_expect = 0.0
        count = 0
        x = numpy.rollaxis(self.x, 1, self.x.ndim).reshape(
            (self.t.size, self.x.shape[1]))
        t = self.t.ravel()
        for xi, ti in six.moves.zip(x, t):
            if ti == -1:
                continue
            log_z = numpy.ufunc.reduce(numpy.logaddexp, xi)
            loss_expect -= (xi - log_z)[ti]
            count += 1

        if self.normalize:
            if count == 0:
                loss_expect = 0.0
            else:
                loss_expect /= count
        else:
            loss_expect /= len(t_data)

        gradient_check.assert_allclose(
            loss_expect, loss_value, **self.check_forward_options)
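numpy.ufunc.reduce(numpy.logaddexp, xi) computes log(sum(exp(xi))) in a numerically stable way; numpy.logaddexp.reduce(xi) is the more common spelling. A quick check of the identity:

    import numpy

    xi = numpy.array([1.0, 2.0, 3.0])
    log_z = numpy.logaddexp.reduce(xi)
    numpy.testing.assert_allclose(log_z, numpy.log(numpy.exp(xi).sum()))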
Example #19
 def check_forward(self, x_data, axis=None, keepdims=False):
     x = chainer.Variable(x_data)
     y = functions.max(x, axis=axis, keepdims=keepdims)
     self.assertEqual(y.data.dtype, numpy.float32)
     y_expect = self.x.max(axis=axis, keepdims=keepdims)
     self.assertEqual(y.data.shape, y_expect.shape)
     gradient_check.assert_allclose(y_expect, y.data)
Example #20
 def test_cpu_versus_gpu(self):
     self.context = lambda x: x
     cpu, closs = self.check()
     self.context = cuda.to_gpu
     gpu, gloss = self.check()
     numpy.testing.assert_almost_equal(closs, cuda.to_cpu(gloss))
     gradient_check.assert_allclose(gpu, cpu)
Example #21
    def check_forward(self, args):
        y = functions.fixed_batch_normalization(
            *[chainer.Variable(i) for i in args], eps=self.eps)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = _batch_normalization(
            self.expander, self.gamma, self.beta, self.x, self.mean, self.var)

        gradient_check.assert_allclose(
            y_expect, y.data, **self.check_forward_options)
Example #22
    def check_forward(self, x_data, t_data, w_data, samples_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        w = chainer.Variable(w_data)
        samples = chainer.Variable(samples_data)

        y = functions.black_out(x, t, w, samples)

        expect_y = numpy.empty((self.batch_size), dtype=numpy.float32)
        for b in range(self.batch_size):
            z = 0
            for i in range(self.n_samples):
                w = self.samples[b, i]
                z += numpy.exp(self.W[w].dot(self.x[b]))
            y0 = self.W[self.t[b]].dot(self.x[b])
            z += numpy.exp(y0)
            l = y0 - numpy.log(z)
            for i in range(self.n_samples):
                w = self.samples[b, i]
                l += numpy.log(1 - numpy.exp(self.W[w].dot(self.x[b])) / z)

            expect_y[b] = l

        loss = -numpy.sum(expect_y) / self.batch_size
        gradient_check.assert_allclose(y.data, loss, atol=1.e-4)
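The loop builds a BlackOut-style objective: with z summing exp(W[v].dot(x)) over the target and the negative samples, each example contributes log p(t) plus the sum of log(1 - p(s)) over samples s, where p(v) = exp(W[v].dot(x)) / z. A vectorized per-example sketch (hypothetical helper, not the original code):

    import numpy

    def blackout_example_loss(W, x, t, samples):
        # Scores for the target (index 0) and the negative samples.
        scores = numpy.exp(numpy.concatenate(([W[t].dot(x)], W[samples].dot(x))))
        p = scores / scores.sum()
        return -(numpy.log(p[0]) + numpy.log(1 - p[1:]).sum())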
Example #23
 def check_forward(self, x_data, axis=None):
     x = chainer.Variable(x_data)
     y = functions.logsumexp(x, axis=axis)
     self.assertEqual(y.data.dtype, self.dtype)
     y_expect = numpy.log(numpy.exp(self.x).sum(axis=axis))
     gradient_check.assert_allclose(
         y_expect, y.data, **self.check_forward_option)
Example #24
 def check_forward(self, x1_data, x2_data, y_expected):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = functions.minimum(x1, x2)
     self.assertEqual(y.data.dtype, self.dtype)
     gradient_check.assert_allclose(
         y_expected, y.data, **self.check_forward_options)
Example #25
 def check_forward(self, x1_data, x2_data):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = self.op(x1, x2)
     if isinstance(y.data, cuda.GPUArray):
         self.assertTrue(hasattr(y.data.gpudata, 'device'))
     gradient_check.assert_allclose(self.forward_answer, y.data)
Example #26
 def check_sample(self):
     counts = numpy.zeros(len(self.ps), numpy.float32)
     for _ in range(1000):
         vs = self.sampler.sample((4, 3))
         numpy.add.at(counts, cuda.to_cpu(vs), 1)
     counts /= (1000 * 12)
     counts *= sum(self.ps)
     gradient_check.assert_allclose(self.ps, counts, atol=0.1, rtol=0.1)
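numpy.add.at performs unbuffered in-place addition, so duplicate indices in vs are each counted; a plain counts[vs] += 1 would collapse repeats. For example:

    import numpy

    counts = numpy.zeros(3)
    numpy.add.at(counts, [0, 0, 2], 1)
    # counts is now [2., 0., 1.]; counts[[0, 0, 2]] += 1 would give [1., 0., 1.]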
Example #27
    def check_backward(self, x_data, y_grad):
        x = chainer.Variable(x_data)
        y = functions.sum(x)
        y.grad = y_grad
        y.backward()

        gx_expect = numpy.full_like(self.x, self.gy[0])
        gradient_check.assert_allclose(gx_expect, x.grad)
Example #28
    def check_backward(self, xs_data, axis):
        xs = tuple(chainer.Variable(x_data) for x_data in xs_data)
        y = functions.concat(xs, axis=axis)
        y.grad = y.data
        y.backward()

        for x in xs:
            gradient_check.assert_allclose(x.data, x.grad, atol=0, rtol=0)
Example #29
    def check_forward(self, h_data, x_data):
        h = chainer.Variable(h_data)
        x = chainer.Variable(x_data)
        y = self.link(h, x)
        self.assertEqual(y.data.dtype, numpy.float32)

        y_expect = _gru(self.link, h_data, x_data)
        gradient_check.assert_allclose(y_expect, y.data)
Example #30
    def check_forward(self, args):
        y = bnlstm.fixed_batch_normalization(
            *[chainer.Variable(i) for i in args], eps=self.eps)
        self.assertEqual(y.data.dtype, numpy.float32)

        y_expect = _batch_normalization(
            self.expander, self.gamma, self.x, self.mean, self.var)

        gradient_check.assert_allclose(y_expect, y.data, rtol=1e-3, atol=1e-4)
Example #31
 def check_forward(self, xs):
     y = chainerrl.functions.sum_arrays(xs)
     correct_y = sum(self.xs)
     gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.data))
Example #32
 def test_identity_cpu(self):
     eye = _make_eye(self.x.shape)
     x = chainer.Variable(self.x)
     y = functions.batch_matmul(x, functions.batch_inv(x))
     gradient_check.assert_allclose(y.data, eye,
                                    **self.check_forward_options)
Example #33
 def check_forward(self, x1_data, x2_data, axis, y_expected):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = functions.bias(x1, x2, axis)
     gradient_check.assert_allclose(y_expected, y.data)
Example #34
 def check_backward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.transpose(x, self.axes)
     y.grad = y.data
     y.backward()
     gradient_check.assert_allclose(x.data, x.grad, atol=0, rtol=0)
Example #35
    def check_forward(self, context, weight, y_expect):
        context = chainer.Variable(context)
        weight = chainer.Variable(weight)
        y = reader.apply_attention(context, weight)
        gradient_check.assert_allclose(y_expect, y.data)
Example #36
 def check_forward(self, x_data, use_cudnn=True):
     x = chainer.Variable(x_data)
     y = functions.tanh(x, use_cudnn=use_cudnn)
     self.assertEqual(y.data.dtype, self.dtype)
     y_expect = functions.tanh(chainer.Variable(self.x))
     gradient_check.assert_allclose(y_expect.data, y.data)
Example #37
 def test_identity_gpu(self):
     eye = cuda.to_gpu(_make_eye(self.x.shape))
     x = chainer.Variable(cuda.to_gpu(self.x))
     y = functions.matmul(x, functions.inv(x))
     gradient_check.assert_allclose(y.data, eye,
                                    **self.check_forward_options)
Example #38
 def test_forward_gpu(self, use_cudnn=True):
     x = Variable(to_gpu(self.x))
     y = tanh(x, use_cudnn=use_cudnn)
     y_expect = tanh(Variable(self.x))
     assert_allclose(y_expect.data, y.data)
Example #39
 def check_forward(self, xs_data, y_data, axis):
     xs = tuple(chainer.Variable(x_data) for x_data in xs_data)
     y = functions.concat(xs, axis=axis)
     self.assertEqual(y.data.dtype, self.dtype)
     gradient_check.assert_allclose(y_data, y.data, atol=0, rtol=0)
     self.assertIsInstance(y.data.shape, tuple)
Example #40
def _check_forward(e1, e2, f, y_expect):
    e1 = chainer.Variable(e1)
    e2 = chainer.Variable(e2)
    y = f(e1, e2)
    gradient_check.assert_allclose(y_expect, y.data)
Example #41
 def check_forward(self, x1_data, x2_data):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = self.op(x1, x2)
     gradient_check.assert_allclose(self.forward_answer, y.data)
Example #42
 def check_identical(self, x):
     gradient_check.assert_allclose(x, x, atol=0, rtol=0)
Example #43
 def check_forward(self, op, x1_data, x2_data):
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     y = op(x1, x2)
     gradient_check.assert_allclose(op(self.x1, self.x2), y.data)
Example #44
def _check_backward(e1, e2, y_grad, f, bias):
    e1 = chainer.Variable(e1)
    e2 = chainer.Variable(e2)
    y = f(e1, e2)
    y.grad = y_grad
    y.backward()

    func = y.creator
    f = lambda: func.forward((e1.data, e2.data))

    ge1, ge2, gW = gradient_check.numerical_grad(f, (e1.data, e2.data, func.W),
                                                 (y.grad, ),
                                                 eps=1e-2)

    gradient_check.assert_allclose(ge1, e1.grad, rtol=1e-3)
    gradient_check.assert_allclose(ge2, e2.grad, rtol=1e-3)
    gradient_check.assert_allclose(gW, func.gW, rtol=1e-3)

    if bias:
        gV1, gV2, gb = gradient_check.numerical_grad(
            f, (func.V1, func.V2, func.b), (y.grad, ), eps=1e-2)
        gradient_check.assert_allclose(gV1, func.gV1, rtol=1e-3)
        gradient_check.assert_allclose(gV2, func.gV2, rtol=1e-3)
        gradient_check.assert_allclose(gb, func.gb, rtol=1e-3)
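numerical_grad approximates each derivative by central differences, roughly (f(x + eps) - f(x - eps)) / (2 * eps) per element, which is why the comparisons above use a loose rtol=1e-3. A one-dimensional sketch of the scheme:

    import numpy

    def central_difference(f, x, eps=1e-3):
        # The finite-difference scheme behind numerical gradient checking.
        return (f(x + eps) - f(x - eps)) / (2 * eps)

    g = central_difference(numpy.sin, 0.3)
    numpy.testing.assert_allclose(g, numpy.cos(0.3), atol=1e-6)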
Example #45
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.maxout(x, self.pool_size, self.axis)
     gradient_check.assert_allclose(self.y, y.data)
Example #46
 def check_reference(self, x):
     # The returned value and the input refer to the same memory.
     # See issue #488.
     func = lambda: (x, )
     gx, = gradient_check.numerical_grad(func, (x, ), (1, ))
     gradient_check.assert_allclose(cuda.to_cpu(gx), 1)
Example #47
 def check_forward(self, xs):
     y = chainerrl.functions.weighted_sum_arrays(xs, weights=self.weights)
     correct_y = sum(x * w for x, w in zip(self.xs, self.weights))
     gradient_check.assert_allclose(correct_y, cuda.to_cpu(y.array))
Example #48
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.softplus(x, beta=self.beta)
     x_value = cuda.to_cpu(x_data)
     y_exp = numpy.log(1 + numpy.exp(self.beta * x_value)) / self.beta
     gradient_check.assert_allclose(y_exp, y.data)
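The closed form log(1 + exp(beta * x)) / beta overflows for large positive inputs; a numerically stable equivalent, in case the fixture contains large values (a sketch, not the test's helper):

    import numpy

    def stable_softplus(x, beta=1.0):
        bx = beta * x
        # max(bx, 0) + log1p(exp(-|bx|)) equals log(1 + exp(bx)) without overflow.
        return (numpy.maximum(bx, 0) + numpy.log1p(numpy.exp(-numpy.abs(bx)))) / beta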
Example #49
 def check_forward(self, x_data):
     x = Variable(x_data)
     y = self.func(x)
     y_expect = self.x.dot(self.W.T) + self.b
     assert_allclose(y_expect, y.data)
Example #50
 def check_orthogonality(self, w):
     self.initializer(w)
     xp = cuda.get_array_module(w)
     w = w.reshape(len(w), -1)
     dots = xp.tensordot(w, w, (1, 1))
     gradient_check.assert_allclose(dots, xp.identity(len(w)))
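After the reshape, xp.tensordot(w, w, (1, 1)) is w.dot(w.T), so the test asserts that the initializer yields orthonormal rows (Gram matrix equal to the identity). A NumPy-only illustration:

    import numpy

    w = numpy.linalg.qr(numpy.random.randn(4, 4))[0]  # rows are orthonormal
    numpy.testing.assert_allclose(w.dot(w.T), numpy.eye(4), atol=1e-7)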
Example #51
 def test_forward_gpu(self, use_cudnn=True):
     x = chainer.Variable(cuda.to_gpu(self.x))
     y = functions.tanh(x, use_cudnn=use_cudnn)
     self.assertEqual(y.data.dtype, numpy.float32)
     y_expect = functions.tanh(chainer.Variable(self.x))
     gradient_check.assert_allclose(y_expect.data, y.data)
Example #52
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.inv(x)
     gradient_check.assert_allclose(
         _inv(self.x), y.data, **self.check_forward_options)
Example #53
 def check_orthogonality(self, w):
     self.initializer(w)
     xp = cuda.get_array_module(w)
     gradient_check.assert_allclose(w, xp.ones((), dtype=numpy.float32) * 2)
Example #54
 def check_forward(self, x_data, axis=None):
     x = chainer.Variable(x_data)
     y = functions.sum(x, axis=axis)
     self.assertEqual(y.data.dtype, numpy.float32)
     y_expect = self.x.sum(axis=axis)
     gradient_check.assert_allclose(y_expect, y.data)
Example #55
 def check_weight_decay(self):
     self.optimizer.weight_decay(0.1)
     g = cuda.to_cpu(self.target.param.grad)
     expect = np.array([0.0, 1.1, 2.2], dtype=np.float32)
     gradient_check.assert_allclose(g, expect)
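weight_decay(rate) adds rate * param to each parameter's gradient. The expected [0.0, 1.1, 2.2] is consistent with the fixture (not shown) having param.data == param.grad == [0, 1, 2]; under that assumption:

    import numpy as np

    param = np.array([0.0, 1.0, 2.0], dtype=np.float32)  # assumed setUp values
    grad = param.copy()                                  # assumed initial grad
    grad += 0.1 * param                                  # weight_decay(0.1)
    np.testing.assert_allclose(grad, [0.0, 1.1, 2.2])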
Example #56
 def check_param(self):
     linear_out_size = self.out_size * self.pool_size
     initialW = self.initialW.reshape((linear_out_size, -1))
     gradient_check.assert_allclose(initialW, self.link.linear.W.data)
     initial_bias = self.initial_bias.reshape((linear_out_size,))
     gradient_check.assert_allclose(initial_bias, self.link.linear.b.data)
Example #57
 def check_forward(self, x_data, ys_data, indices_or_sections, axis):
     x = chainer.Variable(x_data)
     ys = functions.split_axis(x, indices_or_sections, axis)
     for yd, y in zip(ys_data, ys):
         gradient_check.assert_allclose(yd, y.data, atol=0, rtol=0)
Example #58
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = self.func(x)
     y_expect = self.x.dot(self.W.T) + self.b
     gradient_check.assert_allclose(y_expect, y.data)
Example #59
    def check_backward(self, x, g1, g2):
        split = self._make_split(x)

        grads = (g1, g2, None)
        gx, = split.backward((x, ), grads)
        gradient_check.assert_allclose(g1 + g2, gx)
Example #60
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = self.link(x)
     self.assertEqual(y.data.dtype, numpy.float32)
     gradient_check.assert_allclose(self.y, y.data)