Example #1
    def test_add_param(self):
        with testing.assert_warns(DeprecationWarning):
            self.link.add_param('z', (2, 3))
        self.check_param_init('z', (2, 3), 'f')

        with testing.assert_warns(DeprecationWarning):
            self.link.add_param('w', (2, 3), dtype='d')
        self.check_param_init('w', (2, 3), 'd')

        with testing.assert_warns(DeprecationWarning):
            self.link.add_param('r')
        self.check_param_uninit('r')
        self.link.r.initialize((2, 3))
        self.check_param_init('r', (2, 3), 'f')

        with testing.assert_warns(DeprecationWarning):
            self.link.add_param('s', dtype='d')
        self.check_param_uninit('s')
        self.link.s.initialize((2, 3))
        self.check_param_init('s', (2, 3), 'd')

        initializer = initializers.Zero('d')
        with testing.assert_warns(DeprecationWarning):
            self.link.add_param('t', initializer=initializer)
        self.check_param_uninit('t', initializer)
        self.link.t.initialize((2, 3))
        self.check_param_init('t', (2, 3), 'd', 0)
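
Every example on this page uses the same primitive: chainer.testing.assert_warns is a context manager that fails the enclosing test unless the wrapped block emits at least one warning of the expected class. Below is a minimal, self-contained sketch of the pattern; the deprecated_api helper is hypothetical, and only Chainer itself is assumed.

import warnings

from chainer import testing


def deprecated_api():
    # Hypothetical stand-in for any deprecated function under test.
    warnings.warn('deprecated; use the new API', DeprecationWarning)
    return 42


# Passes: the expected DeprecationWarning is emitted inside the block.
with testing.assert_warns(DeprecationWarning):
    deprecated_api()
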
Example #2
 def check_forward(self, x1_data, x2_data):
     tol = _matmul_tol(x1_data.dtype, x2_data.dtype)
     x1 = chainer.Variable(x1_data)
     x2 = chainer.Variable(x2_data)
     with testing.assert_warns(DeprecationWarning):
         y = self.op(x1, x2)
     testing.assert_allclose(self.forward_answer, y.data, **tol)
Example #3
 def test_numpy_array_async2(self):
     with testing.assert_warns(DeprecationWarning):
         y = cuda.to_gpu(self.x, device=self.device_dtype(1),
                         stream=cuda.Stream.null)
     assert isinstance(y, cuda.ndarray)
     cuda.cupy.testing.assert_array_equal(self.x, y)
     assert int(y.device) == 1
Example #4
    def check_debug_print(self, v, mean, std):
        result = v.debug_print()
        self.assertIn(v.summary(), result)
        self.assertIn('dtype: float32', result)
        # py2.7 on win64 returns shape as long
        self.assertTrue(re.match(r'- shape: \(5L?, 3L?, 5L?, 5L?\)',
                                 result.splitlines()[3]))

        # no grad
        msg = 'statistics: mean={mean:.8f}, std={std:.8f}'
        msg = msg.format(mean=mean, std=std)
        self.assertIn(msg, result)
        self.assertIn('grad: None', result)

        # zero grad
        with testing.assert_warns(DeprecationWarning):
            v.zerograd()
        result = v.debug_print()
        self.assertIn('grad: 0', result)

        # add grad
        v.grad = v.data
        result = v.debug_print()

        msg = 'grad: mean={mean:.8f}, std={std:.8f}'.format(mean=mean, std=std)
        self.assertIn(msg, result)
Example #5
    def test_stalled_getitem(self):
        nth = self.nth
        batch_size = 2
        sleep = 0.5
        timeout = 0.1

        dataset = StallingDataset(nth, sleep)
        it = iterators.MultiprocessIterator(
            dataset, batch_size=batch_size, shuffle=False,
            dataset_timeout=timeout, repeat=False)

        # TimeoutWarning should be issued.
        warning_cls = iterators.MultiprocessIterator.TimeoutWarning
        data = []
        # No warning until the stalling batch
        for i in range(nth // batch_size):
            data.append(it.next())
        # Warning on the stalling batch
        with testing.assert_warns(warning_cls):
            data.append(it.next())
        # Retrieve data until the end
        while True:
            try:
                data.append(it.next())
            except StopIteration:
                break

        # All data must be retrieved
        assert data == [
            dataset.data[i * batch_size: (i+1) * batch_size]
            for i in range((len(dataset) + batch_size - 1) // batch_size)]
Example #6
 def test_numpy_array_async3(self):
     with cuda.Device(1):
         with testing.assert_warns(DeprecationWarning):
             y = cuda.to_gpu(self.x, stream=cuda.Stream.null)
     self.assertIsInstance(y, cuda.ndarray)
     cuda.cupy.testing.assert_array_equal(self.x, y)
     self.assertEqual(int(y.device), 1)
Example #7
 def check_backward_zero_input(self, x_data):
     x = chainer.Variable(x_data)
     y = distributions.utils._modified_xlogx(x)
     if numpy.prod(y.shape) > 1:
         y = chainer.functions.sum(y)
     with testing.assert_warns(RuntimeWarning):
         y.backward()
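
As Example #7 above shows, the expected category is not limited to DeprecationWarning; any warning class works. A minimal sketch assuming only NumPy and Chainer: numpy emits 'divide by zero encountered in log' as a RuntimeWarning, which assert_warns catches the same way.

import numpy

from chainer import testing

# Passes: numpy raises a RuntimeWarning for log(0) inside the block.
with testing.assert_warns(RuntimeWarning):
    numpy.log(numpy.zeros(1))
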
Example #8
 def test_to_gpu_zerograd(self):
     x = chainer.Parameter()
     x.to_gpu()
     with testing.assert_warns(DeprecationWarning):
         x.zerograd()
     x.initialize((3, 2))
     self.check_zerograd(x, cuda.cupy)
Example #9
 def check_double_backward(
         self, x1_data, x2_data, y_grad, x1_grad_grad, x2_grad_grad,
         atol, rtol):
     with testing.assert_warns(DeprecationWarning):
         gradient_check.check_double_backward(
             self.op, (x1_data, x2_data), y_grad,
             (x1_grad_grad, x2_grad_grad),
             atol=atol, rtol=rtol, dtype=numpy.float32)
Example #10
    def check_invalid_poolings(self):
        with self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(self.v, 3, pooling='avg')

        with testing.assert_warns(DeprecationWarning), \
                self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(
                self.v, 3, pooling_class=functions.AveragePooling2D)
Example #11
 def test_zerograds(self):
     with testing.assert_warns(DeprecationWarning):
         self.c2.zerograds()
     numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
     numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
     numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
     self.l1.y.initialize((2, 3))
     numpy.testing.assert_array_equal(self.l1.y.grad, numpy.zeros((2, 3)))
Example #12
    def check_ambiguous_poolings(self):
        with self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(self.v, 3)

        with testing.assert_warns(DeprecationWarning), \
                self.assertRaises(ValueError):
            functions.spatial_pyramid_pooling_2d(
                self.v, 3, pooling_class=functions.MaxPooling2D, pooling='max')
Example #13
 def test_cupy_array_async1(self):
     x = cuda.to_gpu(self.x)
     if not self.c_contiguous:
         x = cuda.cupy.asfortranarray(x)
     with testing.assert_warns(DeprecationWarning):
         y = cuda.to_gpu(x, stream=cuda.Stream())
     self.assertIsInstance(y, cuda.ndarray)
     self.assertIs(x, y)  # Do not copy
     cuda.cupy.testing.assert_array_equal(x, y)
Example #14
 def test_cupy_array_async2(self):
     x = cuda.to_gpu(self.x, device=0)
     with x.device:
         if not self.c_contiguous:
             x = cuda.cupy.asfortranarray(x)
     with testing.assert_warns(DeprecationWarning):
         y = cuda.to_gpu(x, device=1, stream=cuda.Stream.null)
     self.assertIsInstance(y, cuda.ndarray)
     self.assertIsNot(x, y)  # Do copy
     cuda.cupy.testing.assert_array_equal(x, y)
Example #15
 def test_cupy_array_async3(self):
     with cuda.Device(0):
         x = cuda.to_gpu(self.x)
         if not self.c_contiguous:
             x = cuda.cupy.asfortranarray(x)
     with cuda.Device(1):
         with testing.assert_warns(DeprecationWarning):
             y = cuda.to_gpu(x, stream=cuda.Stream.null)
     assert isinstance(y, cuda.ndarray)
     assert x is not y  # Do copy
     cuda.cupy.testing.assert_array_equal(x, y)
Example #16
 def test_cupy_array_async3(self):
     with cuda.Device(0):
         x = cuda.to_gpu(self.x)
         if not self.c_contiguous:
             x = cuda.cupy.asfortranarray(x)
     with cuda.Device(1):
         with testing.assert_warns(DeprecationWarning):
             y = cuda.to_gpu(x, stream=cuda.Stream.null)
     assert isinstance(y, cuda.ndarray)
     assert x is not y  # Do copy
     cuda.cupy.testing.assert_array_equal(x, y)
Example #17
 def func(self, x):
     if hasattr(self, 'pooling'):
         y = functions.spatial_pyramid_pooling_2d(
             x, self.pyramid_height, pooling=self.pooling)
     elif hasattr(self, 'pooling_class'):
         with testing.assert_warns(DeprecationWarning):
             y = functions.spatial_pyramid_pooling_2d(
                 x, self.pyramid_height, self.pooling_class)
     else:
         assert False
     return y
Example #18
 def test_zerograds(self):
     with testing.assert_warns(DeprecationWarning):
         self.s2.zerograds()
      numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
      numpy.testing.assert_array_equal(self.l2.W.grad, numpy.zeros((2, 3)))
      numpy.testing.assert_array_equal(self.l3.W.grad, numpy.zeros((3, 2)))
      self.l1.W.initialize((3, 2))
      numpy.testing.assert_array_equal(self.l1.W.grad, numpy.zeros((3, 2)))
Example #19
 def test_zerograds_fill_multi_gpu(self):
     cupy = cuda.cupy
     with cuda.get_device_from_id(1):
         a = chainer.Variable(cupy.empty(3, dtype=np.float32))
         a.grad = cupy.empty_like(a.data)
     with testing.assert_warns(DeprecationWarning):
         a.zerograd()
     self.assertEqual(int(a.grad.device), 1)
     with cuda.get_device_from_id(1):
         g_expect = cupy.zeros_like(a.data)
         cupy.testing.assert_array_equal(a.grad, g_expect)
Example #20
    def test_zerograds(self):
        self.set_count_parameters()
        with testing.assert_warns(DeprecationWarning):
            self.c2.zerograds()
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        self.assertEqual(self.l1.x.count_zerograd, 1)
        self.assertEqual(self.l2.x.count_zerograd, 1)
        self.assertEqual(self.l3.x.count_zerograd, 1)

        self.l3.x.initialize(3)
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
Example #21
    def test_zerograds(self):
        self.set_count_parameters()
        with testing.assert_warns(DeprecationWarning):
            self.c2.zerograds()
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        self.assertEqual(self.l1.x.count_zerograd, 1)
        self.assertEqual(self.l2.x.count_zerograd, 1)
        self.assertEqual(self.l3.x.count_zerograd, 1)

        self.l3.x.initialize(3)
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
Example #22
 def test_cupy_array_async2(self):
     x = cuda.to_gpu(self.x, device=self.device_dtype(0))
     with x.device:
         if not self.c_contiguous:
             x = cuda.cupy.asfortranarray(x)
     with testing.assert_warns(DeprecationWarning):
         y = cuda.to_gpu(x,
                         device=self.device_dtype(1),
                         stream=cuda.Stream.null)
     self.assertIsInstance(y, cuda.ndarray)
     self.assertIsNot(x, y)  # Do copy
     cuda.cupy.testing.assert_array_equal(x, y)
Example #23
 def func(self, x):
     if hasattr(self, 'pooling'):
         y = functions.spatial_pyramid_pooling_2d(x,
                                                  self.pyramid_height,
                                                  pooling=self.pooling)
     elif hasattr(self, 'pooling_class'):
         with testing.assert_warns(DeprecationWarning):
             y = functions.spatial_pyramid_pooling_2d(
                 x, self.pyramid_height, self.pooling_class)
     else:
         assert False
     return y
Example #24
 def test_zerograds(self):
     with testing.assert_warns(DeprecationWarning):
         self.s2.zerograds()
      numpy.testing.assert_array_equal(self.l1.b.grad, numpy.zeros((3,)))
      numpy.testing.assert_array_equal(self.l2.W.grad, numpy.zeros((2, 3)))
      numpy.testing.assert_array_equal(self.l3.W.grad, numpy.zeros((3, 2)))
      self.l1.W.initialize((3, 2))
      numpy.testing.assert_array_equal(self.l1.W.grad, numpy.zeros((3, 2)))
Example #25
    def check_forward_consistency(self):
        x_cpu = chainer.Variable(self.x)
        y_cpu = self.link(x_cpu)
        self.assertEqual(y_cpu.data.dtype, numpy.float32)

        with testing.assert_warns(DeprecationWarning):
            self.link.to_gpu()
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        y_gpu = self.link(x_gpu)
        self.assertEqual(y_gpu.data.dtype, numpy.float32)

        testing.assert_allclose(y_cpu.data, y_gpu.data.get())
Example #26
 def test_inference_gpu(self):
     bn = links.BatchNormalization(axis=self.axis)
     with testing.assert_warns(DeprecationWarning):
         bn.to_gpu()
     bn(cuda.to_gpu(self.x))
     assert isinstance(bn.beta.data, cuda.cupy.ndarray)
     assert isinstance(bn.gamma.data, cuda.cupy.ndarray)
     assert isinstance(bn.avg_mean, cuda.cupy.ndarray)
     assert isinstance(bn.avg_var, cuda.cupy.ndarray)
     assert bn.beta.shape == self.expected_size
     assert bn.gamma.shape == self.expected_size
     assert bn.avg_mean.shape == self.expected_size
     assert bn.avg_var.shape == self.expected_size
Example #27
 def test_zerograds(self):
     gx_expect = numpy.zeros_like(self.link.x.data)
     gy_expect = numpy.zeros_like(self.link.y.data)
     with testing.assert_warns(DeprecationWarning):
         self.link.zerograds()
     numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
     numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
     self.link.u.initialize((2, 3))
     self.link.v.initialize((2, 3))
     gu_expect = numpy.zeros_like(self.link.u.data)
     gv_expect = numpy.zeros_like(self.link.v.data)
     numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
     numpy.testing.assert_array_equal(self.link.v.grad, gv_expect)
Example #28
 def test_zerograds(self):
     gx_expect = numpy.zeros_like(self.link.x.data)
     gy_expect = numpy.zeros_like(self.link.y.data)
     with testing.assert_warns(DeprecationWarning):
         self.link.zerograds()
     numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
     numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)
     self.link.u.initialize((2, 3))
     self.link.v.initialize((2, 3))
     gu_expect = numpy.zeros_like(self.link.u.data)
     gv_expect = numpy.zeros_like(self.link.v.data)
     numpy.testing.assert_array_equal(self.link.u.grad, gu_expect)
     numpy.testing.assert_array_equal(self.link.v.grad, gv_expect)
Example #29
    def check_forward(self, h_data, c_data, xs_data):
        if self.hidden_none:
            h = c = None
        else:
            h = chainer.Variable(h_data)
            c = chainer.Variable(c_data)
        xs = [chainer.Variable(x) for x in xs_data]
        hy, cy, ys = self.rnn(h, c, xs)

        assert hy.shape == h_data.shape
        assert cy.shape == c_data.shape
        assert len(xs) == len(ys)
        for x, y in zip(xs, ys):
            assert len(x) == len(y)
            assert y.shape[1] == self.out_size

        with testing.assert_warns(DeprecationWarning):
            self.rnn.to_cpu()

        for batch, seq in enumerate(self.xs):
            for layer in range(self.n_layers):
                p = self.rnn[layer]
                h_prev = self.h[layer, batch]
                c_prev = self.c[layer, batch]
                hs = []
                for x in seq:
                    i = sigmoid(
                        x.dot(p.w0.array.T) + h_prev.dot(p.w4.array.T) +
                        p.b0.array + p.b4.array)
                    f = sigmoid(
                        x.dot(p.w1.array.T) + h_prev.dot(p.w5.array.T) +
                        p.b1.array + p.b5.array)
                    c_bar = numpy.tanh(
                        x.dot(p.w2.array.T) + h_prev.dot(p.w6.array.T) +
                        p.b2.array + p.b6.array)
                    o = sigmoid(
                        x.dot(p.w3.array.T) + h_prev.dot(p.w7.array.T) +
                        p.b3.array + p.b7.array)
                    e_c = (f * c_prev + i * c_bar)
                    e_h = o * numpy.tanh(e_c)

                    h_prev = e_h
                    c_prev = e_c
                    hs.append(e_h)

                seq = hs
                testing.assert_allclose(hy.array[layer, batch], h_prev)
                testing.assert_allclose(cy.array[layer, batch], c_prev)

            for y, ey in zip(ys[batch].array, seq):
                testing.assert_allclose(y, ey)
Example #30
 def check_to_cpu_to_gpu(self, c, h):
     self.link.c = c
     self.link.h = h
     with testing.assert_warns(DeprecationWarning):
         self.link.to_gpu()
     self.assertIs(self.link.xp, cuda.cupy)
     self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
     self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
     with testing.assert_warns(DeprecationWarning):
         self.link.to_gpu()
     self.assertIs(self.link.xp, cuda.cupy)
     self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
     self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
     with testing.assert_warns(DeprecationWarning):
         self.link.to_cpu()
     self.assertIs(self.link.xp, numpy)
     self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
     self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
     with testing.assert_warns(DeprecationWarning):
         self.link.to_gpu()
     self.assertIs(self.link.xp, cuda.cupy)
     self.assertIsInstance(self.link.c.data, self.link.xp.ndarray)
     self.assertIsInstance(self.link.h.data, self.link.xp.ndarray)
Example #31
    def check_zerograd(self, a_data, fill=False):
        xp = cuda.get_array_module(a_data)
        a = chainer.Variable(a_data)
        if fill:
            a.grad_var = chainer.Variable(xp.full_like(a_data, np.nan))
            a.grad_var.creator_node = chainer.FunctionNode()

        with testing.assert_warns(DeprecationWarning):
            a.zerograd()
        self.assertIsNot(a.grad, None)
        if fill:
            self.assertIsNone(a.grad_var.creator_node)
        g_expect = xp.zeros_like(a.data)
        xp.testing.assert_array_equal(a.grad, g_expect)
Example #32
    def check_zerograd(self, a_data, fill=False):
        xp = cuda.get_array_module(a_data)
        a = chainer.Variable(a_data)
        if fill:
            a.grad_var = chainer.Variable(xp.full_like(a_data, np.nan))
            a.grad_var.creator_node = chainer.FunctionNode()

        with testing.assert_warns(DeprecationWarning):
            a.zerograd()
        self.assertIsNot(a.grad, None)
        if fill:
            self.assertIsNone(a.grad_var.creator_node)
        g_expect = xp.zeros_like(a.data)
        xp.testing.assert_array_equal(a.grad, g_expect)
Example #33
 def test_to_gpu(self):
     cupy = cuda.cupy
     with testing.assert_warns(DeprecationWarning):
         self.s2.to_gpu()
     self.assertIs(self.s2.xp, cupy)
     self.assertIs(self.s1.xp, cupy)
     self.assertIs(self.l1.xp, cupy)
     self.assertIs(self.l2.xp, cupy)
     self.assertIs(self.l3.xp, cupy)
     self.assertIsInstance(self.l1.b.data, cupy.ndarray)
     self.assertIsInstance(self.l1.b.grad, cupy.ndarray)
     self.assertIsInstance(self.l2.W.data, cupy.ndarray)
     self.assertIsInstance(self.l2.W.grad, cupy.ndarray)
     self.assertIsInstance(self.l3.W.data, cupy.ndarray)
     self.assertIsInstance(self.l3.W.grad, cupy.ndarray)
Example #34
def test_export_external_converters_custom_op(tmpdir, domain, version):
    path = str(tmpdir)

    class Dummy(chainer.FunctionNode):
        def forward_cpu(self, inputs):
            self.x = inputs[0]
            return np.ones_like(inputs[0]),

        def backward(self, indexes, grad_outputs):
            return chainer.Variable(np.zeros_like(self.x)),

    def dummy_function(x):
        return Dummy().apply((x, ))[0]

    model = chainer.Sequential(dummy_function)
    x = input_generator.increasing(2, 5)

    def custom_converter(params):
        return onnx_helper.make_node('Dummy',
                                     params.input_names,
                                     params.output_names,
                                     domain=domain),

    addon_converters = {'Dummy': custom_converter}

    external_opset_imports = {}
    is_set_domain = domain is not None
    if is_set_domain:
        external_opset_imports[domain] = version
    if is_set_domain and onnx_helper.is_support_non_standard_domain():
        export_testcase(model,
                        x,
                        path,
                        external_converters=addon_converters,
                        external_opset_imports=external_opset_imports)
    else:
        with testing.assert_warns(UserWarning):
            export_testcase(model,
                            x,
                            path,
                            external_converters=addon_converters,
                            external_opset_imports=external_opset_imports)

    output_path = os.path.join(path, 'test_data_set_0', 'output_0.pb')
    assert os.path.isfile(output_path)
    output = onnx.numpy_helper.to_array(onnx.load_tensor(output_path))
    expected_output = np.ones_like(x)
    np.testing.assert_allclose(output, expected_output, rtol=1e-5, atol=1e-5)
Example #35
    def check_mean(self, is_gpu):
        with testing.assert_warns(RuntimeWarning):
            if is_gpu:
                mean1 = self.gpu_dist.mean.data
            else:
                mean1 = self.cpu_dist.mean.data

        if self.scipy_onebyone:
            mean2 = []
            for one_params in self.scipy_onebyone_params_iter():
                mean2.append(self.scipy_dist.mean(**one_params))
            mean2 = numpy.vstack(mean2).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            mean2 = self.scipy_dist.mean(**self.scipy_params)
        array.assert_allclose(mean1, mean2)
Example #36
    def test_serialize_backward_compat(self):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
            with testing.assert_warns(UserWarning):
                chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add(2.)
        self.summary.add(3.)

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, 2.5)

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, 2.5)
        testing.assert_allclose(std, 0.5)
Example #37
    def check_variance(self, is_gpu):
        with testing.assert_warns(RuntimeWarning):
            if is_gpu:
                variance1 = self.gpu_dist.variance.data
            else:
                variance1 = self.cpu_dist.variance.data

        if self.scipy_onebyone:
            variance2 = []
            for one_params in self.scipy_onebyone_params_iter():
                variance2.append(self.scipy_dist.var(**one_params))
            variance2 = numpy.vstack(variance2).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            variance2 = self.scipy_dist.var(**self.scipy_params)
        array.assert_allclose(variance1, variance2)
Example #38
    def check_mean(self, is_gpu):
        with testing.assert_warns(RuntimeWarning):
            if is_gpu:
                mean1 = self.gpu_dist.mean.data
            else:
                mean1 = self.cpu_dist.mean.data

        if self.scipy_onebyone:
            mean2 = []
            for one_params in self.scipy_onebyone_params_iter():
                mean2.append(self.scipy_dist.mean(**one_params))
            mean2 = numpy.vstack(mean2).reshape(self.shape +
                                                self.cpu_dist.event_shape)
        else:
            mean2 = self.scipy_dist.mean(**self.scipy_params)
        array.assert_allclose(mean1, mean2)
Example #39
    def test_serialize_backward_compat(self):
        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
            with testing.assert_warns(UserWarning):
                chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add(2.)
        self.summary.add(3.)

        mean = self.summary.compute_mean()
        testing.assert_allclose(mean, 2.5)

        mean, std = self.summary.make_statistics()
        testing.assert_allclose(mean, 2.5)
        testing.assert_allclose(std, 0.5)
Example #40
    def check_variance(self, is_gpu):
        with testing.assert_warns(RuntimeWarning):
            if is_gpu:
                variance1 = self.gpu_dist.variance.data
            else:
                variance1 = self.cpu_dist.variance.data

        if self.scipy_onebyone:
            variance2 = []
            for one_params in self.scipy_onebyone_params_iter():
                variance2.append(self.scipy_dist.var(**one_params))
            variance2 = numpy.vstack(variance2).reshape(
                self.shape + self.cpu_dist.event_shape)
        else:
            variance2 = self.scipy_dist.var(**self.scipy_params)
        array.assert_allclose(variance1, variance2)
Example #41
 def test_model_setup_multi_gpu(self):
     skip, msg = self.skip_loss_scaling()
     if skip:
          raise unittest.SkipTest(msg)
     with cuda.Device(0):
         model = self.model.model
         optimizer = self.model.optimizer
         with testing.assert_warns(DeprecationWarning):
             model.to_gpu(1)
         optimizer.setup(model)
         _optimizer_loss_scaling(optimizer, self.loss_scaling)
     # Initialize the optimizer state by running an update
     for param in optimizer.target.params(False):
         param.cleargrad()
         param.update()
         for v in six.itervalues(param.update_rule.state):
             self.assertEqual(int(param.data.device), int(v.device))
Example #42
 def setUp(self):
     x_shape_0 = 2
     x_shape_1 = numpy.int64(3)
     with testing.assert_warns(DeprecationWarning):
         self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
                                  u=(None, 'd'))
     with self.link.init_scope():
         self.link.y = chainer.Parameter(shape=(2,))
         self.link.v = chainer.Parameter()
     self.p = numpy.array([1, 2, 3], dtype='f')
     self.link.add_persistent('p', self.p)
     self.link.name = 'a'
     self.link.x.update_rule = chainer.UpdateRule()
     self.link.x.update_rule.enabled = False
     self.link.u.update_rule = chainer.UpdateRule()
     if cuda.available:
         self.current_device_id = cuda.cupy.cuda.get_device_id()
Example #43
    def test_resumed_trigger_backward_compat(self):
        trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            trigger = training.triggers.OnceTrigger(self.call_on_resume)
            for expected in self.expected:
                trainer.updater.update()
                self.assertEqual(trigger(trainer), expected)
            # old version does not save anything
            np.savez(f, dummy=0)

            trigger = training.triggers.OnceTrigger(self.call_on_resume)
            with testing.assert_warns(UserWarning):
                serializers.load_npz(f.name, trigger)
            for expected in self.expected_resume:
                trainer.updater.update()
                self.assertEqual(trigger(trainer), expected)
Example #44
    def test_resumed_trigger_backward_compat(self):
        trainer = testing.get_trainer_with_mock_updater(
            stop_trigger=None, iter_per_epoch=self.iter_per_epoch)
        with tempfile.NamedTemporaryFile(delete=False) as f:
            trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
            for expected in self.expected[:self.resume]:
                trainer.updater.update()
                self.assertEqual(trigger(trainer), expected)
            # old version does not save anything
            np.savez(f, dummy=0)

            trigger = training.triggers.ManualScheduleTrigger(*self.schedule)
            with testing.assert_warns(UserWarning):
                serializers.load_npz(f.name, trigger)
            for expected in self.expected[self.resume:]:
                trainer.updater.update()
                self.assertEqual(trigger(trainer), expected)
Example #45
    def test_serialize_backward_compat_overwrite(self):
        self.summary.add({'a': 3., 'b': 1., 'c': 4.})
        self.summary.add({'a': 1., 'b': 5., 'c': 9.})

        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
            with testing.assert_warns(UserWarning):
                chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add({'a': 9., 'b': 2.})
        self.summary.add({'a': 6., 'b': 5.})

        self.check(self.summary, {
            'a': (9., 6.),
            'b': (2., 5.),
        })
Example #46
    def test_to_cpu_on_cpu(self):
        x1 = self.l1.b.data
        gx1 = self.l1.b.grad
        x2 = self.l2.W.data
        gx2 = self.l2.W.grad
        x3 = self.l3.W.data
        gx3 = self.l3.W.grad

        with testing.assert_warns(DeprecationWarning):
            self.s2.to_cpu()

        self.assertIs(self.l1.b.data, x1)
        self.assertIs(self.l1.b.grad, gx1)
        self.assertIs(self.l2.W.data, x2)
        self.assertIs(self.l2.W.grad, gx2)
        self.assertIs(self.l3.W.data, x3)
        self.assertIs(self.l3.W.grad, gx3)
Example #47
    def test_serialize_backward_compat_overwrite(self):
        self.summary.add({'a': 3., 'b': 1., 'c': 4.})
        self.summary.add({'a': 1., 'b': 5., 'c': 9.})

        with tempfile.NamedTemporaryFile(delete=False) as f:
            # old version does not save anything
            numpy.savez(f, dummy=0)
            with testing.assert_warns(UserWarning):
                chainer.serializers.load_npz(f.name, self.summary)

        self.summary.add({'a': 9., 'b': 2.})
        self.summary.add({'a': 6., 'b': 5.})

        self.check(self.summary, {
            'a': (9., 6.),
            'b': (2., 5.),
        })
Example #48
 def setUp(self):
     x_shape_0 = 2
     x_shape_1 = numpy.int64(3)
     with testing.assert_warns(DeprecationWarning):
         self.link = chainer.Link(x=((x_shape_0, x_shape_1), 'd'),
                                  u=(None, 'd'))
     with self.link.init_scope():
         self.link.y = chainer.Parameter(shape=(2,))
         self.link.v = chainer.Parameter()
     self.p = numpy.array([1, 2, 3], dtype='f')
     self.link.add_persistent('p', self.p)
     self.link.name = 'a'
     self.link.x.update_rule = chainer.UpdateRule()
     self.link.x.update_rule.enabled = False
     self.link.u.update_rule = chainer.UpdateRule()
     if cuda.available:
         self.current_device_id = cuda.cupy.cuda.get_device_id()
Example #49
    def check_deleted(self, gpu):
        layer, hook = self.layer, self.hook
        layer.add_hook(hook)
        if gpu:
            with testing.assert_warns(DeprecationWarning):
                layer = layer.to_gpu()
        x = cuda.to_gpu(self.x) if gpu else self.x

        y1 = layer(x).array
        with chainer.using_config('train', False):
            y2 = layer(x).array
        layer.delete_hook(hook.name)
        y3 = layer(x).array
        if gpu:
            y1, y2, y3 = cuda.to_cpu(y1), cuda.to_cpu(y2), cuda.to_cpu(y3)
        assert not numpy.array_equal(y1, y3)
        assert not numpy.array_equal(y2, y3)
Example #50
def test_replace_func_collection_return(tmpdir, return_type):
    path = str(tmpdir)

    class Model(chainer.Chain):
        def __init__(self, return_type):
            super().__init__()
            self.return_type = return_type

        def tiled_array(self, xs, n=5):
            if self.return_type == 'list':
                return [xs.array * i for i in range(1, 1+n)]
            else:
                assert self.return_type == 'dict'
                return {str(i): xs.array * i for i in range(1, 1+n)}

        def __call__(self, xs):
            return self.tiled_array(xs)

    model = Model(return_type)
    x = input_generator.increasing(1, 5)

    with warnings.catch_warnings(record=True):
        model.tiled_array = fake_as_funcnode(model.tiled_array, 'xTiledArray')

    def tiled_array_converter(params):
        return onnx_helper.make_node(
            'xTiledArray', params.input_names, params.output_names),

    addon_converters = {'xTiledArray': tiled_array_converter}

    with testing.assert_warns(UserWarning):
        export_testcase(model, x, path, external_converters=addon_converters)

    model_filepath = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filepath)

    onnx_model = onnx.load(model_filepath)
    node_names = [n.name for n in onnx_model.graph.node]
    assert len(node_names) == 1
    assert node_names[0] == 'xTiledArray_0'
    output_names = [n.name for n in onnx_model.graph.output]
    assert len(output_names) == 5
    for i, name in enumerate(output_names):
        assert name == 'xTiledArray_0_{:d}'.format(i)
Example #51
    def test_output(self):

        # FIXME(syoyo): Currently the test will fail due to the different
        # behavior of bilinear interpolation between Chainer and onnxruntime.
        # So the output value check is disabled for now.
        #
        # Currently Chainer will give [64, 53.333336, 42.666668, 32]
        # (same result with tensorflow r1.13.1 with `align_corners=True`),
        # while onnxruntime gives [64, 48, 32, 32]
        # (same result with tensorflow r1.13.1 with `align_corners=False`)
        #
        # However, the correct behavior will be [64, 54, 40, 32].
        # (cv2.resize and tensorflow master(r1.14 or r2.0) after this fix:
        #  https://github.com/tensorflow/tensorflow/issues/6720)

        self.check_out_values = None  # Skip output value check

        with testing.assert_warns(UserWarning):
            self.expect(self.model, self.x, expected_num_initializers=0)
Example #52
    def check_forward(self, h_data, xs_data):
        if self.hidden_none:
            h = None
        else:
            h = chainer.Variable(h_data)
        xs = [chainer.Variable(x) for x in xs_data]
        hy, ys = self.rnn(h, xs)

        assert hy.shape == h_data.shape
        assert len(xs) == len(ys)
        for x, y in zip(xs, ys):
            assert len(x) == len(y)
            assert y.shape[1] == self.out_size

        with testing.assert_warns(DeprecationWarning):
            self.rnn.to_cpu()

        for batch, seq in enumerate(self.xs):
            for layer in range(self.n_layers):
                p = self.rnn[layer]
                h_prev = self.h[layer, batch]
                hs = []
                for x in seq:
                    # GRU
                    z = sigmoid(
                        x.dot(p.w1.array.T) + h_prev.dot(p.w4.array.T) +
                        p.b1.array + p.b4.array)
                    r = sigmoid(
                        x.dot(p.w0.array.T) + h_prev.dot(p.w3.array.T) +
                        p.b0.array + p.b3.array)
                    h_bar = numpy.tanh(
                        x.dot(p.w2.array.T) + r *
                        ((h_prev).dot(p.w5.array.T) + p.b5.array) + p.b2.array)
                    e_h = (1 - z) * h_bar + z * h_prev

                    h_prev = e_h
                    hs.append(e_h)

                seq = hs
                testing.assert_allclose(hy.array[layer, batch], h_prev)

            for y, ey in zip(ys[batch].array, seq):
                testing.assert_allclose(y, ey)
Example #53
def test():
    model = SimpleNetRawArray()
    dataset = [((numpy.ones((2, 5, 5)) * i).astype(numpy.float32),
                numpy.int32(0)) for i in range(100)]

    batch_size = 5
    devices = (0,)
    iters = [chainer.iterators.SerialIterator(i, batch_size) for i in
             chainer.datasets.split_dataset_n_random(
                 dataset, len(devices))]
    optimizer = chainer.optimizers.SGD(lr=1.0)
    optimizer.setup(model)

    with testing.assert_warns(UserWarning):
        updater = mpu.MultiprocessParallelUpdater(
            iters, optimizer, devices=devices)
    updater.update()

    assert model.call_called == 1
Example #54
    def setUp(self):
        self.l1 = chainer.Link()
        with self.l1.init_scope():
            self.l1.x = chainer.Parameter(shape=(2, 3))
        self.l2 = chainer.Link()
        with self.l2.init_scope():
            self.l2.x = chainer.Parameter(shape=2)
        self.l3 = chainer.Link()
        with self.l3.init_scope():
            self.l3.x = chainer.Parameter()

        self.c1 = chainer.Chain()
        with self.c1.init_scope():
            self.c1.l1 = self.l1
        with testing.assert_warns(DeprecationWarning):
            self.c1.add_link('l2', self.l2)
        self.c2 = chainer.Chain()
        with self.c2.init_scope():
            self.c2.c1 = self.c1
            self.c2.l3 = self.l3
Example #55
    def test_outputs(self):
        class MLP(chainer.Chain):
            def __init__(self, n_units, n_out):
                super(MLP, self).__init__()
                with self.init_scope():
                    self.l1 = L.Linear(None, n_units)
                    self.l2 = L.Linear(None, n_units)
                    self.l3 = L.Linear(None, n_out)

            def __call__(self, x):
                h1 = F.relu(self.l1(x))
                # l2 is deliberately unused, so its params stay uninitialized.
                # h2 = F.relu(self.l2(h1))
                return self.l3(h1)

        model = MLP(100, 10)
        x = np.random.rand(1, 768).astype(np.float32)

        with testing.assert_warns(UserWarning):
            self.expect(model, x)
Example #56
    def setUp(self):
        self.l1 = chainer.Link()
        with self.l1.init_scope():
            self.l1.x = chainer.Parameter(shape=(2, 3))
        self.l2 = chainer.Link()
        with self.l2.init_scope():
            self.l2.x = chainer.Parameter(shape=2)
        self.l3 = chainer.Link()
        with self.l3.init_scope():
            self.l3.x = chainer.Parameter()

        self.c1 = chainer.Chain()
        with self.c1.init_scope():
            self.c1.l1 = self.l1
        with testing.assert_warns(DeprecationWarning):
            self.c1.add_link('l2', self.l2)
        self.c2 = chainer.Chain()
        with self.c2.init_scope():
            self.c2.c1 = self.c1
            self.c2.l3 = self.l3
Example #57
    def check_forward(self, xp):
        link = MyModel()
        if xp is cuda.cupy:
            with testing.assert_warns(DeprecationWarning):
                link = link.to_gpu()
        hook = link_hooks.TimerHook()

        with hook:
            link(chainer.Variable(xp.array([[7, 5]], numpy.float32)))
            link(chainer.Variable(xp.array([[8, 1]], numpy.float32)))

        # call_history
        hist = hook.call_history
        assert len(hist) == 6
        assert all(len(h) == 2 for h in hist)
        names = [h[0] for h in hist]
        times = [h[1] for h in hist]
        assert names == [
            'Linear', 'Linear', 'MyModel', 'Linear', 'Linear', 'MyModel'
        ]
        assert times[0] + times[1] < times[2]
        assert times[3] + times[4] < times[5]

        # summary
        summary = hook.summary()
        assert sorted(summary.keys()) == ['Linear', 'MyModel']
        assert summary['Linear']['occurrence'] == 4
        numpy.testing.assert_allclose(
            summary['Linear']['elapsed_time'],
            times[0] + times[1] + times[3] + times[4])
        assert summary['MyModel']['occurrence'] == 2
        numpy.testing.assert_allclose(summary['MyModel']['elapsed_time'],
                                      times[2] + times[5])

        # print_report
        s = six.StringIO()
        hook.print_report(unit=self.unit, file=s)
        report = s.getvalue()
        assert len(report.splitlines()) == 3
        assert re.search(r'Linear +[.0-9a-z]+ +4', report) is not None
        assert re.search(r'MyModel +[.0-9a-z]+ +2', report) is not None
Example #58
    def check_forward(self, h_data, xs_data):
        if self.hidden_none:
            h = None
        else:
            h = chainer.Variable(h_data)
        xs = [chainer.Variable(x) for x in xs_data]
        hy, ys = self.rnn(h, xs)

        assert hy.shape == h_data.shape
        assert len(xs) == len(ys)
        for x, y in zip(xs, ys):
            assert len(x) == len(y)
            assert y.shape[1] == self.out_size

        with testing.assert_warns(DeprecationWarning):
            self.rnn.to_cpu()

        for batch, seq in enumerate(self.xs):
            for layer in range(self.n_layers):
                p = self.rnn[layer]
                h_prev = self.h[layer, batch]
                hs = []
                for x in seq:
                    if self.activation == 'tanh':
                        activation_func = numpy.tanh
                    elif self.activation == 'relu':
                        activation_func = relu

                    h_prev = activation_func(
                        x.dot(p.w0.array.T) + h_prev.dot(p.w1.array.T) +
                        p.b0.array + p.b1.array)

                    hs.append(h_prev)

                seq = hs
                testing.assert_allclose(hy.data[layer, batch], h_prev)

            for y, ey in zip(ys[batch].array, seq):
                testing.assert_allclose(y, ey)
Example #59
def test_fake_as_funcnode_keep_structure(tmpdir):
    path = str(tmpdir)

    class Model(chainer.Chain):
        def __init__(self):
            super().__init__()

        def f(self, x):
            return {'a': (x, x+1), 'b': [x+2, x+3, x+4]}

        def __call__(self, x):
            ret = self.f(x)
            return ret['a'][0] + ret['b'][1]

    model = Model()
    x = input_generator.increasing(2, 3)

    with warnings.catch_warnings(record=True):
        model.f = fake_as_funcnode(model.f, 'xF')

    def f_converter(params):
        return onnx_helper.make_node(
            'xF', params.input_names, params.output_names),

    addon_converters = {'xF': f_converter}

    with testing.assert_warns(UserWarning):
        export_testcase(model, x, path, external_converters=addon_converters)

    model_filepath = os.path.join(path, 'model.onnx')
    assert os.path.isfile(model_filepath)

    onnx_model = onnx.load(model_filepath)
    node_names = [n.name for n in onnx_model.graph.node]
    assert len(node_names) == 2
    assert node_names[0] == 'xF_0'
    assert len(onnx_model.graph.node[0].output) == 5
    assert len(onnx_model.graph.output) == 1
Example #60
 def _forward(self, *args):
     with testing.assert_warns(DeprecationWarning):
         return batch_renormalization.fixed_batch_renormalization(
             *args, eps=self.eps)