Example #1
    def test_return_samples(self, backend_config):
        batch_size = self.t.shape[0]
        link = self.create_link()
        if backend_config.use_cuda:
            link.to_gpu()

        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)

        # return_samples=True
        y, samples = link(x, t, reduce=self.reduce, return_samples=True)

        assert isinstance(samples, backend_config.xp.ndarray)
        assert samples.shape == (batch_size, self.sample_size + 1)
        assert samples.dtype == numpy.int32

        # return_samples=False, with saved samples
        y_ = self.call_link_with_samples(
            samples,
            lambda: link(x, t, reduce=self.reduce))

        # y and y_ should be equal
        numpy.testing.assert_array_equal(
            cuda.to_cpu(y.array), cuda.to_cpu(y_.array))
Example #2
 def check_atol(self, x, y):
     x_cpu = cuda.to_cpu(x)
     y_cpu = cuda.to_cpu(y)
     max_abs_diff = numpy.max(numpy.abs(x_cpu - y_cpu))
     with self.assertRaises(AssertionError):
         testing.assert_allclose(x, y, atol=max_abs_diff - 1, rtol=0)
     testing.assert_allclose(x, y, atol=max_abs_diff + 1, rtol=0)
Example #3
    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = self.link(x, t, reduce=self.reduce)
        self.assertEqual(y.shape, self.gy.shape)

        W = cuda.to_cpu(self.link.W.data)
        samples = cuda.to_cpu(y.creator.samples)

        loss = numpy.empty((len(self.x),), numpy.float32)
        for i in range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                w = W[samples[i]]
                f = w.dot(ix)
                # the first one is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        testing.assert_allclose(y.data, loss)
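
The reference loop implements the negative-sampling loss -log sigmoid(w_t.x) - sum_k log sigmoid(-w_k.x): flipping the sign of the positive score lets every term be written as log(1 + exp(f)). A standalone restatement of the loop body (a sketch, same arithmetic):

import numpy

def _ns_loss(W, sample_ids, x_i):
    f = W[sample_ids].dot(x_i)          # scores; sample_ids[0] is the positive
    f[0] *= -1                          # flip the positive score's sign
    return numpy.logaddexp(f, 0).sum()  # sum of log(1 + e^f)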
Example #4
def copyto(dst, src):
    """Copies the elements of an ndarray to those of another one.

    This function can copy the CPU/GPU arrays to the destination arrays on
    another device.

    Args:
        dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Destination array.
        src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Source array.

    """
    if isinstance(dst, numpy.ndarray):
        numpy.copyto(dst, numpy.asarray(cuda.to_cpu(src)))
    elif isinstance(dst, intel64.mdarray):
        intel64.ideep.basic_copyto(dst, cuda.to_cpu(src))
    elif isinstance(dst, cuda.ndarray):
        if isinstance(src, chainer.get_cpu_array_types()):
            src = numpy.asarray(src)
            if dst.flags.c_contiguous or dst.flags.f_contiguous:
                dst.set(src)
            else:
                cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
        elif isinstance(src, cuda.ndarray):
            cuda.cupy.copyto(dst, src)
        else:
            raise TypeError('cannot copy from non-array object of type {}'
                            .format(type(src)))
    else:
        raise TypeError('cannot copy to non-array object of type {}'.format(
            type(dst)))
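
A minimal usage sketch for copyto, assuming a NumPy-only setup (the CuPy and iDeep branches behave analogously on their own devices):

import numpy

src = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
dst = numpy.empty_like(src)
copyto(dst, src)  # takes the numpy.ndarray branch via numpy.copyto
assert (dst == src).all()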
Example #5
 def check_backward(self, gpu):
     gx1, gx2 = self.f.backward((self.x1, self.x2), (self.gy1, self.gy2))
     self.assertEqual(self._get_method('backward', not gpu).call_count, 0)
     self._get_method('backward', gpu).assert_called_once_with(
         (self.x1, self.x2), (self.gy1, self.gy2))
     self.assertTrue((cuda.to_cpu(gx1) == cuda.to_cpu(self.gx1)).all())
     self.assertIsNone(gx2)
Example #6
    def check_concat_dicts_padding(self, xp):
        dicts = [
            {'x': xp.random.rand(3, 4), 'y': xp.random.rand(2, 5)},
            {'x': xp.random.rand(4, 4), 'y': xp.random.rand(3, 4)},
            {'x': xp.random.rand(2, 5), 'y': xp.random.rand(2, 6)},
        ]
        arrays = dataset.concat_examples(dicts, padding=0)
        self.assertIn('x', arrays)
        self.assertIn('y', arrays)
        self.assertEqual(arrays['x'].shape, (3, 4, 5))
        self.assertEqual(arrays['y'].shape, (3, 3, 6))
        self.assertEqual(type(arrays['x']), type(dicts[0]['x']))
        self.assertEqual(type(arrays['y']), type(dicts[0]['y']))

        for d in dicts:
            d['x'] = cuda.to_cpu(d['x'])
            d['y'] = cuda.to_cpu(d['y'])
        arrays = {'x': cuda.to_cpu(arrays['x']), 'y': cuda.to_cpu(arrays['y'])}
        numpy.testing.assert_array_equal(arrays['x'][0, :3, :4], dicts[0]['x'])
        numpy.testing.assert_array_equal(arrays['x'][0, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays['x'][0, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['x'][1, :4, :4], dicts[1]['x'])
        numpy.testing.assert_array_equal(arrays['x'][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['x'][2, :2, :5], dicts[2]['x'])
        numpy.testing.assert_array_equal(arrays['x'][2, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][0, :2, :5], dicts[0]['y'])
        numpy.testing.assert_array_equal(arrays['y'][0, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][0, :, 5:], 0)
        numpy.testing.assert_array_equal(arrays['y'][1, :3, :4], dicts[1]['y'])
        numpy.testing.assert_array_equal(arrays['y'][1, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays['y'][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays['y'][2, :2, :6], dicts[2]['y'])
        numpy.testing.assert_array_equal(arrays['y'][2, 2:, :], 0)
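
The padded shapes above follow a simple rule: each output takes the elementwise maximum of the example shapes, and the padding value fills the remainder. A self-contained sketch:

import numpy
from chainer import dataset

batch = [{'x': numpy.ones((2,))}, {'x': numpy.ones((3,))}]
out = dataset.concat_examples(batch, padding=0)
assert out['x'].shape == (2, 3)          # elementwise max of example shapes
assert (out['x'][0] == [1, 1, 0]).all()  # shorter example padded with zeros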
Example #7
    def test_forward(self, backend_config):
        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)

        link = self.create_link()
        if backend_config.use_cuda:
            link.to_gpu()

        y, samples = link(x, t, reduce=self.reduce, return_samples=True)

        self.assertEqual(y.shape, self.gy.shape)

        W = cuda.to_cpu(link.W.data)
        samples = cuda.to_cpu(samples)

        loss = numpy.empty((len(self.x),), self.dtype)
        for i in range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                w = W[samples[i]]
                f = w.dot(ix)
                # the first one is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        testing.assert_allclose(y.data, loss, **self.test_forward_options)
Example #8
    def check_concat_tuples_padding(self, xp):
        tuples = [
            (xp.random.rand(3, 4), xp.random.rand(2, 5)),
            (xp.random.rand(4, 4), xp.random.rand(3, 4)),
            (xp.random.rand(2, 5), xp.random.rand(2, 6)),
        ]
        arrays = dataset.concat_examples(tuples, padding=0)
        self.assertEqual(len(arrays), 2)
        self.assertEqual(arrays[0].shape, (3, 4, 5))
        self.assertEqual(arrays[1].shape, (3, 3, 6))
        self.assertEqual(type(arrays[0]), type(tuples[0][0]))
        self.assertEqual(type(arrays[1]), type(tuples[0][1]))

        for i in range(len(tuples)):
            tuples[i] = cuda.to_cpu(tuples[i][0]), cuda.to_cpu(tuples[i][1])
        arrays = tuple(cuda.to_cpu(array) for array in arrays)
        numpy.testing.assert_array_equal(arrays[0][0, :3, :4], tuples[0][0])
        numpy.testing.assert_array_equal(arrays[0][0, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays[0][0, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[0][1, :4, :4], tuples[1][0])
        numpy.testing.assert_array_equal(arrays[0][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[0][2, :2, :5], tuples[2][0])
        numpy.testing.assert_array_equal(arrays[0][2, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][0, :2, :5], tuples[0][1])
        numpy.testing.assert_array_equal(arrays[1][0, 2:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][0, :, 5:], 0)
        numpy.testing.assert_array_equal(arrays[1][1, :3, :4], tuples[1][1])
        numpy.testing.assert_array_equal(arrays[1][1, 3:, :], 0)
        numpy.testing.assert_array_equal(arrays[1][1, :, 4:], 0)
        numpy.testing.assert_array_equal(arrays[1][2, :2, :6], tuples[2][1])
        numpy.testing.assert_array_equal(arrays[1][2, 2:, :], 0)
Example #9
 def check_forward(self, x_data):
     x = chainer.Variable(x_data)
     y = functions.expand_dims(x, self.axis)
     self.assertEqual(y.data.shape, self.out_shape)
     y_expect = numpy.expand_dims(cuda.to_cpu(x_data), self.axis)
     self.assertEqual(y.data.dtype, self.dtype)
     numpy.testing.assert_array_equal(cuda.to_cpu(y.data), y_expect)
Example #10
 def check_rtol(self, x, y):
     x_cpu = cuda.to_cpu(x)
     y_cpu = cuda.to_cpu(y)
     max_ratio = numpy.max(numpy.abs(x_cpu - y_cpu) / y_cpu)
     with self.assertRaises(AssertionError):
         testing.assert_allclose(x, y, atol=0, rtol=max_ratio - 1)
     testing.assert_allclose(x, y, atol=0, rtol=max_ratio + 1)
Example #11
 def check_call(self, x_data):
     with chainer.using_config('use_cudnn', self.use_cudnn):
         x = chainer.Variable(x_data)
         actual = self.mlp(x)
         act = functions.sigmoid
         expect = self.mlp[2](act(self.mlp[1](act(self.mlp[0](x)))))
     numpy.testing.assert_array_equal(
         cuda.to_cpu(expect.data), cuda.to_cpu(actual.data))
Example #12
    def to_cpu(self):
        """Make a sampler CPU mode.

        """
        if self.use_gpu:
            self.threshold = cuda.to_cpu(self.threshold)
            self.values = cuda.to_cpu(self.values)
            self.use_gpu = False
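
The matching GPU-side method is not shown in this example; a sketch of the counterpart, assuming the same threshold/values attributes and an available CuPy device (hypothetical, not from the original source):

    def to_gpu(self):
        """Makes the sampler run in GPU mode (hypothetical counterpart)."""
        if not self.use_gpu:
            self.threshold = cuda.to_gpu(self.threshold)
            self.values = cuda.to_gpu(self.values)
            self.use_gpu = True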
Example #13
    def check_concat_arrays(self, arrays, device=None):
        array = dataset.concat_examples(arrays, device)
        self.assertEqual(array.shape, (len(arrays),) + arrays[0].shape)
        self.check_device(array, device)

        for x, y in zip(array, arrays):
            numpy.testing.assert_array_equal(
                cuda.to_cpu(x), cuda.to_cpu(y))
Example #14
    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = functions.select_item(x, t)
        y_exp = cuda.to_cpu(x_data)[range(t_data.size), cuda.to_cpu(t_data)]

        self.assertEqual(y.data.dtype, self.dtype)
        numpy.testing.assert_equal(cuda.to_cpu(y.data), y_exp)
Example #15
 def check_forward(self, gpu):
     y1, y2 = self.f.forward((self.x1, self.x2))
     self.assertEqual(self.f.check_type_forward.call_count, 0)
     self.assertEqual(self._get_method('forward', not gpu).call_count, 0)
     self._get_method('forward', gpu).assert_called_once_with(
         (self.x1, self.x2))
     self.assertTrue((cuda.to_cpu(y1) == cuda.to_cpu(self.y1)).all())
     self.assertTrue((cuda.to_cpu(y2) == cuda.to_cpu(self.y2)).all())
Example #16
 def test_forward_gpu_cpu(self):
     cpu_res = self.get_result(cuda.to_cpu(self.input),
                               cuda.to_cpu(self.gt))
     gpu_res = self.get_result(cuda.to_gpu(self.input),
                               cuda.to_gpu(self.gt))
     for idx in range(len(gpu_res)):
         gpu_res[idx].to_cpu()
         numpy.testing.assert_almost_equal(cpu_res[idx].data,
                                           gpu_res[idx].data)
Example #17
 def check_gaussian_kl_divergence(self, mean, ln_var):
     if self.wrap_m:
         mean = chainer.Variable(mean)
     if self.wrap_v:
         ln_var = chainer.Variable(ln_var)
     actual = cuda.to_cpu(
         F.gaussian_kl_divergence(mean, ln_var, self.reduce).data)
     testing.assert_allclose(self.expect, actual)
Example #18
 def check_forward(self, x1_data, x2_data):
     y = F.arctan2(x1_data, x2_data)
     numpy.testing.assert_array_less(
         cuda.to_cpu(y.data),
         numpy.full(y.shape, numpy.pi))
     numpy.testing.assert_array_less(
         numpy.full(y.shape, -numpy.pi),
         cuda.to_cpu(y.data))
     testing.assert_allclose(
         numpy.arctan2(self.x1, self.x2), y.data, atol=1e-4, rtol=1e-4)
Example #19
    def check_forward(self, h_data, x_data):
        h = chainer.Variable(h_data)
        x = chainer.Variable(x_data)
        y = self.mgu(h, x)

        W_f = cuda.to_cpu(self.mgu.W_f.W.data)
        W_h = cuda.to_cpu(self.mgu.W_h.W.data)
        for i in six.moves.range(3):
            h_new = mgu(W_f, W_h, self.h[i], self.x[i])
            testing.assert_allclose(h_new, y.data[i])
Example #20
    def check_concat_dicts(self, dicts, device=None):
        arrays = dataset.concat_examples(dicts, device)
        self.assertEqual(frozenset(arrays.keys()), frozenset(dicts[0].keys()))
        for key in arrays:
            shape = (len(dicts),) + dicts[0][key].shape
            self.assertEqual(arrays[key].shape, shape)
            self.check_device(arrays[key], device)

            for x, y in zip(arrays[key], dicts):
                numpy.testing.assert_array_equal(
                    cuda.to_cpu(x), cuda.to_cpu(y[key]))
Example #21
    def check_concat_tuples(self, tuples, device=None):
        arrays = dataset.concat_examples(tuples, device)
        self.assertEqual(len(arrays), len(tuples[0]))
        for i in range(len(arrays)):
            shape = (len(tuples),) + tuples[0][i].shape
            self.assertEqual(arrays[i].shape, shape)
            self.check_device(arrays[i], device)

            for x, y in zip(arrays[i], tuples):
                numpy.testing.assert_array_equal(
                    cuda.to_cpu(x), cuda.to_cpu(y[i]))
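
For reference, a self-contained sketch of the unpadded tuple case (equal shapes, so no padding argument is needed):

import numpy
from chainer import dataset

batch = [(numpy.zeros((2, 3), numpy.float32), numpy.int32(0)),
         (numpy.ones((2, 3), numpy.float32), numpy.int32(1))]
xs, ts = dataset.concat_examples(batch)
assert xs.shape == (2, 2, 3)  # examples stacked along a new batch axis
assert ts.shape == (2,)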
Example #22
 def check_predict(self):
     x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
     x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
     result = self.link.predict([x1, x2], oversample=False)
     y = cuda.to_cpu(result.data)
     self.assertEqual(y.shape, (2, 1000))
     self.assertEqual(y.dtype, numpy.float32)
     result = self.link.predict([x1, x2], oversample=True)
     y = cuda.to_cpu(result.data)
     self.assertEqual(y.shape, (2, 1000))
     self.assertEqual(y.dtype, numpy.float32)
Example #23
    def check_forward(self, a_data, b_data):
        a = chainer.Variable(a_data)
        b = chainer.Variable(b_data)
        y = functions.scatter_add(a, self.slices, b)
        self.assertEqual(y.data.dtype, numpy.float32)
        # Test to make sure that the input values are not changed
        numpy.testing.assert_equal(cuda.to_cpu(a.data), self.a_data_original)

        a_data_copy = cuda.to_cpu(a_data).copy()
        numpy.add.at(a_data_copy, self.slices, cuda.to_cpu(b_data))
        numpy.testing.assert_equal(a_data_copy, cuda.to_cpu(y.data))
Example #24
    def check_predict(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)

        with numpy.errstate(divide='ignore'):
            result = self.link.predict([x1, x2], oversample=False)
            y = cuda.to_cpu(result.data)
            assert y.shape == (2, 1000)
            assert y.dtype == self.dtype
            result = self.link.predict([x1, x2], oversample=True)
            y = cuda.to_cpu(result.data)
            assert y.shape == (2, 1000)
            assert y.dtype == self.dtype
Example #25
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        W_f = cuda.to_cpu(self.mgu.W_f.W.data)
        W_h = cuda.to_cpu(self.mgu.W_h.W.data)
        y1 = self.mgu(x)
        y2 = self.mgu(x)

        h = numpy.zeros(self.out_size, dtype='f')
        for i in six.moves.range(3):
            h1 = mgu(W_f, W_h, h, self.x[i])
            testing.assert_allclose(h1, y1.data[i])
            h2 = mgu(W_f, W_h, h1, self.x[i])
            testing.assert_allclose(h2, y2.data[i])
Example #26
    def check_call(self):
        xp = self.link.xp

        # Suppress warning that arises from zero division in BatchNormalization
        with numpy.errstate(divide='ignore'):
            x1 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
            y1 = cuda.to_cpu(self.link(x1)['prob'].data)
            self.assertEqual(y1.shape, (1, 1000))

            x2 = Variable(xp.asarray(numpy.random.uniform(
                -1, 1, (1, 3, 128, 128)).astype(numpy.float32)))
            y2 = cuda.to_cpu(self.link(x2, layers=['pool5'])['pool5'].data)
            self.assertEqual(y2.shape, (1, 2048))
Example #27
 def check_call(self, x_data):
     with chainer.using_config('use_cudnn', self.use_cudnn):
         x = chainer.Variable(x_data)
         actual = self.mlp(x)
         act = functions.sigmoid
         expect = self.mlp[2](act(self.mlp[1](act(self.mlp[0](x)))))
     numpy.testing.assert_array_equal(
         cuda.to_cpu(expect.data), cuda.to_cpu(actual.data))
     for i, conv in enumerate(self.mlp):
         self.assertIsInstance(conv, links.Convolution2D)
         if i == 0:
             self.assertEqual(conv.W.data.shape, (96, 3, 11, 11))
         else:
             self.assertEqual(conv.W.data.shape, (96, 96, 1, 1))
Example #28
 def check_forward(self, x_data):
     slices = []
     for i, s in enumerate(self.slices):
         if isinstance(s, numpy.ndarray):
             s = chainer.backends.cuda.cupy.array(s)
         if isinstance(s, list):
             s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
         slices.append(s)
     slices = tuple(slices)
     x = chainer.Variable(x_data)
     y = functions.get_item(x, slices)
     self.assertEqual(y.data.dtype, numpy.float32)
     numpy.testing.assert_equal(cuda.to_cpu(x_data)[self.slices],
                                cuda.to_cpu(y.data))
Example #29
    def check_forward(self, x_data, t_data, w_data, sampler):
        batch_size = len(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        w = chainer.Variable(w_data)

        # return_samples=False
        y = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce)
        assert y.dtype == self.dtype

        # return_samples=True
        y_, samples = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce,
            return_samples=True)

        xp = chainer.backend.get_array_module(x)
        assert isinstance(samples, xp.ndarray)
        assert samples.dtype == numpy.int32
        assert samples.shape == (batch_size, self.sample_size + 1)

        # The sampler is deterministic, so y and y_ should be equal.
        assert y.dtype == y_.dtype
        numpy.testing.assert_array_equal(
            cuda.to_cpu(y.array), cuda.to_cpu(y_.array))

        assert y.shape == self.gy.shape

        samples = cuda.to_cpu(samples)

        loss = numpy.empty((len(self.x),), self.dtype)
        for i in six.moves.range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                iw = self.w[samples[i]]

                f = iw.dot(ix)
                # the first one is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        assert y.dtype == loss.dtype
        testing.assert_allclose(y.data, loss, **self.check_forward_options)
Example #30
def _cross_covariance(y, z):
    row = y.shape[1]
    col = z.shape[1]
    y, z = cuda.to_cpu(y), cuda.to_cpu(z)
    y_mean = y.mean(axis=0)
    z_mean = z.mean(axis=0)
    N = y.shape[0]
    loss_expect = numpy.zeros((row, col), dtype=numpy.float32)
    for i in six.moves.xrange(row):
        for j in six.moves.xrange(col):
            for n in six.moves.xrange(N):
                loss_expect[i, j] += (y[n, i] - y_mean[i]) * (
                    z[n, j] - z_mean[j])
    loss_expect /= N
    return loss_expect
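
The triple loop spells out the sample cross-covariance entry by entry; a vectorized NumPy equivalent (a sketch, not part of the original test):

def _cross_covariance_vectorized(y, z):
    y, z = cuda.to_cpu(y), cuda.to_cpu(z)
    y_c = y - y.mean(axis=0)  # center each column
    z_c = z - z.mean(axis=0)
    return (y_c.T.dot(z_c) / len(y)).astype(numpy.float32)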
Example #31
    def check_extract(self):
        x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
        x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)

        with numpy.errstate(divide='ignore'):
            result = self.link.extract([x1, x2], layers=['res3', 'pool5'])
            self.assertEqual(len(result), 2)
            y1 = cuda.to_cpu(result['res3'].data)
            self.assertEqual(y1.shape, (2, 512, 28, 28))
            self.assertEqual(y1.dtype, self.dtype)
            y2 = cuda.to_cpu(result['pool5'].data)
            self.assertEqual(y2.shape, (2, 2048))
            self.assertEqual(y2.dtype, self.dtype)

            x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
            result = self.link.extract([x3], layers=['res2'], size=None)
            self.assertEqual(len(result), 1)
            y3 = cuda.to_cpu(result['res2'].data)
            self.assertEqual(y3.shape, (1, 256, 20, 15))
            self.assertEqual(y3.dtype, self.dtype)
Example #32
    def check_call(self, x, expects):
        outs = self.link(x)

        if isinstance(self.pick, tuple):
            pick = self.pick
        else:
            if self.pick is None:
                pick = ('l2',)
            else:
                pick = (self.pick,)
            outs = (outs,)

        self.assertEqual(len(outs), len(pick))

        for out, layer_name in zip(outs, pick):
            self.assertIsInstance(out, chainer.Variable)
            self.assertIsInstance(out.array, self.link.xp.ndarray)

            out = to_cpu(out.array)
            np.testing.assert_equal(out, to_cpu(expects[layer_name].array))
Example #33
 def check_forward(self, x_data, roi_data, roi_index_data):
     x = chainer.Variable(x_data)
     rois = chainer.Variable(roi_data)
     roi_indices = chainer.Variable(roi_index_data)
     y = functions.psroi_max_align_2d(x, rois, roi_indices, self.out_c,
                                      self.out_h, self.out_w,
                                      self.spatial_scale, self.group_size)
     self.assertEqual(y.data.dtype, np.float32)
     y_data = cuda.to_cpu(y.data)
     self.assertEqual((self.n_roi, self.out_c, self.out_h, self.out_w),
                      y_data.shape)
Example #34
    def check_forward(self, depth_data, space_data):
        depth = chainer.Variable(depth_data)
        d2s = functions.depth2space(depth, self.r)
        d2s_value = cuda.to_cpu(d2s.data)

        self.assertEqual(d2s_value.dtype, self.dtype)
        self.assertEqual(d2s_value.shape, (2, 2, 6, 4))

        d2s_expect = space_data

        testing.assert_allclose(d2s_value, d2s_expect)
Example #35
    def _check_forward(self, mb_locs_local, mb_confs_local, gt_mb_locs_local,
                       gt_mb_labels_local, k):
        loc_loss_local, conf_loss_local = multibox_loss(
            mb_locs_local, mb_confs_local, gt_mb_locs_local,
            gt_mb_labels_local, k, self.comm)

        loc_loss_local = cuda.to_cpu(loc_loss_local.array)
        conf_loss_local = cuda.to_cpu(conf_loss_local.array)
        from mpi4py import MPI
        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, loc_loss_local)
        self.comm.mpi_comm.Allreduce(MPI.IN_PLACE, conf_loss_local)

        loc_loss, conf_loss = multibox_loss(self.mb_locs, self.mb_confs,
                                            self.gt_mb_locs, self.gt_mb_labels,
                                            k)
        np.testing.assert_almost_equal(loc_loss_local,
                                       loc_loss.array,
                                       decimal=2)
        np.testing.assert_almost_equal(conf_loss_local,
                                       conf_loss.array,
                                       decimal=2)
Example #36
def _deconv(h):
    h = cuda.to_cpu(h)
    h_mean = h.mean(axis=0)
    N, M = h.shape
    loss_expect = numpy.zeros((M, M), dtype=h.dtype)
    for i in six.moves.range(M):
        for j in six.moves.range(M):
            if i != j:
                for n in six.moves.range(N):
                    loss_expect[i, j] += (h[n, i] - h_mean[i]) * (h[n, j] -
                                                                  h_mean[j])
    return loss_expect / N
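
Equivalently, this is the sample covariance of h with its diagonal zeroed (the i != j guard); a vectorized sketch:

def _deconv_vectorized(h):
    h = cuda.to_cpu(h)
    h_c = h - h.mean(axis=0)           # center each column
    cov = h_c.T.dot(h_c) / h.shape[0]  # full covariance matrix
    numpy.fill_diagonal(cov, 0)        # the loop skips i == j
    return cov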
Example #37
    def check_forward(self, x0_data, x1_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        loss = functions.absolute_error(x0, x1)
        loss_value = cuda.to_cpu(loss.data)
        assert loss_value.dtype == numpy.float32
        assert loss_value.shape == x0_data.shape

        for i in numpy.ndindex(self.x0.shape):
            # Compute expected value
            loss_expect = abs(self.x0[i] - self.x1[i])
            assert round(loss_value[i] - loss_expect, 5) == 0
Example #38
    def to_cpu(self):
        """Copies the data and gradient arrays to CPU."""
        if self.data is None:
            return

        self._data = [cuda.to_cpu(self.data)]
        if self._grad_var is not None:
            self._grad_var.to_cpu()
        # ensure that the node tracks the device migration
        node = self._node
        if node._data is not None:
            node.retain_data()
Example #39
    def check_forward(self, x_data, roi_data):
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        y = functions.roi_pooling_2d(x,
                                     rois,
                                     outh=self.outh,
                                     outw=self.outw,
                                     spatial_scale=self.spatial_scale)
        self.assertEqual(y.data.dtype, numpy.float32)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
Example #40
    def check_forward(self, x0_data, x1_data):
        x0 = chainer.Variable(x0_data)
        x1 = chainer.Variable(x1_data)
        loss = functions.squared_error(x0, x1)
        loss_value = cuda.to_cpu(loss.data)
        assert loss_value.dtype == self.dtype
        assert loss_value.shape == x0_data.shape

        for i in numpy.ndindex(self.x0.shape):
            # Compute expected value
            loss_expect = (self.x0[i] - self.x1[i])**2
            assert round(loss_value[i] - loss_expect, self.places) == 0
Example #41
    def check_concat_arrays(self, arrays, device, expected_type):
        array = dataset.concat_examples(arrays, device, self.padding)
        self.assertEqual(array.shape, (len(arrays), ))
        self.check_device(array, device)

        for x, y in zip(array, arrays):
            if backend.get_array_module(x) == numpy:
                numpy.testing.assert_array_equal(
                    numpy.array(x), numpy.array(y, dtype=expected_type))
            else:
                numpy.testing.assert_array_equal(
                    cuda.to_cpu(x), numpy.array(y, dtype=expected_type))
Example #42
    def check_backprop_step(self, gxs):
        flag_none = gxs[0] is None

        x1 = chainer.Variable(self.x1)
        x2 = chainer.Variable(self.x2)
        self.f.inputs = (x1.node, x2.node)
        gxrefs = [[gx] if gx is not None else [] for gx in gxs]
        grad_outputs = (self.gy1, self.gy2)
        grad_inputs = dict(zip(self.f.inputs, gxrefs))
        _backprop_utils.backprop_step(self.f, (0, 1), grad_outputs,
                                      grad_inputs, True)
        if not chainer.configuration.config.lazy_grad_sum:
            # assert eager grad sum
            for gxref in gxrefs:
                self.assertLessEqual(len(gxref), 1)
        gx1 = _backprop_utils._reduce(gxrefs[0])
        gx2 = _backprop_utils._reduce(gxrefs[1])
        if flag_none:
            numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
                                             cuda.to_cpu(self.gx1.data))
            self.assertIsNone(gx2)
        else:
            numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
                                             cuda.to_cpu(self.gx1_accum.data))
            numpy.testing.assert_array_equal(cuda.to_cpu(gx2.data),
                                             cuda.to_cpu(self.gx2_orig.data))
Example #43
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        # Make the batch normalization layers act as the identity function.
        self.l.dw_bn.avg_var[:] = 1
        self.l.dw_bn.avg_mean[:] = 0
        self.l.pw_bn.avg_var[:] = 1
        self.l.pw_bn.avg_mean[:] = 0
        with chainer.using_config('train', False):
            y = self.l(x)

        self.assertIsInstance(y, chainer.Variable)
        self.assertIsInstance(y.array, self.l.xp.ndarray)

        if self.dilate == 1:
            _x_data = x_data
        elif self.dilate == 2:
            _x_data = x_data[:, :, 1:-1, 1:-1]
        if self.activ == 'relu':
            np.testing.assert_almost_equal(cuda.to_cpu(y.array),
                                           np.maximum(cuda.to_cpu(_x_data), 0),
                                           decimal=4)
        elif self.activ == 'add_one':
            np.testing.assert_almost_equal(cuda.to_cpu(y.array),
                                           cuda.to_cpu(_x_data) + 1,
                                           decimal=4)
        elif self.activ is None:
            np.testing.assert_almost_equal(cuda.to_cpu(y.array),
                                           cuda.to_cpu(_x_data),
                                           decimal=4)
Example #44
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        # Make the batch normalization layers act as the identity function.
        if self.expansion_size != 1:
            self.l.expand.bn.avg_var[:] = 1
            self.l.expand.bn.avg_mean[:] = 0
        self.l.depthwise.bn.avg_var[:] = 1
        self.l.depthwise.bn.avg_mean[:] = 0
        self.l.project.bn.avg_var[:] = 1
        self.l.project.bn.avg_mean[:] = 0
        with chainer.using_config('train', False):
            y = self.l(x)

        self.assertIsInstance(y, chainer.Variable)
        self.assertIsInstance(y.array, self.l.xp.ndarray)

        _x_data = x_data
        if self.expansion_size > self.in_channels:
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array),
                cuda.to_cpu(_x_data) + self.expansion_size *
                np.maximum(np.minimum(cuda.to_cpu(_x_data), 6), 0),
                decimal=4)
        else:
            np.testing.assert_almost_equal(
                cuda.to_cpu(y.array),
                cuda.to_cpu(_x_data) +
                np.maximum(np.minimum(cuda.to_cpu(_x_data), 6), 0),
                decimal=4)
Example #45
    def check_forward(self, x_data, t_data, w_data, sampler):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        w = chainer.Variable(w_data)

        # return_samples=False
        y = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce)

        # return_samples=True
        y_, samples = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce,
            return_samples=True)

        # The sampler is deterministic, so y and y_ should be equal.
        numpy.testing.assert_array_equal(
            cuda.to_cpu(y.array), cuda.to_cpu(y_.array))

        self.assertEqual(y.shape, self.gy.shape)

        samples = cuda.to_cpu(samples)

        loss = numpy.empty((len(self.x),), self.dtype)
        for i in six.moves.range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                iw = self.w[samples[i]]

                f = iw.dot(ix)
                # the first one is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        testing.assert_allclose(y.data, loss, **self.check_forward_options)
Example #46
    def classify(self, image: Image.Image,
                 bboxes: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:

        if not len(bboxes):
            return (np.empty(
                (0, ), dtype=np.int32), np.empty((0, ), dtype=np.float32))

        # crop character regions
        # - with small padding around character bbox
        # - keep original aspect ratio
        crop_bboxes = _calc_padded_bboxes(bboxes)
        images = [image.crop(bbox) for bbox in crop_bboxes]

        # convert to NCHW tensor format
        images = [
            img.resize(self.input_size, resample=Image.BILINEAR)
            for img in images
        ]
        img_arr = np.array([
            np.asarray(img, dtype=np.float32).transpose(2, 0, 1)
            for img in images
        ])

        # inference
        if self.xp != np:
            img_arr = cuda.to_gpu(img_arr)

        img_arr = (img_arr - 127.5) / 128.
        with chainer.no_backprop_mode(), chainer.using_config('train', False):
            probs = F.softmax(self(img_arr)).array

        # get labels and scores
        labels = self.xp.argmax(probs, axis=1)
        scores = self.xp.max(probs, axis=1)

        if self.xp != np:
            labels = cuda.to_cpu(labels)
            scores = cuda.to_cpu(scores)

        return labels, scores
Example #47
    def check_backward(self, src_id, dst_id):
        x_data = _to_gpu(self.x_data, src_id)
        x = chainer.Variable(x_data)

        y = functions.copy(x, dst_id)
        gy = _to_gpu(self.gy, dst_id)
        y.grad = gy

        y.backward()

        x_grad = x.grad
        self.assertEqual(cuda.get_device_from_array(x_grad).id, src_id)
        numpy.testing.assert_array_equal(cuda.to_cpu(x_grad), self.gy)
Example #48
def converter(batch, device):
    if device == 0:
        batch = np.array(batch)

    elif device == 1:
        batch = cuda.to_gpu(xp.array(batch))

    elif device >= 2:
        batch = cuda.to_cpu(batch)
        batch = xp.split(xp.array(batch).astype(xp.int64), device)
        batch = [cuda.to_gpu(batch[i], i) for i in range(device)]

    return batch
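
A usage sketch, assuming np is NumPy and xp is the module-level array backend used above; only the device == 0 branch runs without a GPU:

batch = [1, 2, 3, 4]
out = converter(batch, 0)  # CPU path: a plain numpy array
# converter(batch, 2) would split the batch across two GPUs (requires CuPy)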
Example #49
    def check_forward(self, x_data, ind_data):
        x = chainer.Variable(x_data)
        indices = chainer.Variable(ind_data)
        y = functions.permutate(x, indices, axis=self.axis, inv=self.inv)

        y_cpu = cuda.to_cpu(y.data)
        y_cpu = numpy.rollaxis(y_cpu, axis=self.axis)
        x_data = numpy.rollaxis(self.x, axis=self.axis)
        for i, ind in enumerate(self.indices):
            if self.inv:
                numpy.testing.assert_array_equal(y_cpu[ind], x_data[i])
            else:
                numpy.testing.assert_array_equal(y_cpu[i], x_data[ind])
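
In plain NumPy terms, the two branches check the following identities along axis 0 (a sketch, assuming ind is a permutation of range(len(x))):

import numpy

x = numpy.array([0, 10, 20, 30])
ind = numpy.array([2, 0, 3, 1])
# inv=False: y[i] = x[ind[i]]
assert (x[ind] == numpy.array([20, 0, 30, 10])).all()
# inv=True: y[ind[i]] = x[i], i.e. y = x[numpy.argsort(ind)]
assert (x[numpy.argsort(ind)] == numpy.array([10, 30, 0, 20])).all()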
Example #50
 def error(self, x, t):
     xp = cuda.get_array_module(x, False)
     size = len(t)
     with chainer.no_backprop_mode():
         with chainer.using_config("train", False):
             h = xp.reshape(xp.sign(self.calculate(x).data), size)
     if isinstance(h, chainer.Variable):
         h = h.data
     if isinstance(t, chainer.Variable):
         t = t.data
     result = (h != t).sum() / size
     chainer.reporter.report({'error': result}, self)
     return cuda.to_cpu(result) if xp != np else result
Example #51
    def forward(self, inputs):
        gpu = backend.get_array_module(*inputs) is not numpy
        inputs = [cuda.to_cpu(x) for x in inputs]

        outputs = self.forward_func(*inputs)

        if gpu:
            # TODO(unno): We can remove redundant gpu-cpu copy using
            # theano.sandbox.cuda.CudaNdarray.gpudata
            device = cuda.get_device_from_array(inputs)
            outputs = [cuda.to_gpu(x, device) for x in outputs]

        return tuple(outputs)
Example #52
def _check_indices(indices):
    if len(indices) == 0:
        return
    # TODO(unno): Check indices without cpu
    indices = cuda.to_cpu(indices)
    for i in indices:
        if 0 <= i < len(indices):
            continue
        raise ValueError('Out of bounds index: {}'.format(i))
    sort = numpy.sort(indices)
    for s, t in six.moves.zip(sort, sort[1:]):
        if s == t:
            raise ValueError('indices contains duplicate value: {}'.format(s))
Example #53
    def check_forward(self, x_data):
        x = chainer.Variable(x_data)

        y = functions.batch_l2_norm_squared(x)
        self.assertEqual(y.data.dtype, np.float32)
        y_data = cuda.to_cpu(y.data)

        x_two_dim = _as_two_dim(self.x)
        y_expect = np.empty(len(self.x))
        for n in six.moves.range(len(self.x)):
            y_expect[n] = sum(map(lambda x: x * x, x_two_dim[n]))

        testing.assert_allclose(y_expect, y_data)
Example #54
    def predict(self, images, return_visual_backprop=False):
        with cuda.Device(self._device_id):
            images = [self.xp.array(image) for image in images]
            images = self.xp.stack(images, axis=0)
            with chainer.using_config('train', False):
                rois, bboxes = self(images)
                if return_visual_backprop:
                    if not hasattr(self, 'visual_backprop'):
                        self.visual_backprop = VisualBackprop()
                    visual_backprop = cuda.to_cpu(
                        self.visual_backprop.perform_visual_backprop(
                            self.visual_backprop_anchors[0]))
                else:
                    visual_backprop = None

                bboxes = self.extract_corners(bboxes)
                bboxes = self.scale_bboxes(bboxes,
                                           Size._make(images.shape[-2:]))

        bboxes = [cuda.to_cpu(bbox).reshape(1, -1) for bbox in bboxes.data]

        return bboxes, rois, np.ones((len(bboxes), 1)), visual_backprop
Example #55
def predict(img):
    #print(img)
    a = np.asarray(img).transpose(2, 0, 1).astype(np.float32) / 255.
    x = np.expand_dims(a, axis=0)
    if use_gpu:
        y = model(cuda.to_gpu(x))
    else:
        y = model(x)

    ret = y.data[0][0] > -3
    if use_gpu:
        ret = cuda.to_cpu(ret)
    return ret.astype(np.int32)
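
A usage sketch, assuming model and use_gpu are defined at module level as the snippet implies; the dummy HWC image here is hypothetical:

dummy = np.zeros((64, 64, 3), dtype=np.uint8)  # hypothetical input image
label = predict(dummy)  # int32: 1 where the raw score exceeds -3, else 0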
Example #56
    def check_forward(self, x_data, roi_data, roi_index_data):
        x = chainer.Variable(x_data)
        rois = chainer.Variable(roi_data)
        roi_indices = chainer.Variable(roi_index_data)
        y = functions.roi_average_pooling_2d(x,
                                             rois,
                                             roi_indices,
                                             outsize=self.outsize,
                                             spatial_scale=self.spatial_scale)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)

        self.assertEqual(self.gy.shape, y_data.shape)
Example #57
def train_model(model):
    chainer.config.train = True  # using_config() alone has no effect outside a with block
    print('training model: ', model.name)
    train, test = mnist.get_mnist(withlabel=True, ndim=1)

    x, t = train[0]
    print('train[0] label: ', t)
    plt.imshow(x.reshape(28, 28), cmap='gray')
    # plt.show() # uncomment to show image

    batch_size = 128

    train_iter = iterators.SerialIterator(train,
                                          batch_size,
                                          repeat=True,
                                          shuffle=True)
    test_iter = iterators.SerialIterator(test,
                                         batch_size,
                                         repeat=False,
                                         shuffle=False)

    if use_gpu:
        model.to_gpu(gpu_id)
    if model.is_convolution():
        optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    else:
        optimizer = optimizers.Adam()
    optimizer.setup(model)

    max_epoch = 10
    while train_iter.epoch < max_epoch:
        chainer.config.train = True
        train_batch = train_iter.next()
        image_train, _ = concat_examples(train_batch, gpu_id)
        if model.is_convolution():
            image_train = image_train.reshape(-1, 1, 28, 28)
        # print('training image shape: ', image_train.shape)

        loss = model.loss(image_train, image_train)
        # print('loss shape: ', loss.shape)
        model.cleargrads()
        loss.backward()

        optimizer.update()

        if train_iter.is_new_epoch:
            print('epoch:{:02d} train_loss:{:.04f} '.format(
                train_iter.epoch, float(cuda.to_cpu(loss.data))),
                  end='')
            test_model(model, test_iter)
    return model
Example #58
    def go(self):
        if self.board.is_game_over():
            print('bestmove resign')
            return

        xp = cuda.cupy
        features = make_input_features_from_board(self.board)
        x = Variable(xp.array([features], dtype=xp.float32))

        with chainer.no_backprop_mode():
            y = self.model(x)

            logits = cuda.to_cpu(y.data)[0]
            probabilities = cuda.to_cpu(F.softmax(y).data)[0]

        # Iterate over all legal moves
        turn = self.board.turn
        legal_moves = []
        legal_logits = []
        for move in self.board.legal_moves:
            # Convert the move to a label
            label = make_output_label(move, turn)
            # Store the legal move and its logit
            legal_moves.append(move)
            legal_logits.append(logits[label])
            # Print the move's probability
            print('info string {:5} : {:.5f}'.format(move.usi(),
                                                     probabilities[label]))

        # Pick the move with the highest probability (greedy strategy):
        # selected_index = greedy(legal_logits)
        # Pick a move according to its probability (softmax strategy):
        # selected_index = boltzmann(np.array(legal_logits, dtype=np.float32), 0.5)
        # Randomly choose among the top 3 logits
        selected_index = legal_logits.index(choice(nlargest(3, legal_logits)))
        bestmove = legal_moves[selected_index]

        print('bestmove', bestmove.usi())
Example #59
def test_model(model, test_iter):
    test_losses = []
    test_accuracies = []
    while True:
        test_batch = test_iter.next()
        image_test, target_test = concat_examples(test_batch, gpu_id)
        prediction_test = model(image_test)

        loss_test = F.softmax_cross_entropy(
            prediction_test, target_test)
        test_losses.append(cuda.to_cpu(loss_test.data))

        accuracy = F.accuracy(prediction_test, target_test)
        test_accuracies.append(cuda.to_cpu(accuracy.data))

        if test_iter.is_new_epoch:
            test_iter.reset()
            break
    print('val_loss:{:.04f} val_accuracy:{:.04f}'.format(
        np.mean(test_losses), np.mean(test_accuracies)))
Example #60
    def __call__(self, model, _, batch, weights=None):
        q_values = model(batch.pre_states)
        q_subset = F.reshape(F.select_item(q_values, batch.actions), (-1, 1))
        margin = model.xp.ones(q_values.data.shape,
                               dtype=model.xp.float32) * self._margin_size
        margin[model.xp.arange(margin.shape[0]), batch.actions] = 0
        loss = F.reshape(F.max(q_values + margin, axis=1), (-1, 1)) - q_subset
        self._loss_summary.add(float(cuda.to_cpu(F.average(loss).data)))
        if weights is not None:
            # Chainer raises a broadcast error if loss and weight aren't the same shape
            loss = F.squeeze(loss)
            loss *= weights

        return loss