Example #1
    def test_forward(self, backend_config):
        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)

        link = self.create_link()
        link.to_device(backend_config.device)

        y, samples = link(x, t, reduce=self.reduce, return_samples=True)

        self.assertEqual(y.shape, self.gy.shape)

        cpu_device = CpuDevice()
        W = cpu_device.send(link.W.data)
        samples = cpu_device.send(samples)

        loss = numpy.empty((len(self.x), ), self.dtype)
        for i in range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                w = W[samples[i]]
                f = w.dot(ix)
            # the first sample is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        testing.assert_allclose(y.data, loss, **self.test_forward_options)
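The loop above is the NumPy reference for the loss: the score of the positive example has its sign flipped and every sampled score contributes log(1 + exp(f)). The same computation as a standalone function (the name and signature are illustrative, not part of the test class):

import numpy

def reference_negative_sampling_loss(x, t, W, samples, reduce='sum'):
    # Per-row loss: gather the sampled weight rows, score them against the
    # input, flip the sign of the positive score, and sum log(1 + exp(f)).
    # Rows whose target is the ignore label -1 contribute zero.
    loss = numpy.zeros(len(x), dtype=x.dtype)
    for i in range(len(x)):
        if t[i] == -1:
            continue
        f = W[samples[i]].dot(x[i])
        f[0] *= -1  # samples[i][0] is the positive example
        loss[i] = numpy.logaddexp(f, 0).sum()
    return loss.sum() if reduce == 'sum' else loss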
Example #2
    def test_return_samples(self, backend_config):
        batch_size = self.t.shape[0]
        link = self.create_link()
        link.to_device(backend_config.device)

        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)

        # return_samples=True
        y, samples = link(x, t, reduce=self.reduce, return_samples=True)

        assert isinstance(samples, backend_config.xp.ndarray)
        assert samples.shape == (batch_size, self.sample_size + 1)
        assert samples.dtype == numpy.int32

        # return_samples=False, with saved samples
        y_ = self.call_link_with_samples(
            samples, lambda: link(x, t, reduce=self.reduce))

        # y and y_ should be equal
        cpu_device = CpuDevice()
        numpy.testing.assert_array_equal(cpu_device.send(y.array),
                                         cpu_device.send(y_.array))
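call_link_with_samples is a test helper that is not shown in this snippet. A plausible reconstruction, assuming the link looks up chainer.functions.negative_sampling at call time (both the patch target and the helper signature are assumptions):

from unittest import mock

import chainer

def call_link_with_samples(samples, call_link):
    # Replace the sampler argument of functions.negative_sampling with a
    # stub that returns the saved samples, so the second forward pass
    # reuses exactly the same negative samples.
    def fixed_sampler(shape):
        assert shape == samples.shape
        return samples.copy()

    orig = chainer.functions.negative_sampling

    def wrapped(x, t, W, sampler, *args, **kwargs):
        return orig(x, t, W, fixed_sampler, *args, **kwargs)

    with mock.patch('chainer.functions.negative_sampling', wrapped):
        return call_link()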
Example #3
    def test_forward(self, backend_config):
        sampler = make_sampler(backend_config, self.label_size)
        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        w_data = backend_config.get_array(self.w)
        batch_size = len(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)
        w = chainer.Variable(w_data)

        # return_samples=False
        y = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce)
        assert y.dtype == self.dtype

        # return_samples=True
        y_, samples = functions.negative_sampling(
            x, t, w, sampler, self.sample_size, reduce=self.reduce,
            return_samples=True)

        xp = chainer.backend.get_array_module(x)
        assert isinstance(samples, xp.ndarray)
        assert samples.dtype == numpy.int32
        assert samples.shape == (batch_size, self.sample_size + 1)

        # The sampler is deterministic, so y and y_ should be equal.
        assert y.dtype == y_.dtype
        cpu_device = CpuDevice()
        numpy.testing.assert_array_equal(
            cpu_device.send(y.array), cpu_device.send(y_.array))

        assert y.shape == self.gy.shape

        samples = cpu_device.send(samples)

        loss = numpy.empty((len(self.x),), self.dtype)
        for i in six.moves.range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                iw = self.w[samples[i]]

                f = iw.dot(ix)
                # the first sample is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        assert y.dtype == loss.dtype
        testing.assert_allclose(y.data, loss, **self.check_forward_options)
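make_sampler is also not shown. functions.negative_sampling expects the sampler to be a callable that takes a shape and returns an integer array of sampled label indices, and the assertion above requires two calls to yield identical samples; a deterministic stand-in could look like this (the cycling scheme is illustrative):

import numpy

def make_sampler(backend_config, label_size):
    xp = backend_config.xp

    def sampler(shape):
        # Deterministic: repeated calls with the same shape return the
        # same int32 label indices, cycling through [0, label_size).
        n = int(numpy.prod(shape))
        return xp.arange(n, dtype=numpy.int32).reshape(shape) % label_size

    return sampler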
Example #4
def device(string):
    value = string.upper()
    if value == "CPU":
        return CpuDevice()
    if value == "GPU":
        return GpuDevice.from_device_id(0)
    # Otherwise interpret the string as a device id; int() itself raises
    # ValueError for unrecognized non-numeric strings.
    value = int(string)
    if value >= 0:
        return GpuDevice.from_device_id(value)
    if value == -1:
        return CpuDevice()
    raise ValueError('invalid device specifier: {!r}'.format(string))
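For reference, the mapping the parser implements (a usage sketch; the GPU cases need a CUDA-capable installation):

assert isinstance(device('cpu'), CpuDevice)   # "CPU", case-insensitive
assert isinstance(device('-1'), CpuDevice)    # numeric convention for CPU
# device('gpu') -> GpuDevice.from_device_id(0)
# device('2')   -> GpuDevice.from_device_id(2)
# device('-2')  -> ValueError, as do non-numeric strings via int()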
Example #5
    def forward_expected(self, inputs):
        x1, x2 = inputs
        if self.transa and x1.ndim >= 2:
            x1 = x1.swapaxes(-1, -2)
        if self.transb and x2.ndim >= 2:
            x2 = x2.swapaxes(-1, -2)
        if x1.ndim <= 2 or x2.ndim <= 2:
            y = numpy.dot(x1, x2)
            device = CpuDevice()
            y = device.send(y)
        else:
            y = numpy.einsum('...ij,...jk->...ik', x1, x2)
        return y,
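The einsum subscripts '...ij,...jk->...ik' are batched matrix multiplication over the leading axes, which is what numpy.matmul computes for stacks of matrices; a quick sanity check:

import numpy

a = numpy.random.rand(4, 2, 3)
b = numpy.random.rand(4, 3, 5)
assert numpy.allclose(
    numpy.einsum('...ij,...jk->...ik', a, b),
    numpy.matmul(a, b))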
Example #6
def test_bn(device_name, translator, computation_order):
    if skip_check(device_name, translator, computation_order):
        pytest.skip()

    np.random.seed(40)
    if has_cupy:
        cupy.random.seed(40)

    batch_size = 3
    in_size = 5
    n_out = 10

    device = chainer.get_device(device_name)
    device.use()

    bn = BN(in_size, n_out)
    bn.to_device(device)

    input = np.random.rand(batch_size, in_size, 1, 1).astype(np.float32)
    input = device.xp.array(input)
    target = device.xp.array(np.random.randint(n_out, size=batch_size))

    bn_compiled = chainer_compiler.compile(
        bn, [input], translator=translator,
        computation_order=computation_order)
    model = L.Classifier(bn_compiled)
    model.to_device(device)

    old_avg_mean = CpuDevice().send(model.predictor.mc.bn.avg_mean.copy())
    old_avg_var = CpuDevice().send(model.predictor.mc.bn.avg_var.copy())

    loss, grads = _run_fwd_bwd(model, [input, target])

    new_avg_mean = CpuDevice().send(model.predictor.mc.bn.avg_mean.copy())
    new_avg_var = CpuDevice().send(model.predictor.mc.bn.avg_var.copy())

    # running_mean and running_var should be updated
    assert not np.allclose(old_avg_mean, new_avg_mean)
    assert not np.allclose(old_avg_var, new_avg_var)
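_run_fwd_bwd is not shown here. A minimal reconstruction, assuming it runs one forward and one backward pass through the classifier (the return format is a guess):

def _run_fwd_bwd(model, inputs):
    # One training step without an optimizer: clear old gradients, run the
    # classifier forward to get the loss, then backprop to fill p.grad.
    model.cleargrads()
    loss = model(*inputs)
    loss.backward()
    grads = [p.grad for p in model.params()]
    return loss, grads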
Example #7
    def test_forward(self, backend_config):
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')
        sampler = make_sampler(backend_config, self.label_size)
        x_data = backend_config.get_array(self.x)
        t_data = backend_config.get_array(self.t)
        w_data = backend_config.get_array(self.w)
        batch_size = len(self.t)
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data, requires_grad=False)
        w = chainer.Variable(w_data)

        # return_samples=False
        y = functions.negative_sampling(x,
                                        t,
                                        w,
                                        sampler,
                                        self.sample_size,
                                        reduce=self.reduce)
        assert y.dtype == self.dtype

        # return_samples=True
        y_, samples = functions.negative_sampling(x,
                                                  t,
                                                  w,
                                                  sampler,
                                                  self.sample_size,
                                                  reduce=self.reduce,
                                                  return_samples=True)

        xp = chainer.backend.get_array_module(x)
        assert isinstance(samples, xp.ndarray)
        assert samples.dtype == numpy.int32
        assert samples.shape == (batch_size, self.sample_size + 1)

        # The sampler is deterministic, so y and y_ should be equal.
        assert y.dtype == y_.dtype
        cpu_device = CpuDevice()
        numpy.testing.assert_array_equal(cpu_device.send(y.array),
                                         cpu_device.send(y_.array))

        assert y.shape == self.gy.shape

        samples = cpu_device.send(samples)

        loss = numpy.empty((len(self.x), ), self.dtype)
        for i in six.moves.range(len(self.x)):
            ix = self.x[i]
            it = self.t[i]
            if it == -1:
                loss[i] = 0
            else:
                iw = self.w[samples[i]]

                f = iw.dot(ix)
                # the first sample is the positive example
                f[0] *= -1
                loss[i] = numpy.logaddexp(f, 0).sum()

        if self.reduce == 'sum':
            loss = loss.sum()

        assert y.dtype == loss.dtype
        testing.assert_allclose(y.data, loss, **self.check_forward_options)
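A pattern common to all of these tests is CpuDevice().send(...), which copies an array from whatever device it lives on (NumPy, CuPy, or ChainerX) back into a numpy.ndarray before comparison with numpy.testing or testing.assert_allclose. A tiny wrapper makes the intent explicit (the helper name is illustrative):

import numpy
from chainer.backend import CpuDevice

def to_cpu(array):
    # CpuDevice.send returns a numpy.ndarray regardless of the source device.
    return CpuDevice().send(array)

assert isinstance(to_cpu(numpy.arange(3)), numpy.ndarray)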