def parameterize_batch_renormalization():
    return testing.parameterize(*(testing.product({
        'ndim': [0, 1, 2],
        'eps': [2e-5, 1e-1],
        'dtype': [numpy.float32],
        'update_statistics': [True, False],
    }) + testing.product({
        'ndim': [1],
        'eps': [2e-5, 1e-1],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'update_statistics': [True, False],
    })))
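A minimal usage sketch (not from the original source; the test-class name is hypothetical): the helper returns a class decorator, and `testing.parameterize` injects each parameter combination as instance attributes.

@parameterize_batch_renormalization()
class TestBatchRenormalizationParams(unittest.TestCase):

    def test_params_injected(self):
        # Each generated subclass carries 'ndim', 'eps', 'dtype' and
        # 'update_statistics' as instance attributes.
        self.assertIn(self.dtype,
                      (numpy.float16, numpy.float32, numpy.float64))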


# Fragment of a test-generating helper: `func`, `func_expected`,
# `label_expected` and `make_data` are free variables, so an (assumed)
# enclosing factory signature is reconstructed here.
def make_math_function_test(func, func_expected, label_expected, make_data):
    def f(klass):
        assert issubclass(klass, unittest.TestCase)

        def setUp(self):
            self.x, self.gy = make_data(self.shape, self.dtype)
            if self.dtype == numpy.float16:
                self.backward_options = {
                    'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
                    'dtype': numpy.float64}
            else:
                self.backward_options = {'atol': 1e-4, 'rtol': 1e-4}
        setattr(klass, "setUp", setUp)

        def check_forward(self, x_data):
            x = chainer.Variable(x_data)
            y = func(x)
            self.assertEqual(y.data.dtype, x_data.dtype)
            y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
            testing.assert_allclose(y_expected, y.data, atol=1e-4, rtol=1e-4)
        setattr(klass, "check_forward", check_forward)

        @condition.retry(3)
        def test_forward_cpu(self):
            self.check_forward(self.x)
        setattr(klass, "test_forward_cpu", test_forward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_forward_gpu(self):
            self.check_forward(cuda.to_gpu(self.x))
        setattr(klass, "test_forward_gpu", test_forward_gpu)

        def check_backward(self, x_data, y_grad):
            gradient_check.check_backward(
                func, x_data, y_grad, **self.backward_options)
        setattr(klass, "check_backward", check_backward)

        @condition.retry(3)
        def test_backward_cpu(self):
            self.check_backward(self.x, self.gy)
        setattr(klass, "test_backward_cpu", test_backward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_backward_gpu(self):
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
        setattr(klass, "test_backward_gpu", test_backward_gpu)

        def test_label(self):
            self.assertEqual(func.label, label_expected)
        setattr(klass, "test_label", test_label)

        # Return parameterized class.
        return testing.parameterize(*testing.product({
            'shape': [(3, 2), ()],
            'dtype': [numpy.float16, numpy.float32, numpy.float64]
        }))(klass)
    return f

Example 3

def inject_backend_tests(method_names):
    decorator = backend.inject_backend_tests(
        method_names,
        # CPU tests
        testing.product({
            'use_cuda': [False],
            'use_ideep': ['never', 'always'],
        })
        # GPU tests
        + [{'use_cuda': True}])
    return decorator
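A sketch of how the returned decorator is typically applied, assumed from the later examples in this collection (see TestCheckBackward): each listed method receives a `backend_config` argument describing the selected backend.

@inject_backend_tests(['test_forward'])
class TestMyFunction(unittest.TestCase):  # hypothetical test case

    def test_forward(self, backend_config):
        # get_array places the array on the backend selected for this run.
        x = backend_config.get_array(numpy.ones((2, 3), dtype=numpy.float32))
        self.assertEqual(x.shape, (2, 3))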

Example 4

def _create_parameters():
    split_years = testing.product({
        'split': ['train', 'trainval', 'val'],
        'year': ['2007', '2012']})
    split_years += [{'split': 'test', 'year': '2007'}]
    params = testing.product_dict(
        split_years,
        [{'use_difficult': True, 'return_difficult': True},
         {'use_difficult': True, 'return_difficult': False},
         {'use_difficult': False, 'return_difficult': True},
         {'use_difficult': False, 'return_difficult': False}])
    return params
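For reference, a small illustration of the assumed semantics of the two helpers used above: `testing.product` expands a dict of lists into every key/value combination, while `testing.product_dict` merges one dict drawn from each argument list in all combinations.

combos = testing.product({'split': ['train', 'val'], 'year': ['2007']})
# -> [{'split': 'train', 'year': '2007'}, {'split': 'val', 'year': '2007'}]

merged = testing.product_dict([{'a': 1}], [{'b': 2}, {'b': 3}])
# -> [{'a': 1, 'b': 2}, {'a': 1, 'b': 3}]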

Example 5

def inject_backend_tests(method_names):
    decorator = backend.inject_backend_tests(
        method_names,
        # CPU tests
        testing.product({
            'use_cuda': [False],
            'use_ideep': ['never', 'always'],
        })
        # GPU tests
        + [{'use_cuda': True}]
        # ChainerX tests
        + [
            {'use_chainerx': True, 'chainerx_device': 'native:0'},
            {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        ])
    return decorator
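Assuming each dict produced above is one backend configuration, every decorated method runs under five setups: two CPU variants (iDeep never/always), one plain GPU, and two ChainerX devices.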

Example 6

import unittest

from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
import numpy

from chainerrl.functions.mellowmax import maximum_entropy_mellowmax
from chainerrl.functions.mellowmax import mellowmax


@testing.parameterize(*testing.product({
    'shape': [(1, 1), (2, 3), (2, 3, 4), (2, 3, 4, 5)],
    'dtype': [numpy.float32],
    'omega': [10, 5, 1, -1, -5, -10],
    'axis': [0, 1, -1, -2],
    'same_value': [True, False],
}))
class TestMellowmax(unittest.TestCase):
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        if self.same_value:
            self.x[:] = numpy.random.uniform(-1, 1, 1).astype(self.dtype)

    def check_forward(self, x_data):
        xp = cuda.get_array_module(x_data)
        y = mellowmax(x_data, axis=self.axis, omega=self.omega)
        self.assertEqual(y.array.dtype, self.dtype)

        x_min = xp.min(x_data, axis=self.axis)
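        # For reference (not from the original file): mellowmax is commonly
        # defined as mm_w(x) = log(mean(exp(w * x))) / w, which interpolates
        # between the mean (w -> 0) and the max (w -> +inf).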

Example 7

    def test_rtol_gpu(self):
        self.check_rtol(cuda.to_gpu(self.x), cuda.to_gpu(self.y))


class Ident(chainer.Function):

    def forward(self, inputs):
        return inputs

    def backward(self, inputs, grads):
        return grads


# numpy.float16 is not tested because of the low precision.
@testing.parameterize(*testing.product({
    'dtype': [None, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(None, [
    {},
    {'use_cuda': True},
    {'use_chainerx': True, 'chainerx_device': 'native:0'},
    {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestCheckBackward(unittest.TestCase):

    def test_multiple_output(self, backend_config):
        x1 = backend_config.get_array(numpy.array([1], dtype='f'))
        x2 = backend_config.get_array(numpy.array([1], dtype='f'))
        g1 = backend_config.get_array(numpy.array([1], dtype='f'))
        g2 = backend_config.get_array(numpy.array([1], dtype='f'))

Example 8

import unittest

import chainer
import numpy

from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check


@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMinimum(unittest.TestCase):

    def setUp(self):
        shape = self.shape
        self.gy = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.ggx1 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.ggx2 = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        self.check_double_backward_options = {'dtype': numpy.float64}
        if self.dtype == numpy.float16:
            eps = 2 ** -3
            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
            self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-1}

Example 9

            self._print_variables('gxs            ', gxs)
            self._print_variables('ggxs (actual)  ', ggxs)
            self._print_variables('ggxs (expected)', expected)
            raise

    def test_double_grad_cpu(self):
        self.check_double_grad()

    @attr.gpu
    def test_double_grad_gpu(self):
        self.use_gpu()
        self.check_double_grad()


@testing.parameterize(*testing.product({
    'loss_scale': [None, 1, 10],
}))
class TestGradSimple(GradTestBase, unittest.TestCase):

    x_names = 'x',
    y_names = 'y',

    def forward(self):
        self.y = self.x * self.x

    def expected_grad(self):
        grad = 2 * self.x * self.gy
        if self.loss_scale is not None:
            grad *= self.loss_scale
        return [grad]

Example 10

import functools
import unittest
from operator import mul

import numpy
import six

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import conv
import pooling_nd_helper


@testing.parameterize(*testing.product({
    'dims': [(4,), (4, 3), (4, 3, 2)],
    'cover_all': [True, False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPoolingND(unittest.TestCase):

    def setUp(self):
        self.ndim = len(self.dims)
        self.ksize = (3,) * self.ndim
        self.stride = (2,) * self.ndim
        self.pad = (1,) * self.ndim

        # Avoid instability of the numerical gradient
        x_shape = (2, 3) + self.dims
        self.x = numpy.arange(
            functools.reduce(mul, x_shape), dtype=self.dtype).reshape(x_shape)
        self.x = 2 * self.x / self.x.size - 1

Example 11

@testing.parameterize(*testing.product({
    # repeats is any of (int, bool or tuple) and
    # axis is any of (int or None).
    'params': (
        # Repeats 1-D array
        testing.product({
            'shape': [(2,)],
            'repeats': [0, 1, 2, True, (0,), (1,), (2,), (True,)],
            'axis': [None, 0],
        }) +
        # Repeats 2-D array (with axis=None)
        testing.product({
            'shape': [(3, 2)],
            'repeats': [4, (4,), (4,) * 6, (True,) * 6],
            'axis': [None],
        }) +
        # Repeats 2-D array (with axis=0)
        testing.product({
            'shape': [(3, 2)],
            'repeats': [5, (5,), (5,) * 3],
            'axis': [0],
        }) +
        # Repeats 2-D array (with axis=1)
        testing.product({
            'shape': [(3, 2)],
            'repeats': [5, (5,), (5,) * 2],
            'axis': [1],
        }) +
        # Repeats 3-D array (with axis=-2)
        testing.product({
            'shape': [(3, 2, 4)],
            'repeats': [5, (5,), (5,) * 2],
            'axis': [-2],
        })
    ),
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))

Example 12

from __future__ import division
from __future__ import absolute_import
from builtins import *  # NOQA
from future import standard_library
standard_library.install_aliases()

import collections
import unittest

from chainer import testing

from chainerrl.misc.collections import RandomAccessQueue


@testing.parameterize(*(testing.product({
    'maxlen': [1, 10, None],
    'init_seq': [None, [], range(5)],
})))
class TestRandomAccessQueue(unittest.TestCase):
    def setUp(self):
        if self.init_seq:
            self.y_queue = RandomAccessQueue(self.init_seq, maxlen=self.maxlen)
            self.t_queue = collections.deque(self.init_seq, maxlen=self.maxlen)
        else:
            self.y_queue = RandomAccessQueue(maxlen=self.maxlen)
            self.t_queue = collections.deque(maxlen=self.maxlen)

    def test1(self):
        self.check_all()

        self.check_popleft()
        self.do_append(10)

Example 13

    grid = numpy.repeat(grid[None], in_shape[0], axis=0).astype(numpy.float32)
    return grid


def _rotate_BCHW(x):
    rotated_xs = []
    for i in range(x.shape[0]):
        x_i = x[i].transpose(1, 2, 0)
        x_i = numpy.rot90(x_i)
        rotated_xs.append(x_i.transpose(2, 0, 1))
    rotated_xs = numpy.concatenate([r_x[None] for r_x in rotated_xs], axis=0)
    return rotated_xs
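# _rotate_BCHW above rotates every sample of a BCHW batch by 90 degrees in
# the spatial plane, using an HWC view so that numpy.rot90 acts on H and W.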


@testing.parameterize(*testing.product({
    'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerSampler(unittest.TestCase):

    in_shape = (2, 2, 4, 4)
    out_shape = (2, 2, 3, 3)
    grid_shape = (2, 2, 3, 3)

    def setUp(self):
        self.x = numpy.random.uniform(
            size=self.in_shape).astype(numpy.float32)
        self.grid = numpy.random.uniform(
            low=-2., high=2., size=self.grid_shape).astype(numpy.float32)
        self.grads = numpy.random.uniform(
            size=self.out_shape).astype(numpy.float32)

Example 14

import numpy as np
import unittest

import chainer
from chainer import testing
from chainer.testing import attr

from chainercv.links import FasterRCNNFPNResNet101
from chainercv.links import FasterRCNNFPNResNet50


@testing.parameterize(*testing.product({
    'model': [FasterRCNNFPNResNet50, FasterRCNNFPNResNet101],
    'n_fg_class': [1, 5, 20],
}))
class TestFasterRCNNFPNResNet(unittest.TestCase):
    def setUp(self):
        self.link = self.model(n_fg_class=self.n_fg_class)

    def _check_call(self):
        imgs = [
            np.random.uniform(-1, 1, size=(3, 48, 48)).astype(np.float32),
            np.random.uniform(-1, 1, size=(3, 32, 64)).astype(np.float32),
        ]
        x, _ = self.link.prepare(imgs)
        with chainer.using_config('train', False):
            self.link(self.link.xp.array(x))

    @attr.slow
    def test_call_cpu(self):
        self._check_call()

Example 15

            self.assertEqual(features.dtype, np.float32)

    @attr.slow
    def test_call_cpu(self):
        self.check_call()

    @attr.gpu
    @attr.slow
    def test_call_gpu(self):
        self.link.to_gpu()
        self.check_call()


@testing.parameterize(*testing.product({
    'model': [SEResNeXt50, SEResNeXt101],
    'n_class': [None, 500, 1000],
    'pretrained_model': ['imagenet'],
    # `size=` is assumed here: passing the tuple positionally would be
    # interpreted by numpy.random.uniform as `low`, not as the shape.
    'mean': [None, np.random.uniform(size=(3, 1, 1)).astype(np.float32)]
}))
class TestSEResNeXtPretrained(unittest.TestCase):
    @attr.slow
    def test_pretrained(self):
        kwargs = {
            'n_class': self.n_class,
            'pretrained_model': self.pretrained_model,
            'mean': self.mean,
        }

        if self.pretrained_model == 'imagenet':
            valid = self.n_class in {None, 1000}

        if valid:

Example 16

    ], [
        {'w_dtype': numpy.float16},
        {'w_dtype': numpy.float32},
        {'w_dtype': numpy.float64},
    ], [
        {'x_dtype': numpy.int16},
        {'x_dtype': numpy.int32},
        {'x_dtype': numpy.int64},
    ]
))
@testing.fix_random()
@backend.inject_backend_tests(
    None,
    # ChainerX tests
    testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0'],
    })
    # CPU tests
    + testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + testing.product([
        [{'use_cuda': True}],

        # Without cuDNN
        testing.product({
            'use_cudnn': ['never'],
        })
        # With cuDNN

Example 17

        self.check_double_backward(
            cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
            cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)),
            cuda.cupy.asfortranarray(cuda.to_gpu(self.ggx)))

    @attr.gpu
    def test_double_backward_gpu_no_cudnn(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
            'never')


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'h': [5],
    'k': [3],
    's': [3],
    'p': [0],
    'cover_all': [True, False],
}))
class TestMaxPoolingUnpooling(unittest.TestCase):

    def check_left_inverse(self, xp, use_cudnn='never'):
        x = xp.arange(self.h * self.h).reshape(
            (1, 1, self.h, self.h)).astype(self.dtype)
        with chainer.using_config('use_cudnn', use_cudnn):
            y = chainer.functions.unpooling_2d(
                x, self.k, self.s, self.p, None, self.cover_all)
            x_ = chainer.functions.max_pooling_2d(
                y, self.k, self.s, self.p, self.cover_all).data

        self.assertEqual(x.shape, x_.shape)

Example 18

            testing.assert_allclose(expect, param.array)

    def test_weight_decay(self, backend_config0,
                          backend_config1, backend_config2):
        self.check_weight_decay(
            [backend_config0, backend_config1, backend_config2])


# TODO(kshitij12345): Test with chainerx when `loss_scale` in `backward`.
@testing.inject_backend_tests(
    None,
    # CPU tests
    [{}, {'use_ideep': 'always'}]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
    })
)
class TestWeightDecayLossScale(unittest.TestCase):

    def test_weight_decay_loss_scale(self, backend_config):
        a = self._updated_array(backend_config, None)
        b = self._updated_array(backend_config, loss_scale=4.)
        testing.assert_allclose(a, b)
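        # The two arrays must agree: loss_scale only multiplies the
        # backpropagated gradients, and the optimizer is expected to divide
        # it back out before the update.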

    def _updated_array(self, backend_config, loss_scale):
        arr = np.arange(3, dtype=np.float32)
        param = chainer.Parameter(arr)
        link = chainer.Link()
        with link.init_scope():
            link.p = param

Example 19

import numpy

from chainer import functions
from chainer import testing
from chainer import utils


@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
class TestSign(testing.FunctionTestCase):

Example 20

            'dtype': numpy.float64,
            'forward_options': {},
            'backward_options': {
                'eps': 1e-3,
                'rtol': 1e-2,
                'atol': 1e-2
            },
            'double_backward_options': {
                'eps': 1e-3,
                'rtol': 1e-3,
                'atol': 1e-3
            }
        },
    ],
    testing.product({
        'shape': [(), (3, )],
        'reduce': ['no'],
    }) + testing.product({
        'shape': [(4, 10), (2, 5, 3, 3)],
        'reduce': ['no', 'sum_along_second_axis'],
    }),
))
class TestHuberLoss(unittest.TestCase):
    def setUp(self):
        self._config_user = chainer.using_config('dtype', self.dtype)
        self._config_user.__enter__()

        self.x = utils.force_array((numpy.random.random(self.shape) - 0.5) * 3,
                                   self.dtype)
        self.t = utils.force_array((numpy.random.random(self.shape) - 0.5),
                                   self.dtype)
        if self.reduce == 'sum_along_second_axis':

Example 21

    def test_product(self):
        self.assertListEqual(testing.product(self.actual), self.expect)
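For context, `testing.product` also accepts a list of dict lists (a form the later examples use); the assumed expansion is:

pairs = testing.product([[{'a': 1}, {'a': 2}], [{'b': 3}]])
# -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]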

Example 22

import numpy
from operator import mul
import six

import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import conv
from chainer_tests.functions_tests.pooling_tests import pooling_nd_helper


@testing.parameterize(*testing.product({
    'in_dims': [(4, ), (4, 3), (4, 3, 2), (1, 1, 1, 1)],
    'cover_all': [True, False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
        {
            'use_ideep': 'always'
        },
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],

Example 23

import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    'in_shape': [(4, 3, 2)],
    'out_shape': [(2, 2, 6), (2, -1, 6)],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestReshape(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)

    def check_forward(self, x_data):
        shape = self.out_shape
        x = chainer.Variable(x_data)
        y = functions.reshape(x, shape)
        self.assertEqual(y.data.dtype, self.dtype)
        self.assertTrue((self.x.reshape(shape) == cuda.to_cpu(y.data)).all())

    def test_forward_cpu(self):
        self.check_forward(self.x)

Example 24

            self.assertIsInstance(out_0, np.ndarray)
            self.assertIsInstance(out_1, np.ndarray)

    def test_cpu(self):
        self.check(self.x)

    @attr.gpu
    def test_gpu(self):
        self.link.to_gpu()
        self.check(self.x)


@testing.parameterize(*testing.product({
    'crop': ['center', '10'],
    'crop_size': [192, (192, 256), (256, 192)],
    'scale_size': [None, 256, (256, 256)],
    'in_channels': [1, 3],
    'mean': [None, np.float32(1)]
}))
class TestFeaturePredictor(unittest.TestCase):
    def setUp(self):

        self.link = FeaturePredictor(DummyFeatureExtractor(
            self.in_channels, (1, ), None),
                                     crop_size=self.crop_size,
                                     scale_size=self.scale_size,
                                     crop=self.crop,
                                     mean=self.mean)

        if isinstance(self.crop_size, int):
            hw = (self.crop_size, self.crop_size)

Example 25

import chainer
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr


@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    })
)
@testing.parameterize(*testing.product({
    'train': [True, False],
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestRReLU(testing.FunctionTestCase):

    dodge_nondifferentiable = True

Example 26

import unittest

import numpy

import chainer
from chainer import cuda
from chainer.functions.loss import negative_sampling
from chainer import links
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    't': [[0, 2], [-1, 1, 2]],
    'reduce': ['sum', 'no'],
}))
class TestNegativeSampling(unittest.TestCase):

    in_size = 3
    sample_size = 2

    def setUp(self):
        batch = len(self.t)
        x_shape = (batch, self.in_size)
        self.link = links.NegativeSampling(
            self.in_size, [10, 5, 2, 5, 2], self.sample_size)
        self.link.cleargrads()
        self.x = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
        self.t = numpy.array(self.t).astype(numpy.int32)

        if self.reduce == 'no':

Example 27

import unittest

import mock
import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition


@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestTanh(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-.1, .1, self.shape).astype(self.dtype)
        self.check_backward_options = {}
        if self.dtype == numpy.float16:
            self.check_backward_options = {
                'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-3}

    def check_forward(self, x_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        y = functions.tanh(x, use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)

Example 28

    [
        {'n_layers': 1, 'hidden_size': 2,
            'input_size': 1, 'batches': (1, 1, 1)},
        {'n_layers': 2, 'hidden_size': 2,
            'input_size': 3, 'batches': (3, 2, 1)},
        {'n_layers': 4, 'hidden_size': 6,
            'input_size': 3, 'batches': (5, 3, 1)},
        {'n_layers': 5, 'hidden_size': 10,
            'input_size': 6, 'batches': (6, 5, 3)},
    ]))
@testing.fix_random()
@backend.inject_backend_tests(
    None,
    # ChainerX tests
    testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0'],
    })
    # CPU tests
    + testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + testing.product([
        [{'use_cuda': True}],

        testing.product({
            'use_cudnn': ['always'],
            'cudnn_deterministic': [True, False],
            'autotune': [True, False],
        })]))

Example 29

# (function header reconstructed; the name `_inv` matches its use in the
# companion inverse-function example near the end of this collection)
def _inv(x):
    if x.ndim == 2:
        return numpy.linalg.inv(x)
    return numpy.array([numpy.linalg.inv(ix) for ix in x])


def _make_eye(shape):
    if len(shape) == 2:
        n = shape[0]
        return numpy.eye(n, dtype=numpy.float32)
    m = shape[0]
    n = shape[1]
    return numpy.array([numpy.eye(n, dtype=numpy.float32)] * m)


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'shape': [(1, 1), (5, 5)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({

Example 30

        with backend_config:
            return self._train_linear_classifier(
                model, optimizer, backend_config.use_cuda)

    def accuracy_gpu(self, device):
        with cuda.get_device_from_id(device):
            return self.accuracy(
                backend.BackendConfig({'use_cuda': True}),
                device)


@backend.inject_backend_tests(
    ['test_linear_model'],
    # CPU tests
    testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + [{'use_cuda': True}])
class OptimizerTestBase(object):

    def create(self):
        raise NotImplementedError()

    def setUp(self):
        self.model = LinearModel(self.create(), self.dtype,
                                 self.use_placeholder)

    @condition.retry(10)
    def test_linear_model(self, backend_config):
        try:

Example 31

    y_expected_shape = (N, c) + outs
    y_expected = numpy.zeros(y_expected_shape, dtype=x_data.dtype)
    for i in six.moves.range(N):
        for _c in six.moves.range(c):
            for x in xs_iter(dims):
                x_idx = (i, _c) + x
                for kx in kxs_iter(x, outs, ksize, stride, pad):
                    y_idx = (i, _c) + kx
                    y_expected[y_idx] += x_data[x_idx]
    return y_expected
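# The reference above distributes each input element to every output position
# covered by its pooling window, i.e. unpooling acts as the transpose of sum
# pooling.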


@testing.parameterize(*(testing.product({
    'dims': [(5, ), (2, 3, 4)],
    '_ksize': [3],
    '_stride': [3],
    '_pad': [1],
    'cover_all': [True],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}) + testing.product({
    'dims': [(3, 2)],
    '_ksize': [1, 2, 3],
    '_stride': [1, 2, 3],
    '_pad': [0, 1],
    'cover_all': [True, False],
    'dtype': [numpy.float32],
})))
class TestUnpoolingND(unittest.TestCase):
    def setUp(self):
        N = 2
        c = 3
        ndim = len(self.dims)

Example 32

import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
from chainer.utils import type_check


@parameterize(*testing.product({
    'dims': [(4, 3, 2), (2,)],
    'dilate': [1, 2],
    'groups': [1, 2],
    'nobias': [False],
    'test_outsize': [False],
    'c_contiguous': [True],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'autotune': [True, False],
}) + testing.product({
    'dims': [(3, 2)],
    'dilate': [1, 2],
    'groups': [1],
    'nobias': [False],
    'test_outsize': [False],
    'c_contiguous': [True],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'autotune': [False],
}) + testing.product({

Example 33

     {'slices': numpy.array([False, True, False, True]),
      'b_data': numpy.random.uniform(size=(2, 2, 3))},
     {'slices': [], 'b_data': numpy.empty(shape=(0, 2, 3))},
     ]
))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    })
)
class TestScatterAdd(testing.FunctionTestCase):

    def setUp(self):
        self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
        self.check_double_backward_options.update({
            'atol': 1e-3, 'rtol': 1e-3})

Example 34

def _to_fcontiguous(arrays):
    xp = cuda.get_array_module(*arrays)
    return [xp.asfortranarray(a) for a in arrays]


def _batch_normalization(args):
    x, gamma, beta, mean, var, eps, expander = args
    mean = mean[expander]
    std = numpy.sqrt(var + eps)[expander]
    y_expect = (gamma[expander] * (x - mean) / std + beta[expander])
    return y_expect


@testing.parameterize(*(testing.product_dict(
    testing.product({
        'param_shape': [(3, ), (3, 4), (3, 2, 3)],
        'ndim': [0, 1, 2],
    }) + [
        {
            'input_shape': (5, 4, 3, 2),
            'axis': (0, 2, 3)
        },
        {
            'input_shape': (5, 4),
            'axis': 0
        },
        {
            'input_shape': (5, 4, 3),
            'axis': (0, 1)
        },
    ],
    testing.product({

Example 35

import unittest

import numpy
import six

from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import backend


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(
    ['test_forward', 'test_backward'],
    # CPU tests
    testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + [{'use_cuda': True}])
class TestLocalResponseNormalization(unittest.TestCase):

    def setUp(self):
        x = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(self.dtype)
        gy = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(self.dtype)

        self.inputs = [x]

Example 36

import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    'shape': [(3, 4), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestLinearInterpolate(unittest.TestCase):
    def setUp(self):
        self.p = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.y = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.g = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.ggp = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.ggy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)

        self.check_forward_options = {}
        self.check_backward_options = {'dtype': numpy.float64}
        self.check_double_backward_options = {
            'dtype': numpy.float64,
            'atol': 5e-3,

Example 37

from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check


def gen_mask(ratio, shape):
    return numpy.random.rand(*shape) >= ratio
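# gen_mask keeps each element with probability (1 - ratio); the resulting
# boolean mask is the dropconnect mask applied to the weights below.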


@testing.parameterize(*testing.product({
    'in_shape': [(3,), (3, 2, 2)],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'use_batchwise_mask': [True, False],
}))
class TestSimplifiedDropconnect(unittest.TestCase):

    out_size = 2
    ratio = 0.5

    def setUp(self):
        in_size = numpy.prod(self.in_shape)

        self.link = links.SimplifiedDropconnect(
            in_size, self.out_size,
            initialW=chainer.initializers.Normal(1, self.W_dtype),
            initial_bias=chainer.initializers.Normal(1, self.x_dtype))
        self.link.cleargrads()

Example 38

        'fan_option': None
    }, {
        'target': initializers.LeCunNormal,
        'fan_option': None
    }, {
        'target': initializers.GlorotNormal,
        'fan_option': None
    }, {
        'target': initializers.HeNormal,
        'fan_option': 'fan_in'
    }, {
        'target': initializers.HeNormal,
        'fan_option': 'fan_out'
    }],
    testing.product({
        'shape': [(2, 3), (2, 3, 4)],
        'dtype': [numpy.float16, numpy.float32, numpy.float64]
    }))))
class NormalBase(unittest.TestCase):
    def setUp(self):
        pass

    def check_initializer(self, w):
        if self.fan_option is None:
            initializer = self.target(scale=0.1)
        else:
            initializer = self.target(scale=0.1, fan_option=self.fan_option)
        initializer(w)
        self.assertTupleEqual(w.shape, self.shape)
        self.assertEqual(w.dtype, self.dtype)

    def test_initializer_cpu(self):

Example 39

import chainer
from chainer import cuda
from chainer.functions import convolution_2d
from chainer.functions import deformable_convolution_2d_sampler
from chainer import utils

from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    'params': [
        (1, 1, 1, 1, 1, 1),
        (2, 2, 2, 2, 2, 2),
        (1, 2, 2, 1, 1, 2),
        (1, 2, 3, 4, 1, 2),
        (1, 2, 3, 4, 4, 5),
        (3, 3, 2, 2, 1, 1),
    ],
    'use_cudnn': ['always', 'never']
}))
class TestDeformableConvolution2DSamplerFunctionZeroOffset(unittest.TestCase):

    def setUp(self):
        in_channels = 3
        out_channels = 2
        batch_size = 2
        h = 9
        w = 9

        kh, kw, sy, sx, ph, pw = self.params

Example 41

import unittest

import numpy

import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'ratio': [0.0, 0.9],
    'train': [True, False],
    'use_batchwise_mask': [True, False],
}))
class TestSimplifiedDropconnect(unittest.TestCase):

    def setUp(self):
        self.W = numpy.random.uniform(
            -1, 1, (2, 3)).astype(self.W_dtype)
        self.b = numpy.random.uniform(
            -1, 1, 2).astype(self.x_dtype)

        self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(self.x_dtype)
        self.gy = numpy.random.uniform(-1, 1, (4, 2)).astype(self.x_dtype)
        self.ggW = numpy.random.uniform(
            -1, 1, (2, 3)).astype(self.W_dtype)

Example 42

from __future__ import division
from __future__ import absolute_import
from builtins import *  # NOQA
from future import standard_library
standard_library.install_aliases()  # NOQA

import mock
import unittest

from chainer import testing

import chainerrl


@testing.parameterize(*testing.product({
    'max_episode_steps': [1, 2, 3],
}))
class TestContinuingTimeLimit(unittest.TestCase):
    def test(self):
        env = mock.Mock()
        env.reset.side_effect = ['state'] * 2
        # Since info dicts are modified by the wrapper, each step call needs
        # to return a new info dict.
        env.step.side_effect = [('state', 0, False, {}) for _ in range(6)]
        env = chainerrl.wrappers.ContinuingTimeLimit(
            env, max_episode_steps=self.max_episode_steps)

        env.reset()
        for t in range(2):
            _, _, done, info = env.step(0)
            if t + 1 >= self.max_episode_steps:

Example 43

from chainer.testing import attr


@testing.parameterize(*testing.product_dict(
    [
        {'shape': None, 'axis': 1},
        {'shape': (5,), 'axis': 0},
        {'shape': (2, 3), 'axis': 0},
        {'shape': (2, 3), 'axis': 1},
        {'shape': (2, 3, 4), 'axis': 0},
        {'shape': (2, 3, 4), 'axis': -1},
        {'shape': (2, 3, 2, 3), 'axis': -3},
        {'shape': (2, 3, 2, 3), 'axis': 3},
    ],
    testing.product({
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
    }),
))
@testing.fix_random()
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
        {'use_ideep': 'always'},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'cuda_device': [0, 1],
    })

Example 44

import unittest

import numpy

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition


@testing.parameterize(*testing.product({
    'axis': [None, 0, 1, 2, -1, (0, 1), (1, 0), (0, -1), (-2, 0)],
    'keepdims': [True, False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSum(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
        g_shape = self.x.sum(axis=self.axis, keepdims=self.keepdims).shape
        self.gy = numpy.random.uniform(-1, 1, g_shape).astype(self.dtype)

    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.sum(x, axis=self.axis, keepdims=self.keepdims)
        self.assertEqual(y.data.dtype, self.dtype)
        y_expect = self.x.sum(axis=self.axis, keepdims=self.keepdims)

Example 45

from chainer.testing import condition


def _batch_normalization(expander, gamma, beta, x, mean, var, eps, test):
    mean = mean[expander]
    if test:
        std = numpy.sqrt(var[expander])
    else:
        std = numpy.sqrt(var[expander] + eps)
    y_expect = gamma * (x - mean) / std + beta
    return y_expect


@testing.parameterize(*testing.product({
    'test': [True, False],
    'volatile': ['on', 'off'],
    'ndim': [0, 1, 2, 3],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class BatchNormalizationTest(unittest.TestCase):

    def setUp(self):
        self.expander = (None, Ellipsis) + (None,) * self.ndim
        self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))

        self.link = links.BatchNormalization(3, dtype=self.dtype)
        gamma = self.link.gamma.data
        gamma[...] = numpy.random.uniform(.5, 1, gamma.shape)
        beta = self.link.beta.data
        beta[...] = numpy.random.uniform(-1, 1, beta.shape)
        self.link.zerograds()

Example 46

from chainer.utils import type_check


def _batch_normalization(expander, gamma, beta, x, mean, var, eps, test):
    mean = mean[expander]
    if test:
        std = numpy.sqrt(var[expander])
    else:
        std = numpy.sqrt(var[expander] + eps)
    y_expect = gamma * (x - mean) / std + beta
    return y_expect


@testing.parameterize(*(testing.product({
    'test': [True, False],
    'volatile': ['on'],
    'ndim': [0],
    'dtype': [numpy.float32],
}) + testing.product({
    'test': [True, False],
    'volatile': ['off'],
    'ndim': [0, 1, 2, 3],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class BatchNormalizationTest(unittest.TestCase):
    def setUp(self):
        self.expander = (None, Ellipsis) + (None, ) * self.ndim
        self.aggr_axes = (0, ) + tuple(six.moves.range(2, self.ndim + 2))

        self.link = links.BatchNormalization(3, dtype=self.dtype)
        gamma = self.link.gamma.data
        gamma[...] = numpy.random.uniform(.5, 1, gamma.shape)

Example 48

import mock
import numpy
import six

import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition


@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestAveragePooling2D(unittest.TestCase):
    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (2, 3, 4, 3)).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, (2, 3, 2, 2)).astype(self.dtype)
        self.check_forward_options = {}
        self.check_backward_options = {'eps': 1e-2}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
            self.check_backward_options = {
                'eps': 1e-1,
                'atol': 5e-3,
                'rtol': 5e-2
            }

Example 49

import unittest

import numpy

import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr


@testing.parameterize(*testing.product({
    'shape': [None, (2, 3), (2, 2, 3), (2, 2, 2, 3)],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
class TestLogSoftmax(unittest.TestCase):

    def setUp(self):
        if self.shape is None:
            # For checking numerical stability
            value = -5 if self.dtype == numpy.float16 else -1000
            self.x = numpy.array([[value, 1]], dtype=self.dtype)
        else:
            self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)
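        # The extreme input above verifies that log_softmax evaluates
        # x - logsumexp(x) in a numerically stable way rather than as a
        # naive log(softmax(x)).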

        self.check_forward_options = {}
        self.check_backward_options = {}

Example 50

        self.assertIs(ret, 10)

    def test_serialize_none(self):
        ret = self.serializer('x', None)
        dset = self.serializer.target['x']

        self.assertIsInstance(dset, numpy.ndarray)
        self.assertEqual(dset.shape, ())
        self.assertEqual(dset.dtype, numpy.object)
        self.assertIs(dset[()], None)

        self.assertIs(ret, None)


@testing.parameterize(*testing.product({'compress': [False, True]}))
class TestNpzDeserializer(unittest.TestCase):
    def setUp(self):
        self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)

        fd, path = tempfile.mkstemp()
        os.close(fd)
        self.temp_file_path = path
        with open(path, 'wb') as f:
            savez = numpy.savez_compressed if self.compress else numpy.savez
            savez(
                f, **{
                    'x/': None,
                    'y': self.data,
                    'z': numpy.asarray(10),
                    'w': None

Example 51

    mean = chainer.functions.mean(x, axis=axis, keepdims=True)
    std = chainer.functions.sqrt(
        eps +
        chainer.functions.mean(
            chainer.functions.square(x - mean),
            axis=axis, keepdims=True))
    r = (std.array / avg_std).clip(1./rmax, rmax)
    d = ((mean.array - avg_mean) / avg_std).clip(-dmax, dmax)
    xhat = ((x - mean) / std) * r + d
    return gamma * xhat + beta
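# r and d above are the batch renormalization corrections of Ioffe (2017):
# r = clip(std_batch / std_moving, 1/rmax, rmax) and
# d = clip((mean_batch - mean_moving) / std_moving, -dmax, dmax); both use
# .array so they are treated as constants and no gradient flows through them.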


@testing.parameterize(*(testing.product({
    'ndim': [0, 1, 2],
    'eps': [2e-5, 1e-1],
    'dtype': [numpy.float32],
}) + testing.product({
    'ndim': [1],
    'eps': [2e-5, 1e-1],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
})))
class TestBatchRenormalization(unittest.TestCase):

    def setUp(self):
        self.expander = (None, Ellipsis) + (None,) * self.ndim
        self.aggr_axes = (0,) + tuple(six.moves.range(2, self.ndim + 2))
        self.decay = 0.9

        self.rmax = self.dtype(3)
        self.dmax = self.dtype(5)

Example 52

        },
        {
            'shape': (2, 3, 4),
            'axis': -2
        },
        {
            'shape': (2, 3, 4),
            'axis': -1
        },
        {
            'shape': (2, 3, 4),
            'axis': None
        },
    ],
    testing.product({
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'contain_zero': [True, False],
    }),
) + testing.product({
    'shape': [(0, 3)],
    'axis': [-2, 1, None],
    'dtype': [numpy.float64],
    'contain_zero': [False],
})))
class TestCumprod(unittest.TestCase):
    def setUp(self):
        self.x = numpy.random.uniform(-2, 2, self.shape).astype(self.dtype)
        if self.contain_zero:
            index = numpy.random.choice(self.x.size)
            self.x.ravel()[index] = 0
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)

Example 53

import unittest

import numpy
import six

import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check


@testing.parameterize(*testing.product({
    'shape': [(9, 11), (99,)],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
    'label_dtype': [numpy.int8, numpy.int16, numpy.int32, numpy.int64],
}))
class TestBinaryAccuracy(unittest.TestCase):

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.t = numpy.random.randint(-1, 2, self.shape).astype(
            self.label_dtype)
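        # Targets range over {-1, 0, 1}; binary_accuracy treats -1 as an
        # "ignore" label, so those positions are excluded from the score.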
        self.check_forward_options = {}
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}

    def check_forward(self, x_data, t_data):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        y = chainer.functions.binary_accuracy(x, t)

Example 54

        # Not under git control
        with work_dir(tmp):
            self.assertFalse(chainerrl.experiments.is_under_git_control())

        # Run: git init
        with work_dir(tmp):
            subprocess.call(['git', 'init'])

        # Under git control
        with work_dir(tmp):
            self.assertTrue(chainerrl.experiments.is_under_git_control())


@testing.parameterize(*testing.product({
    'git': [True, False],
    'user_specified_dir': [tempfile.mkdtemp(), None],
    'argv': [['command', '--option'], None],
    'time_format': ['%Y%m%dT%H%M%S.%f', '%Y%m%d'],
}))
class TestPrepareOutputDir(unittest.TestCase):

    def test_prepare_output_dir(self):

        tmp = tempfile.mkdtemp()
        args = dict(a=1, b='2')
        os.environ['CHAINERRL_TEST_PREPARE_OUTPUT_DIR'] = 'T'

        with work_dir(tmp):

            if self.git:
                subprocess.call(['git', 'init'])
                with open('not_utf-8.txt', 'wb') as f:

Example 55

    def __getitem__(self, key):
        raise NotImplementedError

    def __call__(self, key, value):
        if value is None:
            value = self.target[key]
        elif isinstance(value, numpy.ndarray):
            numpy.copyto(value, self.target[key])
        else:
            value = type(value)(numpy.asarray(self.target[key]))
        return value


@testing.parameterize(*testing.product({
    'n_prefetch': [1, 2],
    'shared_mem': [None, 1000000],
}))
class TestMultiprocessIterator(unittest.TestCase):

    def setUp(self):
        self.n_processes = 2
        self.options = {'n_processes': self.n_processes,
                        'n_prefetch': self.n_prefetch,
                        'shared_mem': self.shared_mem}

    def test_iterator_repeat(self):
        dataset = [1, 2, 3, 4, 5, 6]
        it = iterators.MultiprocessIterator(dataset, 2, **self.options)
        for i in range(3):
            self.assertEqual(it.epoch, i)
            self.assertAlmostEqual(it.epoch_detail, i + 0 / 6)

Example 56

from chainer import gradient_check
from chainer import testing
from chainer.testing import array
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
from chainer.utils import type_check


@parameterize(*testing.product({
    'dims': [(4, 3, 2), (2, )],
    'dilate': [1, 2],
    'groups': [1, 2],
    'nobias': [False],
    'test_outsize': [False],
    'c_contiguous': [True],
    'x_dtype': [numpy.float32],
    'W_dtype': [numpy.float32],
    'autotune': [True, False],
}) + testing.product({
    'dims': [(3, 2)],
    'dilate': [1, 2],
    'groups': [1],
    'nobias': [False],
    'test_outsize': [False],
    'c_contiguous': [True],
    'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
    'autotune': [False],
}) + testing.product({

Example 57

    def clear(self):
        self.loss = None
        self.accuracy = None

    def __call__(self, x, t):
        h = chainer.functions.relu(self.conv(x))
        y = self.fc(h)

        self.loss = chainer.functions.softmax_cross_entropy(y, t)
        self.accuracy = chainer.functions.accuracy(y, t)

        return self.loss


@testing.parameterize(*testing.product({
    'dtype': [numpy.float32, numpy.float16],
}))
class TestGatherScatter(unittest.TestCase):

    def setUp(self):
        pass

    @attr.gpu
    def test_gather_scatter_grads(self):
        cupy = cuda.cupy
        model0 = SimpleNet(dtype=self.dtype)
        model1 = copy.deepcopy(model0)

        model0.to_gpu()
        model1.to_gpu()

Example 58

testing.product([
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64},
    ],
    [
        {'shape': (4, 15), 'axis': 1},
        {'shape': (4,), 'axis': 0},
        {'shape': (4, 3, 2, 5), 'axis': 0},
        {'shape': (4, 3, 2, 5), 'axis': 1},
        {'shape': (4, 3, 2, 5), 'axis': 2},
        {'shape': (4, 3, 2, 5), 'axis': 3},
        {'shape': (4, 3, 2), 'axis': (0, 1)},
        {'shape': (4, 3, 2, 4, 3, 2, 2), 'axis': (1, 4, 3, 6)},
        {'shape': (0, 2), 'axis': 1},
        {'shape': (), 'axis': ()},
    ],
    [
        # nonzeros (optional int): number of nonzero elems in input
        # truezero (bool): flag whether zero elems are exactly zero. If
        #     false, randomly-chosen small values are used.
        {'eps': 1e-5, 'nonzeros': None},
        {'eps': 1e-1, 'nonzeros': None},
        {'eps': 1e-1, 'nonzeros': 0, 'truezero': True},
        {'eps': 1e-1, 'nonzeros': 0, 'truezero': False},
        {'eps': 1e-1, 'nonzeros': 2, 'truezero': True},
        {'eps': 1e-1, 'nonzeros': 2, 'truezero': False},
    ],
])))

Example 59

    return x.reshape(len(x), -1)


def _maxout(x, W, b):
    W_r = numpy.rollaxis(W, 2)
    y = numpy.tensordot(_as_mat(x), W_r, axes=1)
    if b is not None:
        y += b
    return numpy.max(y, axis=2)
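# _maxout computes y[n, o] = max_p (sum_i x[n, i] * W[o, p, i] + b[o, p]),
# assuming W has shape (out_size, pool_size, in_size); it is the NumPy
# reference the Maxout link's output is checked against.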


@testing.parameterize(
    *testing.product(
        {'in_shape': [(2, ), (2, 5)],
         'pool_size': [3],
         'out_size': [4],
         'wscale': [1],
         'initial_bias': ['random', 'scalar', None],
         'batchsize': [7]}
    )
)
class TestMaxout(unittest.TestCase):

    def setUp(self):
        # x, W, and b are set so that the result of forward propagation
        # is stable, meaning that small perturbations of them do not
        # change :math:`argmax_{j} W_{ij\cdot} x + b_{ij}`.

        x_shape = (self.batchsize, ) + self.in_shape
        self.x = numpy.random.uniform(
            -0.05, 0.05, x_shape).astype(numpy.float32) + 1
        self.gy = numpy.random.uniform(

Example 60

# (function header reconstructed; `_inv` is called in check_forward below)
def _inv(x):
    if x.ndim == 2:
        return numpy.linalg.inv(x)
    return numpy.array([numpy.linalg.inv(ix) for ix in x])


def _make_eye(shape):
    if len(shape) == 2:
        n = shape[0]
        return numpy.eye(n, dtype=numpy.float32)
    m = shape[0]
    n = shape[1]
    return numpy.array([numpy.eye(n, dtype=numpy.float32)] * m)


@testing.parameterize(*testing.product({
    'shape': [(1, 1), (5, 5)],
}))
class InvFunctionTest(unittest.TestCase):
    def setUp(self):
        self.x = (numpy.eye(self.shape[-1]) +
                  numpy.random.uniform(-0.01, 0.01, self.shape)).astype(
            numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)

    def check_forward(self, x_data, atol=1e-7, rtol=1e-7):
        x = chainer.Variable(x_data)
        y = functions.inv(x)
        gradient_check.assert_allclose(
            _inv(self.x), y.data, atol=atol, rtol=rtol)

    def check_backward(self, x_data, y_grad, **kwargs):