    def f(klass):
        assert issubclass(klass, unittest.TestCase)

        def setUp(self):
            self.x, self.gy = make_data(self.shape, self.dtype)
            if self.dtype == numpy.float16:
                self.backward_options = {
                    'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
                    'dtype': numpy.float64}
            else:
                self.backward_options = {'atol': 1e-4, 'rtol': 1e-4}
        setattr(klass, "setUp", setUp)

        def check_forward(self, x_data):
            x = chainer.Variable(x_data)
            y = func(x)
            self.assertEqual(y.data.dtype, x_data.dtype)
            y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
            testing.assert_allclose(y_expected, y.data, atol=1e-4, rtol=1e-4)
        setattr(klass, "check_forward", check_forward)

        @condition.retry(3)
        def test_forward_cpu(self):
            self.check_forward(self.x)
        setattr(klass, "test_forward_cpu", test_forward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_forward_gpu(self):
            self.check_forward(cuda.to_gpu(self.x))
        setattr(klass, "test_forward_gpu", test_forward_gpu)

        def check_backward(self, x_data, y_grad):
            gradient_check.check_backward(
                func, x_data, y_grad, **self.backward_options)
        setattr(klass, "check_backward", check_backward)

        @condition.retry(3)
        def test_backward_cpu(self):
            self.check_backward(self.x, self.gy)
        setattr(klass, "test_backward_cpu", test_backward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_backward_gpu(self):
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
        setattr(klass, "test_backward_gpu", test_backward_gpu)

        def test_label(self):
            self.assertEqual(func.label, label_expected)
        setattr(klass, "test_label", test_label)

        # Return parameterized class.
        return testing.parameterize(*testing.product({
            'shape': [(3, 2), ()],
            'dtype': [numpy.float16, numpy.float32, numpy.float64]
        }))(klass)
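
These injected methods presumably come from a decorator factory that closes over func, func_expected, label_expected, and make_data. A minimal usage sketch, assuming the factory is Chainer's testing.unary_math_function_unittest (an assumption about the truncated enclosing scope):

import unittest

import chainer.functions as F
from chainer import testing


# Assumed factory: it injects the setUp/check_*/test_* methods shown
# above into the decorated TestCase for the given unary function.
@testing.unary_math_function_unittest(F.sin)
class TestSin(unittest.TestCase):
    pass
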
Example #2
    def f(klass):
        assert issubclass(klass, unittest.TestCase)

        def setUp(self):
            self.x, self.gy = make_data(self.shape, self.dtype)
            if self.dtype == numpy.float16:
                self.backward_options = {
                    'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
                    'dtype': numpy.float64}
            else:
                self.backward_options = {'atol': 1e-4, 'rtol': 1e-4}
        setattr(klass, "setUp", setUp)

        def check_forward(self, x_data):
            x = variable.Variable(x_data)
            y = func(x)
            self.assertEqual(y.data.dtype, x_data.dtype)
            y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
            testing.assert_allclose(y_expected, y.data, atol=1e-4, rtol=1e-4)
        setattr(klass, "check_forward", check_forward)

        @condition.retry(3)
        def test_forward_cpu(self):
            self.check_forward(self.x)
        setattr(klass, "test_forward_cpu", test_forward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_forward_gpu(self):
            self.check_forward(cuda.to_gpu(self.x))
        setattr(klass, "test_forward_gpu", test_forward_gpu)

        def check_backward(self, x_data, y_grad):
            gradient_check.check_backward(
                func, x_data, y_grad, **self.backward_options)
        setattr(klass, "check_backward", check_backward)

        @condition.retry(3)
        def test_backward_cpu(self):
            self.check_backward(self.x, self.gy)
        setattr(klass, "test_backward_cpu", test_backward_cpu)

        @attr.gpu
        @condition.retry(3)
        def test_backward_gpu(self):
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
        setattr(klass, "test_backward_gpu", test_backward_gpu)

        def test_label(self):
            self.assertEqual(func.label, label_expected)
        setattr(klass, "test_label", test_label)

        # Return parameterized class.
        return testing.parameterize(*testing.product({
            'shape': [(3, 2), ()],
            'dtype': [numpy.float16, numpy.float32, numpy.float64]
        }))(klass)
Example #3
def parameterize_batch_renormalization():
    return testing.parameterize(*(testing.product({
        'ndim': [0, 1, 2],
        'eps': [2e-5, 1e-1],
        'dtype': [numpy.float32],
        'update_statistics': [True, False],
    }) + testing.product({
        'ndim': [1],
        'eps': [2e-5, 1e-1],
        'dtype': [numpy.float16, numpy.float32, numpy.float64],
        'update_statistics': [True, False],
    })))
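
For reference, testing.product takes a dict mapping each key to a list of values and returns the Cartesian product as a list of dicts, so the + above concatenates two parameter sets: an ndim sweep restricted to float32, and a dtype sweep at ndim 1. A rough sketch of the semantics (an illustration, not Chainer's implementation):

import itertools


def product_sketch(d):
    # Cartesian product of {key: [values]} -> list of {key: value} dicts.
    keys = sorted(d)
    return [dict(zip(keys, values))
            for values in itertools.product(*(d[k] for k in keys))]


params = product_sketch({'ndim': [0, 1], 'eps': [2e-5, 1e-1]})
assert len(params) == 4
assert {'ndim': 0, 'eps': 2e-5} in params
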
Example #4
    def deco(cls):
        setUp_orig = cls.setUp

        def setUp(self):
            param = params[self._chainercv_parameterize_index]
            print('params: {}'.format(param))
            for k, v in six.iteritems(param):
                setattr(self, k, v)
            setUp_orig(self)

        cls.setUp = setUp

        params_indices = [{
            '_chainercv_parameterize_index': i
        } for i in range(len(params))]
        return testing.parameterize(*params_indices)(cls)
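
A hypothetical usage sketch: deco is presumably the inner decorator of a factory like ChainerCV's testing.parameterize(*params), which injects each param dict onto the test instance before the original setUp runs. The parameterize name and the test class below are assumptions, not the original source:

import unittest

import numpy


# Assumed factory: parameterize(*params) returns the deco shown above
# (modeled on ChainerCV's testing utilities, where this snippet appears
# to originate).
@parameterize({'dtype': numpy.float32},
              {'dtype': numpy.float64})
class TestParameterized(unittest.TestCase):
    def setUp(self):
        # The wrapped setUp has already copied the current param dict
        # (here the dtype key) onto self.
        self.x = numpy.zeros((2, 3), dtype=self.dtype)

    def test_dtype(self):
        self.assertEqual(self.x.dtype, self.dtype)
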
Example #5
    def f(klass):
        assert issubclass(klass, unittest.TestCase)

        def setUp(self):
            if is_new_style:
                self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
            else:
                self.x, self.gy = make_data(self.shape, self.dtype)

            self.forward_options = {'atol': 1e-4, 'rtol': 1e-4}
            if self.dtype == numpy.float16:
                self.backward_options = {
                    'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
                    'dtype': numpy.float64}
                self.double_backward_options = {
                    'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
                    'dtype': numpy.float64}
            else:
                self.backward_options = {
                    'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
                self.double_backward_options = {
                    'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
            if forward_options is not None:
                self.forward_options.update(forward_options)
            if backward_options is not None:
                self.backward_options.update(backward_options)
            if double_backward_options is not None:
                self.double_backward_options.update(double_backward_options)
        setattr(klass, "setUp", setUp)

        def check_forward(self, x_data):
            x = variable.Variable(x_data)
            y = func(x)
            self.assertEqual(y.data.dtype, x_data.dtype)
            y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
            testing.assert_allclose(y_expected, y.data, **self.forward_options)
        setattr(klass, "check_forward", check_forward)

        def test_forward_cpu(self):
            self.check_forward(self.x)
        setattr(klass, "test_forward_cpu", test_forward_cpu)

        @attr.gpu
        def test_forward_gpu(self):
            self.check_forward(cuda.to_gpu(self.x))
        setattr(klass, "test_forward_gpu", test_forward_gpu)

        def check_backward(self, x_data, y_grad):
            gradient_check.check_backward(
                func, x_data, y_grad, **self.backward_options)
        setattr(klass, "check_backward", check_backward)

        def test_backward_cpu(self):
            self.check_backward(self.x, self.gy)
        setattr(klass, "test_backward_cpu", test_backward_cpu)

        @attr.gpu
        def test_backward_gpu(self):
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
        setattr(klass, "test_backward_gpu", test_backward_gpu)

        if is_new_style:
            def check_double_backward(self, x_data, y_grad, x_grad_grad):
                func1 = _nonlinear(func) if is_linear else func
                gradient_check.check_double_backward(
                    func1, x_data, y_grad,
                    x_grad_grad, **self.double_backward_options)
            setattr(klass, "check_double_backward", check_double_backward)

            def test_double_backward_cpu(self):
                self.check_double_backward(self.x, self.gy, self.ggx)
            setattr(klass, "test_double_backward_cpu",
                    test_double_backward_cpu)

            @attr.gpu
            def test_double_backward_gpu(self):
                self.check_double_backward(
                    cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
                    cuda.to_gpu(self.ggx))
            setattr(klass, "test_double_backward_gpu",
                    test_double_backward_gpu)

        if func_class is not None:
            def test_label(self):
                self.assertEqual(func_class().label, label_expected)
            setattr(klass, "test_label", test_label)

        # Return parameterized class.
        return testing.parameterize(*testing.product({
            'shape': [(3, 2), ()],
            'dtype': [numpy.float16, numpy.float32, numpy.float64]
        }))(klass)
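
The _nonlinear helper referenced above lies outside this excerpt. A plausible reconstruction, matching Chainer's testing utilities (an assumption about the truncated module): a linear func has an identically zero second derivative, so the double-backward check is made meaningful by composing it with a square.

def _nonlinear(func):
    # Squaring the output yields a nonzero second derivative even for a
    # linear func, so check_double_backward has a gradient to verify.
    def aux(x):
        y = func(x)
        return y * y
    return aux
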
Example #6
    def f(klass):
        assert issubclass(klass, unittest.TestCase)

        def setUp(self):
            if is_new_style:
                self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
            else:
                self.x, self.gy = make_data(self.shape, self.dtype)

            if self.dtype == numpy.float16:
                self.backward_options = {
                    'eps': 2**-4,
                    'atol': 2**-4,
                    'rtol': 2**-4,
                    'dtype': numpy.float64
                }
                self.double_backward_options = {
                    'eps': 2**-4,
                    'atol': 2**-4,
                    'rtol': 2**-4,
                    'dtype': numpy.float64
                }
            else:
                self.backward_options = {
                    'dtype': numpy.float64,
                    'atol': 1e-4,
                    'rtol': 1e-4
                }
                self.double_backward_options = {
                    'dtype': numpy.float64,
                    'atol': 1e-4,
                    'rtol': 1e-4
                }
            if backward_options is not None:
                self.backward_options.update(backward_options)
            if double_backward_options is not None:
                self.double_backward_options.update(double_backward_options)

        setattr(klass, "setUp", setUp)

        def check_forward(self, x_data):
            x = variable.Variable(x_data)
            y = func(x)
            self.assertEqual(y.data.dtype, x_data.dtype)
            y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
            testing.assert_allclose(y_expected, y.data, atol=1e-4, rtol=1e-4)

        setattr(klass, "check_forward", check_forward)

        def test_forward_cpu(self):
            self.check_forward(self.x)

        setattr(klass, "test_forward_cpu", test_forward_cpu)

        @attr.gpu
        def test_forward_gpu(self):
            self.check_forward(cuda.to_gpu(self.x))

        setattr(klass, "test_forward_gpu", test_forward_gpu)

        def check_backward(self, x_data, y_grad):
            gradient_check.check_backward(func, x_data, y_grad,
                                          **self.backward_options)

        setattr(klass, "check_backward", check_backward)

        def test_backward_cpu(self):
            self.check_backward(self.x, self.gy)

        setattr(klass, "test_backward_cpu", test_backward_cpu)

        @attr.gpu
        def test_backward_gpu(self):
            self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

        setattr(klass, "test_backward_gpu", test_backward_gpu)

        if is_new_style:

            def check_double_backward(self, x_data, y_grad, x_grad_grad):
                func1 = _nonlinear(func) if is_linear else func
                gradient_check.check_double_backward(
                    func1, x_data, y_grad, x_grad_grad,
                    **self.double_backward_options)

            setattr(klass, "check_double_backward", check_double_backward)

            def test_double_backward_cpu(self):
                self.check_double_backward(self.x, self.gy, self.ggx)

            setattr(klass, "test_double_backward_cpu",
                    test_double_backward_cpu)

            @attr.gpu
            def test_double_backward_gpu(self):
                self.check_double_backward(cuda.to_gpu(self.x),
                                           cuda.to_gpu(self.gy),
                                           cuda.to_gpu(self.ggx))

            setattr(klass, "test_double_backward_gpu",
                    test_double_backward_gpu)

        if func_class is not None:

            def test_label(self):
                self.assertEqual(func_class().label, label_expected)

            setattr(klass, "test_label", test_label)

        # Return parameterized class.
        return testing.parameterize(*testing.product({
            'shape': [(3, 2), ()],
            'dtype': [numpy.float16, numpy.float32, numpy.float64]
        }))(klass)
Example #7
import unittest

import six

import numpy as np

import chainer
from chainer import optimizers
from chainer import testing

_parameterize_optimizers = testing.parameterize(*testing.product({
    'optimizer_impl': [
        optimizers.AdaDelta,
        optimizers.AdaGrad,
        optimizers.Adam,
        optimizers.CorrectedMomentumSGD,
        optimizers.MomentumSGD,
        optimizers.MSVAG,
        optimizers.NesterovAG,
        optimizers.RMSprop,
        optimizers.RMSpropGraves,
        optimizers.SGD,
        optimizers.SMORMS3,
    ]
}))


@_parameterize_optimizers
class TestOptimizerHyperparameter(unittest.TestCase):
    def setUp(self):
        self.target = chainer.Link()
        with self.target.init_scope():
            self.target.w = chainer.Parameter()
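
    # Hypothetical continuation, added for illustration (the excerpt cuts
    # off inside setUp): a test method on this parameterized class would
    # receive each optimizer class as self.optimizer_impl, e.g.:
    #
    #     def test_hyperparam(self):
    #         optimizer = self.optimizer_impl()
    #         optimizer.setup(self.target)
    #         self.assertTrue(hasattr(optimizer, 'hyperparam'))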
Example #8
    'AdamW',
    'AMSGrad',
    'AdaBound',
    'AMSBound',
    'CorrectedMomentumSGD',
    'MomentumSGD',
    'MSVAG',
    'NesterovAG',
    'RMSprop',
    'RMSpropGraves',
    'SGD',
    'SMORMS3',
]

_parameterize_optimizers = testing.parameterize(*testing.product({
    'optimizer_impl':
    [getattr(chainer.optimizers, o) for o in _all_optimizers]
}))


class SimpleChain(chainer.Chain):
    def __init__(self, shape=()):
        super(SimpleChain, self).__init__()
        w_np = np.asarray(np.random.randn(*shape)).astype(np.float32)
        with self.init_scope():
            self.w = chainer.Parameter(w_np, name='w')

    def __call__(self, x):
        return F.sum((x - self.w)**2)
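

def _simple_chain_sanity_check():
    # Added illustration, not part of the original file: SimpleChain's
    # loss is the squared distance to its parameter w, so it is exactly
    # zero when the input equals w.
    model = SimpleChain(shape=(3,))
    x = model.w.array.copy()
    assert float(model(x).array) == 0.0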


class TestAllOptimizersCoverage(unittest.TestCase):
Example #9
from chainer.testing import condition
from chainer.utils import type_check
import chainerx

_parameterize = testing.parameterize(*(testing.product_dict(
    testing.product({
        'test': [True, False],
        'size': ['skip', 'explicit'],
        'dtype':
        [numpy.float16, numpy.float32, numpy.float64, chainer.mixed16],
    }),
    testing.product({
        'ndim': [0, 1, 2, 3],
    }) + [
        {
            'input_shape': (5, 4, 3, 2),
            'axis': (0, 2, 3)
        },
        {
            'input_shape': (5, 4),
            'axis': 0
        },
        {
            'input_shape': (5, 4, 3),
            'axis': (0, 1)
        },
    ])))
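
Here testing.product_dict takes lists of dicts and merges one dict from each list for every combination, so each parameter set pairs a test/size/dtype combination with one ndim or shape/axis configuration. A rough sketch of the semantics (an illustration, not Chainer's implementation):

import itertools


def product_dict_sketch(*dict_lists):
    # Merge one dict drawn from each list, for every combination.
    result = []
    for dicts in itertools.product(*dict_lists):
        merged = {}
        for d in dicts:
            merged.update(d)
        result.append(merged)
    return result


params = product_dict_sketch(
    [{'test': True}, {'test': False}],
    [{'ndim': 1}, {'input_shape': (5, 4), 'axis': 0}],
)
assert len(params) == 4
assert {'test': True, 'input_shape': (5, 4), 'axis': 0} in params
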

_inject_backend_tests = testing.inject_backend_tests(
    None,
    # CPU tests
Example #10
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
import chainerx


_parameterize = testing.parameterize(*(testing.product_dict(
    testing.product({
        'test': [True, False],
        'size': ['skip', 'explicit'],
        'dtype': [numpy.float16, numpy.float32, numpy.float64,
                  chainer.mixed16],
    }),
    testing.product({
        'ndim': [0, 1, 2, 3],
    }) + [
        {'input_shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
        {'input_shape': (5, 4), 'axis': 0},
        {'input_shape': (5, 4, 3), 'axis': (0, 1)},
    ]
)))


_inject_backend_tests = testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
        {'use_ideep': 'always'},