Example #1
def _check_chainerx_numpy_result_array(check_result_func, chainerx_result,
                                       numpy_result, indices):
    # Compares `chainerx_result` and `numpy_result` as arrays.

    is_chainerx_valid_type = isinstance(chainerx_result, chainerx.ndarray)
    is_numpy_valid_type = _is_numpy_type(numpy_result)

    if not (is_chainerx_valid_type and is_numpy_valid_type):
        raise _ResultsCheckFailure(
            'Using decorator without returning ndarrays. '
            'If you want to explicitly ignore certain tests, '
            'return chainerx.testing.ignore() to avoid this error', indices)

    if chainerx_result.shape != numpy_result.shape:
        raise _ResultsCheckFailure(
            'Shape mismatch', indices, lambda np_r, chx_r:
            ('chainerx: {}, numpy: {}'.format(chx_r.shape, np_r.shape)))

    if chainerx_result.device is not chainerx.get_default_device():
        raise _ResultsCheckFailure(
            'ChainerX bad device', indices,
            lambda np_r, chx_r: ('default: {}, chainerx: {}'.format(
                chainerx.get_default_device(), chx_r.device)))

    try:
        check_result_func(chainerx_result, numpy_result)
    except AssertionError as e:
        # Convert AssertionError to _ResultsCheckFailure
        raise _ResultsCheckFailure(str(e), indices)
Example #2
def _check_chainerx_numpy_result_array(
        check_result_func, chainerx_result, numpy_result, indices):
    # Compares `chainerx_result` and `numpy_result` as arrays.

    is_chainerx_valid_type = isinstance(chainerx_result, chainerx.ndarray)
    is_numpy_valid_type = _is_numpy_type(numpy_result)

    if not (is_chainerx_valid_type and is_numpy_valid_type):
        raise _ResultsCheckFailure(
            'Using decorator without returning ndarrays. '
            'If you want to explicitly ignore certain tests, '
            'return chainerx.testing.ignore() to avoid this error', indices)

    if chainerx_result.shape != numpy_result.shape:
        raise _ResultsCheckFailure(
            'Shape mismatch', indices,
            lambda np_r, chx_r: (
                'chainerx: {}, numpy: {}'.format(chx_r.shape, np_r.shape)))

    if chainerx_result.device is not chainerx.get_default_device():
        raise _ResultsCheckFailure(
            'ChainerX bad device', indices,
            lambda np_r, chx_r: (
                'default: {}, chainerx: {}'.format(
                    chainerx.get_default_device(), chx_r.device)))

    try:
        check_result_func(chainerx_result, numpy_result)
    except AssertionError as e:
        # Convert AssertionError to _ResultsCheckFailure
        raise _ResultsCheckFailure(str(e), indices)
Example #3
def test_using_device_with_name(device_instance1, device_instance2):
    if device_instance1 == device_instance2:
        return

    device1 = device_instance1
    device2 = device_instance2

    chainerx.set_default_device(device1)
    with chainerx.using_device(device2.name) as scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2

    with chainerx.using_device(device2.backend.name, device2.index) as scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2
Example #4
    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda'
                and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions.')

        # TODO(niboshi): average_pool can return nan if pad_mode is 'ignore',
        # and numeric gradients cannot be calculated.
        # If chainerx.where is implemented, we can replace nans and remove
        # this skip.
        if self.pad_mode in ('ignore', None):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
        if dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-4})
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
        else:
            self.check_backward_options.update({'rtol': 5e-3, 'atol': 5e-3, })

        self.dtype = dtype
Example #5
    def setup(self):
        device = chainerx.get_default_device()
        a_dtype, b_dtype = self.in_dtypes
        a_kind = numpy.dtype(a_dtype).kind
        b_kind = numpy.dtype(b_dtype).kind
        # TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
        if device.name == 'cuda:0' and (a_kind != 'f' and b_kind != 'f'):
            pytest.skip('non-float dot is not supported on CUDA')

        # Skip backward/double-backward tests for int dtypes
        if a_kind != 'f' or b_kind != 'f':
            self.skip_backward_test = True
            self.skip_double_backward_test = True
        # Skip backward/double-backward tests if the output will be
        # disconnected.
        # TODO(niboshi): Remove this skip condition after enabling backward()
        # for such cases.
        if self.a_shape and self.a_shape[-1] == 0:
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if a_dtype == 'float16' or b_dtype == 'float16':
            self.check_forward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})
            self.check_backward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})
            self.check_double_backward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})
Example #6
    def setup(self):
        device = chainerx.get_default_device()
        a_dtype, b_dtype = self.in_dtypes
        a_kind = numpy.dtype(a_dtype).kind
        b_kind = numpy.dtype(b_dtype).kind
        # TODO(beam2d): Remove the skip after supporting non-float dot on CUDA
        if device.name == 'cuda:0' and (a_kind != 'f' and b_kind != 'f'):
            pytest.skip('non-float dot is not supported on CUDA')

        # Skip backward/double-backward tests for int dtypes
        if a_kind != 'f' or b_kind != 'f':
            self.skip_backward_test = True
            self.skip_double_backward_test = True
        # Skip backward/double-backward tests if the output will be
        # disconnected.
        # TODO(niboshi): Remove this skip condition after enabling backward()
        # for such cases.
        if self.a_shape and self.a_shape[-1] == 0:
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if a_dtype == 'float16' or b_dtype == 'float16':
            self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_double_backward_options.update({
                'rtol': 1e-2,
                'atol': 1e-2
            })
Example #7
    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda' and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions.')

        # TODO(niboshi): average_pool can return nan if pad_mode is 'ignore',
        # and numeric gradients cannot be calculated.
        # If chainerx.where is implemented, we can replace nans and remove
        # this skip.
        if self.pad_mode in ('ignore', None):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-3})
        if dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-3, 'atol': 5e-4})
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
        else:
            self.check_backward_options.update({
                'rtol': 5e-3,
                'atol': 5e-3,
            })

        self.dtype = dtype
Example #8
    def setup(self):
        device = chainerx.get_default_device()
        if (device.backend.name == 'native'
                and not chainerx.linalg._is_lapack_available()):
            pytest.skip('LAPACK is not linked to ChainerX')
        self.check_backward_options.update({'rtol': 5e-3})
        self.check_double_backward_options.update({'rtol': 5e-3})
Example #9
    def setup(self, float_dtype):

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(hvy): Support 1 dimensional convolution with CUDA.
            pytest.skip('cudnn does not support 1-dim convolution')
        if device.backend.name == 'cuda' and self.cover_all:
            pytest.skip('cudnn does not support cover_all')
        if device.backend.name == 'native' and float_dtype == 'float16':
            # TODO(niboshi): Fix accuracy
            pytest.skip('Native float16 operation has insufficient accuracy')

        self.dtype = float_dtype

        if float_dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2**-3,
                'rtol': 1e-1,
                'atol': 1e-2
            })
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2,
                'rtol': 1e-3,
                'atol': 1e-4
            })
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
Example #10
def test_chainerx_get_device():
    context = chainerx.Context()
    with chainerx.context_scope(context):
        device = chainerx.get_device('native:0')
        assert device.context is context
        assert device.name == 'native:0'
        assert device is chainerx.get_device('native', 0)
        assert device is chainerx.get_device(device)
        assert chainerx.get_default_device() is chainerx.get_device()
Example #11
    def setup(self):
        super().setup()
        device = chainerx.get_default_device()

        _skip_if_native_and_lapack_unavailable(device)

        self.check_forward_options.update({'rtol': 1e-4, 'atol': 1e-4})
        self.check_backward_options.update({'rtol': 5e-3})
        self.check_double_backward_options.update({'rtol': 5e-3})
Example #12
def test_ascontiguousarray_from_chainerx_array_device():
    with chainerx.using_device(chainerx.get_device('native:0')):
        dev = chainerx.get_device('native:1')  # Non default one
        assert chainerx.get_default_device() is not dev

        a = chainerx.arange(10, device=dev)
        b = chainerx.ascontiguousarray(a)
        assert b.is_contiguous is True
        assert b.device is dev
Example #14
def prepare_chainerx_inputs(num_input, config, grad=False):
    device = chainerx.get_default_device()
    dtype = config['dtype']
    inputs = prepare_numpy_inputs(num_input, config)
    inputs = [chainerx.array(i, dtype=dtype) for i in inputs]
    if grad:
        for i in inputs:
            i.require_grad()
    device.synchronize()
    return inputs
Example #15
def cache_restore_context(request):
    device = chainerx.get_default_device()
    context = chainerx.get_default_context()
    global_context = chainerx.get_global_default_context()

    def restore_context():
        chainerx.set_global_default_context(global_context)
        chainerx.set_default_context(context)
        chainerx.set_default_device(device)
    request.addfinalizer(restore_context)
Example #16
    def check_device_spec_chainerx(self, device_spec, expected_device_name):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.ChainerxDevice)
        assert device.xp is chainerx
        assert isinstance(device.device, chainerx.Device)
        assert device.device.name == expected_device_name

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            assert (chainerx.get_default_device() == chainerx.get_device(
                expected_device_name))
Example #17
def _check_array_from_chainerx_array_with_dtype(
        shape, src_dtype, dst_dtype_spec, copy, device=None):
    t = array_utils.create_dummy_ndarray(
        chainerx, shape, src_dtype, device=device)
    a = chainerx.array(t, dtype=dst_dtype_spec, copy=copy)

    src_dtype = chainerx.dtype(src_dtype)
    dst_dtype = src_dtype if dst_dtype_spec is None else chainerx.dtype(
        dst_dtype_spec)
    device = chainerx.get_device(device)

    if (not copy
            and src_dtype == dst_dtype
            and device is chainerx.get_default_device()):
        assert t is a
    else:
        assert t is not a
        chainerx.testing.assert_array_equal_ex(a, t.astype(dst_dtype))
        assert a.dtype == dst_dtype
        assert a.device is chainerx.get_default_device()
Example #18
def test_view(shape, dtype):
    array = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
    view = array.view()

    chainerx.testing.assert_array_equal_ex(view, array)
    assert view.device is chainerx.get_default_device()

    # inplace modification
    if array.size > 0:
        array *= array
        assert array._debug_flat_data == view._debug_flat_data
Example #20
def test_using_device(device_instance1, device_instance2):
    if device_instance1 == device_instance2:
        return

    device1 = device_instance1
    device2 = device_instance2

    chainerx.set_default_device(device1)
    with chainerx.using_device(device2) as scope:
        assert chainerx.get_default_device() is device2
        assert scope.device is device2

    scope = chainerx.using_device(device2)
    assert chainerx.get_default_device() == device1
    assert scope.device is device2
    with scope:
        assert chainerx.get_default_device() == device2
        assert scope.device is device2
    assert chainerx.get_default_device() == device1
    assert scope.device is device2
Example #22
def test_view(shape, dtype):
    array = array_utils.create_dummy_ndarray(chainerx, shape, dtype)
    view = array.view()

    chainerx.testing.assert_array_equal_ex(view, array)
    assert view.device is chainerx.get_default_device()

    # inplace modification
    if array.size > 0:
        array += array
        assert array._debug_flat_data == view._debug_flat_data
Example #23
    def check_device_spec_chainerx(self, device_spec, expected_device_name):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.ChainerxDevice)
        assert device.xp is chainerx
        assert isinstance(device.device, chainerx.Device)
        assert device.device.name == expected_device_name

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            assert (
                chainerx.get_default_device()
                == chainerx.get_device(expected_device_name))
Example #24
    def setup(self, float_dtype):
        self.dtype = float_dtype

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(sonots): Support 1 dimensional convolution with CUDA.
            pytest.skip(
                'cuDNN does not support 1 dimensional convolution and throws '
                'DimensionError')
        if device.backend.name == 'cuda' and self.cover_all is True:
            pytest.skip(
                'outsize (for cover_all=True) is not supported by CUDA')

        if float_dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2**-3,
                'rtol': 1e-1,
                'atol': 1e-2
            })
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2,
                'rtol': 1e-3,
                'atol': 1e-4
            })
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})

        # Determine outsize
        cover_all = self.cover_all
        if cover_all is None:
            outsize = None
        else:
            x_shape = self.x_shape
            w_shape = self.w_shape
            stride = self.stride
            pad = self.pad
            in_dims = x_shape[2:]
            kernel_size = w_shape[2:]
            ndim = len(in_dims)
            stride_tup = ((stride, ) *
                          ndim if isinstance(stride, int) else stride)
            pad_tup = (pad, ) * ndim if isinstance(pad, int) else pad
            outsize = tuple(
                chainer.utils.conv.get_deconv_outsize(d, k, s, p, cover_all)
                for (d, k, s,
                     p) in zip(in_dims, kernel_size, stride_tup, pad_tup))
        self.outsize = outsize
Example #25
    def setup(self):
        device = chainerx.get_default_device()

        _skip_if_native_and_lapack_unavailable(device)

        self.check_backward_options.update({
            'eps': 1e-5,
            'rtol': 1e-3,
            'atol': 1e-3
        })
        self.check_double_backward_options.update({
            'eps': 1e-5,
            'rtol': 1e-3,
            'atol': 1e-3
        })
Example #26
def _guess_device_from_array_module(xp):
    """Returns a plausible device from array module

    .. warning::

        There can be multiple devices for a module

    """
    if xp is cuda.cupy:
        return cuda.GpuDevice(cuda.Device())
    elif xp is chainerx:
        return _chainerx.ChainerxDevice(chainerx.get_default_device())
    else:
        # Cannot detect intel64, because xp of intel64 is numpy.
        return _cpu.CpuDevice()
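
The helper above maps an array module to a backend device object. A minimal usage sketch, assuming the function and its chainer backend dependencies (cuda, _cpu, _chainerx) are in scope exactly as in the example:

import numpy
import chainerx

# numpy (and intel64, whose xp is also numpy) resolves to the CPU device.
cpu_dev = _guess_device_from_array_module(numpy)

# chainerx resolves to a ChainerxDevice wrapping the current default device,
# so the result follows chainerx.get_default_device() at call time.
with chainerx.using_device('native:0'):
    chx_dev = _guess_device_from_array_module(chainerx)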
Example #28
    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda'
                and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions')

        if dtype == 'float16':
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 1e-3})
            self.check_double_backward_options.update({
                'rtol': 5e-2, 'atol': 1e-3})

        self.dtype = dtype
Example #29
    def setup(self):
        self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
        self.check_double_backward_options.update({'rtol': 5e-3, 'atol': 5e-2})
        if self.in_dtypes[0] == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update({
                'rtol': 1e-1,
                'atol': 1e-1
            })
        device = chainerx.get_default_device()
        if device.backend.name == 'cuda':
            if self.in_dtypes[0] != 'float32':
                self.skip_backward_test = True
            self.skip_double_backward_test = True
Example #30
    def setup(self, float_dtype):
        dtype = float_dtype
        ksize = self.ksize
        device = chainerx.get_default_device()
        if (device.backend.name == 'cuda' and len(ksize) != 2
                and len(ksize) != 3):
            raise unittest.SkipTest(
                'cuDNN supports only 2 and 3 spatial dimensions')

        if dtype == 'float16':
            self.check_backward_options.update({'rtol': 5e-2, 'atol': 1e-3})
            self.check_double_backward_options.update({
                'rtol': 5e-2,
                'atol': 1e-3
            })

        self.dtype = dtype
Example #31
    def setup(self, float_dtype):
        self.dtype = float_dtype

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(sonots): Support 1 dimensional convolution with CUDA.
            pytest.skip(
                'cuDNN does not support 1 dimensional convolution and throws '
                'DimensionError')
        if device.backend.name == 'cuda' and self.cover_all is True:
            pytest.skip(
                'outsize (for cover_all=True) is not supported by CUDA')

        if float_dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2 ** -3, 'rtol': 1e-1, 'atol': 1e-2})
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2, 'rtol': 1e-3, 'atol': 1e-4})
        self.check_double_backward_options.update({
            'rtol': 5e-2, 'atol': 5e-3})

        # Determine outsize
        cover_all = self.cover_all
        if cover_all is None:
            outsize = None
        else:
            x_shape = self.x_shape
            w_shape = self.w_shape
            stride = self.stride
            pad = self.pad
            in_dims = x_shape[2:]
            kernel_size = w_shape[2:]
            ndim = len(in_dims)
            stride_tup = (
                (stride,) * ndim if isinstance(stride, int) else stride)
            pad_tup = (pad,) * ndim if isinstance(pad, int) else pad
            outsize = tuple(
                chainer.utils.conv.get_deconv_outsize(d, k, s, p, cover_all)
                for (d, k, s, p)
                in zip(in_dims, kernel_size, stride_tup, pad_tup))
        self.outsize = outsize
Example #33
    def setup(self, dtype):
        device = chainerx.get_default_device()
        # TODO(imanishi): Remove the skip after supporting non-float dot on
        # CUDA
        if device.name == 'cuda:0' and numpy.dtype(dtype).kind != 'f':
            pytest.skip('non-float dot is not supported')

        # Skip backward/double-backward tests for int dtypes
        if numpy.dtype(dtype).kind != 'f':
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if dtype == 'float16':
            self.check_forward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})
            self.check_backward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})
            self.check_double_backward_options.update({
                'rtol': 1e-2, 'atol': 1e-2})

        self.dtype = dtype
Example #34
    def setup(self):
        if len(self.in_dtypes) == 3:
            x_dtype, w_dtype, b_dtype = self.in_dtypes
        else:
            (x_dtype, w_dtype), b_dtype = self.in_dtypes, None

        x_kind = numpy.dtype(x_dtype).kind
        w_kind = numpy.dtype(w_dtype).kind
        b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(hvy): Support 1 dimensional convolution with CUDA.
            pytest.skip('cudnn does not support 1-dim convolution')
        if device.backend.name == 'cuda' and self.cover_all:
            pytest.skip('cudnn does not support cover_all')

        # Skip backward/double-backward tests for int dtypes
        if (x_kind != 'f' and w_kind != 'f'
                and (b_kind is None or b_kind != 'f')):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if (x_dtype == 'float16' or w_dtype == 'float16'
                or b_dtype == 'float16'):
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2**-3,
                'rtol': 1e-1,
                'atol': 1e-2
            })
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2,
                'rtol': 1e-3,
                'atol': 1e-4
            })
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})
Example #35
    def setup(self):
        if len(self.in_dtypes) == 3:
            x_dtype, w_dtype, b_dtype = self.in_dtypes
        else:
            (x_dtype, w_dtype), b_dtype = self.in_dtypes, None

        x_kind = numpy.dtype(x_dtype).kind
        w_kind = numpy.dtype(w_dtype).kind
        b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and (x_kind != 'f' or w_kind != 'f'
                                              or b_kind != 'f'):
            raise unittest.SkipTest('CUDA dot does not support integers.')

        # Skip backward/double-backward tests for int dtypes
        if (x_kind != 'f' and w_kind != 'f'
                and (b_kind is None or b_kind != 'f')):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        # Skip backward/double-backward tests if the output will be
        # disconnected.
        # TODO(niboshi): Remove this skip condition after enabling backward()
        # for such cases.
        if 0 in self.x_shape or 0 in self.w_shape:
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if (x_dtype == 'float16' or w_dtype == 'float16'
                or b_dtype == 'float16'):
            self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_backward_options.update({'rtol': 1e-2, 'atol': 1e-2})
            self.check_double_backward_options.update({
                'rtol': 1e-2,
                'atol': 1e-2
            })
Example #36
    def setup(self, float_dtype):

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(hvy): Support 1 dimensional convolution with CUDA.
            pytest.skip('cudnn does not support 1-dim convolution')
        if device.backend.name == 'cuda' and self.cover_all:
            pytest.skip('cudnn does not support cover_all')
        if device.backend.name == 'native' and float_dtype == 'float16':
            # TODO(niboshi): Fix accuracy
            pytest.skip('Native float16 operation has insufficient accuracy')

        self.dtype = float_dtype

        if float_dtype == 'float16':
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2 ** -3, 'rtol': 1e-1, 'atol': 1e-2})
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2, 'rtol': 1e-3, 'atol': 1e-4})
        self.check_double_backward_options.update({
            'rtol': 5e-2, 'atol': 5e-3})
Example #37
    def setup(self):
        reduced_shape = self.reduced_shape
        x_dtype = self.x_dtype
        param_dtype = self.param_dtype
        eps = self.eps
        decay = self.decay
        axis = self.axis
        contiguous = self.contiguous

        # - Non-contiguous running values which are updated in-place are not
        # supported by CUDA.
        # - Non-contiguous gamma and beta is not supported by CUDA.
        # TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
        # contiguous copy in the cuDNN wrapper.
        if (chainerx.get_default_device().backend.name == 'cuda'
                and contiguous is None):
            raise unittest.SkipTest(
                'batch_norm with CUDA currently has limited support for '
                'non-contiguous inputs.')

        # BatchNorm is unstable for fp16 for both native and CUDA.
        # TODO(hvy): Fix backward and double backward for fp16.
        if x_dtype == 'float16' and param_dtype == 'float16':
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        self.running_mean = numpy.random.uniform(
            -1, 1, reduced_shape).astype(param_dtype)
        self.running_var = numpy.random.uniform(
            0.1, 1, reduced_shape).astype(param_dtype)

        optional_args = {}
        if eps is not None:
            optional_args['eps'] = eps
        if decay is not None:
            optional_args['decay'] = decay
        if axis is not None:
            optional_args['axis'] = axis
        self.optional_args = optional_args

        # TODO(hvy): Fix forward, backward and double backward for fp16.
        if x_dtype == 'float16' or param_dtype == 'float16':
            self.check_forward_options.update({
                'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({
                'rtol': 1e-1, 'atol': 1e-1})
            self.check_double_backward_options.update({
                'rtol': 1e-1, 'atol': 1e-1})
        else:
            self.check_forward_options.update({
                'rtol': 1e-6, 'atol': 1e-5})
            self.check_backward_options.update({
                'rtol': 5e-3, 'atol': 5e-4})
            self.check_double_backward_options.update({
                'rtol': 5e-2, 'atol': 5e-3})

        # Running values that are recorded in forward for similarity checks.
        self.running_mean_chx = None
        self.running_var_chx = None
        self.running_mean_ch = None
        self.running_var_ch = None
Example #38
def generate_array(initializer, shape, xp, dtype=None, device=None):
    """Return initialized array.

    The algorithms used to make the new values depend on the
    concrete derived classes. If the initializer has the ``dtype`` attribute,
    it is used to construct the array. Otherwise, ``chainer.config.dtype`` is
    used instead. See :ref:`configuration` for the dtype config.

    Args:
        initializer: A callable object that takes :class:`numpy.ndarray`
             or :class:`cupy.ndarray` and edits its value.
        shape (tuple): Shape of a return array.
        xp (module): :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx`.
        dtype: Dtype specifier. If omitted, ``initializer.dtype`` is used.
        device: Target device specifier. If omitted, the current device is
             used for :mod:`cupy`, and the default device is used for
             :mod:`chainerx`.

    Returns:
        numpy.ndarray, cupy.ndarray, or chainerx.ndarray: An initialized array.

    """
    dtype_attr = getattr(initializer, 'dtype', None)
    if dtype is not None and dtype_attr is not None \
            and numpy.dtype(dtype) != numpy.dtype(dtype_attr):
        raise ValueError(
            'dtype mismatch: {} != {}'.format(dtype, dtype_attr))
    if dtype is None:
        dtype = dtype_attr
    dtype = chainer.get_dtype(dtype)

    if device is None:
        if xp is cuda.cupy:
            device = chainer.get_device(cuda.Device())
        elif xp is chainerx:
            device = chainer.get_device(chainerx.get_default_device())
        else:
            device = chainer.get_device(numpy)
    else:
        device = chainer.get_device(device)
        if xp != device.xp:
            raise ValueError('xp and device arguments are inconsistent.')

    if xp is chainerx:
        # Initialize with NumPy/CuPy array that shares memory with the
        # ChainerX array.
        # TODO(sonots): Directly use initializer after ChainerX
        # supports random.
        chx_device = device.device
        array = chainerx.empty(shape, dtype=dtype, device=chx_device)
        if chx_device.backend.name == 'native':
            temp_array = _cpu._to_cpu(array)
            temp_device = cuda.DummyDevice
        elif chx_device.backend.name == 'cuda':
            temp_array = cuda.to_gpu(array, chx_device.index)
            temp_device = cuda.Device(chx_device.index)
        else:
            raise RuntimeError('ChainerX backend: {} is not supported.'.format(
                chx_device.backend.name))
        with temp_device:
            initializer(temp_array)
        return array

    with chainer.using_device(device):
        array = xp.empty(shape, dtype=dtype)
        initializer(array)
    return array
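
As a quick illustration of the docstring above, here is a minimal usage sketch for the NumPy path. It assumes generate_array is exposed as chainer.initializers.generate_array; that import location is an assumption of this sketch, not something stated in the example.

import numpy
from chainer import initializers

# Fill a new (2, 3) float32 array with a constant value on the CPU device.
init = initializers.Constant(0.5)
arr = initializers.generate_array(init, (2, 3), numpy, dtype='float32')
assert arr.shape == (2, 3) and arr.dtype == numpy.dtype('float32')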
Example #39
def check_device(a, device=None):
    if device is None:
        device = chainerx.get_default_device()
    elif isinstance(device, str):
        device = chainerx.get_device(device)
    assert a.device is device
Example #40
    def setup(self, float_dtype):
        self.dtype = float_dtype

        eps = self.eps
        decay = self.decay
        axis = self.axis

        optional_args = {}
        if eps is not None:
            optional_args['eps'] = eps
        if decay is not None:
            optional_args['decay'] = decay
        if axis is not None:
            optional_args['axis'] = axis
        self.optional_args = optional_args

        # - Non-contiguous running values which are updated in-place are not
        # supported by CUDA.
        # - Non-contiguous gamma and beta is not supported by CUDA.
        # TODO(hvy): Support non-contiguous gamma and beta with CUDA. Create a
        # contiguous copy in the cuDNN wrapper.
        self.is_cuda = chainerx.get_default_device().backend.name == 'cuda'
        if self.is_cuda and self.contiguous is None:
            raise unittest.SkipTest(
                'batch_norm with CUDA currently has limited support for '
                'non-contiguous inputs.')

        # Float16 backward is unstable.
        if float_dtype == 'float16':
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if float_dtype == 'float16':
            self.check_forward_options.update({'rtol': 1e-1, 'atol': 1e-1})
            self.check_backward_options.update({
                'eps': 1e-3,
                'rtol': 1e-1,
                'atol': 1e-2
            })
            self.check_double_backward_options.update({
                'rtol': 1e-1,
                'atol': 1e-2
            })
        else:
            self.check_forward_options.update({'rtol': 1e-6, 'atol': 1e-5})
            self.check_backward_options.update({
                'eps': 1e-3,
                'rtol': 5e-3,
                'atol': 5e-4
            })
            self.check_double_backward_options.update({
                'rtol': 5e-2,
                'atol': 5e-3
            })

        reduced_shape = self.reduced_shape
        running_mean = numpy.random.uniform(-1, 1,
                                            reduced_shape).astype(float_dtype)
        running_var = numpy.random.uniform(.1, 1,
                                           reduced_shape).astype(float_dtype)
        self.running_mean = running_mean
        self.running_var = running_var

        # Used to verify running mean and variance similarity with Chainer.
        self.running_mean_chx = None
        self.running_var_chx = None
        self.running_mean_ch = None
        self.running_var_ch = None
Example #41
    def test_use(self, backend_config):
        device = chainer.get_device(backend_config.chainerx_device)
        with chainerx.using_device('native:1'):
            device.use()
            assert device.device is chainerx.get_default_device()
Example #43
def cache_restore_device(request):
    device = chainerx.get_default_device()

    def restore_device():
        chainerx.set_default_device(device)
    request.addfinalizer(restore_device)
Example #44
def test_default_device_with_name(device_instance1):
    device = device_instance1
    chainerx.set_default_device(device.name)
    assert chainerx.get_default_device() is device
Example #45
def generate_array(initializer, shape, xp, dtype=None, device=None):
    # type: (types.AbstractInitializer, types.ShapeSpec, types.Xp, types.DTypeSpec, types.DeviceSpec) -> types.NdArray  # NOQA
    """Return initialized array.

    The algorithms used to make the new values depend on the
    concrete derived classes. If the initializer has the ``dtype`` attribute,
    it is used to construct the array. Otherwise, ``chainer.config.dtype`` is
    used instead. See :ref:`configuration` for the dtype config.

    Args:
        initializer: A callable object that takes :class:`numpy.ndarray`
             or :class:`cupy.ndarray` and edits its value.
        shape (tuple): Shape of a return array.
        xp (module): :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx`.
        dtype: Dtype specifier. If omitted, ``initializer.dtype`` is used.
        device: Target device specifier. If omitted, the current device is
             used for :mod:`cupy`, and the default device is used for
             :mod:`chainerx`.

    Returns:
        numpy.ndarray, cupy.ndarray, or chainerx.ndarray: An initialized array.

    """
    dtype_attr = getattr(initializer, 'dtype', None)
    if dtype is not None and dtype_attr is not None \
            and numpy.dtype(dtype) != numpy.dtype(dtype_attr):
        raise ValueError(
            'dtype mismatch: {} != {}'.format(dtype, dtype_attr))
    if dtype is None:
        dtype = dtype_attr
    dtype = chainer.get_dtype(dtype)

    if device is None:
        if xp is cuda.cupy:
            backend_device = chainer.get_device(cuda.Device())
        elif xp is chainerx:
            backend_device = chainer.get_device(chainerx.get_default_device())
        else:
            backend_device = chainer.get_device(numpy)
    else:
        backend_device = chainer.get_device(device)
        if xp != backend_device.xp:
            raise ValueError('xp and device arguments are inconsistent.')

    if xp is chainerx:
        # Initialize with NumPy/CuPy array that shares memory with the
        # ChainerX array.
        # TODO(sonots): Directly use initializer after ChainerX
        # supports random.
        chx_device = backend_device.device  # type: ignore
        # TODO(okapies): remove 'type: ignore' when chainerx implements sequence support for empty() # NOQA
        array = chainerx.empty(shape, dtype=dtype, device=chx_device)  # type: ignore # NOQA
        if chx_device.backend.name == 'native':
            temp_array = _cpu._to_cpu(array)
            temp_device = cuda.DummyDevice  # type: cuda.Device
        elif chx_device.backend.name == 'cuda':
            temp_array = cuda.to_gpu(array, chx_device.index)
            temp_device = cuda.Device(chx_device.index)
        else:
            raise RuntimeError('ChainerX backend: {} is not supported.'.format(
                chx_device.backend.name))
        with temp_device:
            initializer(temp_array)
        return array

    with chainer.using_device(backend_device):
        array = xp.empty(shape, dtype=dtype)
        initializer(array)
    return array
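
For the chainerx branch of the same helper, a sketch along these lines should hold when ChainerX is built with the native backend (again assuming the chainer.initializers.generate_array entry point):

import chainerx
from chainer import initializers

# Allocates a chainerx array on native:0 and initializes it through a
# NumPy view that shares its memory, as in the branch above.
arr = initializers.generate_array(
    initializers.Constant(1.0), (4,), chainerx,
    dtype='float32', device='native:0')
assert isinstance(arr, chainerx.ndarray)
assert arr.device is chainerx.get_device('native:0')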
Example #46
def run_binary_broadcast_op_benchmark(op,
                                      config,
                                      mode='forward',
                                      warmup=10,
                                      runs=25):
    if not op.get_forward_func():
        return (None, None), config
    backend = backend_switcher[op.get_backend()]
    func = op.get_forward_func()
    config1 = {"shape": config["shape1"], "dtype": config["dtype"]}
    config2 = {"shape": config["shape2"], "dtype": config["dtype"]}
    if backend == 'numpy':
        if mode == 'forward':

            def benchmark_func(inputs):
                result = func(*inputs)
                return result

            def input_func():
                return prepare_numpy_inputs(1, config1) + prepare_numpy_inputs(
                    1, config2)

            forward_time, forward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (forward_time, forward_std), config
        else:
            return (None, None), config
    elif backend == 'mxnet.numpy':
        if mode == 'forward':

            def benchmark_func(inputs):
                result = func(*inputs)
                return result

            def input_func():
                return prepare_mxnet_inputs(1, config1,
                                            False) + prepare_mxnet_inputs(
                                                1, config2, False)

            forward_time, forward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (forward_time, forward_std), config
        else:

            def input_func():
                inputs = prepare_mxnet_inputs(
                    1, config1, True) + prepare_mxnet_inputs(1, config2, True)
                with mxnet.autograd.record():
                    result = func(*inputs)
                return result

            def benchmark_func(result):
                result.backward()
                return result

            backward_time, backward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (backward_time, backward_std), config
    elif backend == 'jax.numpy':

        def input_func():
            return prepare_jax_inputs(1, config1) + prepare_jax_inputs(
                1, config2)

        if mode == 'forward':
            jit_func = jax.jit(func)

            def benchmark_func(inputs):
                result = jit_func(*inputs)
                try:
                    result.block_until_ready()
                except Exception:
                    pass
                return result

            forward_time, forward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (forward_time, forward_std), config
        else:

            def grad_func(*args):
                return jax.numpy.sum(func(*args))

            jit_func = jax.jit(jax.grad(grad_func, [0, 1]))

            def benchmark_func(inputs):
                result = jit_func(*inputs)
                try:
                    result.block_until_ready()
                except Exception:
                    pass
                return result

            backward_time, backward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (backward_time, backward_std), config
    elif backend == 'chainerx':
        device = chainerx.get_default_device()
        if mode == 'forward':

            def input_func():
                return prepare_chainerx_inputs(
                    1, config1, False) + prepare_chainerx_inputs(
                        1, config2, False)

            def benchmark_func(inputs):
                res = func(*inputs)
                device.synchronize()
                return res

            forward_time, forward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (forward_time, forward_std), config
        else:

            def input_func():
                inputs = prepare_chainerx_inputs(
                    1, config1, True) + prepare_chainerx_inputs(
                        1, config2, True)
                result = func(*inputs)
                result.grad = chainerx.ones_like(result)
                return result

            def benchmark_func(result):
                result.backward()
                device.synchronize()

            backward_time, backward_std = get_time_metric(
                benchmark_func, input_func, warmup, runs)
            return (backward_time, backward_std), config
Example #47
def _run_simple_op_benchmark(num_input,
                             op,
                             config,
                             mode='forward',
                             warmup=10,
                             runs=25):
    if not op.get_forward_func():
        return (None, None), config
    backend = backend_switcher[op.get_backend()]
    func = op.get_forward_func()
    if num_input:
        config_ = deepcopy(config)
        tensor_config = {
            'shape': config_.pop('shape'),
            'dtype': config_.pop('dtype')
        } if num_input else None
        func = functools.partial(func, **config_)
        if backend == 'numpy':
            if mode == 'forward':

                def benchmark_func(inputs):
                    result = func(*inputs)
                    return result

                input_func = functools.partial(prepare_numpy_inputs, num_input,
                                               tensor_config)
                forward_time, forward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (forward_time, forward_std), config
            else:
                return (None, None), config
        elif backend == 'mxnet.numpy':
            if mode == 'forward':

                def benchmark_func(inputs):
                    result = func(*inputs)
                    return result

                input_func = functools.partial(prepare_mxnet_inputs, num_input,
                                               tensor_config, False)
                forward_time, forward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (forward_time, forward_std), config
            else:

                def input_func():
                    inputs = prepare_mxnet_inputs(num_input, tensor_config,
                                                  True)
                    with mxnet.autograd.record():
                        result = func(*inputs)
                    return result

                def benchmark_func(result):
                    result.backward()

                backward_time, backward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (backward_time, backward_std), config
        elif backend == 'jax.numpy':
            input_func = functools.partial(prepare_jax_inputs, num_input,
                                           tensor_config)
            if mode == 'forward':
                jit_func = jax.jit(func)

                def benchmark_func(inputs):
                    result = jit_func(*inputs)
                    try:
                        result.block_until_ready()
                    except Exception:
                        pass
                    return result

                forward_time, forward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (forward_time, forward_std), config
            else:

                def grad_func(*args):
                    return jax.numpy.sum(func(*args))

                jit_func = jax.jit(jax.grad(grad_func, list(range(num_input))))

                def benchmark_func(inputs):
                    result = jit_func(*inputs)
                    try:
                        result.block_until_ready()
                    except Exception:
                        pass
                    return result

                backward_time, backward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (backward_time, backward_std), config
        elif backend == 'chainerx':
            device = chainerx.get_default_device()
            if mode == 'forward':
                input_func = functools.partial(prepare_chainerx_inputs,
                                               num_input, tensor_config, False)

                def benchmark_func(inputs):
                    res = func(*inputs)
                    device.synchronize()
                    return res

                forward_time, forward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (forward_time, forward_std), config
            else:

                def input_func():
                    inputs = prepare_chainerx_inputs(num_input, tensor_config,
                                                     True)
                    result = func(*inputs)
                    result.grad = chainerx.ones_like(result)
                    return result

                def benchmark_func(result):
                    result.backward()
                    device.synchronize()

                backward_time, backward_std = get_time_metric(
                    benchmark_func, input_func, warmup, runs)
                return (backward_time, backward_std), config
    else:
        if mode == 'forward':
            func = functools.partial(func, **config)
            forward_time, forward_std = get_time_metric(
                func, None, warmup, runs)
            return (forward_time, forward_std), config
        else:
            return (None, None), config
Example #48
    def setup(self):
        if len(self.in_dtypes) == 3:
            x_dtype, w_dtype, b_dtype = self.in_dtypes
        else:
            (x_dtype, w_dtype), b_dtype = self.in_dtypes, None

        x_kind = numpy.dtype(x_dtype).kind
        w_kind = numpy.dtype(w_dtype).kind
        b_kind = None if b_dtype is None else numpy.dtype(b_dtype).kind

        device = chainerx.get_default_device()
        if device.backend.name == 'cuda' and len(self.x_shape) <= 3:
            # TODO(sonots): Support 1 dimensional convolution with CUDA.
            pytest.skip(
                'cuDNN does not support 1 dimensional convolution and throws '
                'DimensionError')
        if device.backend.name == 'cuda' and self.cover_all is True:
            pytest.skip(
                'outsize (for cover_all=True) is not supported by CUDA')

        # Skip backward/double-backward tests for int dtypes
        if (x_kind != 'f' and w_kind != 'f'
                and (b_kind is None or b_kind != 'f')):
            self.skip_backward_test = True
            self.skip_double_backward_test = True

        if (x_dtype == 'float16' or w_dtype == 'float16'
                or b_dtype == 'float16'):
            self.check_forward_options.update({'rtol': 5e-2, 'atol': 5e-3})
            self.check_backward_options.update({
                'eps': 2**-3,
                'rtol': 1e-1,
                'atol': 1e-2
            })
        else:
            self.check_forward_options.update({'rtol': 1e-3})
            self.check_backward_options.update({
                'eps': 1e-2,
                'rtol': 1e-3,
                'atol': 1e-4
            })
        self.check_double_backward_options.update({'rtol': 5e-2, 'atol': 5e-3})

        # Determine outsize
        cover_all = self.cover_all
        if cover_all is None:
            outsize = None
        else:
            x_shape = self.x_shape
            w_shape = self.w_shape
            stride = self.stride
            pad = self.pad
            in_dims = x_shape[2:]
            kernel_size = w_shape[2:]
            ndim = len(in_dims)
            stride_tup = ((stride, ) *
                          ndim if isinstance(stride, int) else stride)
            pad_tup = (pad, ) * ndim if isinstance(pad, int) else pad
            outsize = tuple(
                chainer.utils.conv.get_deconv_outsize(d, k, s, p, cover_all)
                for (d, k, s,
                     p) in zip(in_dims, kernel_size, stride_tup, pad_tup))
        self.outsize = outsize