Example #1
def get_array_module(*args):
    """Gets an appropriate one from :mod:`numpy`, :mod:`cupy`, or
    :mod:`chainerx`.

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
        used.

    Returns:
        module: :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx` is returned based
        on the types of the arguments.

    """
    if chainerx.is_available() or cuda.available:
        args = [arg.data if isinstance(arg, chainer.variable.Variable) else arg
                for arg in args]

    if (chainerx.is_available()
            and any([isinstance(a, chainerx.ndarray) for a in args])):
        return chainerx
    elif cuda.available:
        return cuda.cupy.get_array_module(*args)
    else:
        return numpy
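A minimal usage sketch of the dispatch above (assuming only NumPy is present; with CuPy or ChainerX inputs the corresponding module would be returned instead):

import numpy
import chainer

x = chainer.Variable(numpy.zeros((2, 3), dtype=numpy.float32))
xp = chainer.backend.get_array_module(x)  # numpy for a CPU-backed Variable
y = xp.ones_like(x.array)                 # backend-agnostic array code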
Example #2
File: backend.py Project: jnishi/chainer
def get_array_module(*args):
    """Gets an appropriate one from :mod:`numpy`, :mod:`cupy`, or
    :mod:`chainerx`.

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
        used.

    Returns:
        module: :mod:`cupy`, :mod:`numpy`, or :mod:`chainerx` is returned based
        on the types of the arguments.

    """
    if chainerx.is_available() or cuda.available:
        args = [arg.data if isinstance(arg, chainer.variable.Variable) else arg
                for arg in args]

    if (chainerx.is_available()
            and any([isinstance(a, chainerx.ndarray) for a in args])):
        return chainerx
    elif cuda.available:
        return cuda.cupy.get_array_module(*args)
    else:
        return numpy
Example #3
    def to_chx(self):
        """Converts parameter variables and persistent values to ChainerX \
without any copy.

        This method does not handle non-registered attributes. If any such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.

        Returns: self
        """  # NOQA
        if not chainerx.is_available():
            raise RuntimeError('ChainerX is not available.')

        xp = self._device.xp
        if xp is chainerx:
            return self

        d = self.__dict__
        for name in self._params:
            d[name].to_chx()
        for name in self._persistent:
            if not numpy.isscalar(d[name]):
                d[name] = backend.to_chx(d[name])

        self._device = (backend.ChainerxDevice.from_fallback_device(
            self._device))

        return self
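A hedged usage sketch of the conversion above (requires a ChainerX-enabled build of Chainer; L.Linear is just an illustrative link):

import chainer.links as L
import chainerx

if chainerx.is_available():
    link = L.Linear(3, 2)           # parameters initialized as NumPy arrays
    link.to_chx()                   # converted to chainerx.ndarray, no copy
    assert link.xp is chainerx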
Example #4
def _extract_apply_in_data(inputs):
    if not inputs:
        return False, ()

    if chainerx.is_available():
        has_chainerx_array = False

        # Unwrap arrays
        arrays = []
        for x in inputs:
            if isinstance(x, variable.Variable):
                if x._has_chainerx_array:
                    arrays.append(x._data[0])
                    has_chainerx_array = True
                else:
                    arrays.append(x.array)
            else:  # x is ndarray
                arrays.append(x)
                if not has_chainerx_array:
                    if isinstance(x, chainerx.ndarray):
                        has_chainerx_array = True
        return has_chainerx_array, tuple(arrays)
    else:
        return False, tuple([
            x.array if isinstance(x, variable.Variable) else x for x in inputs
        ])
Example #5
def get_array_module(*args):
    """Gets an appropriate NumPy-compatible module to process arguments

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
            used.

    Returns:
        module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based
        on the types of the arguments.

    """
    is_chainerx_available = chainerx.is_available()
    if is_chainerx_available or cuda.available:
        arrays = []
        for arg in args:
            # Unwrap arrays
            if isinstance(arg, chainer.variable.Variable):
                array = arg.data
            else:
                array = arg
            if is_chainerx_available and isinstance(array, chainerx.ndarray):
                return chainerx
            arrays.append(array)
        if cuda.available:
            return cuda.cupy.get_array_module(*arrays)
    return numpy
Example #6
def _load_array_types():
    # Note: this function may not be protected by GIL because of external
    # calls.
    global _array_types
    global _cpu_array_types
    if _array_types is None:
        array_types = [numpy.ndarray]
        cpu_array_types = [numpy.ndarray]

        if backends.cuda.available:
            array_types.append(backends.cuda.ndarray)

        if backends.intel64.is_ideep_available():
            array_types.append(backends.intel64.mdarray)
            cpu_array_types.append(backends.intel64.mdarray)

        if chainerx.is_available():
            array_types.append(chainerx.ndarray)
            cpu_array_types.append(chainerx.ndarray)

        array_types = tuple(array_types)
        cpu_array_types = tuple(cpu_array_types)

        _array_types = array_types
        _cpu_array_types = cpu_array_types
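The tuples built here are exposed through the public accessor chainer.get_array_types() (the check recommended in the is_arrays_compatible examples below); a minimal sketch:

import numpy
import chainer

arr = numpy.zeros(3)
assert isinstance(arr, chainer.get_array_types())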
Example #7
File: __init__.py Project: y1r/chainer
def _load_array_types():
    # Note: this function may not be protected by GIL because of external
    # calls.
    global _array_types
    global _cpu_array_types
    if _array_types is None:
        array_types = [numpy.ndarray]
        cpu_array_types = [numpy.ndarray]

        if backends.cuda.available:
            array_types.append(backends.cuda.ndarray)

        if backends.intel64.is_ideep_available():
            array_types.append(backends.intel64.mdarray)
            cpu_array_types.append(backends.intel64.mdarray)

        if chainerx.is_available():
            array_types.append(chainerx.ndarray)
            cpu_array_types.append(chainerx.ndarray)

        array_types = tuple(array_types)
        cpu_array_types = tuple(cpu_array_types)

        _array_types = array_types
        _cpu_array_types = cpu_array_types
Example #8
def _extract_apply_in_data(inputs):
    # Extracts arrays from FunctionNode.apply() inputs.
    #
    # A flag that indicates whether inputs are chainerx arrays is also
    # returned.
    #
    # Each object in `inputs` may be `Variable` or an array.
    # If it's a `Variable` and its underlying array is a chainerx array,
    # `Variable._data[0]` (which is backproppable in contrast to
    # `Variable.array`) is returned.
    #
    # If at least one of the arrays is a ChainerX array, all other NumPy/CuPy
    # arrays are converted to ChainerX arrays without copy.
    if len(inputs) == 0:
        return False, ()

    # Unwrap arrays
    arrays = [
        (x._data[0] if x.xp is chainerx else x.array)
        if isinstance(x, variable.Variable) else x for x in inputs]

    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return True, tuple(backend.to_chainerx(arrays))
    return False, tuple(arrays)
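An illustrative sketch of the zero-copy promotion the comments describe, using backend.to_chx as in Example #3 (older snippets in this list call the same helper backend.to_chainerx); requires ChainerX:

import numpy
import chainerx
from chainer import backend

if chainerx.is_available():
    x = numpy.ones((2, 2), dtype=numpy.float32)
    cx = backend.to_chx(x)  # chainerx.ndarray on the native device, shares memory
    assert isinstance(cx, chainerx.ndarray)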
Example #9
def get_array_module(*args):
    """Gets an appropriate NumPy-compatible module to process arguments

    This function will return their data arrays' array module for
    :class:`~chainer.Variable` arguments.

    Args:
        args: Values to determine whether NumPy, CuPy, or ChainerX should be
            used.

    Returns:
        module: :mod:`numpy`, :mod:`cupy`, or :mod:`chainerx` is returned based
        on the types of the arguments.

    """
    is_chainerx_available = chainerx.is_available()
    if is_chainerx_available or cuda.available:
        arrays = []
        for arg in args:
            # Unwrap arrays
            if isinstance(arg, chainer.variable.Variable):
                array = arg.data
            else:
                array = arg
            if is_chainerx_available and isinstance(array, chainerx.ndarray):
                return chainerx
            arrays.append(array)
        if cuda.available:
            return cuda.cupy.get_array_module(*arrays)
    return numpy
Example #10
File: link.py Project: jnishi/chainer
    def to_chainerx(self):
        """Converts parameter variables and persistent values to ChainerX \
without any copy.

        This method does not handle non-registered attributes. If any such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.

        Returns: self
        """  # NOQA
        if not chainerx.is_available():
            raise RuntimeError('ChainerX is not available.')

        xp = self._device.xp
        if xp is chainerx:
            return self

        d = self.__dict__
        for name in self._params:
            d[name].to_chainerx()
        for name in self._persistent:
            if not numpy.isscalar(d[name]):
                d[name] = backend.to_chainerx(d[name])

        self._device = (
            backend.ChainerxDevice.from_fallback_device(self._device))

        return self
Example #11
def _get_device(device_spec):
    # Called from chainer.backend.get_device
    if not chainerx.is_available():
        return None
    if isinstance(device_spec, chainerx.Device):
        return ChainerxDevice(device_spec)
    if isinstance(device_spec, str):
        return ChainerxDevice(chainerx.get_device(device_spec))
    if (isinstance(device_spec, tuple) and len(device_spec) >= 1
            and isinstance(device_spec[0], str)):
        return ChainerxDevice(chainerx.get_device(*device_spec))
    return None
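This helper is reached through the public chainer.get_device(); a hedged sketch (requires ChainerX):

import chainer
import chainerx

if chainerx.is_available():
    dev = chainer.get_device('native:0')   # resolved by _get_device above
    assert dev.xp is chainerx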
Example #12
File: _chainerx.py Project: hvy/chainer
def _get_device(device_spec):
    # Called from chainer.backend.get_device
    if not chainerx.is_available():
        return None
    if isinstance(device_spec, chainerx.Device):
        return ChainerxDevice(device_spec)
    if isinstance(device_spec, str):
        return ChainerxDevice(chainerx.get_device(device_spec))
    if (isinstance(device_spec, tuple) and len(device_spec) >= 1
            and isinstance(device_spec[0], str)):
        return ChainerxDevice(chainerx.get_device(*device_spec))
    return None
Example #13
    def __init__(self):
        self.chainer_version = chainer.__version__
        self.chainerx_available = chainerx.is_available()
        self.numpy_version = numpy.__version__
        self.platform_version = platform.platform()
        if cuda.available:
            self.cuda_info = cuda.cupyx.get_runtime_info()
        else:
            self.cuda_info = None
        if intel64.is_ideep_available():
            self.ideep_version = intel64.ideep.__version__
        else:
            self.ideep_version = None
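These fields appear to back chainer.print_runtime_info(); minimal usage:

import chainer

chainer.print_runtime_info()   # prints Chainer, ChainerX, NumPy, CuPy and iDeep info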
Example #14
    def to_chx(self):
        """Converts parameter variables and persistent values to ChainerX \
without any copy.

        This method does not handle non-registered attributes. If any such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.

        Returns: self
        """
        if not chainerx.is_available():
            raise RuntimeError('ChainerX is not available.')

        if self.xp is chainerx:
            return self

        self.device_resident_accept(_ToChxVisitor())
        return self
Example #15
    def to_chx(self):
        """Converts parameter variables and persistent values to ChainerX \
without any copy.

        This method does not handle non-registered attributes. If any such
        attributes must be copied to ChainerX, the link implementation must
        override this method to do so.

        Returns: self
        """
        if not chainerx.is_available():
            raise RuntimeError('ChainerX is not available.')

        if self.xp is chainerx:
            return self

        self.device_resident_accept(_ToChxVisitor())
        return self
Example #16
def _extract_apply_in_data(inputs):
    # Extracts arrays from FunctionNode.apply() inputs.
    #
    # A flag that indicates whether inputs are chainerx arrays is also
    # returned.
    #
    # Each object in `inputs` may be `Variable` or an array.
    # If it's a `Variable` and its underlying array is a chainerx array,
    # `Variable._data[0]` (which is backproppable in contrast to
    # `Variable.array`) is returned.
    #
    # If at least one of the arrays is a ChainerX array, all other NumPy/CuPy
    # arrays are converted to ChainerX arrays without copy.
    if len(inputs) == 0:
        return False, ()

    if chainerx.is_available():
        has_chainerx_array = False

        # Unwrap arrays
        arrays = []
        for x in inputs:
            if isinstance(x, variable.Variable):
                if x._has_chainerx_array:
                    arrays.append(x._data[0])
                    has_chainerx_array = True
                else:
                    arrays.append(x.array)
            else:  # x is ndarray
                arrays.append(x)
                if not has_chainerx_array:
                    if isinstance(x, chainerx.ndarray):
                        has_chainerx_array = True

        if has_chainerx_array:
            return True, tuple(backend.to_chainerx(arrays))
        else:
            return False, tuple(arrays)

    else:
        return False, tuple([
            x.array if isinstance(x, variable.Variable) else x
            for x in inputs])
Example #17
def is_arrays_compatible(arrays):
    # Do not use this function to check if a single object is an array or
    # not. Use isinstance(obj, chainer.get_array_types()) instead.

    arrays = [a for a in arrays if a is not None]

    if not arrays:
        return True

    # If there's at least one chainerx.ndarray, all other arrays
    # must be chainerx as well
    are_chainerx = [isinstance(arr, chainerx.ndarray) for arr in arrays]
    if chainerx.is_available() and any(are_chainerx):
        return all(are_chainerx)

    if isinstance(arrays[0], backends.cuda.ndarray):
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    return all([isinstance(a, types) for a in arrays])
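A hedged sketch of the compatibility check (assuming the helper is importable as chainer.backend.is_arrays_compatible, where recent releases define it):

import numpy
from chainer import backend

a = numpy.zeros(3)
b = numpy.ones(3)
assert backend.is_arrays_compatible([a, b])      # same CPU backend
assert backend.is_arrays_compatible([a, None])   # None entries are filtered out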
Example #18
def is_arrays_compatible(arrays):
    arrays = [a for a in arrays if a is not None]

    if len(arrays) == 0:
        return True

    # If there's at least one chainerx.ndarray, all other arrays
    # will be converted to memory-shared chainerx.ndarrays.
    # TODO(niboshi): intel64.mdarray is not supported yet.
    # TODO(niboshi): Delegate array compatibility check to chainerx.
    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return not any([
            isinstance(arr, backends.intel64.mdarray) for arr in arrays])

    if isinstance(arrays[0], backends.cuda.ndarray):
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    return all([isinstance(a, types) for a in arrays])
Example #19
def is_arrays_compatible(arrays):
    arrays = [a for a in arrays if a is not None]

    if len(arrays) == 0:
        return True

    # If there's at least one chainerx.ndarray, all other arrays
    # will be converted to memory-shared chainerx.ndarrays.
    # TODO(niboshi): intel64.mdarray is not supported yet.
    # TODO(niboshi): Delegate array compatibility check to chainerx.
    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return not any(
            [isinstance(arr, backends.intel64.mdarray) for arr in arrays])

    if isinstance(arrays[0], backends.cuda.ndarray):
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    return all([isinstance(a, types) for a in arrays])
Example #20
def force_backprop_mode():
    """Make a context manager which enables back-propagation.

    When you want to enable back-propagation in :func:`no_backprop_mode`, call
    this method. A :class:`~chainer.Variable` created in this context always
    has a computational graph unless overridden by deeper contexts. If you call
    this method outside of :func:`no_backprop_mode` context, it changes
    nothing.

    In the following example, ``y`` has a computational graph and calling
    :func:`~chainer.Variable.backward` on ``y`` will compute and accumulate the
    gradients of the variables in the graph, in this case only ``x``.

    >>> x = chainer.Variable(np.array([1,], np.float32))
    >>> with chainer.no_backprop_mode():
    ...     with chainer.force_backprop_mode():
    ...         y = x + 1
    >>> y.backward()
    >>> x.grad
    array([1.], dtype=float32)

    .. note::

       ``chainer.force_backprop_mode()`` implicitly applies ChainerX's
       counterpart :func:`chainerx.force_backprop_mode()`, but not vice versa.
       Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
       does not affect ChainerX.

    .. seealso::

       See :func:`chainer.no_backprop_mode` for details on disabled
       back-propagation mode.

    """
    c = configuration.using_config('enable_backprop', True)
    if chainerx.is_available():
        return _BackpropModeContext((c, chainerx.force_backprop_mode()))
    return _BackpropModeContext((c, ))
Example #21
File: function.py Project: hvy/chainer
def force_backprop_mode():
    """Make a context manager which enables back-propagation.

    When you want to enable back-propagation in :func:`no_backprop_mode`, call
    this method. A :class:`~chainer.Variable` created in this context always
    has a computational graph unless overridden by deeper contexts. If you call
    this method outside of :func:`no_backprop_mode` context, it changes
    nothing.

    In the following example, ``y`` has a computational graph and calling
    :func:`~chainer.Variable.backward` on ``y`` will compute and accumulate the
    gradients of the variables in the graph, in this case only ``x``.

    >>> x = chainer.Variable(np.array([1,], np.float32))
    >>> with chainer.no_backprop_mode():
    ...     with chainer.force_backprop_mode():
    ...         y = x + 1
    >>> y.backward()
    >>> x.grad
    array([1.], dtype=float32)

    .. note::

       ``chainer.force_backprop_mode()`` implicitly applies ChainerX's
       counterpart :func:`chainerx.force_backprop_mode()`, but not vice versa.
       Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
       does not affect ChainerX.

    .. seealso::

       See :func:`chainer.no_backprop_mode` for details on disabled
       back-propagation mode.

    """
    c = configuration.using_config('enable_backprop', True)
    if chainerx.is_available():
        return _BackpropModeContext((c, chainerx.force_backprop_mode()))
    return _BackpropModeContext((c,))
Example #22
File: __init__.py Project: y1r/chainer
def is_arrays_compatible(arrays):
    # Do not use this function to check if a single object is an array or
    # not. Use isinstance(obj, chainer.get_array_types()) instead.

    arrays = [a for a in arrays if a is not None]

    if not arrays:
        return True

    # If there's at least one chainerx.ndarray, all other arrays
    # will be converted to memory-shared chainerx.ndarrays.
    # TODO(niboshi): intel64.mdarray is not supported yet.
    # TODO(niboshi): Delegate array compatibility check to chainerx.
    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return not any(
            [isinstance(arr, backends.intel64.mdarray) for arr in arrays])

    if isinstance(arrays[0], backends.cuda.ndarray):
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    return all([isinstance(a, types) for a in arrays])
Example #23
def is_arrays_compatible(arrays):
    # Do not use this function to check if a single object is an array or
    # not. Use isinstance(obj, chainer.get_array_types()) instead.

    arrays = [a for a in arrays if a is not None]

    if not arrays:
        return True

    # If there's at least one chainerx.ndarray, all other arrays
    # will be converted to memory-shared chainerx.ndarrays.
    # TODO(niboshi): intel64.mdarray is not supported yet.
    # TODO(niboshi): Delegate array compatibility check to chainerx.
    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return not any([
            isinstance(arr, backends.intel64.mdarray) for arr in arrays])

    if isinstance(arrays[0], backends.cuda.ndarray):
        types = backends.cuda.ndarray
    else:
        types = get_cpu_array_types()
    return all([isinstance(a, types) for a in arrays])
Example #24
File: function.py Project: hvy/chainer
def no_backprop_mode():
    """Make a context manager which disables back-propagation.

    In this context, Chainer does not make a computational graph. It has the
    benefit of reducing memory consumption. However, a
    :class:`~chainer.Variable` created in this context does not hold a
    reference to the :class:`~chainer.FunctionNode` that created itself so no
    gradients are accumulated by :func:`~chainer.Variable.backward`.

    In the following example, ``y`` is created in this context, which means
    that calling :func:`~chainer.Variable.backward` on ``y`` has no effect on
    the gradients of ``x``.

    >>> x = chainer.Variable(np.array([1,], np.float32))
    >>> with chainer.no_backprop_mode():
    ...     y = x + 1
    >>> y.backward()
    >>> x.grad is None
    True

    .. note::

       ``chainer.no_backprop_mode()`` implicitly applies ChainerX's
       counterpart :func:`chainerx.no_backprop_mode()`, but not vice versa.
       Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
       does not affect ChainerX.

    .. seealso::

       See :func:`chainer.force_backprop_mode` for details on how to override
       this context.

    """
    c = configuration.using_config('enable_backprop', False)
    if chainerx.is_available():
        return _BackpropModeContext((c, chainerx.no_backprop_mode()))
    return _BackpropModeContext((c,))
Example #25
def no_backprop_mode():
    """Make a context manager which disables back-propagation.

    In this context, Chainer does not make a computational graph. It has the
    benefit of reducing memory consumption. However, a
    :class:`~chainer.Variable` created in this context does not hold a
    reference to the :class:`~chainer.FunctionNode` that created itself so no
    gradients are accumulated by :func:`~chainer.Variable.backward`.

    In the following example, ``y`` is created in this context, which means
    that calling :func:`~chainer.Variable.backward` on ``y`` has no effect on
    the gradients of ``x``.

    >>> x = chainer.Variable(np.array([1,], np.float32))
    >>> with chainer.no_backprop_mode():
    ...     y = x + 1
    >>> y.backward()
    >>> x.grad is None
    True

    .. note::

       ``chainer.no_backprop_mode()`` implicitly applies ChainerX's
       counterpart :func:`chainerx.no_backprop_mode()`, but not vice versa.
       Also, setting ``enable_backprop`` :ref:`configuration <configuration>`
       does not affect ChainerX.

    .. seealso::

       See :func:`chainer.force_backprop_mode` for details on how to override
       this context.

    """
    c = configuration.using_config('enable_backprop', False)
    if chainerx.is_available():
        return _BackpropModeContext((c, chainerx.no_backprop_mode()))
    return _BackpropModeContext((c, ))
Example #26
def _extract_apply_in_data(inputs):
    # Extracts arrays from FunctionNode.apply() inputs.
    #
    # A flag that indicates whether inputs are chainerx arrays is also
    # returned.
    #
    # Each object in `inputs` may be `Variable` or an array.
    # If it's a `Variable` and its underlying array is a chainerx array,
    # `Variable._data[0]` (which is backproppable in contrast to
    # `Variable.array`) is returned.
    #
    # If at least one of the arrays is a ChainerX array, all other NumPy/CuPy
    # arrays are converted to ChainerX arrays without copy.
    if len(inputs) == 0:
        return False, ()

    # Unwrap arrays
    arrays = [(x._data[0] if x.xp is chainerx else x.array) if isinstance(
        x, variable.Variable) else x for x in inputs]

    if (chainerx.is_available()
            and any([isinstance(arr, chainerx.ndarray) for arr in arrays])):
        return True, tuple(backend.to_chainerx(arrays))
    return False, tuple(arrays)
Example #27
        run_train(model, train_data, optimizer=1)


@pytest.mark.gpu
def test_run_train_gpu(model, train_data, valid_data):
    device = 0
    model.to_gpu(device)
    run_train(model,
              train_data,
              valid=valid_data,
              epoch=1,
              batch_size=8,
              device=device)


@pytest.mark.skipif(not chainerx.is_available(),
                    reason='chainerx is not available')
def test_run_train_chainerx_native(model, train_data, valid_data):
    device = chainer.get_device('native')
    model.to_device(device)
    run_train(model,
              train_data,
              valid=valid_data,
              epoch=1,
              batch_size=8,
              device=device)


@pytest.mark.gpu
@pytest.mark.skipif(not chainerx.is_available(),
                    reason='chainerx is not available')
Example #28
def test_is_available():
    assert chainerx.is_available()
Example #29
def get_device(device_spec):
    # type: (types.DeviceSpec) -> Device
    """Returns a device object.

    Args:
        device_spec (object): Device specifier. If a :class:`chainer.Device`
            instance is given, it is returned intact. Otherwise the following
            values are supported:

            * ChainerX devices

              * A string representing a device.
                (ex. ``'native:0'``, ``'native'``)
              * A :class:`chainerx.Device` object.

            * CuPy

              * A string starting with ``'@cupy:'``.
                (ex. ``'@cupy:0'``)
              * A :class:`cupy.cuda.Device` object.

            * NumPy

              * The string ``'@numpy'``.

            * NumPy with Intel Architecture

              * The string ``'@intel64'``.
    """
    if isinstance(device_spec, Device):
        return device_spec

    if isinstance(device_spec, cuda._integer_types):
        return _get_device_cupy_or_numpy(device_spec)

    if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
        return _chainerx.ChainerxDevice(device_spec)

    if cuda.available and isinstance(device_spec, cuda.Device):
        return cuda.GpuDevice(device_spec)

    if isinstance(device_spec, six.string_types):
        # '-1', '0', '1', ...
        try:
            int_device_spec = int(device_spec)
        except ValueError:
            pass
        else:
            return _get_device_cupy_or_numpy(int_device_spec)

        if device_spec.startswith('@'):
            # '@module:...'
            mod_name, colon, precise_spec = device_spec[1:].partition(':')
            if mod_name == 'numpy':
                if not colon:
                    return _cpu.CpuDevice()
            elif mod_name == 'cupy':
                if colon:
                    return cuda.GpuDevice.from_device_id(int(precise_spec))
            elif mod_name == 'intel64':
                if not colon:
                    return intel64.Intel64Device()

        elif chainerx.is_available():
            return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))

    raise ValueError('Invalid device specifier: {}'.format(device_spec))
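A quick sketch exercising the specifiers listed in the docstring (only the NumPy lines run everywhere; the commented ones need CuPy or ChainerX):

import chainer

chainer.get_device('@numpy')     # CPU (NumPy)
chainer.get_device(-1)           # legacy integer specifier for CPU
# chainer.get_device('@cupy:0')  # first CUDA GPU, if CuPy is available
# chainer.get_device('native')   # ChainerX native device, if available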
Example #30
import pytest

from chainer.backends import cuda
import chainerx


if not chainerx.is_available():
    # Skip all ChainerX tests if ChainerX is unavailable.
    # TODO(kmaehashi) This is a tentative fix. This file should be removed
    # once chainer-test supports ChainerX.
    pytest.mark.chainerx = pytest.mark.skip


def pytest_runtest_teardown(item, nextitem):
    if cuda.available:
        assert cuda.cupy.cuda.runtime.getDevice() == 0


# testing.run_module(__name__, __file__)
Example #31
def main():
    archs = {
        'alex': alex.Alex,
        'alex_fp16': alex.AlexFp16,
        'googlenet': googlenet.GoogLeNet,
        'googlenetbn': googlenetbn.GoogLeNetBN,
        'googlenetbn_fp16': googlenetbn.GoogLeNetBNFp16,
        'nin': nin.NIN,
        'resnet50': resnet50.ResNet50,
        'resnext50': resnext50.ResNeXt50,
    }

    parser = argparse.ArgumentParser(
        description='Learning convnet from ILSVRC2012 dataset')
    parser.add_argument('train', help='Path to training image-label list file')
    parser.add_argument('val', help='Path to validation image-label list file')
    parser.add_argument('--arch', '-a', choices=archs.keys(), default='nin',
                        help='Convnet architecture')
    parser.add_argument('--batchsize', '-B', type=int, default=32,
                        help='Learning minibatch size')
    parser.add_argument('--epoch', '-E', type=int, default=10,
                        help='Number of epochs to train')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--initmodel',
                        help='Initialize the model from given file')
    parser.add_argument('--loaderjob', '-j', type=int,
                        help='Number of parallel data loading processes')
    parser.add_argument('--mean', '-m', default='mean.npy',
                        help='Mean file (computed by compute_mean.py)')
    parser.add_argument('--resume', '-r', default='',
                        help='Initialize the trainer from given file')
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--root', '-R', default='.',
                        help='Root directory path of image files')
    parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                        help='Validation minibatch size')
    parser.add_argument('--test', action='store_true')
    parser.set_defaults(test=False)
    parser.add_argument('--dali', action='store_true')
    parser.set_defaults(dali=False)
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    device = parse_device(args)

    print('Device: {}'.format(device))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Initialize the model to train
    model = archs[args.arch]()
    if args.initmodel:
        print('Load model from {}'.format(args.initmodel))
        chainer.serializers.load_npz(args.initmodel, model)
    model.to_device(device)
    device.use()

    # Load the mean file
    mean = np.load(args.mean)
    if args.dali:
        if not dali_util._dali_available:
            raise RuntimeError('DALI seems not available on your system.')
        num_threads = args.loaderjob
        if num_threads is None or num_threads <= 0:
            num_threads = 1
        ch_mean = list(np.average(mean, axis=(1, 2)))
        ch_std = [255.0, 255.0, 255.0]
        # Setup DALI pipelines
        train_pipe = dali_util.DaliPipelineTrain(
            args.train, args.root, model.insize, args.batchsize,
            num_threads, args.gpu, True, mean=ch_mean, std=ch_std)
        val_pipe = dali_util.DaliPipelineVal(
            args.val, args.root, model.insize, args.val_batchsize,
            num_threads, args.gpu, False, mean=ch_mean, std=ch_std)
        train_iter = chainer.iterators.DaliIterator(train_pipe)
        val_iter = chainer.iterators.DaliIterator(val_pipe, repeat=False)
        # converter = dali_converter
        converter = dali_util.DaliConverter(mean=mean, crop_size=model.insize)
    else:
        # Load the dataset files
        train = PreprocessedDataset(args.train, args.root, mean, model.insize)
        val = PreprocessedDataset(args.val, args.root, mean, model.insize,
                                  False)
        # These iterators load the images with subprocesses running in parallel
        # to the training/validation.
        train_iter = chainer.iterators.MultiprocessIterator(
            train, args.batchsize, n_processes=args.loaderjob)
        val_iter = chainer.iterators.MultiprocessIterator(
            val, args.val_batchsize, repeat=False, n_processes=args.loaderjob)
        converter = dataset.concat_examples

    # Set up an optimizer
    optimizer = chainer.optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, converter=converter, device=device)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), args.out)

    val_interval = (1 if args.test else 100000), 'iteration'
    log_interval = (1 if args.test else 1000), 'iteration'

    trainer.extend(extensions.Evaluator(val_iter, model, converter=converter,
                                        device=device), trigger=val_interval)
    # TODO(sonots): Temporarily disabled for chainerx. Fix it.
    if not (chainerx.is_available() and isinstance(device, chainerx.Device)):
        trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(extensions.snapshot_object(
        model, 'model_iter_{.updater.iteration}'), trigger=val_interval)
    # Be careful to pass the interval directly to LogReport
    # (it determines when to emit log rather than when to read observations)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration', 'main/loss', 'validation/main/loss',
        'main/accuracy', 'validation/main/accuracy', 'lr'
    ]), trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Example #32
def get_device(device_spec: types.DeviceSpec) -> Device:
    """Returns a device object.

    Args:
        device_spec (object): Device specifier.
            If a :class:`chainer.backend.Device` instance is given, it is
            returned intact. Otherwise the following values are supported:

            * ChainerX devices

              * A string representing a device.
                (ex. ``'native:0'``, ``'native'``)
              * A :class:`chainerx.Device` object.

            * CuPy

              * A string starting with ``'@cupy:'``.
                (ex. ``'@cupy:0'``)
              * A :class:`cupy.cuda.Device` object.

            * NumPy

              * The string ``'@numpy'``.

            * NumPy with Intel Architecture

              * The string ``'@intel64'``.
    """
    if isinstance(device_spec, Device):
        return device_spec

    if isinstance(device_spec, cuda._integer_types):
        return _get_device_cupy_or_numpy(device_spec)

    if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
        return _chainerx.ChainerxDevice(device_spec)

    if cuda.available and isinstance(device_spec, cuda.Device):
        return cuda.GpuDevice(device_spec)

    if isinstance(device_spec, six.string_types):
        # '-1', '0', '1', ...
        try:
            int_device_spec = int(device_spec)
        except ValueError:
            pass
        else:
            return _get_device_cupy_or_numpy(int_device_spec)

        if device_spec.startswith('@'):
            # '@module:...'
            mod_name, colon, precise_spec = device_spec[1:].partition(':')
            if mod_name == 'numpy':
                if not colon:
                    return _cpu.CpuDevice()
            elif mod_name == 'cupy':
                if colon:
                    return cuda.GpuDevice.from_device_id(int(precise_spec))
            elif mod_name == 'intel64':
                if not colon:
                    return intel64.Intel64Device()
            raise ValueError(
                'Device specifiers starting with \'@\' must be followed by'
                ' a module name and depending on the module, module specific'
                ' precise device specifiers. Actual: {}'.format(device_spec))
        else:
            # String device specifier without '@' prefix is assumed to be a
            # ChainerX device.
            if not chainerx.is_available():
                raise RuntimeError(
                    'Tried to parse ChainerX device specifier \'{}\', '
                    'but ChainerX is not available. '
                    'Note that device specifiers without \'@\' prefix are '
                    'assumed to be ChainerX device '
                    'specifiers.'.format(device_spec))
            return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))

    raise TypeError(
        'Device specifier must be a backend.Device, cuda.Device,'
        ' chainerx.Device, integer or a string. Actual: {}'.format(
            type(device_spec)))
Example #33
def softmax_cross_entropy(x,
                          t,
                          normalize=True,
                          cache_score=True,
                          class_weight=None,
                          ignore_label=-1,
                          reduce='mean',
                          enable_double_backprop=False):
    """Computes cross entropy loss for pre-softmax activations.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Variable holding a multidimensional array whose element indicates
            unnormalized log probability: the first axis of the variable
            represents the number of samples, and the second axis represents
            the number of classes. While this function computes a usual softmax
            cross entropy if the number of dimensions is equal to 2, it
            computes a cross entropy of the replicated softmax if the number of
            dimensions is greater than 2.
        t (:class:`~chainer.Variable` or :ref:`ndarray`):
            Variable holding a signed integer vector of ground truth
            labels. If ``t[i] == ignore_label``, corresponding ``x[i]`` is
            ignored.
        normalize (bool): If ``True``, this function normalizes the cross
            entropy loss across all instances. If ``False``, it only
            normalizes by the batch size.
        cache_score (bool): When it is ``True``, the function stores the
            result of the forward computation and reuses it in the backward
            computation. This reduces the computational cost, though it
            consumes more memory. If the ``enable_double_backprop`` option
            is ``True``, this option is forcibly turned off and the
            function does not cache the intermediate value.
        class_weight (:ref:`ndarray`):
            An array that contains constant weights that will be multiplied
            with the loss values along with the second dimension. The shape of
            this array should be ``(x.shape[1],)``. If this is not ``None``,
            each class weight ``class_weight[i]`` is actually multiplied to
            ``y[:, i]`` that is the corresponding log-softmax output of ``x``
            and has the same shape as ``x`` before calculating the actual loss
            value.
        ignore_label (int): Label value you want to ignore. Its default value
            is ``-1``. See description of the argument `t`.
        reduce (str): A string that determines whether to reduce the loss
            values. If it is ``'mean'``, it computes the sum of the individual
            cross entropies and normalizes it according to the ``normalize``
            option. If it is ``'no'``, this function computes cross entropy
            for each instance and does not normalize it (the ``normalize``
            option is ignored). In this case, the loss value of an ignored
            instance, which has ``ignore_label`` as its target value, is set
            to ``0``.
        enable_double_backprop (bool): If ``True``, this function uses an
            implementation that supports higher-order differentiation.
            If ``False``, it uses a single-backprop implementation.
            This function defaults to the single-backprop version because
            it is expected to be faster. So, if you need second or higher
            derivatives, you need to turn it on explicitly.

    Returns:
        ~chainer.Variable: A variable holding a scalar array of the cross
        entropy loss.  If ``reduce`` is ``'mean'``, it is a scalar array.
        If ``reduce`` is ``'no'``, the shape is same as that of ``t``.

    .. note::

       This function is differentiable only by ``x``.

    .. admonition:: Example

        >>> x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]]).astype(np.float32)
        >>> x
        array([[-1.,  0.,  1.,  2.],
               [ 2.,  0.,  1., -1.]], dtype=float32)
        >>> t = np.array([3, 0]).astype(np.int32)
        >>> t
        array([3, 0], dtype=int32)
        >>> y = F.softmax_cross_entropy(x, t)
        >>> y
        variable(0.44018972)
        >>> log_softmax = -F.log_softmax(x)
        >>> expected_loss = np.mean([log_softmax[row, column].data \
for row, column in enumerate(t)])
        >>> y.array == expected_loss
        True

    """

    is_chainerx = (chainerx.is_available()
                   and backend.get_array_module(x) is chainerx)

    if is_chainerx or not enable_double_backprop:
        # Optimized implementation.
        # For non-ChainerX, forward and backward are supported but
        # double-backprop is not supported.
        # For ChainerX, even forward is supported for only specific
        # configuration of inputs and parameters, which is tested with
        # `SoftmaxCrossEntropy._is_chainerx_supported()`.
        func = SoftmaxCrossEntropy(normalize, cache_score, class_weight,
                                   ignore_label, reduce)

        if not is_chainerx or func._is_chainerx_supported((x, t)):
            loss, = func.apply((x, t))
            return loss

    # Generic double-backprop-enabled but unoptimized implementation
    return _double_backward_softmax_cross_entropy(x, t, normalize,
                                                  class_weight, ignore_label,
                                                  reduce, is_chainerx)
Example #34
import numpy
import pytest
import six

import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
# TODO(hvy): Remove the following import once testing.backend is imported
# in testing/__init__.py
import chainer.testing.backend
from chainer import utils
from chainer.utils import type_check
import chainerx

if chainerx.is_available():
    import chainerx.testing


def make_array(start, shape, dtype, device):
    size = numpy.product(shape, dtype='i')
    a = numpy.arange(start, start + size)
    a = a.reshape(shape)
    a = a.astype(dtype, copy=False)
    return device.send(a)


@testing.parameterize(*testing.product({
    'y_shape': [(4, ), (0, ), (2, 3), ()],
    'x_shape': [(3, ), (0, ), (4, 1), ()],
}))
Example #35
import unittest

import numpy
import pytest

import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import testing
from chainer.testing import attr
import chainerx


if chainerx.is_available():
    import chainerx.testing


class _TestCopyToBase(object):

    src_data = numpy.arange(1, 5, dtype=numpy.float32)
    dst_data = numpy.zeros_like(src_data)

    def _get_dst(self):
        raise NotImplementedError

    def test_from_cpu(self):
        src = self.src_data
        dst = self._get_dst()
        backend.copyto(dst, src)
        numpy.testing.assert_array_equal(cuda.to_cpu(dst), self.src_data)
Example #36
def softmax_cross_entropy(
        x, t, normalize=True, cache_score=True, class_weight=None,
        ignore_label=-1, reduce='mean', enable_double_backprop=False):
    """Computes cross entropy loss for pre-softmax activations.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`):
            Variable holding a multidimensional array whose element indicates
            unnormalized log probability: the first axis of the variable
            represents the number of samples, and the second axis represents
            the number of classes. While this function computes a usual softmax
            cross entropy if the number of dimensions is equal to 2, it
            computes a cross entropy of the replicated softmax if the number of
            dimensions is greater than 2.
        t (:class:`~chainer.Variable` or :ref:`ndarray`):
            Variable holding a signed integer vector of ground truth
            labels. If ``t[i] == ignore_label``, corresponding ``x[i]`` is
            ignored.
        normalize (bool): If ``True``, this function normalizes the cross
            entropy loss across all instances. If ``False``, it only
            normalizes by the batch size.
        cache_score (bool): When it is ``True``, the function stores the
            result of the forward computation and reuses it in the backward
            computation. This reduces the computational cost, though it
            consumes more memory. If the ``enable_double_backprop`` option
            is ``True``, this option is forcibly turned off and the
            function does not cache the intermediate value.
        class_weight (:ref:`ndarray`):
            An array that contains constant weights that will be multiplied
            with the loss values along with the second dimension. The shape of
            this array should be ``(x.shape[1],)``. If this is not ``None``,
            each class weight ``class_weight[i]`` is actually multiplied to
            ``y[:, i]`` that is the corresponding log-softmax output of ``x``
            and has the same shape as ``x`` before calculating the actual loss
            value.
        ignore_label (int): Label value you want to ignore. Its default value
            is ``-1``. See description of the argument `t`.
        reduce (str): A string that determines whether to reduce the loss
            values. If it is ``'mean'``, it computes the sum of the individual
            cross entropies and normalizes it according to the ``normalize``
            option. If it is ``'no'``, this function computes cross entropy
            for each instance and does not normalize it (the ``normalize``
            option is ignored). In this case, the loss value of an ignored
            instance, which has ``ignore_label`` as its target value, is set
            to ``0``.
        enable_double_backprop (bool): If ``True``, this function uses an
            implementation that supports higher-order differentiation.
            If ``False``, it uses a single-backprop implementation.
            This function defaults to the single-backprop version because
            it is expected to be faster. So, if you need second or higher
            derivatives, you need to turn it on explicitly.

    Returns:
        ~chainer.Variable: A variable holding a scalar array of the cross
        entropy loss.  If ``reduce`` is ``'mean'``, it is a scalar array.
        If ``reduce`` is ``'no'``, the shape is same as that of ``t``.

    .. note::

       This function is differentiable only by ``x``.

    .. admonition:: Example

        >>> x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]]).astype(np.float32)
        >>> x
        array([[-1.,  0.,  1.,  2.],
               [ 2.,  0.,  1., -1.]], dtype=float32)
        >>> t = np.array([3, 0]).astype(np.int32)
        >>> t
        array([3, 0], dtype=int32)
        >>> y = F.softmax_cross_entropy(x, t)
        >>> y
        variable(0.44018972)
        >>> log_softmax = -F.log_softmax(x)
        >>> expected_loss = np.mean([log_softmax[row, column].data \
for row, column in enumerate(t)])
        >>> y.array == expected_loss
        True

    """

    is_chainerx = (
        chainerx.is_available() and backend.get_array_module(x) is chainerx)

    if is_chainerx or not enable_double_backprop:
        # Optimized implementation.
        # For non-ChainerX, forward and backward are supported but
        # double-backprop is not supported.
        # For ChainerX, even forward is supported for only specific
        # configuration of inputs and parameters, which is tested with
        # `SoftmaxCrossEntropy._is_chainerx_supported()`.
        func = SoftmaxCrossEntropy(
            normalize, cache_score, class_weight, ignore_label, reduce)

        if not is_chainerx or func._is_chainerx_supported((x, t)):
            loss, = func.apply((x, t))
            return loss

    # Generic double-backprop-enabled but unoptimized implementation
    return _double_backward_softmax_cross_entropy(
        x, t, normalize, class_weight, ignore_label, reduce, is_chainerx)
Example #37
File: backend.py Project: xu3kev/chainer
def get_device(device_spec):
    # type: (types.DeviceSpec) -> Device
    """Returns a device object.

    Args:
        device_spec (object): Device specifier.
            If a :class:`chainer.backend.Device` instance is given, it is
            returned intact. Otherwise the following values are supported:

            * ChainerX devices

              * A string representing a device.
                (ex. ``'native:0'``, ``'native'``)
              * A :class:`chainerx.Device` object.

            * CuPy

              * A string starting with ``'@cupy:'``.
                (ex. ``'@cupy:0'``)
              * A :class:`cupy.cuda.Device` object.

            * NumPy

              * The string ``'@numpy'``.

            * NumPy with Intel Architecture

              * The string ``'@intel64'``.
    """
    if isinstance(device_spec, Device):
        return device_spec

    if isinstance(device_spec, cuda._integer_types):
        return _get_device_cupy_or_numpy(device_spec)

    if chainerx.is_available() and isinstance(device_spec, chainerx.Device):
        return _chainerx.ChainerxDevice(device_spec)

    if cuda.available and isinstance(device_spec, cuda.Device):
        return cuda.GpuDevice(device_spec)

    if isinstance(device_spec, six.string_types):
        # '-1', '0', '1', ...
        try:
            int_device_spec = int(device_spec)
        except ValueError:
            pass
        else:
            return _get_device_cupy_or_numpy(int_device_spec)

        if device_spec.startswith('@'):
            # '@module:...'
            mod_name, colon, precise_spec = device_spec[1:].partition(':')
            if mod_name == 'numpy':
                if not colon:
                    return _cpu.CpuDevice()
            elif mod_name == 'cupy':
                if colon:
                    return cuda.GpuDevice.from_device_id(int(precise_spec))
            elif mod_name == 'intel64':
                if not colon:
                    return intel64.Intel64Device()

        elif chainerx.is_available():
            return _chainerx.ChainerxDevice(chainerx.get_device(device_spec))

    raise ValueError('Invalid device specifier: {}'.format(device_spec))
Example #38
class TestDeviceSpec(unittest.TestCase):
    """Test for backend.get_device() and backend.using_device()"""
    def check_device_spec_numpy(self, device_spec):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.CpuDevice)
        assert device.xp is numpy

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            pass

    def check_device_spec_cupy(self, device_spec, expected_device_id):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.GpuDevice)
        assert isinstance(device.device, cuda.Device)
        assert device.xp is cuda.cupy
        assert device.device.id == expected_device_id

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            assert cuda.Device() == cuda.Device(expected_device_id)

    def check_device_spec_chainerx(self, device_spec, expected_device_name):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.ChainerxDevice)
        assert device.xp is chainerx
        assert isinstance(device.device, chainerx.Device)
        assert device.device.name == expected_device_name

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            assert (chainerx.get_default_device() == chainerx.get_device(
                expected_device_name))

    def check_device_spec_intel64(self, device_spec):
        device = backend.get_device(device_spec)
        assert isinstance(device, backend.Intel64Device)
        assert device.xp is numpy

        with backend.using_device(device_spec):
            # TODO(niboshi): Test the Chainer default device
            pass

    def check_invalid(self, device_spec):
        with pytest.raises(Exception):
            backend.get_device(device_spec)

        with pytest.raises(Exception):
            backend.using_device(device_spec)

    def test_str_numpy(self):
        self.check_device_spec_numpy('@numpy')

    def test_legacy_int_numpy(self):
        self.check_device_spec_numpy(-1)

    def test_legacy_str_numpy(self):
        self.check_device_spec_numpy('-1')

    def test_module_numpy_device(self):
        self.check_device_spec_numpy(backend.CpuDevice())

    @attr.chainerx
    def test_str_chainerx_backend(self):
        self.check_device_spec_chainerx('native', 'native:0')

    @attr.chainerx
    def test_str_chainerx_device(self):
        self.check_device_spec_chainerx('native:0', 'native:0')

    @attr.gpu
    def test_str_cupy_device(self):
        self.check_device_spec_cupy('@cupy:0', 0)

    @attr.gpu
    def test_legacy_int_cupy_device(self):
        self.check_device_spec_cupy(0, 0)

    @attr.gpu
    def test_legacy_str_cupy_device(self):
        self.check_device_spec_cupy('0', 0)

    @attr.multi_gpu(2)
    def test_str_cupy_device_multi_gpu(self):
        self.check_device_spec_cupy('@cupy:1', 1)

    @attr.multi_gpu(2)
    def test_legacy_int_cupy_device_multi_gpu(self):
        self.check_device_spec_cupy(1, 1)

    @attr.multi_gpu(2)
    def test_legacy_str_cupy_device_multi_gpu(self):
        self.check_device_spec_cupy('1', 1)

    @attr.chainerx
    def test_chainerx_device(self):
        chainerx_device = chainerx.get_device('native:0')
        self.check_device_spec_chainerx(chainerx_device, 'native:0')

    @attr.gpu
    def test_cuda_device(self):
        cupy_device = cuda.Device(0)
        self.check_device_spec_cupy(cupy_device, 0)

    @attr.ideep
    def test_str_intel64(self):
        self.check_device_spec_intel64('@intel64')

    def test_str_chainerx_invalid(self):
        self.check_invalid('native:foo')
        self.check_invalid('')

    def test_str_module_invalid(self):
        self.check_invalid('@foo')
        self.check_invalid('@foo:0')

    def test_str_cupy_invalid(self):
        self.check_invalid('@cupy')
        self.check_invalid('@cupy::0')

    def test_str_numpy_invalid(self):
        self.check_invalid('@numpy:')
        self.check_invalid('@numpy:0')
        self.check_invalid('@:numpy')

    def test_tuple_invalid(self):
        # tuple is no longer supported from Chainer
        self.check_invalid(('native', 0))

    def test_cuda_dummy_device_invalid(self):
        self.check_invalid(cuda.DummyDevice)

    @unittest.skipIf(chainerx.is_available(),
                     'Only tested when ChainerX is not built')
    def test_chx_device_spec_without_chx_available(self):
        # If chainerx is not available, get_device() with unprefixed string
        # should mention ChainerX unavailability in the error message.
        with pytest.raises(RuntimeError, match=r'.*ChainerX.*'):
            chainer.get_device('foo')