Example #1
def _getitem(arr, key):
    if not isinstance(arr, chainerx.ndarray):
        return arr[key]

    try:
        return arr[key]
    except (IndexError, chainerx.DimensionError):
        pass

    is_backprop_required = arr.is_backprop_required()

    arr = backend.from_chainerx(arr)
    if isinstance(key, chainerx.ndarray):
        key = backend.from_chainerx(key)

    if isinstance(arr, cuda.ndarray):
        with arr.device:
            ret = arr[key]
    else:
        ret = arr[key]

    # This check is done after the fallback __getitem__ because the error that
    # caused the fallback might not be due to advanced indexing. In that case
    # the fallback __getitem__ should also raise the error.

    if is_backprop_required:
        raise RuntimeError(
            'ChainerX getitem fallback for advanced indexing is not supported '
            'for arrays that are connected to a graph.')

    return backend.to_chainerx(ret)
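
A minimal usage sketch of the helper above (assumes a ChainerX-enabled build; _getitem is the module-private function shown in this example). Native indexing is tried first; only when ChainerX raises does the NumPy/CuPy fallback run, and the result is converted back:

import chainerx

a = chainerx.arange(6).reshape((2, 3))
key = chainerx.array([1, 0])
# Either ChainerX serves the key natively (the try block returns early),
# or the helper falls back through backend.from_chainerx/to_chainerx.
b = _getitem(a, key)
assert isinstance(b, chainerx.ndarray)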
Example #2
    def update_core_chainerx(self, param):
        """Updates the ChainerX parameter.

        This method can be overridden to implement custom update logic.
        The default implementation is to convert the parameter to a
        memory-shared NumPy/CuPy parameter and call the corresponding update
        method.

        See :meth:`update_core` for details.

        Args:
            param (~chainer.Variable): Variable to be updated.

        """
        grad_array = param.grad
        backend_name = param.array.device.backend.name
        if backend_name == 'native':
            update_core = self.update_core_cpu
        elif backend_name == 'cuda':
            update_core = self.update_core_gpu
        else:
            raise RuntimeError(
                'Default implementation of Optimizer.update_core_chainerx is '
                'only provided for native or cuda backends (actual: {}). '
                'Override Optimizer.update_core_chainerx() to implement '
                'custom update logic.'.format(backend_name))

        # Convert state arrays to NumPy/CuPy
        chainerx_state_arrays = {}
        for state_name, st in self.state.items():
            if isinstance(st, chainerx.ndarray):
                self.state[state_name] = backend.from_chainerx(st)
                chainerx_state_arrays[state_name] = st

        # Create a temporary parameter with memory-shared NumPy/CuPy array
        # If the ChainerX parameter has a cached NumPy/CuPy copy, use the
        # cache and avoid redundant conversion. Else, create the cache here
        # and use it.
        if param._chainerx_fallback_array is None:
            param._chainerx_fallback_array = backend.from_chainerx(param.array)

        temp_param = variable.Variable._init_unchecked(
            param._chainerx_fallback_array, is_chainerx_array=False)

        if grad_array is not None:
            temp_param._set_grad_without_check(
                backend.from_chainerx(grad_array))

        # Update
        update_core(temp_param)

        # Restore state arrays
        for state_name, arr in chainerx_state_arrays.items():
            cur_arr = self.state[state_name]
            if cur_arr is not arr:
                arr = backend.to_chainerx(cur_arr)
            self.state[state_name] = arr
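
A NumPy-only sketch of why the restore loop above compares identities (hypothetical state dict; NumPy arrays stand in for the fallback arrays): an in-place update leaves the entry bound to the memory-shared array, so the original ChainerX array is restored as-is, while rebinding to a fresh array is the case that needs backend.to_chainerx.

import numpy

state = {'m': numpy.zeros(3)}
saved = dict(state)                   # plays the role of chainerx_state_arrays

state['m'] += 1                       # in-place update: same object
assert state['m'] is saved['m']       # restored without conversion

state['m'] = numpy.ones(3)            # rebound to a brand-new array
assert state['m'] is not saved['m']   # this branch converts via to_chainerx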
Example #3
    def update_core_chainerx(self, param):
        """Updates the ChainerX parameter.

        This method can be overridden to implement custom update logic.
        The default implementation is to convert the parameter to a
        memory-shared NumPy/CuPy parameter and call the corresponding update
        method.

        See :meth:`update_core` for details.

        Args:
            param (~chainer.Variable): Variable to be updated.

        """
        grad_array = param.grad
        backend_name = param.array.device.backend.name
        if backend_name == 'native':
            update_core = self.update_core_cpu
        elif backend_name == 'cuda':
            update_core = self.update_core_gpu
        else:
            raise RuntimeError(
                'Default implementation of Optimizer.update_core_chainerx is '
                'only provided for native or cuda backends (actual: {}). '
                'Override Optimizer.update_core_chainerx() to implement '
                'custom update logic.'.format(backend_name))

        # Convert state arrays to NumPy/CuPy
        chainerx_state_arrays = {}
        for state_name, st in self.state.items():
            if isinstance(st, chainerx.ndarray):
                self.state[state_name] = backend.from_chainerx(st)
                chainerx_state_arrays[state_name] = st

        # Create a temporary parameter with memory-shared NumPy/CuPy array
        # If the ChainerX parameter has a cached NumPy/CuPy copy, use the
        # cache and avoid redundant conversion. Else, create the cache here
        # and use it.
        if param._chainerx_fallback_array is None:
            param._chainerx_fallback_array = backend.from_chainerx(
                param.array)

        temp_param = variable.Variable(param._chainerx_fallback_array)

        if grad_array is not None:
            temp_param._set_grad_without_check(
                backend.from_chainerx(grad_array))

        # Update
        update_core(temp_param)

        # Restore state arrays
        for state_name, arr in chainerx_state_arrays.items():
            self.state[state_name] = arr
Example #4
def _setitem(arr, key, value):
    """Sets arr[key] to value by falling back to a non-ChainerX arrays.

    Supports both basic and advanced indexing.

    Note:

        With the ``cuda`` backend, the behavior differs from NumPy when
        integer arrays in ``key`` reference the same location
        multiple times. In that case, the value that is actually stored
        is undefined.

        >>> import chainerx
        >>> chainerx.set_default_device('cuda:0')
        >>> a = chainerx.zeros((2,), dtype=chainerx.float64)
        >>> i = chainerx.array([0, 1, 0, 1, 0, 1])
        >>> v = chainerx.arange(6).astype(chainerx.float64)
        >>> a[i] = v
        >>> a  # doctest: +SKIP
        array([2., 3.], shape=(2,), dtype=float64, device='cuda:0')

        On the other hand, NumPy and the ``native`` backend store the
        value corresponding to the last of the indices that reference a
        duplicate location.

        >>> import numpy
        >>> a_cpu = numpy.zeros((2,), dtype=numpy.float64)
        >>> i_cpu = numpy.array([0, 1, 0, 1, 0, 1])
        >>> v_cpu = numpy.arange(6).astype(numpy.float64)
        >>> a_cpu[i_cpu] = v_cpu
        >>> a_cpu
        array([4., 5.])

    """
    if not isinstance(arr, chainerx.ndarray):
        arr[key] = value
        return

    if arr.is_backprop_required():
        raise RuntimeError(
            'ChainerX setitem fallback for advanced indexing is not supported '
            'for arrays that are connected to a graph.')

    arr = backend.from_chainerx(arr)
    if isinstance(key, chainerx.ndarray):
        key = backend.from_chainerx(key)
    if isinstance(value, chainerx.ndarray):
        value = backend.from_chainerx(value)
    if isinstance(arr, cuda.ndarray):
        with arr.device:
            arr[key] = value
    else:
        arr[key] = value
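
The duplicate-index behavior described in the docstring can be reproduced with NumPy alone; this runnable snippet mirrors the second doctest:

import numpy

a = numpy.zeros(2)
i = numpy.array([0, 1, 0, 1, 0, 1])
a[i] = numpy.arange(6, dtype=a.dtype)
print(a)  # [4. 5.] -- on CPU the last write to each duplicated index wins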
Example #5
    def backward(self, target_input_indexes, grad_outputs):
        retained_inputs = self.get_retained_inputs()
        inputs = [None] * len(self.inputs)
        in_data = [None] * len(self.inputs)
        for retained, i_in in six.moves.zip(retained_inputs,
                                            self._input_indexes_to_retain):
            inputs[i_in] = retained
            in_data[i_in] = None if retained is None else retained.array
        in_data = tuple(in_data)

        grad_out_data = tuple(
            [None if grad is None else grad.data for grad in grad_outputs])

        is_chainerx_fallback_mode = self._is_chainerx_fallback_mode
        if is_chainerx_fallback_mode:
            # Convert input and output gradients to numpy/cupy
            in_data = backend.from_chainerx(in_data)
            grad_out_data = backend.from_chainerx(grad_out_data)

        # Call Function.backward
        with cuda.get_device_from_array(*(in_data + grad_out_data)):
            if is_chainerx_fallback_mode:
                # Enable attribute fallback
                with function_node._chainerx_attribute_fallback(
                        self._function, self.chainerx_device):
                    gxs = self._function.backward(in_data, grad_out_data)
            else:
                gxs = self._function.backward(in_data, grad_out_data)

        # Check gradients
        for x, gx in six.moves.zip(self.inputs, gxs):
            if gx is not None:
                variable._check_grad_type(self, x, True, gx)

        # Convert input gradients back to ChainerX
        if is_chainerx_fallback_mode:
            gxs = backend.to_chainerx(gxs)

        ret = []
        for i in target_input_indexes:
            if gxs[i] is None:
                g = None
            else:
                # Intentionally not passing requires_grad=False so that
                # backprop routines can raise an error when a further backprop
                # is attempted against this gradient variable.
                g = variable.Variable(gxs[i])
                if g.xp is not chainerx:
                    g.node._old_style_grad_generator = self._function.label
            ret.append(g)

        return tuple(ret)
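
A hedged sketch of the tuple conversion relied on above (assumes a ChainerX-enabled build): backend.from_chainerx maps over sequences and passes None entries through, which is why in_data and grad_out_data can each be converted in a single call.

import chainerx
from chainer import backend

xs = (chainerx.zeros((2,), dtype=chainerx.float32), None)
ys = backend.from_chainerx(xs)   # -> (numpy.ndarray, None)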
Example #6
    def backward(self, target_input_indexes, grad_outputs):
        retained_inputs = self.get_retained_inputs()
        inputs = [None] * len(self.inputs)
        in_data = [None] * len(self.inputs)
        for retained, i_in in six.moves.zip(
                retained_inputs, self._input_indexes_to_retain):
            inputs[i_in] = retained
            in_data[i_in] = retained.array
        in_data = tuple(in_data)

        grad_out_data = tuple([None if grad is None else grad.data
                               for grad in grad_outputs])

        is_chainerx_fallback_mode = self._is_chainerx_fallback_mode
        if is_chainerx_fallback_mode:
            # Convert input and output gradients to numpy/cupy
            in_data = backend.from_chainerx(in_data)
            grad_out_data = backend.from_chainerx(grad_out_data)

        # Call Function.backward
        with cuda.get_device_from_array(*(in_data + grad_out_data)):
            if is_chainerx_fallback_mode:
                # Enable attribute fallback
                with function_node._chainerx_attribute_fallback(
                        self._function, self.chainerx_device):
                    gxs = self._function.backward(in_data, grad_out_data)
            else:
                gxs = self._function.backward(in_data, grad_out_data)

        for x, gx in six.moves.zip(self.inputs, gxs):
            variable._check_grad_type(self, x, True, gx, False)

        # Convert input gradients back to ChainerX
        if is_chainerx_fallback_mode:
            gxs = backend.to_chainerx(gxs)

        ret = []
        for i in target_input_indexes:
            if gxs[i] is None:
                g = None
            else:
                # Intentionally not passing requires_grad=False so that
                # backprop routines can raise an error when a further backprop
                # is attempted against this gradient variable.
                g = variable.Variable(gxs[i])
                if g.xp is not chainerx:
                    g.node._old_style_grad_generator = self._function.label
            ret.append(g)

        return tuple(ret)
Example #7
    def _chainerx_apply_fallback_preprocess(self, in_data, inputs):
        chainerx_in_data = in_data
        in_data = []
        device = None
        for data, x in six.moves.zip(chainerx_in_data, inputs):
            if data is None:
                fallback_data = None
            else:
                # Use the cached fallback arrays as inputs if they exist.
                x_is_variable = isinstance(x, variable.Variable)
                if x_is_variable and x._chainerx_fallback_array is not None:
                    fallback_data = x._chainerx_fallback_array
                    if device is None:
                        device = x.device
                else:
                    fallback_data = backend.from_chainerx(data)
                    if device is None:
                        device = backend.ChainerxDevice(data.device)

                    # Update the fallback cache if possible.
                    if x_is_variable:
                        x._chainerx_fallback_array = fallback_data

            in_data.append(fallback_data)

        in_data = tuple(in_data)
        return chainerx_in_data, in_data, device
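
A standalone sketch of the caching rule above (hypothetical names; a string conversion stands in for backend.from_chainerx): the fallback array is computed once per variable and reused on subsequent calls.

class Var(object):
    _chainerx_fallback_array = None

def fallback(var, data):
    if var._chainerx_fallback_array is not None:
        return var._chainerx_fallback_array    # cache hit: skip conversion
    arr = 'converted<%r>' % (data,)            # stands in for from_chainerx
    var._chainerx_fallback_array = arr         # populate the cache
    return arr

v = Var()
assert fallback(v, 1) is fallback(v, 1)        # second call reuses the cache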
Example #8
    def forward_cpu(self, inputs):
        class_weight = backend.from_chainerx(self.class_weight)

        self.retain_inputs((0, 1))
        x, t = inputs
        if chainer.is_debug():
            _check_input_values(x, t, self.ignore_label)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        if class_weight is not None:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= _broadcast_to(class_weight.reshape(shape), x.shape)
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)
        t_valid = t != self.ignore_label
        t = t * t_valid
        log_p = log_yd[t.ravel(), numpy.arange(t.size)]

        log_p *= t_valid.ravel()
        if self.reduce == 'mean':
            # Handle the case where this SoftmaxCrossEntropy instance was
            # unpickled from an old version.
            if self.normalize:
                count = t_valid.sum()
            else:
                count = len(x)
            self._coeff = 1.0 / max(count, 1)

            y = log_p.sum(keepdims=True) * (-self._coeff)
            return y.reshape(()),
        else:
            return -log_p.reshape(t.shape),
Example #10
    def _make_samples(self, t):
        size = int(t.shape[0])
        # The first entry is the positive; the others are sampled negatives.
        samples = self.sampler((size, self.sample_size + 1))
        samples = backend.from_chainerx(samples)
        samples[:, 0] = t
        return samples
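
A NumPy-only sketch of the sample layout built above (assumption: self.sampler returns an integer array of the requested shape): column 0 holds the positive labels and the remaining sample_size columns hold sampled negatives.

import numpy

rng = numpy.random.RandomState(0)
t = numpy.array([3, 1, 4])                         # positive labels
samples = rng.randint(0, 10, (t.shape[0], 1 + 4))  # 1 positive + 4 negatives
samples[:, 0] = t                                  # column 0 <- positives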
Example #11
    def forward_gpu(self, inputs):
        t = backend.from_chainerx(self.t)  # Workaround for ChainerX.

        gx = cuda.cupy.zeros(self.shape, self.dtype)
        gx = cuda.elementwise('S t, T gloss', 'raw T gx',
                              'int ind[] = {i, t}; gx[ind] = gloss;',
                              'getitem_bwd')(t, inputs[0], gx)
        return gx,
Example #12
    def getattribute(self, name):
        value = sup.__getattribute__(name)
        if isinstance(value, chainerx.ndarray):
            fallback_arr = fallback_array_cache.get(name)
            if fallback_arr is None:
                fallback_arr = backend.from_chainerx(value)
                fallback_array_cache[name] = fallback_arr
            return fallback_arr
        return value
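
A self-contained sketch of this per-attribute cache (no ChainerX needed; a float stands in for chainerx.ndarray and str() for the conversion): the first access converts and stores, later accesses return the cached object.

fallback_array_cache = {}

class Sup(object):
    w = 1.5        # stands in for a chainerx.ndarray attribute

sup = Sup()

def getattribute(name):
    value = getattr(sup, name)
    if isinstance(value, float):              # "is it a chainerx.ndarray?"
        cached = fallback_array_cache.get(name)
        if cached is None:
            cached = str(value)               # "backend.from_chainerx"
            fallback_array_cache[name] = cached
        return cached
    return value

assert getattribute('w') is getattribute('w')  # cached after the first access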
Example #14
    def output_data(self):
        """A tuple of the retained output arrays.

        It has the same length as the :attr:`outputs`. Elements that are not
        retained are set to ``None``.

        """
        if self.node._is_chainerx_fallback_mode:
            return backend.from_chainerx(self.node.output_data)
        return self.node.output_data
Example #15
    def output_data(self):
        """A tuple of the retained output arrays.

        It has the same length as the :attr:`outputs`. Elements that are not
        retained are set to ``None``.

        """
        if self.node._is_chainerx:
            return backend.from_chainerx(self.node.output_data)
        return self.node.output_data
Example #17
def _getitem(arr, key):
    try:
        return arr[key]
    except (IndexError, chainerx.DimensionError):
        pass

    if isinstance(arr, chainerx.ndarray):
        arr = backend.from_chainerx(arr)
        is_arr_chainerx = True
    else:
        is_arr_chainerx = False
    if isinstance(key, chainerx.ndarray):
        key = backend.from_chainerx(key)
    if isinstance(arr, cuda.ndarray):
        with arr.device:
            ret = arr[key]
    else:
        ret = arr[key]
    if is_arr_chainerx:
        ret = backend.to_chainerx(ret)
    return ret
Example #18
    def forward_gpu(self, inputs):
        class_weight = backend.from_chainerx(self.class_weight)

        self.retain_inputs((0, 1))
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            _check_input_values(x, t, self.ignore_label)

        if x.size == 0:
            y = cupy.zeros(t.shape, dtype=x.dtype)
            if self.cache_score:
                self.y = y
            if self.reduce == 'mean':
                return y.sum(),
            else:
                return y,
        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if class_weight is not None:
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= cupy.broadcast_to(class_weight.reshape(shape), x.shape)
        if self.normalize:
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        if self.reduce == 'mean':
            ret = cuda.reduce(
                'S t, raw T log_y, int32 n_channel, raw T coeff, '
                'S ignore_label',
                'T out',
                't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
                'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1],
              self._coeff, self.ignore_label)
        else:
            ret = cuda.elementwise(
                'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
                '''
                if (t == ignore) {
                  out = 0;
                } else {
                  out = -log_y[i * n_channel + t];
                }
                ''',
                'softmax_crossent_no_reduce_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
            ret = ret.reshape(t.shape)
        return ret,
Example #20
    def from_chainerx(self):
        """Converts parameter variables and persistent values from ChainerX \
to NumPy/CuPy devices without any copy."""
        d = self.__dict__
        for name in self._params:
            d[name].from_chainerx()
        for name in self._persistent:
            if not numpy.isscalar(d[name]):
                d[name] = backend.from_chainerx(d[name])

        if isinstance(self._device, backend.ChainerxDevice):
            self._device = self._device.fallback_device

        return self
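
A hedged usage sketch (assumes a Chainer build with ChainerX support, and that Link.to_device accepts ChainerX device names such as 'native:0'):

import chainer

link = chainer.links.Linear(3, 2)
link.to_device('native:0')   # parameters become chainerx.ndarray
link.from_chainerx()         # back to memory-shared NumPy arrays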
Example #21
    def test_from_chainerx(self, backend_config):
        arr = backend_config.get_array(numpy.ones((2, 3), numpy.float32))
        arr_converted = backend.from_chainerx(arr)

        src_device = backend_config.device
        if src_device.xp is chainerx:
            dst_xp = src_device.fallback_device.xp
            assert isinstance(arr_converted, dst_xp.ndarray)
            if dst_xp is cuda.cupy:
                assert arr_converted.device.id == src_device.device.index
        else:
            assert arr is arr_converted

        with backend_config:
            self.check_equal_memory_shared(arr, arr_converted)
Example #23
    def forward(self, xs):
        a = xs[0]
        b = xs[1]
        y = a.copy()
        xp = backend.get_array_module(a)
        slices = tuple([
            backend.from_chainerx(s) if isinstance(s, chainerx.ndarray) else s
            for s in self.slices
        ])
        if y[slices].shape != b.shape:
            raise ValueError('Chainer does not support automatic broadcasting '
                             'of variables.')
        if xp is numpy:
            numpy.add.at(y, slices, b)
        else:
            cuda.cupyx.scatter_add(y, slices, b)
        return y,
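
The scatter-add above differs from plain assignment when indices repeat; this runnable NumPy snippet shows the accumulation:

import numpy

y = numpy.zeros(3)
numpy.add.at(y, numpy.array([0, 0, 2]), numpy.array([1.0, 2.0, 5.0]))
print(y)  # [3. 0. 5.] -- duplicate indices accumulate instead of overwriting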
Example #24
    def _chainerx_apply_fallback_preprocess(self, in_data, inputs):
        chainerx_in_data = in_data
        in_data = []
        for i in six.moves.range(len(inputs)):
            # Use the cached fallback arrays as inputs if they exist.
            x = inputs[i]
            x_is_variable = isinstance(x, variable.Variable)
            if x_is_variable and x._chainerx_fallback_array is not None:
                x_data = x._chainerx_fallback_array
            else:
                x_data = backend.from_chainerx(chainerx_in_data[i])

                # Update the fallback cache if possible.
                if x_is_variable:
                    x._chainerx_fallback_array = x_data

            in_data.append(x_data)

        in_data = tuple(in_data)
        return chainerx_in_data, in_data
Example #25
    def _chainerx_apply_fallback_preprocess(self, in_data, inputs):
        chainerx_in_data = in_data
        in_data = []
        device = None
        for data, x in six.moves.zip(chainerx_in_data, inputs):
            # Use the cached fallback arrays as inputs if they exist.
            x_is_variable = isinstance(x, variable.Variable)
            if x_is_variable and x._chainerx_fallback_array is not None:
                fallback_data = x._chainerx_fallback_array
                if device is None:
                    device = x.device
            else:
                fallback_data = backend.from_chainerx(data)
                if device is None:
                    device = backend.ChainerxDevice(data.device)

                # Update the fallback cache if possible.
                if x_is_variable:
                    x._chainerx_fallback_array = fallback_data

            in_data.append(fallback_data)

        in_data = tuple(in_data)
        return chainerx_in_data, in_data, device
Example #26
    def forward(self, inputs):
        self.retain_inputs((0, 1))
        x, gamma, beta = inputs

        xp = backend.get_array_module(x)
        if self.running_mean is None:
            self.running_mean = xp.zeros_like(gamma)
            self.running_var = xp.zeros_like(gamma)

        self.axis = _compute_axis(x.ndim, gamma.ndim, self.axis)
        self.key_axis = _compute_key_axis(x.ndim, gamma.ndim, self.axis)

        if all(x.shape[i] == 1 for i in self.axis):
            if 0 in self.axis:
                warnings.warn(
                    'A batch with no more than one sample has been given'
                    ' to F.batch_normalization. F.batch_normalization'
                    ' will always output a zero tensor for such batches.'
                    ' This could be caused by incorrect configuration in'
                    ' your code (such as running evaluation while'
                    ' chainer.config.train=True),'
                    ' but could also happen in the last batch of training'
                    ' if a non-repeating iterator is used.',
                    UserWarning)
            else:
                warnings.warn(
                    'F.batch_normalization received a batch with single'
                    ' dimensions along all axes that are used for aggregating'
                    ' statistics. F.batch_normalization'
                    ' will always output a zero tensor for such batches.',
                    UserWarning)

        # TODO(niboshi): Refactor calculation of expander and axis into a
        # function and call it just before they are used.

        # expander inserts singleton dimensions to gamma and beta so that they
        # can be broadcasted with x.
        expander = [None for _ in range(x.ndim)]
        for i in self.key_axis:
            expander[i] = slice(None)
        expander = tuple(expander)
        self.expander = expander

        self.mode = _BNMode(x, gamma, self.key_axis)
        self.use_cudnn = self.mode.can_use_cudnn(xp)
        self.use_ideep = self.mode.can_use_ideep()

        if self.use_ideep:
            # TODO(niboshi): Refactor iDeep part into a separate method
            expand_dim = False
            if x.ndim == 2:
                expand_dim = True
                x = x[:, :, None, None]

            y, self.mean, self.var, self.inv_std = (
                intel64.ideep.batchNormalization.Forward(
                    intel64.ideep.array(x),
                    intel64.ideep.array(gamma),
                    intel64.ideep.array(beta),
                    None,
                    None,
                    self.eps
                ))

            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)

            # Update running_mean
            if isinstance(self.running_mean, intel64.ideep.mdarray):
                self.running_mean.inplace_axpby(
                    self.decay, (1 - self.decay), self.mean)
            else:
                self.running_mean *= self.decay
                self.running_mean += self.mean * (1 - self.decay)

            # Update running_var
            if isinstance(self.running_var, intel64.ideep.mdarray):
                self.running_var.inplace_axpby(
                    self.decay, (1 - self.decay), self.var * adjust)
            else:
                self.running_var *= self.decay
                self.running_var += self.var * adjust * (1 - self.decay)

            if expand_dim:
                y = numpy.squeeze(y, axis=(2, 3))

        elif self.use_cudnn:
            if self.mean is None:
                # Output cache to speed up backward pass.
                self.mean = xp.empty_like(gamma)
                # Output cache to speed up backward pass.
                self.inv_std = xp.empty_like(gamma)
            y = cudnn.batch_normalization_forward_training(
                x, gamma, beta, self.running_mean, self.running_var,
                self.mean, self.inv_std, self.eps, self.decay,
                self.mode.is_for_conv2d, self.mode.get_cudnn_mode(),
                configuration.config.debug)
        else:
            # Generic CPU and GPU implementation

            gamma = gamma[expander]
            beta = beta[expander]
            self.mean = x.mean(axis=self.axis)
            var = x.var(axis=self.axis)
            if xp is numpy:
                self.inv_std = numpy.reciprocal(numpy.sqrt(
                    var + self.eps, dtype=x.dtype))
            else:
                self.inv_std = cuda.cupyx.rsqrt(var + self.eps)
            y = _apply_bn_fwd(xp, x, self.mean[expander],
                              self.inv_std[expander], gamma, beta)
            # Update running statistics
            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)  # unbiased estimation

            xp = backend.get_array_module(self.running_mean, self.running_var)
            if xp is chainerx:
                self.running_mean, self.running_var = backend.from_chainerx(
                    (self.running_mean, self.running_var))

            self.running_mean *= self.decay
            self.running_mean += (1 - self.decay) * self.mean
            self.running_var *= self.decay
            self.running_var += (1 - self.decay) * adjust * var

            if xp is chainerx:
                self.running_mean = backend.to_chainerx(self.running_mean)
                self.running_var = backend.to_chainerx(self.running_var)

        return y,
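
A NumPy-only sketch of the running-statistics update in the generic branch above (names are illustrative): the batch variance receives the m/(m-1) adjustment before entering the exponential moving average.

import numpy

decay, m = 0.9, 32
adjust = m / max(m - 1., 1.)   # unbiased variance estimate
running_mean = numpy.zeros(4)
running_var = numpy.ones(4)
mean = numpy.full(4, 0.5)      # per-batch statistics
var = numpy.full(4, 2.0)

running_mean = decay * running_mean + (1 - decay) * mean
running_var = decay * running_var + (1 - decay) * adjust * var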
Example #27
    def forward(self, inputs):
        self.retain_inputs((0, 1))
        x, gamma, beta = inputs

        xp = backend.get_array_module(x)
        if self.running_mean is None:
            self.running_mean = xp.zeros_like(gamma)
            self.running_var = xp.zeros_like(gamma)

        self.axis = _compute_axis(x.ndim, gamma.ndim, self.axis)
        self.key_axis = _compute_key_axis(x.ndim, gamma.ndim, self.axis)

        if all(x.shape[i] == 1 for i in self.axis):
            if 0 in self.axis:
                warnings.warn(
                    'A batch with no more than one sample has been given'
                    ' to F.batch_normalization. F.batch_normalization'
                    ' will always output a zero tensor for such batches.'
                    ' This could be caused by incorrect configuration in'
                    ' your code (such as running evaluation while'
                    ' chainer.config.train=True),'
                    ' but could also happen in the last batch of training'
                    ' if a non-repeating iterator is used.', UserWarning)
            else:
                warnings.warn(
                    'F.batch_normalization received a batch with single'
                    ' dimensions along all axes that are used for aggregating'
                    ' statistics. F.batch_normalization'
                    ' will always output a zero tensor for such batches.',
                    UserWarning)

        # TODO(niboshi): Refactor calculation of expander and axis into a
        # function and call it just before they are used.

        # expander inserts singleton dimensions to gamma and beta so that they
        # can be broadcasted with x.
        expander = [None for _ in range(x.ndim)]
        for i in self.key_axis:
            expander[i] = slice(None)
        expander = tuple(expander)
        self.expander = expander

        self.mode = _BNMode(x, gamma, self.key_axis)
        self.use_cudnn = self.mode.can_use_cudnn(xp)
        self.use_ideep = self.mode.can_use_ideep()

        if self.use_ideep:
            # TODO(niboshi): Refactor iDeep part into a separate method
            expand_dim = False
            if x.ndim == 2:
                expand_dim = True
                x = x[:, :, None, None]

            y, self.mean, self.var, self.inv_std = (
                intel64.ideep.batchNormalization.Forward(
                    intel64.ideep.array(x), intel64.ideep.array(gamma),
                    intel64.ideep.array(beta), None, None, self.eps))

            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)

            # Update running_mean
            if isinstance(self.running_mean, intel64.ideep.mdarray):
                self.running_mean.inplace_axpby(self.decay, (1 - self.decay),
                                                self.mean)
            else:
                self.running_mean *= self.decay
                self.running_mean += self.mean * (1 - self.decay)

            # Update running_var
            if isinstance(self.running_var, intel64.ideep.mdarray):
                self.running_var.inplace_axpby(self.decay, (1 - self.decay),
                                               self.var * adjust)
            else:
                self.running_var *= self.decay
                self.running_var += self.var * adjust * (1 - self.decay)

            if expand_dim:
                y = numpy.squeeze(y, axis=(2, 3))

        elif self.use_cudnn:
            if self.mean is None:
                # Output cache to speed up backward pass.
                self.mean = xp.empty_like(gamma)
                # Output cache to speed up backward pass.
                self.inv_std = xp.empty_like(gamma)
            y = cudnn.batch_normalization_forward_training(
                x, gamma, beta, self.running_mean, self.running_var, self.mean,
                self.inv_std, self.eps, self.decay, self.mode.is_for_conv2d,
                self.mode.get_cudnn_mode(), chainer.is_debug())
        else:
            # Generic CPU and GPU implementation

            gamma = gamma[expander]
            beta = beta[expander]
            self.mean = x.mean(axis=self.axis)
            var = x.var(axis=self.axis)
            if xp is numpy:
                self.inv_std = numpy.reciprocal(
                    numpy.sqrt(var + self.eps, dtype=x.dtype))
            else:
                self.inv_std = cuda.cupyx.rsqrt(var + self.eps)
            y = _apply_bn_fwd(xp, x, self.mean[expander],
                              self.inv_std[expander], gamma, beta)
            # Update running statistics
            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)  # unbiased estimation

            xp = backend.get_array_module(self.running_mean, self.running_var)
            if xp is chainerx:
                self.running_mean, self.running_var = backend.from_chainerx(
                    (self.running_mean, self.running_var))

            self.running_mean *= self.decay
            self.running_mean += (1 - self.decay) * self.mean
            self.running_var *= self.decay
            self.running_var += (1 - self.decay) * adjust * var

            if xp is chainerx:
                self.running_mean = backend.to_chainerx(self.running_mean)
                self.running_var = backend.to_chainerx(self.running_var)

        return y,
Example #28
    def forward(self, inputs):
        self.retain_inputs((0, 1))
        x, gamma, beta = inputs

        xp = backend.get_array_module(x)
        if self.running_mean is None:
            self.running_mean = xp.zeros_like(gamma)
            self.running_var = xp.zeros_like(gamma)

        self.axis = _compute_axis(x.ndim, gamma.ndim, self.axis)
        self.key_axis = _compute_key_axis(x.ndim, gamma.ndim, self.axis)

        if all(x.shape[i] == 1 for i in self.axis):
            if 0 in self.axis:
                warnings.warn(
                    'A batch with no more than one sample has been given'
                    ' to F.batch_normalization. F.batch_normalization'
                    ' will always output a zero tensor for such batches.'
                    ' This could be caused by incorrect configuration in'
                    ' your code (such as running evaluation while'
                    ' chainer.config.train=True),'
                    ' but could also happen in the last batch of training'
                    ' if a non-repeating iterator is used.',
                    UserWarning)
            else:
                warnings.warn(
                    'F.batch_normalization received a batch with single'
                    ' dimensions along all axes that are used for aggregating'
                    ' statistics. F.batch_normalization'
                    ' will always output a zero tensor for such batches.',
                    UserWarning)

        # TODO(niboshi): Refactor calculation of expander and axis into a
        # function and call it just before they are used.

        # expander inserts singleton dimensions to gamma and beta so that they
        # can be broadcasted with x.
        expander = [None for _ in range(x.ndim)]
        for i in self.key_axis:
            expander[i] = slice(None)
        expander = tuple(expander)
        self.expander = expander

        self.mode = _BNMode(x, gamma, self.key_axis)
        self.use_cudnn = self.mode.can_use_cudnn(xp)
        self.use_ideep = self.mode.can_use_ideep()

        if self.use_ideep:
            # TODO(niboshi): Refactor iDeep part into a separate method
            expand_dim = False
            if x.ndim == 2:
                expand_dim = True
                x = x[:, :, None, None]

            y, self.mean, self.var, self.inv_std = (
                intel64.ideep.batchNormalization.Forward(
                    intel64.ideep.array(x),
                    intel64.ideep.array(gamma),
                    intel64.ideep.array(beta),
                    None,
                    None,
                    self.eps
                ))

            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)

            # Update running_mean
            if isinstance(self.running_mean, intel64.ideep.mdarray):
                self.running_mean.inplace_axpby(
                    self.decay, (1 - self.decay), self.mean)
            else:
                self.running_mean *= self.decay
                self.running_mean += self.mean * (1 - self.decay)

            # Update running_var
            if isinstance(self.running_var, intel64.ideep.mdarray):
                self.running_var.inplace_axpby(
                    self.decay, (1 - self.decay), self.var * adjust)
            else:
                self.running_var *= self.decay
                self.running_var += self.var * adjust * (1 - self.decay)

            if expand_dim:
                y = numpy.squeeze(y, axis=(2, 3))

        elif self.use_cudnn:
            # TODO(niboshi): Refactor cuDNN part into a separate method
            x = cuda.cupy.ascontiguousarray(x)

            gamma = cuda.cupy.ascontiguousarray(gamma)
            beta = cuda.cupy.ascontiguousarray(beta)
            dtype = x.dtype
            handle = cudnn.get_handle()
            x_desc = cudnn.create_tensor_descriptor(
                _as4darray(x, self.mode))
            cudnn_mode = self.mode.get_cudnn_mode()
            derivedBnDesc = cudnn.create_uninitialized_tensor_descriptor()
            libcudnn.deriveBNTensorDescriptor(derivedBnDesc.value,
                                              x_desc.value, cudnn_mode)
            dtype_param = _get_dtype_of_tensor_descriptor(derivedBnDesc)
            if dtype_param is not dtype:
                gamma = gamma.astype(dtype_param)
                beta = beta.astype(dtype_param)
                running_mean = self.running_mean.astype(dtype_param)
                running_var = self.running_var.astype(dtype_param)
            else:
                running_mean = self.running_mean
                running_var = self.running_var

            oz_dtype = (
                numpy.float64 if x.dtype == numpy.float64 else numpy.float32)
            one = numpy.array(1, dtype=oz_dtype).ctypes
            zero = numpy.array(0, dtype=oz_dtype).ctypes
            y = cuda.cupy.empty_like(x)
            # Factor used in the moving average
            factor = 1 - self.decay

            if self.mean is None:
                # Output cache to speed up backward pass.
                self.mean = xp.empty_like(gamma)
                # Output cache to speed up backward pass.
                self.inv_std = xp.empty_like(gamma)
            # Note: cuDNN computes the mini-batch mean and variance
            # internally. We can simply (optionally) pass
            # it the running-average mean and variance arrays.
            # Note: This API seems to set the inverse of the standard deviation
            # (instead of the variance) to the resultSaveInvVariance argument.
            # The current implementation of our BN depends on this behavior,
            # which lets us reduce the number of reduction kernels.
            libcudnn.batchNormalizationForwardTraining(
                handle, cudnn_mode, one.data, zero.data,
                x_desc.value, x.data.ptr, x_desc.value,
                y.data.ptr, derivedBnDesc.value, gamma.data.ptr,
                beta.data.ptr, factor, running_mean.data.ptr,
                running_var.data.ptr, self.eps,
                self.mean.data.ptr, self.inv_std.data.ptr)

            # Note: When the CUDNN_BATCHNORM_SPATIAL_PERSISTENT mode is used,
            # there is a possibility of numerical overflow. You can use
            # queryRuntimeError() to check whether the overflow actually
            # occurred during the batch normalization.
            if (cudnn_mode is libcudnn.CUDNN_BATCHNORM_SPATIAL_PERSISTENT and
                    configuration.config.debug):
                query_mode = libcudnn.CUDNN_ERRQUERY_BLOCKING
                rstatus = libcudnn.queryRuntimeError(handle, query_mode)
                if rstatus is not libcudnn.CUDNN_STATUS_SUCCESS:
                    warnings.warn(
                        'A numerical overflow might have happened in cuDNN '
                        'batch normalization (status:{})'.format(rstatus))

            if dtype_param is not dtype:
                # When the data type of parameters is converted, say, from fp16
                # to fp32, the values of fp32 arrays of running_mean and
                # running_var updated by batchNormalizationForwardTraining
                # must be explicitly written back to their original fp16
                # arrays.
                running_mean = running_mean.astype(dtype)
                running_var = running_var.astype(dtype)
                self.running_mean.data.copy_from(running_mean.data,
                                                 running_mean.nbytes)
                self.running_var.data.copy_from(running_var.data,
                                                running_var.nbytes)
        else:
            # Generic CPU and GPU implementation

            gamma = gamma[expander]
            beta = beta[expander]
            self.mean = x.mean(axis=self.axis)
            var = x.var(axis=self.axis)
            if xp is numpy:
                self.inv_std = numpy.reciprocal(numpy.sqrt(
                    var + self.eps, dtype=x.dtype))
            else:
                self.inv_std = cuda.cupyx.rsqrt(var + self.eps)
            y = _apply_bn_fwd(xp, x, self.mean[expander],
                              self.inv_std[expander], gamma, beta)
            # Update running statistics
            m = x.size // gamma.size
            adjust = m / max(m - 1., 1.)  # unbiased estimation

            xp = backend.get_array_module(self.running_mean, self.running_var)
            if xp is chainerx:
                self.running_mean, self.running_var = backend.from_chainerx(
                    (self.running_mean, self.running_var))

            self.running_mean *= self.decay
            self.running_mean += (1 - self.decay) * self.mean
            self.running_var *= self.decay
            self.running_var += (1 - self.decay) * adjust * var

            if xp is chainerx:
                self.running_mean = backend.to_chainerx(self.running_mean)
                self.running_var = backend.to_chainerx(self.running_var)

        return y,
Example #29
    def forward_cpu(self, inputs):
        t = backend.from_chainerx(self.t)  # Workaround for ChainerX.

        gx = numpy.zeros(self.shape, self.dtype)
        gx[six.moves.range(self.t.size), t] = inputs[0]
        return gx,
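
A runnable NumPy sketch of the scatter in forward_cpu above: row i receives the incoming gradient at column t[i], and every other entry stays zero.

import numpy

t = numpy.array([2, 0, 1])
gloss = numpy.array([0.1, 0.2, 0.3])
gx = numpy.zeros((3, 4))
gx[numpy.arange(t.size), t] = gloss   # one nonzero per row, at column t[i]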
Example #31
    def forward_cpu(self, inputs):
        b = backend.from_chainerx(self.b)  # Workaround for ChainerX
        y = (b > 0) * inputs[0]
        return utils.force_array(y, dtype=y.dtype),
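
A NumPy illustration of the mask used above: gradients pass only where the forward output b was positive.

import numpy

b = numpy.array([-1., 0., 2.])
g = numpy.array([10., 10., 10.])
gx = (b > 0) * g   # -> [ 0.  0. 10.]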
Example #32
    def forward(self, xs):
        slices = tuple([
            backend.from_chainerx(s) if isinstance(s, chainerx.ndarray) else s
            for s in self.slices
        ])
        return utils.force_array(xs[0][slices]),
Example #34
    def forward_gpu(self, inputs):
        b = backend.from_chainerx(self.b)  # Workaround for ChainerX
        gx = _relu_grad2_kernel(b, inputs[0])
        return gx,