Example 1
    def check_forward(self, args):
        y = functions.batch_normalization(*[chainer.Variable(i) for i in args], eps=self.eps)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = _batch_normalization(self.expander, self.gamma, self.beta, self.x, self.mean, self.var)

        gradient_check.assert_allclose(y_expect, y.data, **self.check_forward_options)
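
The test above compares against a private _batch_normalization helper that is not shown in the snippet. Below is a hypothetical NumPy sketch of what such a reference computation typically looks like; the expander argument is assumed to be an indexing tuple (for example (None, slice(None), None, None)) that broadcasts the per-channel arrays over x, and the exact eps handling of the real helper may differ.

    import numpy

    def _batch_normalization(expander, gamma, beta, x, mean, var, eps=2e-5):
        # Broadcast per-channel statistics/parameters over x and apply the
        # standard batch normalization formula.
        mean = mean[expander]
        std = numpy.sqrt(var + eps)[expander]
        return gamma[expander] * (x - mean) / std + beta[expander]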
Example 2
    def __call__(self, x, **kwargs):
        """__call__(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        .. warning::

           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', False)``.
           See :func:`chainer.using_config`.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        argument.check_unexpected_kwargs(
            kwargs, test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))

        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))

        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,
                running_var=self.avg_var, decay=decay)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
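
As the warning in the docstring above indicates, the train/eval switch is handled through the configuration rather than a ``test`` argument since Chainer v2. A minimal usage sketch of that behaviour (the link size and array shapes are arbitrary choices for illustration):

    import numpy as np
    import chainer
    import chainer.links as L

    bn = L.BatchNormalization(3)                  # one gamma/beta pair per channel
    x = np.random.randn(8, 3).astype(np.float32)

    # Training mode (default): normalizes with batch statistics and updates
    # the running averages bn.avg_mean / bn.avg_var with the configured decay.
    y_train = bn(x)

    # Evaluation mode: fixed_batch_normalization is used with the running averages.
    with chainer.using_config('train', False):
        y_eval = bn(x)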
Example 3
    def check_forward(self, args, use_cudnn=True):
        y = functions.batch_normalization(
            *[chainer.Variable(i) for i in args], running_mean=None,
            running_var=None, decay=self.decay, eps=self.eps,
            use_cudnn=use_cudnn)
        self.assertEqual(y.data.dtype, self.dtype)

        y_expect = _batch_normalization(
            self.expander, self.gamma, self.beta, self.x, self.mean, self.var)

        testing.assert_allclose(
            y_expect, y.data, **self.check_forward_options)
Example 4
    def check_forward(self, inputs, backend_config):
        y_expected, = self.forward_cpu(inputs)

        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
        if not self.c_contiguous:
            inputs = _to_fcontiguous(inputs)

        with backend_config:
            y = functions.batch_normalization(
                *inputs, running_mean=None,
                running_var=None, decay=self.decay, eps=self.eps)
        assert y.data.dtype == self.dtype

        testing.assert_allclose(
            y_expected, y.data, **self.check_forward_options)
Example 5
    def check_forward(self, inputs, backend_config):
        # TODO(niboshi): Support it
        if backend_config.use_chainerx and self.dtype == numpy.float16:
            raise unittest.SkipTest('ChainerX does not support float16')

        if self.running_statistics:
            running_mean_expected = self.running_mean.copy()
            running_var_expected = self.running_var.copy()
        else:
            running_mean_expected = None
            running_var_expected = None

        y_expected, = self.forward_cpu(
            inputs, running_mean_expected, running_var_expected)

        inputs = backend_config.get_array(inputs)
        running_mean = backend_config.get_array(self.running_mean)
        running_var = backend_config.get_array(self.running_var)

        if not self.c_contiguous:
            inputs = _as_noncontiguous_array(inputs)
            running_mean = _as_noncontiguous_array(running_mean)
            running_var = _as_noncontiguous_array(running_var)

        with backend_config:
            y = functions.batch_normalization(
                *inputs, running_mean=running_mean,
                running_var=running_var, **self.bn_options)
        assert y.data.dtype == self.dtype

        testing.assert_allclose(
            y_expected, y.data, **self.check_forward_options)
        if self.running_statistics:
            testing.assert_allclose(
                running_mean_expected, running_mean,
                **self.check_forward_options)
            testing.assert_allclose(
                running_var_expected, running_var,
                **self.check_forward_options)
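
The running-statistics branch of this test relies on the fact that, when running_mean and running_var arrays are passed in, functions.batch_normalization updates them in place as an exponential moving average (running = decay * running + (1 - decay) * batch statistic, with an unbiased adjustment applied to the variance). A small illustrative sketch with assumed shapes:

    import numpy as np
    from chainer import functions

    x = np.random.randn(16, 3).astype(np.float32)
    gamma = np.ones(3, dtype=np.float32)
    beta = np.zeros(3, dtype=np.float32)
    running_mean = np.zeros(3, dtype=np.float32)
    running_var = np.ones(3, dtype=np.float32)

    functions.batch_normalization(
        x, gamma, beta, eps=2e-5,
        running_mean=running_mean, running_var=running_var, decay=0.9)
    # running_mean and running_var have now moved toward the batch statistics.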
Example 6
    def check_forward(self, inputs, backend_config):
        if self.running_statistics:
            running_mean_expected = self.running_mean.copy()
            running_var_expected = self.running_var.copy()
        else:
            running_mean_expected = None
            running_var_expected = None

        y_expected, = self.forward_cpu(
            inputs, running_mean_expected, running_var_expected)

        inputs = backend_config.get_array(inputs)
        running_mean = backend_config.get_array(self.running_mean)
        running_var = backend_config.get_array(self.running_var)

        if not self.c_contiguous:
            with backend_config:
                inputs = _as_noncontiguous_array(inputs)
                running_mean = _as_noncontiguous_array(running_mean)
                running_var = _as_noncontiguous_array(running_var)

        with backend_config:
            y = functions.batch_normalization(
                *inputs, running_mean=running_mean,
                running_var=running_var, **self.bn_options)
        assert y.data.dtype == self.dtype

        testing.assert_allclose(
            y_expected, y.data, **self.check_forward_options)
        if self.running_statistics:
            testing.assert_allclose(
                running_mean_expected, running_mean,
                **self.check_forward_options)
            testing.assert_allclose(
                running_var_expected, running_var,
                **self.check_forward_options)
Example 7
 def test_invalid_batch_no_batch_axis(self):
     args = self.create_batch((1, 3,), (1, 3, 1))
     with testing.assert_warns(UserWarning):
         functions.batch_normalization(*args, axis=2)
Example 8
 def test_invalid(self):
     with self.assertRaises(RuntimeError):
         functions.batch_normalization(*self.args, eps=2e-6)
Example 9
    def __call__(self, x, **kwargs):
        """__call__(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        .. warning::

           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', False)``.
           See :func:`chainer.using_config`.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        argument.check_unexpected_kwargs(
            kwargs, test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))

        if hasattr(self, 'gamma'):
            gamma = self.gamma
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype))

        
        if configuration.config.train or self.enforce_compute:
            # If enforce_compute is True, batch statistics are computed even
            # outside of training mode.
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,
                running_var=self.avg_var, decay=decay)
        else:  # evaluation mode (not training)
            # Use running average statistics or fine-tuned statistics.
            mean = variable.Variable(self.avg_mean)
            var = variable.Variable(self.avg_var)
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps)
        return ret
Example 10
    def __call__(self, x, **kwargs):
        """__call__(self, x, finetune=False)
        Invokes the forward propagation of BatchNormalization.
        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.
        .. warning::
           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', False)``.
           See :func:`chainer.using_config`.
        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.
        """
        # check argument
        argument.check_unexpected_kwargs(
            kwargs,
            test='test argument is not supported anymore. '
            'Use chainer.using_config')
        finetune, = argument.parse_kwargs(kwargs, ('finetune', False))

        original_shape = x.shape
        batch_size = original_shape[0]
        # add a leading axis to x when the batch size is greater than 1
        if batch_size > 1:
            reshaped_x = functions.expand_dims(x, axis=0)
        else:
            reshaped_x = x

        if hasattr(self, 'gamma'):
            gamma = self.gamma
            if self.norm_grad:
                gamma.add_batch(batch_size)
        else:
            with cuda.get_device_from_id(self._device_id):
                gamma = variable.Variable(
                    self.xp.ones(self.avg_mean.shape, dtype=x.dtype))
        if hasattr(self, 'beta'):
            beta = self.beta
            if self.norm_grad:
                beta.add_batch(batch_size)
        else:
            with cuda.get_device_from_id(self._device_id):
                beta = variable.Variable(
                    self.xp.zeros(self.avg_mean.shape, dtype=x.dtype))

        # align parameter shapes with the expanded x
        if batch_size > 1:
            mean = self.xp.stack((self.avg_mean, ) * batch_size)
            var = self.xp.stack((self.avg_var, ) * batch_size)
            gamma = functions.stack((gamma, ) * batch_size)
            beta = functions.stack((beta, ) * batch_size)
        else:
            mean = self.xp.asarray(self.avg_mean)
            var = self.xp.asarray(self.avg_var)

        chainer_version = int(chainer.__version__.split('.')[0])

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            if chainer_version < 3:
                func = batch_normalization.BatchNormalizationFunction(
                    self.eps, mean, var, decay)
                ret = func(reshaped_x, gamma, beta)
            else:
                ret = functions.batch_normalization(reshaped_x,
                                                    gamma,
                                                    beta,
                                                    eps=self.eps,
                                                    running_mean=mean,
                                                    running_var=var,
                                                    decay=decay)

        else:
            head_ndim = gamma.ndim + 1
            axis = (0, ) + tuple(range(head_ndim, reshaped_x.ndim))
            mean = reshaped_x.data.mean(axis=axis)
            var = reshaped_x.data.var(axis=axis)
            ret = functions.fixed_batch_normalization(reshaped_x, gamma, beta,
                                                      mean, var, self.eps)

        # ret is the normalized input; restore the original shape if an axis was added
        if batch_size > 1:
            ret = functions.reshape(ret, original_shape)
        return ret
Example 11
 def f(*inputs):
     y = functions.batch_normalization(*inputs,
                                       decay=self.decay,
                                       eps=self.eps,
                                       axis=self.axis)
     return y,
Example 12
 def f(*inputs):
     y = functions.batch_normalization(
         *inputs, decay=self.decay, eps=self.eps)
     return y * y,  # make nonlinear against beta
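
A wrapper like this is typically handed to gradient_check.check_double_backward; because the output of batch normalization is linear in beta, squaring it makes the second-order gradient with respect to beta non-trivial, which is what the comment refers to. A self-contained sketch of that usage (the arrays and tolerances here are illustrative assumptions, not the test's actual setup):

    import numpy as np
    from chainer import functions, gradient_check

    def f(x, gamma, beta):
        y = functions.batch_normalization(x, gamma, beta, eps=2e-5)
        return y * y,                    # nonlinear in beta, as above

    x = np.random.randn(8, 3)
    gamma = np.random.randn(3)
    beta = np.random.randn(3)
    gy = np.random.randn(8, 3)
    ggx = np.random.randn(8, 3)
    gggamma = np.random.randn(3)
    ggbeta = np.random.randn(3)

    gradient_check.check_double_backward(
        f, (x, gamma, beta), (gy,), (ggx, gggamma, ggbeta),
        atol=1e-3, rtol=1e-3)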
Example 13
    def forward(self, x, **kwargs):
        """forward(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        .. warning::

           ``test`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', False)``.
           See :func:`chainer.using_config`.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        finetune, = argument.parse_kwargs(
            kwargs, ('finetune', False),
            test='test argument is not supported anymore. '
                 'Use chainer.using_config')

        if self.avg_mean is None:
            param_shape = tuple([
                d
                for i, d in enumerate(x.shape)
                if i not in self.axis])
            self._initialize_params(param_shape)

        gamma = self.gamma
        if gamma is None:
            with cuda.get_device_from_id(self._device_id):
                gamma = self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype)

        beta = self.beta
        if beta is None:
            with cuda.get_device_from_id(self._device_id):
                beta = self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype)

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=self.avg_mean,
                running_var=self.avg_var, decay=decay, axis=self.axis)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = self.avg_mean
            var = self.avg_var
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps, axis=self.axis)
        return ret
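
The finetune branch above (decay = 1 - 1/N) turns the exponential moving average into a cumulative average, so iterating over a dataset once with ``finetune=True`` leaves approximate population statistics in ``avg_mean`` / ``avg_var``. A hedged sketch of that workflow; the data source below is a stand-in generator, not part of the original example:

    import numpy as np
    import chainer
    import chainer.links as L

    bn = L.BatchNormalization(3)
    bn.start_finetuning()                 # reset the accumulation counter N

    # Stand-in data source: ten random batches of shape (8, 3).
    dataset_iterator = (np.random.randn(8, 3).astype(np.float32) for _ in range(10))

    with chainer.using_config('train', True):
        for batch in dataset_iterator:
            bn(batch, finetune=True)      # accumulate population statistics

    # bn.avg_mean / bn.avg_var now hold the accumulated statistics and are
    # used by fixed_batch_normalization once config.train is False.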
Example 14
 def test_invalid(self):
     with self.assertRaises(RuntimeError):
         functions.batch_normalization(*self.args, eps=2e-6)
Example 15
 def test_invalid_batch(self):
     args = self.create_batch((3, ), (1, 3))
     with testing.assert_warns(UserWarning):
         functions.batch_normalization(*args)
Example 16
 def test_valid(self):
     functions.batch_normalization(*self.args, eps=1e-5)
Example 17
 def forward(self):
     return functions.batch_normalization(
         *[chainer.Variable(i) for i in self.args],
         eps=self.eps,
         running_mean=self.mean,
         running_var=self.var)
Example 18
 def f(*inputs):
     return functions.batch_normalization(*inputs, **self.bn_options)
Example 19
 def test_valid_batch_no_batch_axis(self):
     args = self.create_batch((1, 3,), (1, 3, 2))
     with warnings.catch_warnings(record=True) as w:
         functions.batch_normalization(*args, axis=2)
         assert len(w) == 0
Example 20
 def test_invalid(self):
     eps = -0.1
     if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
         eps = 2e-6
     with self.assertRaises(RuntimeError):
         functions.batch_normalization(*self.args, eps=eps)
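
Reading this together with the other eps tests, the constraint appears to come from cuDNN rather than from Chainer itself: builds older than 7.5 define CUDNN_BN_MIN_EPSILON as 1e-5 (so 2e-6 is rejected), while newer builds only require a non-negative value (hence the -0.1 probe). This is an inference from the version check above. On a path that does not use cuDNN, a small positive eps is accepted, as in this sketch:

    import numpy as np
    import chainer
    from chainer import functions

    x = np.random.randn(4, 3).astype(np.float32)
    gamma = np.ones(3, dtype=np.float32)
    beta = np.zeros(3, dtype=np.float32)

    # With cuDNN disabled, eps values below cuDNN's minimum are allowed.
    with chainer.using_config('use_cudnn', 'never'):
        y = functions.batch_normalization(x, gamma, beta, eps=2e-6)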
Example 21
 def batch_normalization(self, *args):
     return functions.batch_normalization(
         *args, decay=self.decay, eps=self.eps)
Example 22
    def forward(self, x, **kwargs):
        """forward(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        finetune, = argument.parse_kwargs(
            kwargs, ('finetune', False),
            test='test argument is not supported anymore. '
                 'Use chainer.using_config')

        if self.avg_mean is None:
            param_shape = tuple([
                d
                for i, d in enumerate(x.shape)
                if i not in self.axis])
            self._initialize_params(param_shape)

        gamma = self.gamma
        if gamma is None:
            with chainer.using_device(self.device):
                gamma = self.xp.ones(
                    self.avg_mean.shape, dtype=self._highprec_dtype)

        beta = self.beta
        if beta is None:
            with chainer.using_device(self.device):
                beta = self.xp.zeros(
                    self.avg_mean.shape, dtype=self._highprec_dtype)

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            avg_mean = self.avg_mean
            avg_var = self.avg_var

            if chainer.config.in_recomputing:
                # Do not update statistics when extra forward computation is
                # called.
                if finetune:
                    self.N -= 1  # Revert the count
                avg_mean = None
                avg_var = None

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=avg_mean,
                running_var=avg_var, decay=decay, axis=self.axis)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = self.avg_mean
            var = self.avg_var
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps, axis=self.axis)
        return ret
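
The axis argument threaded through this link is exposed by functions.batch_normalization itself and selects the axes over which the batch statistics are aggregated. A short illustrative sketch (the NHWC layout and shapes are assumptions for the example), normalizing per channel by reducing over the batch and spatial axes:

    import numpy as np
    from chainer import functions

    x = np.random.randn(8, 32, 32, 3).astype(np.float32)   # assumed NHWC layout
    gamma = np.ones(3, dtype=np.float32)                    # one scale per channel
    beta = np.zeros(3, dtype=np.float32)                    # one shift per channel

    # Aggregate mean/variance over batch and spatial axes, normalize per channel.
    y = functions.batch_normalization(x, gamma, beta, eps=2e-5, axis=(0, 1, 2))
    assert y.shape == x.shape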
Example 23
 def f(*inputs):
     y = functions.batch_normalization(
         *inputs, decay=self.decay, eps=self.eps)
     return y,
Example 24
 def test_valid_batch(self):
     args = self.create_batch((3, ), (1, 3, 2, 2))
     with warnings.catch_warnings(record=True) as w:
         functions.batch_normalization(*args)
         assert len(w) == 0
Example 25
 def f(*inputs):
     y = functions.batch_normalization(*inputs,
                                       decay=self.decay,
                                       eps=self.eps,
                                       axis=self.axis)
     return y * y,  # make nonlinear against beta
Example 26
 def f(*inputs):
     return functions.batch_normalization(
         *inputs, **self.bn_options)
Example 27
 def forward(self):
     return functions.batch_normalization(
         *[chainer.Variable(i) for i in self.args], eps=self.eps,
         running_mean=self.mean, running_var=self.var,
         use_cudnn=self.use_cudnn)
Example 28
 def test_valid(self):
     functions.batch_normalization(*self.args, eps=1e-5)
Example 29
 def f(*inputs):
     y = functions.batch_normalization(
         *inputs, **self.bn_options)
     return y * y,  # make nonlinear against beta
Example 30
    def forward(self, x, **kwargs):
        """forward(self, x, finetune=False)

        Invokes the forward propagation of BatchNormalization.

        In training mode, the BatchNormalization computes moving averages of
        mean and variance for evaluation during training, and normalizes the
        input using batch statistics.

        Args:
            x (Variable): Input variable.
            finetune (bool): If it is in the training mode and ``finetune`` is
                ``True``, BatchNormalization runs in fine-tuning mode; it
                accumulates the input array to compute population statistics
                for normalization, and normalizes the input using batch
                statistics.

        """
        finetune, = argument.parse_kwargs(
            kwargs, ('finetune', False),
            test='test argument is not supported anymore. '
                 'Use chainer.using_config')

        if self.avg_mean is None:
            param_shape = tuple([
                d
                for i, d in enumerate(x.shape)
                if i not in self.axis])
            self._initialize_params(param_shape)

        gamma = self.gamma
        if gamma is None:
            with chainer.using_device(self.device):
                gamma = self.xp.ones(
                    self.avg_mean.shape, dtype=x.dtype)

        beta = self.beta
        if beta is None:
            with chainer.using_device(self.device):
                beta = self.xp.zeros(
                    self.avg_mean.shape, dtype=x.dtype)

        if configuration.config.train:
            if finetune:
                self.N += 1
                decay = 1. - 1. / self.N
            else:
                decay = self.decay

            avg_mean = self.avg_mean
            avg_var = self.avg_var

            if chainer.config.in_recomputing:
                # Do not update statistics when extra forward computation is
                # called.
                if finetune:
                    self.N -= 1  # Revert the count
                avg_mean = None
                avg_var = None

            ret = functions.batch_normalization(
                x, gamma, beta, eps=self.eps, running_mean=avg_mean,
                running_var=avg_var, decay=decay, axis=self.axis)
        else:
            # Use running average statistics or fine-tuned statistics.
            mean = self.avg_mean
            var = self.avg_var
            ret = functions.fixed_batch_normalization(
                x, gamma, beta, mean, var, self.eps, axis=self.axis)
        return ret