Example 1
def batch_normalization(x, mean, variance,
                        offset, scale,
                        decay=0.9,
                        variance_epsilon=1e-3,
                        use_global_stats=-1,
                        name=None):
    # Normalize with the given (moving) statistics, then apply the
    # learned affine transform as a separate Scale op.
    norm_x = ops.BatchNorm([x, mean, variance], decay, variance_epsilon, use_global_stats, name=name)
    return ops.Scale([norm_x, scale, offset], name=name + '_scale' if name is not None else name)
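For orientation, here is a minimal NumPy sketch of what this BatchNorm/Scale pair computes in inference mode. The ops module above appears to belong to the Dragon framework; the function below (batch_normalization_ref is a hypothetical name) is a plain re-implementation for illustration, not that API:

import numpy as np

def batch_normalization_ref(x, mean, variance, offset, scale,
                            variance_epsilon=1e-3):
    # Normalize with the given (moving) statistics...
    x_norm = (x - mean) / np.sqrt(variance + variance_epsilon)
    # ...then apply the learned scale and offset.
    return scale * x_norm + offset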
Example 2
def batch_norm_with_global_normalization(t, m, v,
                                         beta, gamma,
                                         decay=0.9,
                                         variance_epsilon=1e-3,
                                         scale_after_normalization=True,
                                         use_global_stats=-1,
                                         name=None):
    norm_x = ops.BatchNorm([t, m, v], decay, variance_epsilon, use_global_stats, name=name)
    if scale_after_normalization:
        return ops.Scale([norm_x, gamma, beta], name=name + '_scale' if name is not None else name)
    else:
        return norm_x
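This variant mirrors the signature of TensorFlow's (now deprecated) tf.nn.batch_norm_with_global_normalization; the only behavioral difference from Example 1 is the scale_after_normalization switch, which can skip the affine step entirely.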
Example 3
def batch_normalization(inputs, gamma, beta, mean, var, **kwargs):
    """Batch Normalization, introduced by `[Ioffe & Szegedy, 2015] <https://arxiv.org/abs/1502.03167>`_

    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    gamma : Tensor
        The scale parameter.
    beta : Tensor
        The shift parameter.
    mean : Tensor
        The moving average of mean.
    var : Tensor
        The moving average of variance.

    Returns
    -------
    Tensor
        The output tensor.

    """
    return ops.BatchNorm([inputs, mean, var, gamma, beta])
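For reference, the normalization these ops implement is the transform from the cited paper; in inference form, with the moving averages \mu and \sigma^2 standing in for the batch statistics:

    y = \gamma \cdot \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta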
Example 4
    def call(self, inputs, training=False):
        # Use batch statistics while training, moving statistics otherwise.
        use_stats = 0 if training else 1
        if self.fused:
            return ops.FusedBatchNorm(
                [inputs, self.moving_mean, self.moving_variance,
                 self.gamma, self.beta],
                axis=self.axis,
                momentum=self.momentum,
                eps=self.epsilon,
                use_stats=use_stats,
                mode='DEFAULT')

        x_norm = ops.BatchNorm(
            [inputs, self.moving_mean, self.moving_variance],
            axis=self.axis,
            momentum=self.momentum,
            eps=self.epsilon,
            use_stats=use_stats,
            mode='DEFAULT')
        if self.gamma is not None:
            # use scale
            if self.beta is not None:
                return ops.Scale([x_norm, self.gamma, self.beta],
                                 axis=self.axis,
                                 num_axes=1)
            else:
                return ops.Scale([x_norm, self.gamma],
                                 axis=self.axis,
                                 num_axes=1)
        else:
            # do not use scale
            if self.beta is not None:
                return ops.BiasAdd([x_norm, self.beta],
                                   data_format=self._data_format)
            else:
                return x_norm
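The use_stats flag toggles between batch statistics (training) and the moving averages (inference). Below is a minimal NumPy sketch of the training-mode update, assuming channels-last 2-D input and the common moving = momentum * moving + (1 - momentum) * batch convention; the exact convention used by ops.BatchNorm, and the function name batch_norm_train_step, are assumptions here:

import numpy as np

def batch_norm_train_step(x, moving_mean, moving_var,
                          momentum=0.9, eps=1e-3):
    # Training mode (use_stats=0): normalize with the current batch
    # statistics and update the moving averages in place.
    batch_mean = x.mean(axis=0)
    batch_var = x.var(axis=0)
    moving_mean *= momentum
    moving_mean += (1.0 - momentum) * batch_mean
    moving_var *= momentum
    moving_var += (1.0 - momentum) * batch_var
    return (x - batch_mean) / np.sqrt(batch_var + eps)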
Example 5
    def LayerSetup(self, bottom):
        inputs = [bottom] + [blob['data'] for blob in self._blobs]
        return _ops.BatchNorm(inputs, **self.arguments)
Example 6
    def Setup(self, bottom):
        super(BatchNormLayer, self).Setup(bottom)
        return ops.BatchNorm(bottom + [blob['data'] for blob in self._blobs],
                             **self._param)
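The last two examples follow the Caffe-style layer pattern: the layer's stored blobs (the moving mean and variance, and possibly scale factors) are appended to the bottom inputs and forwarded to ops.BatchNorm as a single input list.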