def bias_add(value, bias, data_format='NCHW', name=None):
    """Adds `bias` to `value`.

    This is (mostly) a special case of `tf.add` where `bias` is restricted
    to 1-D. Broadcasting is supported, so `value` may have any number of
    dimensions. Unlike `tf.add`, the type of `bias` is allowed to differ
    from `value` in the case where both types are quantized.

    Args:
        value: A `Tensor`.
        bias: A 1-D `Tensor` with size matching the channel dimension of `value`.
        data_format: A string. 'NHWC' and 'NCHW' are supported.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` with the same type as `value`.
    """
    # Forward the optional name instead of discarding it with `name=None`.
    return ops.BiasAdd([value, bias], data_format=data_format, name=name)
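
# Usage sketch for `bias_add` (illustrative only; `ops.Ones` stands in for
# any tensor constructor and is an assumption, not part of this module):
#
#   x = ops.Ones([2, 3, 4, 5])                # (N, C, H, W) under 'NCHW'
#   b = ops.Ones([3])                         # 1-D bias, one value per channel
#   y = bias_add(x, b, data_format='NCHW')    # broadcasts b over N, H, W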
def call(self, inputs, training=False):
    # Compute batch statistics during training; use the tracked
    # moving statistics during inference.
    use_stats = 0 if training else 1
    if self.fused:
        # Fused path: normalization, scale, and shift in a single op.
        return ops.FusedBatchNorm(
            [inputs, self.moving_mean, self.moving_variance,
             self.gamma, self.beta],
            axis=self.axis,
            momentum=self.momentum,
            eps=self.epsilon,
            use_stats=use_stats,
            mode='DEFAULT')
    # Unfused path: normalize first, then apply the affine
    # transform (if any) as separate ops.
    x_norm = ops.BatchNorm(
        [inputs, self.moving_mean, self.moving_variance],
        axis=self.axis,
        momentum=self.momentum,
        eps=self.epsilon,
        use_stats=use_stats,
        mode='DEFAULT')
    if self.gamma is not None:
        # Use scale: apply gamma, and beta if present.
        if self.beta is not None:
            return ops.Scale([x_norm, self.gamma, self.beta],
                             axis=self.axis, num_axes=1)
        else:
            return ops.Scale([x_norm, self.gamma],
                             axis=self.axis, num_axes=1)
    else:
        # Do not use scale: shift with beta only, or return
        # the raw normalization.
        if self.beta is not None:
            return ops.BiasAdd([x_norm, self.beta],
                               data_format=self._data_format)
        else:
            return x_norm
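
# Usage sketch for `call` (hypothetical layer construction; the constructor
# arguments mirror the attributes this method reads and are assumptions):
#
#   layer = BatchNormalization(axis=1, momentum=0.9, epsilon=1e-3, fused=True)
#   y_train = layer.call(x, training=True)    # normalizes with batch statistics
#   y_infer = layer.call(x, training=False)   # normalizes with moving statistics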
def bias_add(value, bias, data_format='NHWC', name=None):
    # Forward the optional name, which was previously accepted but ignored.
    return ops.BiasAdd([value, bias], data_format=data_format, name=name)