Example #1
def batch_norm(inputs,
               decay=0.999,
               center=True,
               scale=False,
               epsilon=0.001,
               activation_fn=None,
               param_initializers=None,
               param_regularizers=None,
               updates_collections=ops.GraphKeys.UPDATE_OPS,
               is_training=True,
               reuse=None,
               variables_collections=None,
               outputs_collections=None,
               trainable=True,
               batch_weights=None,
               fused=False,
               data_format=DATA_FORMAT_NHWC,
               zero_debias_moving_mean=False,
               scope=None,
               renorm=False,
               renorm_clipping=None,
               renorm_decay=0.99):
    scope = _default_scope(scope, 'BATCH_NORM', 'BatchNorm')
    if data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    axis = 1 if data_format == DATA_FORMAT_NCHW else -1

    with vs.variable_scope(scope, reuse=reuse) as sc:
        if not param_initializers:
            param_initializers = {}
        beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
        gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
        moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
        moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())

        if not param_regularizers:
            param_regularizers = {}

        beta_regularizer = param_regularizers.get('beta')
        gamma_regularizer = param_regularizers.get('gamma')

        return layers.batch_norm(
            inputs=inputs,
            axis=axis,
            momentum=decay,
            epsilon=epsilon,
            center=center,
            scale=scale,
            beta_initializer=beta_initializer,
            gamma_initializer=gamma_initializer,
            moving_mean_initializer=moving_mean_initializer,
            moving_variance_initializer=moving_variance_initializer,
            beta_regularizer=beta_regularizer,
            gamma_regularizer=gamma_regularizer,
            trainable=trainable,
            renorm=renorm,
            renorm_clipping=renorm_clipping,
            renorm_momentum=renorm_decay,
            fused=fused,
            training=is_training)
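The wrapper above only adapts argument names: the contrib-style decay and is_training become the momentum and training arguments of the underlying layer. A minimal usage sketch against the public TF 1.x API (tf.layers.batch_normalization); the input shape is an assumption:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])   # NHWC feature map
is_training = tf.placeholder(tf.bool, [])

# decay=0.999 / is_training in the wrapper map to momentum / training here.
y = tf.layers.batch_normalization(
    x, momentum=0.999, epsilon=0.001, center=True, scale=False,
    training=is_training)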
Example #2
def conv2d(inputs,
           filters,
           kernel_size,
           strides=(1, 1),
           padding='valid',
           data_format='channels_last',
           dilation_rate=(1, 1),
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           trainable=True,
           name=None,
           reuse=None):
    layer = Conv2D(filters=filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding=padding,
                   data_format=data_format,
                   dilation_rate=dilation_rate,
                   activation=activation,
                   use_bias=use_bias,
                   kernel_initializer=kernel_initializer,
                   bias_initializer=bias_initializer,
                   kernel_regularizer=kernel_regularizer,
                   bias_regularizer=bias_regularizer,
                   activity_regularizer=activity_regularizer,
                   trainable=trainable,
                   name=name,
                   _reuse=reuse,
                   _scope=name)
    return layer.apply(inputs)
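For comparison, a short sketch of the public TF 1.x functional form (tf.layers.conv2d) that this wrapper mirrors; the image shape and layer name are assumptions:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])   # channels_last
# Builds a Conv2D layer and applies it in one call, as conv2d() does above.
features = tf.layers.conv2d(images, filters=32, kernel_size=3,
                            strides=(1, 1), padding='same',
                            activation=tf.nn.relu, name='conv1')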
Example #3
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format='channels_last',
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
     super(Conv2D, self).__init__(rank=2,
                                  filters=filters,
                                  kernel_size=kernel_size,
                                  strides=strides,
                                  padding=padding,
                                  data_format=data_format,
                                  dilation_rate=dilation_rate,
                                  activation=activation,
                                  use_bias=use_bias,
                                  kernel_initializer=kernel_initializer,
                                  bias_initializer=bias_initializer,
                                  kernel_regularizer=kernel_regularizer,
                                  bias_regularizer=bias_regularizer,
                                  activity_regularizer=activity_regularizer,
                                  trainable=trainable,
                                  name=name,
                                  **kwargs)
Example #4
    def _get_default_initializer(
        self,
        name,
        shape=None,
        dtype=dtypes.float32,
    ):
        # Defaults: float32
        if dtype is None:
            dtype = dtypes.float32

        # Xavier for float16, float32, float64
        if dtype.is_floating:
            initializer = init_ops.glorot_uniform_initializer()

        # Zeros for integers
        elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
            initializer = init_ops.zeros_initializer()(shape=shape,
                                                       dtype=dtype.base_dtype)

        # No default initializer exists for this dtype
        else:
            raise ValueError(
                'An initializer for Variable({}) of {} is required.'.format(
                    name, dtype.base_dtype))

        return initializer
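This mirrors the logic behind TF 1.x's defaults when tf.get_variable is called without an explicit initializer; a small sketch of the resulting behavior (variable names are arbitrary):

import tensorflow as tf

# Floating-point variables default to Glorot (Xavier) uniform.
w = tf.get_variable('w', shape=[3, 3], dtype=tf.float32)
# Integer variables default to zeros, per the branch above.
counts = tf.get_variable('counts', shape=[4], dtype=tf.int32)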
Example #5
def fully_connected(inputs,
                    num_outputs,
                    activation_fn=nn.relu,
                    normalizer_fn=None,
                    normalizer_params=None,
                    weights_initializer=initializers.xavier_initializer(),
                    weights_regularizer=None,
                    biases_initializer=init_ops.zeros_initializer(),
                    biases_regularizer=None,
                    reuse=None,
                    variables_collections=None,
                    outputs_collections=None,
                    trainable=True,
                    scope=None):
    scope = _default_scope(scope, 'FULLY_CONNECTED', 'fully_connected')
    with vs.variable_scope(scope, reuse=reuse) as sc:
        return layers.dense(
            inputs=inputs,
            units=num_outputs,
            activation=activation_fn,
            use_bias=True if biases_initializer is not None else False,
            kernel_initializer=weights_initializer,
            bias_initializer=biases_initializer,
            bias_regularizer=biases_regularizer,
            activity_regularizer=None,
            trainable=trainable,
            reuse=reuse)
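The shim keeps the contrib-style signature (num_outputs, activation_fn defaulting to ReLU) while delegating to the dense layer. A hedged sketch of the two equivalent call styles under TF 1.x; shapes and names are assumptions:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64])

# contrib style: activation defaults to ReLU, scope names the variables.
h1 = tf.contrib.layers.fully_connected(x, 128, scope='fc1')

# tf.layers equivalent, with the activation made explicit.
h2 = tf.layers.dense(x, units=128, activation=tf.nn.relu, name='fc2')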
Example #6
 def __init__(self, rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format='channels_last',
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
     super(_Conv, self).__init__(trainable=trainable, name=name, **kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = utils.normalize_tuple(kernel_size, rank, 'kernel_size')
     self.strides = utils.normalize_tuple(strides, rank, 'strides')
     self.padding = utils.normalize_padding(padding)
     self.data_format = utils.normalize_data_format(data_format)
     self.dilation_rate = utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.activity_regularizer = activity_regularizer
     self.input_spec = base.InputSpec(ndim=self.rank + 2)
Example #7
 def __init__(self, filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format='channels_last',
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
     super(Conv2D, self).__init__(
         rank=2,
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=activation,
         use_bias=use_bias,
         kernel_initializer=kernel_initializer,
         bias_initializer=bias_initializer,
         kernel_regularizer=kernel_regularizer,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         trainable=trainable,
         name=name, **kwargs)
Example #8
def dense(inputs,
          units,
          activation=None,
          use_bias=True,
          kernel_initializer=None,
          bias_initializer=init_ops.zeros_initializer(),
          kernel_regularizer=None,
          bias_regularizer=None,
          activity_regularizer=None,
          trainable=True,
          name=None,
          reuse=None):
    layer = Dense(units,
                  activation=activation,
                  use_bias=use_bias,
                  kernel_initializer=kernel_initializer,
                  bias_initializer=bias_initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer,
                  activity_regularizer=activity_regularizer,
                  trainable=trainable,
                  name=name,
                  _scope=name,
                  _reuse=reuse)
    return layer.apply(inputs)
Example #9
 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer=init_ops.zeros_initializer(),
              gamma_initializer=init_ops.ones_initializer(),
              moving_mean_initializer=init_ops.zeros_initializer(),
              moving_variance_initializer=init_ops.ones_initializer(),
              beta_regularizer=None,
              gamma_regularizer=None,
              renorm=False,
              renorm_clipping=None,
              renorm_momentum=0.99,
              fused=False,
              trainable=True,
              name=None,
              **kwargs):
     super(BatchNormalization, self).__init__(trainable=trainable,
                                              name=name,
                                              **kwargs)
     self.axis = axis
     self.momentum = momentum
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.beta_initializer = beta_initializer
     self.gamma_initializer = gamma_initializer
     self.moving_mean_initializer = moving_mean_initializer
     self.moving_variance_initializer = moving_variance_initializer
     self.beta_regularizer = beta_regularizer
     self.gamma_regularizer = gamma_regularizer
     self.renorm = renorm
     self.fused = fused
     self.trainable = trainable
     if fused:
         if not center or not scale:
             raise ValueError(
                 'fused norm requires both center and scale set to be True.'
             )
     if renorm:
         raise ValueError('renorm is currently not supported.')
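A sketch of how the constraint enforced above surfaces when using the public tf.layers.BatchNormalization class (assumed here to mirror this constructor): fused=True is only valid with both center and scale enabled.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 8, 16])
# Fused kernels need both the beta (center) and gamma (scale) parameters.
bn = tf.layers.BatchNormalization(fused=True, center=True, scale=True)
y = bn(x, training=True)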
Example #10
def batch_normalization(
        inputs,
        axis=-1,
        momentum=0.99,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer=init_ops.zeros_initializer(),
        gamma_initializer=init_ops.ones_initializer(),
        moving_mean_initializer=init_ops.zeros_initializer(),
        moving_variance_initializer=init_ops.ones_initializer(),
        beta_regularizer=None,
        gamma_regularizer=None,
        training=False,
        trainable=True,
        name=None,
        reuse=None,
        renorm=False,
        renorm_clipping=None,
        renorm_momentum=0.99,
        fused=False):
    layer = BatchNormalization(
        axis=axis,
        momentum=momentum,
        epsilon=epsilon,
        center=center,
        scale=scale,
        beta_initializer=beta_initializer,
        gamma_initializer=gamma_initializer,
        moving_mean_initializer=moving_mean_initializer,
        moving_variance_initializer=moving_variance_initializer,
        beta_regularizer=beta_regularizer,
        gamma_regularizer=gamma_regularizer,
        renorm=renorm,
        renorm_clipping=renorm_clipping,
        renorm_momentum=renorm_momentum,
        fused=fused,
        trainable=trainable,
        name=name,
        _reuse=reuse,
        _scope=name)
    return layer.apply(inputs, training=training)
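One caveat worth noting for the functional form: with training=True the layer creates moving mean/variance update ops that are not run automatically in graph mode; they land in tf.GraphKeys.UPDATE_OPS and must be attached to the training step. A sketch under TF 1.x (the loss and optimizer are placeholders for illustration):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 10])
labels = tf.placeholder(tf.float32, [None, 1])

h = tf.layers.batch_normalization(x, training=True)
pred = tf.layers.dense(h, 1)
loss = tf.losses.mean_squared_error(labels, pred)

# Run the moving mean/variance updates together with each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)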
Example #11
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99,
                        fused=False):
  layer = BatchNormalization(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name)
  return layer.apply(inputs, training=training)
Example #12
 def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
     if dtype is None: dtype = dtypes.float32
     if dtype.is_floating:
         initializer = init_ops.glorot_uniform_initializer()
     elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
         initializer = init_ops.zeros_initializer()(shape=shape,
                                                    dtype=dtype.base_dtype)
     else:
         raise ValueError(
             'An initializer for Variable({}) of {} is required.'.format(
                 name, dtype.base_dtype))
     return initializer
Example #13
 def __init__(self,
              axis=-1,
              momentum=0.99,
              epsilon=1e-3,
              center=True,
              scale=True,
              beta_initializer=init_ops.zeros_initializer(),
              gamma_initializer=init_ops.ones_initializer(),
              moving_mean_initializer=init_ops.zeros_initializer(),
              moving_variance_initializer=init_ops.ones_initializer(),
              beta_regularizer=None,
              gamma_regularizer=None,
              renorm=False,
              renorm_clipping=None,
              renorm_momentum=0.99,
              fused=False,
              trainable=True,
              name=None,
              **kwargs):
     super(BatchNormalization, self).__init__(trainable=trainable, name=name, **kwargs)
     self.axis = axis
     self.momentum = momentum
     self.epsilon = epsilon
     self.center = center
     self.scale = scale
     self.beta_initializer = beta_initializer
     self.gamma_initializer = gamma_initializer
     self.moving_mean_initializer = moving_mean_initializer
     self.moving_variance_initializer = moving_variance_initializer
     self.beta_regularizer = beta_regularizer
     self.gamma_regularizer = gamma_regularizer
     self.renorm = renorm
     self.fused = fused
     self.trainable = trainable
     if fused:
         if not center or not scale:
             raise ValueError('fused norm requires both center and scale set to be True.')
     if renorm:
         raise ValueError('renorm is currently not supported.')
Example #14
def convolution(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    data_format=None,
    rate=1,
    activation_fn=nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=init_ops.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None,
):
    scope = _default_scope(scope, 'Conv')
    if data_format not in [None, 'NHWC', 'NCHW']:
        raise ValueError('Invalid data_format: %r' % (data_format,))
    data_format = 'channels_first' if data_format == 'NCHW' else 'channels_last'
    input_rank = inputs.get_shape().ndims

    with vs.variable_scope(scope, reuse=reuse) as sc:
        if input_rank == 4:
            return layers.conv2d(
                inputs=inputs,
                filters=num_outputs,
                kernel_size=kernel_size,
                strides=stride,
                padding=padding,
                data_format=data_format,
                dilation_rate=rate,
                activation=activation_fn,
                use_bias=True if biases_initializer is not None else False,
                kernel_initializer=weights_initializer,
                bias_initializer=biases_initializer,
                bias_regularizer=biases_regularizer,
                activity_regularizer=None,
                trainable=trainable,
                reuse=reuse,
            )
Example #15
def convolution(inputs,
                num_outputs,
                kernel_size,
                stride=1,
                padding='SAME',
                data_format=None,
                rate=1,
                activation_fn=nn.relu,
                normalizer_fn=None,
                normalizer_params=None,
                weights_initializer=initializers.xavier_initializer(),
                weights_regularizer=None,
                biases_initializer=init_ops.zeros_initializer(),
                biases_regularizer=None,
                reuse=None,
                variables_collections=None,
                outputs_collections=None,
                trainable=True,
                scope=None):
    scope = _default_scope(scope, 'CONVOLUTION', 'Conv')
    if data_format not in [None, 'NHWC', 'NCHW']:
        raise ValueError('Invalid data_format: %r' % (data_format,))
    data_format = 'channels_first' if data_format == 'NCHW' else 'channels_last'
    input_rank = inputs.get_shape().ndims

    with vs.variable_scope(scope, reuse=reuse) as sc:
        if input_rank == 4:
            return layers.conv2d(
                inputs=inputs,
                filters=num_outputs,
                kernel_size=kernel_size,
                strides=stride,
                padding=padding,
                data_format=data_format,
                dilation_rate=rate,
                activation=activation_fn,
                use_bias=True if biases_initializer is not None else False,
                kernel_initializer=weights_initializer,
                bias_initializer=biases_initializer,
                bias_regularizer=biases_regularizer,
                activity_regularizer=None,
                trainable=trainable,
                reuse=reuse)
Example #16
 def __init__(self, units,
              activation=None,
              use_bias=True,
              kernel_initializer=None,
              bias_initializer=init_ops.zeros_initializer(),
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              trainable=True,
              name=None,
              **kwargs):
     super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
     self.units = units
     self.activation = activation
     self.use_bias = use_bias
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.kernel_regularizer = kernel_regularizer
     self.bias_regularizer = bias_regularizer
     self.activity_regularizer = activity_regularizer
     self.input_spec = base.InputSpec(min_ndim=2)
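Unlike the functional dense() shown earlier, the class form is constructed once and can be applied to several inputs, sharing its kernel and bias. A minimal sketch with the public tf.layers.Dense (TF 1.x); shapes are assumptions:

import tensorflow as tf

layer = tf.layers.Dense(units=10, activation=tf.nn.relu,
                        bias_initializer=tf.zeros_initializer())
a = tf.placeholder(tf.float32, [None, 4])
b = tf.placeholder(tf.float32, [None, 4])

# Both applications reuse the same kernel/bias variables.
out_a = layer(a)
out_b = layer(b)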
Example #17
def dense(inputs, units,
          activation=None,
          use_bias=True,
          kernel_initializer=None,
          bias_initializer=init_ops.zeros_initializer(),
          kernel_regularizer=None,
          bias_regularizer=None,
          activity_regularizer=None,
          trainable=True,
          name=None,
          reuse=None):
    layer = Dense(units,
                  activation=activation,
                  use_bias=use_bias,
                  kernel_initializer=kernel_initializer,
                  bias_initializer=bias_initializer,
                  kernel_regularizer=kernel_regularizer,
                  bias_regularizer=bias_regularizer,
                  activity_regularizer=activity_regularizer,
                  trainable=trainable,
                  name=name,
                  _scope=name,
                  _reuse=reuse)
    return layer.apply(inputs)
Example #18
def conv2d(inputs,
           filters,
           kernel_size,
           strides=(1, 1),
           padding='valid',
           data_format='channels_last',
           dilation_rate=(1, 1),
           activation=None,
           use_bias=True,
           kernel_initializer=None,
           bias_initializer=init_ops.zeros_initializer(),
           kernel_regularizer=None,
           bias_regularizer=None,
           activity_regularizer=None,
           trainable=True,
           name=None,
           reuse=None):
    layer = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        trainable=trainable,
        name=name,
        _reuse=reuse,
        _scope=name)
    return layer.apply(inputs)