Example #1
    def test_sum_regularizer(self):
        l1_function = regularizers.l1_regularizer(.1)
        l2_function = regularizers.l2_regularizer(.2)
        self.assertIsNone(regularizers.sum_regularizer([]))
        self.assertIsNone(regularizers.sum_regularizer([None]))
        self.assertIsNone(
            regularizers.sum_regularizer([regularizers.l1_regularizer(.0)
                                          ])(None))

        values = np.array([-3.])
        weights = constant_op.constant(values)
        with session.Session() as sess:
            l1_reg1 = regularizers.sum_regularizer([l1_function])
            l1_result1 = sess.run(l1_reg1(weights))

            l1_reg2 = regularizers.sum_regularizer([l1_function, None])
            l1_result2 = sess.run(l1_reg2(weights))

            l1_reg3 = regularizers.sum_regularizer(
                [l1_function, regularizers.l2_regularizer(.0)])
            l1_result3 = sess.run(l1_reg3(weights))

            l1_l2_reg = regularizers.sum_regularizer(
                [l1_function, l2_function])
            l1_l2_result = sess.run(l1_l2_reg(weights))

        self.assertAllClose(.1 * np.abs(values).sum(), l1_result1)
        self.assertAllClose(.1 * np.abs(values).sum(), l1_result2)
        self.assertAllClose(.1 * np.abs(values).sum(), l1_result3)
        self.assertAllClose(
            .1 * np.abs(values).sum() + .2 * np.power(values, 2).sum() / 2.0,
            l1_l2_result)
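For the combined L1 + L2 case checked in the last assertion, contrib also provides `regularizers.l1_l2_regularizer`, which should yield the same value; a minimal sketch under the same imports as the test above:

# Sketch only: l1_l2_regularizer(.1, .2) is expected to match
# sum_regularizer([l1_regularizer(.1), l2_regularizer(.2)]) on the same weights.
l1_l2_fn = regularizers.l1_l2_regularizer(scale_l1=.1, scale_l2=.2)
weights = constant_op.constant(np.array([-3.]))
with session.Session() as sess:
    combined_result = sess.run(l1_l2_fn(weights))
# Expected: .1 * 3. + .2 * 9. / 2. = 0.3 + 0.9 = 1.2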
Example #2
def alexnet_v2_arg_scope(weight_decay=0.0005):
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
            activation_fn=nn_ops.relu,
            biases_initializer=init_ops.constant_initializer(0.1),
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d], padding='SAME'):
            with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
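The returned scope is typically re-entered when the network graph is built; a rough usage sketch (the `alexnet_v2` model function and the `inputs` tensor are assumptions here, e.g. from the corresponding contrib/slim nets module):

with arg_scope(alexnet_v2_arg_scope(weight_decay=0.0005)):
    # Every conv2d / fully_connected layer created inside this block inherits
    # the ReLU activation, bias initializer and L2 weight regularizer above.
    logits, end_points = alexnet_v2(inputs, num_classes=1000, is_training=True)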
Example #3
    def test_apply_zero_regularization(self):
        regularizer = regularizers.l2_regularizer(0.0)
        array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
        tensor_weights_list = [
            constant_op.constant(x) for x in array_weights_list
        ]
        with self.cached_session():
            result = regularizers.apply_regularization(regularizer,
                                                       tensor_weights_list)
            self.assertAllClose(0.0, result.eval())
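For comparison, a hypothetical companion test with a non-zero scale, sketched here only for illustration, would check that `apply_regularization` returns the summed penalty over all tensors (and, in the contrib implementation, records it in the `REGULARIZATION_LOSSES` collection):

    def test_apply_nonzero_regularization_sketch(self):
        # Illustrative sketch; not part of the original test suite.
        regularizer = regularizers.l2_regularizer(0.01)
        array_weights_list = [[1.5], [2, 3, 4.2], [10, 42, 666.6]]
        tensor_weights_list = [
            constant_op.constant(x) for x in array_weights_list
        ]
        with self.cached_session():
            result = regularizers.apply_regularization(regularizer,
                                                       tensor_weights_list)
            # l2_regularizer contributes scale * sum(w**2) / 2 per tensor.
            expected = 0.01 / 2.0 * sum(
                v**2 for x in array_weights_list for v in x)
            self.assertAllClose(expected, result.eval())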
Example #4
def inception_v3_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
    """Defines the default InceptionV3 arg scope.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_var_collection: The name of the collection for the batch norm
        variables.
      batch_norm_decay: Decay for the batch norm moving average.
      batch_norm_epsilon: Small float added to variance to avoid division by zero.
      updates_collections: Collections for the update ops of the layer.
      use_fused_batchnorm: Enable fused batchnorm.

    Returns:
      An `arg_scope` to use for the inception v3 model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        # collection containing update_ops.
        'updates_collections': updates_collections,
        # Use fused batch norm if possible.
        'fused': use_fused_batchnorm,
        # collection containing the moving mean and moving variance.
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }

    # Set weight_decay for weights in Conv and FC layers.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d],
                       weights_initializer=initializers.
                       variance_scaling_initializer(),
                       activation_fn=nn_ops.relu,
                       normalizer_fn=layers_lib.batch_norm,
                       normalizer_params=batch_norm_params) as sc:
            return sc
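Because `updates_collections` defaults to `ops.GraphKeys.UPDATE_OPS`, the batch-norm moving averages are only refreshed if those update ops actually run; a common TF 1.x training-loop sketch (the `optimizer` and `loss` are assumptions):

update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
    # Tie the batch-norm moving-average updates to the training step.
    train_op = optimizer.minimize(loss)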
Example #5
def inception_v1_arg_scope(weight_decay=0.00004,
                           use_batch_norm=True,
                           batch_norm_var_collection='moving_vars'):
    """Defines the default InceptionV1 arg scope.

    Note: Although the original paper didn't use batch_norm, we found it useful.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      use_batch_norm: If `True`, batch_norm is applied after each convolution.
      batch_norm_var_collection: The name of the collection for the batch norm
        variables.

    Returns:
      An `arg_scope` to use for the inception v1 model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # collection containing update_ops.
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
        # collection containing the moving mean and moving variance.
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }
    if use_batch_norm:
        normalizer_fn = layers_lib.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}
    # Set weight_decay for weights in Conv and FC layers.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope([layers.conv2d],
                       weights_initializer=initializers.
                       variance_scaling_initializer(),
                       activation_fn=nn_ops.relu,
                       normalizer_fn=normalizer_fn,
                       normalizer_params=normalizer_params) as sc:
            return sc
Example #6
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        activation_fn=nn_ops.relu,
        weights_regularizer=regularizers.l2_regularizer(weight_decay),
            biases_initializer=init_ops.zeros_initializer()):
        with arg_scope([layers.conv2d], padding='SAME') as arg_sc:
            return arg_sc
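The `weights_regularizer` installed by this scope only records L2 penalties in the `REGULARIZATION_LOSSES` collection; they still have to be folded into the training objective. A rough sketch (the `vgg_16` model function, `images`, `cross_entropy`, and a `math_ops` import from `tensorflow.python.ops` are assumptions):

with arg_scope(vgg_arg_scope(weight_decay=0.0005)):
    logits, _ = vgg_16(images, num_classes=1000)
# Sum the L2 penalties recorded by the scope and add them to the loss.
reg_losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
total_loss = cross_entropy + math_ops.add_n(reg_losses)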
Example #7
def resnet_arg_scope(weight_decay=0.0001,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    """Defines the default ResNet arg scope.

    The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_decay: The moving average decay when estimating layer activation
        statistics in batch normalization.
      batch_norm_epsilon: Small constant to prevent division by zero when
        normalizing activations by their variance in batch normalization.
      batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
        activations in the batch normalization layer.

    Returns:
      An `arg_scope` to use for the resnet models.
    """
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
    }

    with arg_scope(
        [layers_lib.conv2d],
            weights_regularizer=regularizers.l2_regularizer(weight_decay),
            weights_initializer=initializers.variance_scaling_initializer(),
            activation_fn=nn_ops.relu,
            normalizer_fn=layers.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([layers.batch_norm], **batch_norm_params):
            # The following implies padding='SAME' for pool1, which makes feature
            # alignment easier for dense prediction tasks. This is also used in
            # https://github.com/facebook/fb.resnet.torch. However the accompanying
            # code of 'Deep Residual Learning for Image Recognition' uses
            # padding='VALID' for pool1. You can switch to that choice by setting
            # slim.arg_scope([slim.layers.max_pool2d], padding='VALID').
            with arg_scope([layers.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
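As the comment above notes, the pool1 padding can be switched back to 'VALID' by re-entering an inner scope when the model is built; a minimal sketch (`inputs` and the `resnet_v1_50` model function are assumptions):

with arg_scope(resnet_arg_scope(weight_decay=0.0001)):
    # The inner scope overrides the padding='SAME' default set for max_pool2d above.
    with arg_scope([layers.max_pool2d], padding='VALID'):
        net, end_points = resnet_v1_50(inputs, num_classes=1000)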
Example #8
    def test_l2(self):
        with self.assertRaises(ValueError):
            regularizers.l2_regularizer(-1.)
        with self.assertRaises(ValueError):
            regularizers.l2_regularizer(0)

        self.assertIsNone(regularizers.l2_regularizer(0.)(None))

        values = np.array([1., -1., 4., 2.])
        weights = constant_op.constant(values)
        with session.Session() as sess:
            result = sess.run(regularizers.l2_regularizer(.42)(weights))

        self.assertAllClose(np.power(values, 2).sum() / 2.0 * .42, result)
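For these values the expected penalty works out to .42 * (1. + 1. + 16. + 4.) / 2. = .42 * 11. = 4.62, i.e. `scale * sum(v**2) / 2`.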
Example #9
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars'):
    """Defines the default InceptionV2 arg scope.

    Args:
      weight_decay: The weight decay to use for regularizing the model.
      batch_norm_var_collection: The name of the collection for the batch norm
        variables.

    Returns:
      An `arg_scope` to use for the inception v2 model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # collection containing update_ops.
        'updates_collections': ops.GraphKeys.UPDATE_OPS,
        # collection containing the moving mean and moving variance.
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }

    # Set weight_decay for weights in Conv and FC layers.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
            weights_regularizer=regularizers.l2_regularizer(weight_decay)):
        with arg_scope(
            [layers.conv2d],
            weights_initializer=initializers.variance_scaling_initializer(),
            activation_fn=nn_ops.relu,
            normalizer_fn=layers_lib.batch_norm,
                normalizer_params=batch_norm_params) as sc:
            return sc