Code example #1
def xfcn_arg_scope(weight_decay=0.0001,
                   batch_norm_decay=0.9997,
                   batch_norm_epsilon=0.001):
    """Defines the xfcn arg scope.
    Args:
    weight_decay: The l2 regularization coefficient.
    Returns:
    An arg_scope.
    """

    with slim.arg_scope([slim.convolution2d_transpose],
                        activation_fn=None,
                        weights_initializer=tf.random_normal_initializer(stddev=0.001),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=None,
                        trainable=False,
                        padding='VALID'), \
         slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        activation_fn=None,
                        weights_initializer=tf.random_normal_initializer(stddev=0.001),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=None,
                        biases_regularizer=None,
                        padding='SAME'), \
         slim.arg_scope([slim.batch_norm],
                        decay=batch_norm_decay,
                        epsilon=batch_norm_epsilon,
                        is_training=True) as arg_sc:
        return arg_sc
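
The function above only builds the scope; its defaults take effect once a caller re-enters it. A minimal usage sketch, assuming TF1 graph mode via tensorflow.compat.v1 and the standalone tf_slim package (names and shapes here are illustrative):

# Usage sketch (assumptions: tensorflow.compat.v1 and the tf_slim package).
import tensorflow.compat.v1 as tf
import tf_slim as slim

tf.disable_eager_execution()
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(xfcn_arg_scope(weight_decay=1e-4)):
    # conv2d inherits the initializer/regularizer/padding defaults of the scope.
    net = slim.conv2d(images, 64, [3, 3], scope='conv1')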
Code example #2
def inference(images,
              keep_probability,
              phase_train=True,
              bottleneck_layer_size=128,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection.
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }

    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(images,
                                   is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
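
Setting 'updates_collections': None, as above, forces batch norm to update its moving statistics in place, which simplifies training at the cost of extra ops on the forward pass. A sketch of the more common alternative, assuming TF1 graph mode with an optimizer and a total_loss defined elsewhere: leave the updates in tf.GraphKeys.UPDATE_OPS and attach them to the train op.

# Sketch: run batch-norm updates with the train step instead of in place
# (assumes TF1 graph mode; `optimizer` and `total_loss` are defined elsewhere).
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = optimizer.minimize(total_loss)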
Code example #3
def conv_net(inputs):
    '''
    Build a CNN.

    Parameters
    ----------
    inputs : a 4-D input tensor of shape [batch, height, width, channels]

    Returns
    -------
    net : a CNN architecture
    '''

    # Use arg_scope to avoid repeating the shared layer parameters.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=leaky_relu(0.005),
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):

        net = slim.conv2d(inputs, 512, (3, inputs.shape[2]), 1, padding='VALID', scope='conv_1')  # (3, dimension_count)
        net = slim.max_pool2d(net, (4, 1), 4, padding='VALID', scope='pool_2')
        net = slim.conv2d(net, 512, (5, 1), 1, scope='conv_3')
        net = slim.max_pool2d(net, (4, 1), 4, padding='VALID', scope='pool_4')
        net = slim.flatten(net, scope='flatten_5')
        net = slim.fully_connected(net, 2, scope='fc_6', activation_fn=tf.nn.softmax)

    return net
Code example #4
    def __init__(self, net, labels_one_hot, model_params, method_params):
        """
        Stores arguments in member variable for further use
        :param net: shape [batch_size, num_features, feature_size] which contains some extracted image features
        :param labels_one_hot: [batch_size, seq_length, num_char_classes]- ground truth labels for the input features
        :param model_params: a namedtuple with model parameters
        :param method_params: A SequenceLayerParams
        """
        self._params = model_params
        self._mparams = method_params
        self._net = net
        self._labels_one_hot = labels_one_hot
        self._batch_size = net.get_shape().dims[0]

        # Initialize parameters for char logits which will be computed on the fly
        # inside an LSTM decoder.
        self._char_logits = {}
        regularizer = tf_slim.l2_regularizer(self._mparams.weight_decay)

        self._softmax_w = tf_slim.model_variable(
            'softmax_w',
            [self._mparams.num_lstm_units, self._params.num_char_classes],
            initializer=orthogonal_initializer,
            regularizer=regularizer)

        self._softmax_b = tf_slim.model_variable(
            'softmax_b', [self._params.num_char_classes],
            initializer=tf.zeros_initializer(),
            regularizer=regularizer)
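
    # For context: variables like softmax_w/softmax_b typically feed an affine
    # logit computation inside the decoder. A hedged sketch of that shape (the
    # exact method in the source project may differ in details):
    def _char_logit(self, inputs, char_index):
        if char_index not in self._char_logits:
            self._char_logits[char_index] = tf.nn.xw_plus_b(
                inputs, self._softmax_w, self._softmax_b)
        return self._char_logits[char_index]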
Code example #5
File: cap2sg_linguistic.py  Project: yekeren/WSSGG
def enrich_features(options, dt):
  """Enrich text features.

  Args:
    options: A Cap2SGLinguistic proto.
    dt: A DataTuple object.
  """
  if not isinstance(options, model_pb2.Cap2SGLinguistic):
    raise ValueError('Options has to be a Cap2SGLinguistic proto.')

  if not isinstance(dt, DataTuple):
    raise ValueError('Invalid DataTuple object.')
  
  regularizer = slim.l2_regularizer(scale=float(options.weight_decay))

  gn = graph_networks.build_graph_network(options.graph_network,
                                          is_training=True)
  entity_embs, relation_embs = gn.compute_graph_embeddings(
      batch_n_node=dt.n_entity,
      batch_n_edge=dt.n_relation,
      batch_nodes=dt.entity_embs,
      batch_edges=dt.relation_embs,
      batch_senders=dt.relation_senders,
      batch_receivers=dt.relation_receivers,
      regularizer=regularizer)

  dt.refined_entity_embs = entity_embs
  dt.refined_relation_embs = relation_embs
  return dt
Code example #6
File: nasnet.py  Project: HabanaAI/Model-References
def nasnet_large_arg_scope(weight_decay=5e-5,
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=1e-3):
    """Defines the default arg scope for the NASNet-A Large ImageNet model.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.

  Returns:
    An `arg_scope` to use for the NASNet Large Model.
  """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        'scale': True,
        'fused': True,
    }
    weights_regularizer = slim.l2_regularizer(weight_decay)
    weights_initializer = slim.variance_scaling_initializer(mode='FAN_OUT')
    with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
                   weights_regularizer=weights_regularizer,
                   weights_initializer=weights_initializer):
        with arg_scope([slim.fully_connected], activation_fn=None, scope='FC'):
            with arg_scope([slim.conv2d, slim.separable_conv2d],
                           activation_fn=None,
                           biases_initializer=None):
                with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                    return sc
Code example #7
def inception_v3_arg_scope(weight_decay=0.00004,
                           stddev=0.1,
                           batch_norm_var_collection='moving_var'):
    batch_norm_params = {
        'decay': 0.9997,
        'epsilon': 0.001,
        'updates_collections': tf.compat.v1.GraphKeys.UPDATE_OPS,
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection]
        }
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
            [slim.conv2d],
                weights_initializer=tf.initializers.TruncatedNormal(
                    stddev=stddev),
                activation_fn=tf.compat.v1.nn.relu,
                normalizer_fn=slim.batch_norm,
                normalizer_params=batch_norm_params) as sc:

            return sc
Code example #8
def lenet(inputs, scope='lenet', is_training=True, reuse=False):
    layers = OrderedDict()
    net = inputs
    with tf.variable_scope(scope, reuse=reuse):
        with ExitStack() as stack:
            stack.enter_context(
                slim.arg_scope(
                    [slim.fully_connected, slim.conv2d],
                    activation_fn=tf.nn.relu,
                    weights_regularizer=slim.l2_regularizer(2.5e-5)))
            stack.enter_context(slim.arg_scope([slim.conv2d], padding='VALID'))
            net = slim.conv2d(net, 20, 5, scope='conv1')
            layers['conv1'] = net
            net = slim.max_pool2d(net, 2, stride=2, scope='pool1')
            layers['pool1'] = net
            net = slim.conv2d(net, 50, 5, scope='conv2')
            layers['conv2'] = net
            net = slim.max_pool2d(net, 2, stride=2, scope='pool2')
            layers['pool2'] = net
            net = tf.layers.flatten(net)
            net = slim.fully_connected(net, 500, scope='fc3')
            layers['fc3'] = net
            net = slim.fully_connected(net,
                                       10,
                                       activation_fn=None,
                                       scope='fc4')
            layers['fc4'] = net
    return net, layers
Code example #9
def vgg_arg_scope(weight_decay=0.0005):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
            return arg_sc
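
Layers created under scopes like this one register their l2 penalties in tf.GraphKeys.REGULARIZATION_LOSSES; they still have to be added to the objective by hand. A minimal sketch, assuming TF1 graph mode and a cross_entropy loss tensor defined elsewhere:

# Sketch: fold the collected l2 penalties into the training objective
# (assumes TF1 graph mode and a `cross_entropy` tensor defined elsewhere).
reg_loss = tf.losses.get_regularization_loss()
total_loss = cross_entropy + reg_loss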
Code example #10
def ghost_backbone(inputs, is_training=False):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm,
                        activation_fn=tf.nn.relu,
                        weights_initializer=slim.xavier_initializer(),
                        biases_initializer=None,
                        weights_regularizer=slim.l2_regularizer(0.0005),
                        padding='SAME'):
        with slim.arg_scope([slim.batch_norm],
                            center=True,
                            scale=True,
                            is_training=is_training):
            net = slim.conv2d(inputs, 16, kernel_size=3, stride=2, scope='Conv')
            feature_map_2 = net
            net = ghost_bottleneck(net, 32, 16, s_s=2, name='expanded_conv')
            feature_map_4 = net
            net = ghost_bottleneck(net, 72, 24, s_s=2, name='expanded_conv_1')
            net = ghost_bottleneck(net, 88, 24, s_s=1, name='expanded_conv_2')
            feature_map_8 = net
            net = ghost_bottleneck(net, 96, 40, s_s=2, name='expanded_conv_3')
            net = ghost_bottleneck(net, 240, 40, s_s=1, name='expanded_conv_4')
            net = ghost_bottleneck(net, 240, 40, s_s=1, name='expanded_conv_5')
            net = ghost_bottleneck(net, 120, 48, s_s=1, name='expanded_conv_6')
            net = ghost_bottleneck(net, 144, 48, s_s=1, name='expanded_conv_7')
            feature_map_16 = net
            net = ghost_bottleneck(net, 288, 96, s_s=2, name='expanded_conv_8')
            net = ghost_bottleneck(net, 576, 96, s_s=1, name='expanded_conv_9')
            net = ghost_bottleneck(net, 576, 96, s_s=1, name='expanded_conv_10')
            feature_map_32 = net

            return feature_map_4, feature_map_8, feature_map_16, feature_map_32
Code example #11
File: alexnet.py  Project: tpsgrp/python-app
def alexnet_v2_arg_scope(weight_decay=0.0005):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        biases_initializer=tf.constant_initializer(0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope([slim.conv2d], padding='SAME'):
            with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
                return arg_sc
Code example #12
File: scope_generator.py  Project: ylfzr/vega
def get_regularizer(desc):
    """Get regularizer function."""
    if desc.type == 'l1_regularizer':
        return slim.l1_regularizer(scale=float(desc.weight))
    elif desc.type == 'l2_regularizer':
        return slim.l2_regularizer(scale=float(desc.weight))
    else:
        raise ValueError('Unknown regularizer type: {}'.format(desc.type))
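
Here desc only needs type and weight attributes, and the returned regularizer is a callable mapping a weights tensor to a scalar penalty. An illustrative sketch, where SimpleNamespace stands in for vega's real config object and tf/slim are the names used throughout these examples:

# Illustrative only: SimpleNamespace stands in for vega's config object.
from types import SimpleNamespace

desc = SimpleNamespace(type='l2_regularizer', weight=1e-4)
regularizer = get_regularizer(desc)
penalty = regularizer(tf.ones([3, 3]))  # scalar tensor: 1e-4 * sum(w**2) / 2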
Code example #13
def training_scope(l2_weight_decay=1e-4, is_training=None):
  """Arg scope for training MnasFPN."""
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.initializers.he_normal(),
      weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
      slim.arg_scope(
          [slim.separable_conv2d],
          weights_initializer=tf.initializers.truncated_normal(
              stddev=0.536),  # He_normal for 3x3 depthwise kernel.
          weights_regularizer=slim.l2_regularizer(l2_weight_decay)), \
      slim.arg_scope([slim.batch_norm],
                     is_training=is_training,
                     epsilon=0.01,
                     decay=0.99,
                     center=True,
                     scale=True) as s:
    return s
Code example #14
def _get_base_scope_args(weight_decay):
    """Returns arguments needed to initialize the base `arg_scope`."""
    regularizer = slim.l2_regularizer(weight_decay)
    conv_weights_init = slim.xavier_initializer_conv2d()
    base_scope_args = {
        'weights_initializer': conv_weights_init,
        'activation_fn': tf.nn.relu,
        'weights_regularizer': regularizer,
    }
    return base_scope_args
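
The returned dict is meant to be splatted into slim.arg_scope. A minimal sketch, with the layer list and tensors chosen here as assumptions:

# Sketch: expand the kwargs into an arg_scope (the layer list is assumed).
with slim.arg_scope([slim.conv2d], **_get_base_scope_args(weight_decay=1e-4)):
    net = slim.conv2d(images, 32, [3, 3], scope='conv1')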
Code example #15
def mobilenet_v1_arg_scope(
        is_training=True,
        weight_decay=0.00004,
        stddev=0.09,
        regularize_depthwise=False,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
        normalizer_fn=slim.batch_norm):
    """Defines the default MobilenetV1 arg scope.

  Args:
    is_training: Whether or not we're training the model. If this is set to
      None, the parameter is not added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    regularize_depthwise: Whether or not to apply regularization on depthwise
      weights.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
    normalizer_fn: Normalization function to apply after convolution.

  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
    batch_norm_params = {
        'center': True,
        'scale': True,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'updates_collections': batch_norm_updates_collections,
    }
    if is_training is not None:
        batch_norm_params['is_training'] = is_training

    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = slim.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6,
                        normalizer_fn=normalizer_fn):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_regularizer=regularizer):
                with slim.arg_scope(
                    [slim.separable_conv2d],
                        weights_regularizer=depthwise_regularizer) as sc:
                    return sc
Code example #16
def resnet_arg_scope(
    weight_decay=0.0001,
    batch_norm_decay=0.997,
    batch_norm_epsilon=1e-5,
    batch_norm_scale=True,
    activation_fn=tf.nn.relu,
    use_batch_norm=True,
    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
  """Defines the default ResNet arg scope.

  TODO(gpapan): The batch-normalization related default values above are
    appropriate for use in conjunction with the reference ResNet models
    released at https://github.com/KaimingHe/deep-residual-networks. When
    training ResNets from scratch, they might need to be tuned.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_decay: The moving average decay when estimating layer activation
      statistics in batch normalization.
    batch_norm_epsilon: Small constant to prevent division by zero when
      normalizing activations by their variance in batch normalization.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.
    activation_fn: The activation function which is used in ResNet.
    use_batch_norm: Whether or not to use batch normalization.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.

  Returns:
    An `arg_scope` to use for the resnet152 models.
  """
  batch_norm_params = {
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'scale': batch_norm_scale,
      'updates_collections': batch_norm_updates_collections,
      'fused': None,  # Use fused batch norm if possible.
  }

  with slim.arg_scope(
      [slim.conv2d],
      weights_regularizer=slim.l2_regularizer(weight_decay),
      weights_initializer=slim.variance_scaling_initializer(),
      activation_fn=activation_fn,
      normalizer_fn=slim.batch_norm if use_batch_norm else None,
      normalizer_params=batch_norm_params):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      # The following implies padding='SAME' for pool1, which makes feature
      # alignment easier for dense prediction tasks. This is also used in
      # https://github.com/facebook/fb.resnet.torch. However the accompanying
      # code of 'Deep Residual Learning for Image Recognition' uses
      # padding='VALID' for pool1. You can switch to that choice by setting
      # slim.arg_scope([slim.max_pool2d], padding='VALID').
      with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
        return arg_sc
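
As the comment says, the paper's 'VALID' pool1 behaviour comes back with a narrower inner scope. A hedged sketch, assuming a resnet_v1 module is available (for example from tf_slim's bundled nets) and an images tensor exists:

# Sketch: override only max_pool2d while keeping the rest of the scope
# (assumes a resnet_v1 module, e.g. from tf_slim.nets, and an `images` tensor).
with slim.arg_scope(resnet_arg_scope()):
  with slim.arg_scope([slim.max_pool2d], padding='VALID'):
    net, end_points = resnet_v1.resnet_v1_50(images, num_classes=1000)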
Code example #17
File: model.py  Project: PlathC/VitalRecordAnalyser
def model(images, weight_decay=1e-5, is_training=True):
    '''
    Define the model; we use slim's implementation of ResNet.
    '''
    images = mean_image_subtraction(images)

    with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=weight_decay)):
        logits, end_points = resnet_v1.resnet_v1_50(images, is_training=is_training, scope='resnet_v1_50')

    with tf.variable_scope('feature_fusion', values=[end_points.values]):
        batch_norm_params = {
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        'is_training': is_training
        }
        with slim.arg_scope([slim.conv2d],
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_regularizer=slim.l2_regularizer(weight_decay)):
            f = [end_points['pool5'], end_points['pool4'],
                 end_points['pool3'], end_points['pool2']]
            for i in range(4):
                print('Shape of f_{} {}'.format(i, f[i].shape))
            g = [None, None, None, None]
            h = [None, None, None, None]
            num_outputs = [None, 128, 64, 32]
            for i in range(4):
                if i == 0:
                    h[i] = f[i]
                else:
                    c1_1 = slim.conv2d(tf.concat([g[i-1], f[i]], axis=-1), num_outputs[i], 1)
                    h[i] = slim.conv2d(c1_1, num_outputs[i], 3)
                if i <= 2:
                    g[i] = unpool(h[i])
                else:
                    g[i] = slim.conv2d(h[i], num_outputs[i], 3)
                print('Shape of h_{} {}, g_{} {}'.format(i, h[i].shape, i, g[i].shape))

            # For the regression part we first apply a sigmoid to bound the
            # regression range; the same is done for the angle map.
            F_score = slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None)
            # 4 channel of axis aligned bbox and 1 channel rotation angle
            geo_map = slim.conv2d(g[3], 4, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) * FLAGS.text_scale
            angle_map = (slim.conv2d(g[3], 1, 1, activation_fn=tf.nn.sigmoid, normalizer_fn=None) - 0.5) * np.pi/2 # angle is between [-45, 45]
            F_geometry = tf.concat([geo_map, angle_map], axis=-1)

    return F_score, F_geometry
Code example #18
File: layers.py  Project: eaogorman/DeepLabCut
def prediction_layer_stage(cfg, input, name, num_outputs):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
        padding="SAME",
        activation_fn=None,
        normalizer_fn=None,
        weights_regularizer=slim.l2_regularizer(cfg["weight_decay"]),
    ):
        with tf.compat.v1.variable_scope(name):
            pred = slim.conv2d(input, num_outputs, kernel_size=[3, 3], stride=1)
            return pred
Code example #19
    def extract_features(self, preprocessed_inputs):
        """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
        preprocessed_inputs = shape_utils.check_min_image_dim(
            33, preprocessed_inputs)
        nodes_dict = lookup_spaghetti_arch(self._spaghettinet_arch_name)

        with tf.variable_scope(self._spaghettinet_arch_name,
                               reuse=self._reuse_weights):
            with slim.arg_scope(
                [slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(
                        mean=0.0, stddev=0.03),
                    weights_regularizer=slim.l2_regularizer(1e-5)):
                with slim.arg_scope(
                    [slim.separable_conv2d],
                        weights_initializer=tf.truncated_normal_initializer(
                            mean=0.0, stddev=0.03),
                        weights_regularizer=slim.l2_regularizer(1e-5)):
                    with slim.arg_scope([slim.batch_norm],
                                        is_training=self._is_training,
                                        epsilon=0.001,
                                        decay=0.97,
                                        center=True,
                                        scale=True):
                        spaghetti_net = SpaghettiNet(
                            node_specs=nodes_dict,
                            is_training=self._is_training,
                            use_native_resize_op=self._use_native_resize_op,
                            use_explicit_padding=self._use_explicit_padding,
                            name=self._spaghettinet_arch_name)
                        feature_maps = spaghetti_net.apply(preprocessed_inputs)
        return feature_maps
Code example #20
def training_scope(is_training=True,
                   weight_decay=0.00004,
                   stddev=0.09,
                   dropout_keep_prob=0.8,
                   bn_decay=0.997):
  """Defines Mobilenet training scope.

  Usage:
     with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
       logits, endpoints = mobilenet_v2.mobilenet(input_tensor)

     # the network created will be trainable with dropout/batch norm
     # initialized appropriately.
  Args:
    is_training: if set to False this will ensure that all customizations are
      set to non-training mode. This might be helpful for code that is reused
      across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
      added to the batch_norm arg_scope.

    weight_decay: The weight decay to use for regularizing the model.
    stddev: Standard deviation for initialization, if negative uses xavier.
    dropout_keep_prob: dropout keep probability (not set if equals to None).
    bn_decay: decay for the batch norm moving averages (not set if equals to
      None).

  Returns:
    An argument scope to use via arg_scope.
  """
  # Note: do not introduce parameters that would change the inference
  # model here (for example whether to use bias), modify conv_def instead.
  batch_norm_params = {
      'decay': bn_decay,
      'is_training': is_training
  }
  if stddev < 0:
    weight_initializer = slim.initializers.xavier_initializer()
  else:
    weight_initializer = tf.truncated_normal_initializer(stddev=stddev)

  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_initializer,
      normalizer_fn=slim.batch_norm), \
      slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
      safe_arg_scope([slim.batch_norm], **batch_norm_params), \
      safe_arg_scope([slim.dropout], is_training=is_training,
                     keep_prob=dropout_keep_prob), \
      slim.arg_scope([slim.conv2d], \
                     weights_regularizer=slim.l2_regularizer(weight_decay)), \
      slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
    return s
Code example #21
def inception_resnet_v2_arg_scope(
        weight_decay=0.00004,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
        activation_fn=tf.nn.relu,
        batch_norm_updates_collections=tf.compat.v1.GraphKeys.UPDATE_OPS,
        batch_norm_scale=False):
    """Returns the scope with the default parameters for inception_resnet_v2.

  Args:
    weight_decay: the weight decay for weights variables.
    batch_norm_decay: decay for the batch norm moving averages.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: Activation function for conv2d.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    an arg_scope with the parameters needed for inception_resnet_v2.
  """
    # Set weight_decay for weights in conv2d and fully_connected layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_regularizer=slim.l2_regularizer(weight_decay)):

        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
            'updates_collections': batch_norm_updates_collections,
            'fused': None,  # Use fused batch norm if possible.
            'scale': batch_norm_scale,
        }
        # Set activation_fn and parameters for batch_norm.
        with slim.arg_scope([slim.conv2d],
                            activation_fn=activation_fn,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params) as scope:
            return scope
Code example #22
def attention_inception_v3_arg_scope(
        weight_decay=0.00004,
        use_batch_norm=True,
        batch_norm_decay=0.9997,
        batch_norm_epsilon=0.001,
        activation_fn=tf.nn.relu,
        batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
        batch_norm_scale=False):
    """Defines the default arg scope for inception models.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    use_batch_norm: "If `True`, batch_norm is applied after each convolution.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    activation_fn: Activation function for conv2d.
    batch_norm_updates_collections: Collection for the update ops for batch
      norm.
    batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
      activations in the batch normalization layer.

  Returns:
    An `arg_scope` to use for the inception models.
  """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': batch_norm_decay,
        # epsilon to prevent 0s in variance.
        'epsilon': batch_norm_epsilon,
        # collection containing update_ops.
        'updates_collections': batch_norm_updates_collections,
        # use fused batch norm if possible.
        'fused': None,
        'scale': batch_norm_scale,
    }
    if use_batch_norm:
        normalizer_fn = slim.batch_norm
        normalizer_params = batch_norm_params
    else:
        normalizer_fn = None
        normalizer_params = {}
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope(
            [slim.conv2d],
                weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn,
                normalizer_params=normalizer_params) as sc:
            return sc
Code example #23
def conv(inputs,
         num_outputs,
         kernel_size,
         stride=1,
         rate=1,
         use_bias=True,
         batch_norm=False,
         is_training=False,
         activation_fn=tf.nn.relu,
         scope=None,
         reuse=False):
    if batch_norm:
        normalizer_fn = slim.batch_norm
        b_init = None
    else:
        normalizer_fn = None
        if use_bias:
            b_init = b_initializer(0.0)
        else:
            b_init = None

    output = slim.conv2d(
        inputs=inputs,
        num_outputs=num_outputs,
        kernel_size=kernel_size,
        stride=stride,
        padding='SAME',
        rate=rate,
        weights_initializer=w_initializer(),
        weights_regularizer=slim.l2_regularizer(1.0),
        biases_initializer=b_init,
        normalizer_fn=normalizer_fn,
        normalizer_params={
            'center': True,
            'is_training': is_training,
            'variables_collections': {
                'beta': [tf.compat.v1.GraphKeys.BIASES],
                'moving_mean':
                [tf.compat.v1.GraphKeys.MOVING_AVERAGE_VARIABLES],
                'moving_variance':
                [tf.compat.v1.GraphKeys.MOVING_AVERAGE_VARIABLES]
            },
        },
        activation_fn=activation_fn,
        variables_collections={
            'weights': [tf.compat.v1.GraphKeys.WEIGHTS],
            'biases': [tf.compat.v1.GraphKeys.BIASES]
        },
        outputs_collections=[tf.compat.v1.GraphKeys.ACTIVATIONS],
        scope=scope,
        reuse=reuse)
    return output
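
The variables_collections argument above files the created variables into named collections so they can be fetched later. A short sketch, assuming TF1 graph mode and the tf name used in the example:

# Sketch: retrieve the variables this wrapper filed into collections.
weights = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.WEIGHTS)
biases = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.BIASES)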
Code example #24
File: vgg.py  Project: hcw-00/grad-cam
def vgg_arg_scope(weight_decay=0.0005):
  """Defines the VGG arg scope.
  Args:
    weight_decay: The l2 regularization coefficient.
  Returns:
    An arg_scope.
  """
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      activation_fn=tf.nn.relu,
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_initializer=tf.zeros_initializer()):
    with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
      return arg_sc
Code example #25
def _conv(h, filters, kernel_size, strides=1,
          normalizer_fn=slim.batch_norm, activation_fn=tf.nn.relu6):
  if activation_fn is None:
    raise ValueError('Activation function cannot be None. Use tf.identity '
                     'instead to better support quantized training.')
  return slim.conv2d(
      h,
      filters,
      kernel_size,
      stride=strides,
      activation_fn=activation_fn,
      normalizer_fn=normalizer_fn,
      weights_initializer=tf.initializers.he_normal(),
      weights_regularizer=slim.l2_regularizer(BACKBONE_WEIGHT_DECAY),
      padding='SAME')
Code example #26
    def get_real_model(self):
        """Get real model of regularizer."""
        if self.model:
            return self.model
        else:
            if self.type == 'l1_regularizer':
                self.model = slim.l1_regularizer(scale=float(self.weight))
            elif self.type == 'l2_regularizer':
                self.model = slim.l2_regularizer(scale=float(self.weight))
            else:
                raise ValueError('Unknown regularizer type: {}'.format(
                    self.type))

            return self.model
Code example #27
def adversarial_discriminator(net, layers, scope='adversary', leaky=False):
    # Honor the leaky flag; tf.nn.leaky_relu (alpha defaults to 0.2) stands in
    # for the commented-out tflearn leaky_relu.
    activation_fn = tf.nn.leaky_relu if leaky else tf.nn.relu
    with ExitStack() as stack:
        stack.enter_context(tf.variable_scope(scope))
        stack.enter_context(
            slim.arg_scope([slim.fully_connected],
                           activation_fn=activation_fn,
                           weights_regularizer=slim.l2_regularizer(2.5e-5)))
        for dim in layers:
            net = slim.fully_connected(net, dim)
        net = slim.fully_connected(net, 2, activation_fn=None)
    return net
Code example #28
File: lenet.py  Project: MuralidharGIT01/ObjDetector
def lenet_arg_scope(weight_decay=0.0):
    """Defines the default lenet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the LeNet model.
  """
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            activation_fn=tf.nn.relu) as sc:
        return sc
Code example #29
def inception_v3(nlabels, images):
    batch_norm_params = {
        "is_training": False, "trainable": True, "decay": 0.9997,
        "epsilon": 0.001,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"],
        }
    }
    weight_decay = 0.00004
    stddev = 0.1
    weights_regularizer = tf_slim.l2_regularizer(weight_decay)

    args_for_scope = (
        dict(list_ops_or_scope=[tf_slim.layers.conv2d, tf_slim.layers.fully_connected],
             weights_regularizer=weights_regularizer, trainable=True),
        dict(list_ops_or_scope=[tf_slim.layers.conv2d],
             weights_initializer=tf1.truncated_normal_initializer(stddev=stddev),
             activation_fn=tf1.nn.relu,
             normalizer_fn=tf_slim.layers.batch_norm,
             normalizer_params=batch_norm_params),
    )

    with tf1.variable_scope("InceptionV3", "InceptionV3", [images]) as scope, \
            tf_slim.arg_scope(**args_for_scope[0]), \
            tf_slim.arg_scope(**args_for_scope[1]):
        net, end_points = inception_v3_base(images, scope=scope)
        with tf1.variable_scope("logits"):
            shape = net.get_shape()
            net = tf_slim.layers.avg_pool2d(net, shape[1:3], padding="VALID",
                                    scope="pool")
            net = tf1.nn.dropout(net, 1, name='droplast')
            net = tf_slim.layers.flatten(net, scope="flatten")

    with tf1.variable_scope('output') as scope:
        weights = tf1.Variable(
            tf1.truncated_normal([2048, nlabels], mean=0.0, stddev=0.01),
            name='weights')
        biases = tf1.Variable(
            tf1.constant(0.0, shape=[nlabels], dtype=tf1.float32), name='biases')
        output = tf1.add(tf1.matmul(net, weights), biases, name=scope.name)

        tensor_name = re.sub('tower_[0-9]*/', '', output.op.name)
        tf1.summary.histogram(tensor_name + '/activations', output)
        tf1.summary.scalar(tensor_name + '/sparsity', tf1.nn.zero_fraction(output))
    return output
Code example #30
def resnet_arg_scope():
    batch_norm_params = dict(decay=0.997,
                             epsilon=1e-5,
                             scale=True,
                             is_training=tfu.is_training(),
                             fused=True,
                             data_format=tfu.data_format())

    with slim.arg_scope(
        [slim.conv2d, slim.conv3d],
            weights_regularizer=slim.l2_regularizer(1e-4),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc