Example 1
def alexnet(inputs,
            is_training=True,
            dropout_keep_prob=0.5):
    with tf.variable_scope("alexnet", reuse=tf.AUTO_REUSE):
        with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d],
                data_format="NCHW"):
            x = layers.conv2d(
                inputs, 64, [3, 3], padding='VALID', scope='conv1')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool1')
            x = layers.conv2d(x, 192, [5, 5], scope='conv2')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool2')
            x = layers.conv2d(x, 384, [3, 3], scope='conv3')
            x = layers.conv2d(x, 384, [3, 3], scope='conv4')
            x = layers.conv2d(x, 256, [3, 3], scope='conv5')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool5')

            with arg_scope(
                    [layers.conv2d],
                    weights_initializer=trunc_normal(0.005),
                    biases_initializer=init_ops.constant_initializer(0.1)):
                x = layers.conv2d(x, 4096, [3, 3], padding='VALID', scope='fc6')
                x = layers_lib.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout6')
                x = layers.conv2d(x, 4096, [1, 1], scope='fc7')
                x = layers_lib.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout7')
                x = layers.conv2d(x, NUM_CLASSES, [1, 1], activation_fn=None, normalizer_fn=None,
                                  biases_initializer=init_ops.zeros_initializer(), scope='fc8')
                x = tf.squeeze(x, [2, 3], name='fc8/squeezed')

        return x
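Note on the squeeze axes above: because the arg_scope sets data_format='NCHW', the class dimension of fc8 is axis 1 and the spatial axes are [2, 3] (the NHWC examples below squeeze [1, 2] instead). A layout sketch, assuming the spatial map has reached 1x1:

# NCHW: fc8 -> [batch, NUM_CLASSES, 1, 1] -> squeeze([2, 3]) -> [batch, NUM_CLASSES]
# NHWC: fc8 -> [batch, 1, 1, num_classes] -> squeeze([1, 2]) -> [batch, num_classes]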
Example 2
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
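A minimal usage sketch for the function above (the placeholder and shapes here are illustrative): with a 224x224 input the five pools leave a 7x7 map, so the 7x7 VALID conv in fc6 collapses it to 1x1 and the squeeze yields flat logits.

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)
# logits: [batch_size, 1000]; end_points maps scope names to intermediate tensors.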
Example 3
def slim_net_original(image, keep_prob):
    with arg_scope([layers.conv2d, layers.fully_connected], biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(image, 32, [5, 5], scope='conv1', weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(net, 64, [5, 5], scope='conv2', weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
Example 4
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.7,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and the initializers changed from
        random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope([layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d], outputs_collections=[end_points_collection]):
      net = layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = layers.conv2d(net, 192, [5, 5], scope='conv2')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = layers.conv2d(net, 384, [3, 3], scope='conv3')
      net = layers.conv2d(net, 384, [3, 3], scope='conv4')
      net = layers.conv2d(net, 256, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
      # Use conv2d instead of fully_connected layers.
      with arg_scope([layers.conv2d], weights_initializer=trunc_normal(0.005), biases_initializer=init_ops.constant_initializer(0.1)):
        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = slim.batch_norm(net, decay=0.9999, center=True, scale=False, epsilon=0.001, is_training=is_training, scope='bn1')
        net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = slim.batch_norm(net, decay=0.9999, center=True, scale=False, epsilon=0.001, is_training=is_training, scope='bn2')
        net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, biases_initializer=init_ops.zeros_initializer(), scope='fc8')

      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
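A sketch of what spatial_squeeze controls (placeholders and the 'alexnet_v2_fcn' scope are illustrative): with a 224x224 input, fc8 comes out as [batch, 1, 1, num_classes] and the squeeze produces flat logits; with a larger input in fully convolutional mode, the spatial grid of scores is kept.

import tensorflow as tf

# Classification mode: fc8 is [batch, 1, 1, num_classes] -> squeezed logits.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, _ = alexnet_v2(images, spatial_squeeze=True)

# Fully convolutional mode: keep the score map (separate scope, since
# reusing 'alexnet_v2' would require variable reuse).
big_images = tf.placeholder(tf.float32, [None, 448, 448, 3])
scores, _ = alexnet_v2(big_images, spatial_squeeze=False, scope='alexnet_v2_fcn')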
Example 5
def _embedding_alexnet(is_training, images, params):
    with tf.variable_scope('Siamese', 'CFCASiamese', [images], reuse=tf.AUTO_REUSE):
        with arg_scope(
                [layers.conv2d], activation_fn=tf.nn.relu):
            net = layers.conv2d(
                images, 96, [11, 11], 4, padding='VALID', scope='conv1')
            # net = layers.batch_norm(net, decay=0.9, epsilon=1e-06, is_training=is_training)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
            net = layers.conv2d(net, 256, [5, 5], scope='conv2')
            # net = layers.batch_norm(net, decay=0.9, epsilon=1e-06, is_training=is_training)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
            net = layers_lib.dropout(
                net, keep_prob=0.7, is_training=is_training)
            net = layers.conv2d(net, 384, [3, 3], scope='conv3')
            net = layers.conv2d(net, 256, [3, 3], scope='conv4')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            net = layers_lib.dropout(
                net, keep_prob=0.7, is_training=is_training)
            net = layers_lib.flatten(net, scope='flatten1')
            net = layers_lib.fully_connected(net, 1024, scope='fc1',
                                             weights_regularizer=layers.l2_regularizer(0.0005))
            net = layers_lib.dropout(
                net, keep_prob=0.5, is_training=is_training)
            net = layers_lib.fully_connected(net, params.embedding_size, scope='fc2',
                                             weights_regularizer=layers.l2_regularizer(0.0005))
            return net
Example 6
def lenet(input,
          is_training=True,
          dropout_keep_prob=0.5,
          ):
    with tf.variable_scope("lenet", reuse=tf.AUTO_REUSE):
        with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d],
                data_format="NCHW"):
            x = layers.conv2d(
                input,
                32,
                kernel_size=[5, 5],
                padding="same",
                activation_fn=tf.nn.relu)
            x = layers_lib.max_pool2d(x, [2, 2], 2)
            x = layers.conv2d(
                x,
                64,
                kernel_size=[5, 5],
                padding="same",
                activation_fn=tf.nn.relu)
            x = layers_lib.max_pool2d(x, [2, 2], 2)
            x = tf.reshape(x, [-1, 8 * 8 * 64])
            x = tf.layers.dense(inputs=x, units=1024, activation=tf.nn.relu)
            # tf.layers.dropout takes a drop rate, not a keep probability.
            x = tf.layers.dropout(
                inputs=x, rate=1 - dropout_keep_prob, training=is_training)
            x = tf.layers.dense(inputs=x, units=10)
        return x
Example 7
def _vgg16(inputs, training=True, embedding_size=64, 
           dropout_keep_prob=0.5,
           middleRepr=False,
           scope='vgg_16'):
    """ From https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/nets/vgg.py
    
Oxford Net VGG 16-Layers version D Example without fcn/convolutional layers at end

  """
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
        
        net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
        
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
        
        net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')

        if middleRepr:
            return net

        net = tf.layers.dense(net, embedding_size, 
                              activation=tf.nn.relu, kernel_initializer=xav_init(), name='fc8')
            
        return net   
Example 8
    def get_decision_net(self, net, net_prob_mat):

        with tf.name_scope('decision'):

            decision_net = tf.concat([net, net_prob_mat], axis=3)

            decision_net = layers_lib.max_pool2d(decision_net, [2, 2],
                                                 scope='decision/pool4')

            decision_net = layers.conv2d(decision_net,
                                         8, [5, 5],
                                         padding='SAME',
                                         scope='decision/conv6')

            decision_net = layers_lib.max_pool2d(decision_net, [2, 2],
                                                 scope='decision/pool5')

            decision_net = layers.conv2d(decision_net,
                                         16, [5, 5],
                                         padding='SAME',
                                         scope='decision/conv7')

            decision_net = layers_lib.max_pool2d(decision_net, [2, 2],
                                                 scope='decision/pool6')

            decision_net = layers.conv2d(decision_net,
                                         32, [5, 5],
                                         scope='decision/conv8')

            with tf.name_scope('decision/global_avg_pool'):
                avg_decision_net = keras.layers.GlobalAveragePooling2D()(
                    decision_net)

            with tf.name_scope('decision/global_max_pool'):
                max_decision_net = keras.layers.GlobalMaxPooling2D()(
                    decision_net)

            with tf.name_scope('decision/global_avg_pool'):
                avg_prob_net = keras.layers.GlobalAveragePooling2D()(
                    net_prob_mat)

            with tf.name_scope('decision/global_max_pool'):
                max_prob_net = keras.layers.GlobalMaxPooling2D()(net_prob_mat)

            # adding avg_prob_net and max_prob_net may not be needed, but it doesn't hurt
            decision_net = tf.concat([
                avg_decision_net, max_decision_net, avg_prob_net, max_prob_net
            ],
                                     axis=1)

            decision_net = layers.fully_connected(
                decision_net,
                1,
                scope='decision/FC9',
                normalizer_fn=None,
                biases_initializer=tf.constant_initializer(0),
                activation_fn=None)
        return decision_net
Example 9
def get_slim_arch(inputs, num_classes=1000, scope='vgg_16'):
    """
    from vgg16 https://github.com/tensorflow/models/blob/master/research/slim/nets/vgg.py
    :param inputs:
    :param num_classes:
    :param scope:
    :return:
    """
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.

        # Arg scope set default parameters for a list of ops
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers_lib.repeat(inputs,
                                    2,
                                    layers.conv2d,
                                    64, [3, 3],
                                    scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = layers_lib.repeat(net,
                                    2,
                                    layers.conv2d,
                                    128, [3, 3],
                                    scope='conv2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    256, [3, 3],
                                    scope='conv3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv4')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv5')

            # Here the feature maps are 14x14
            net = tf.reduce_mean(net, [1, 2])  # Global average pooling
            net = layers_lib.fully_connected(net,
                                             num_classes,
                                             activation_fn=None,
                                             biases_initializer=None,
                                             scope='softmax_logits')

            # Convert end_points_collection into an end_points dict.
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            return net, end_points
Example 10
    def alexnet_v2(inputs,
                   is_training=True,
                   emb_size=4096,
                   dropout_keep_prob=0.5,
                   scope='alexnet_v2'):

        inputs = tf.cast(inputs, tf.float32)
        if new_shape is not None:
            shape = new_shape
            inputs = tf.image.resize_images(
                inputs,
                tf.constant(new_shape[:2]),
                method=tf.image.ResizeMethod.BILINEAR)
        else:
            shape = img_shape
        if is_training and augmentation_function is not None:
            inputs = augmentation_function(inputs, shape)
        if image_summary:
            tf.summary.image('Inputs', inputs, max_outputs=3)

        net = inputs
        mean = tf.reduce_mean(net, [1, 2], True)
        std = tf.reduce_mean(tf.square(net - mean), [1, 2], True)
        net = (net - mean) / (std + 1e-5)
        inputs = net

        with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
            end_points_collection = sc.original_name_scope + '_end_points'

            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with arg_scope(
                    [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                    outputs_collections=[end_points_collection]):
                net = layers.conv2d(
                    inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
                net = layers.conv2d(net, 192, [5, 5], scope='conv2')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
                net = layers.conv2d(net, 384, [3, 3], scope='conv3')
                net = layers.conv2d(net, 384, [3, 3], scope='conv4')
                net = layers.conv2d(net, 256, [3, 3], scope='conv5')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')

                net = slim.flatten(net, scope='flatten')

                # Fully connected layers on the flattened features.
                with arg_scope(
                        [slim.fully_connected],
                        weights_initializer=trunc_normal(0.005),
                        biases_initializer=init_ops.constant_initializer(0.1)):
                    net = layers.fully_connected(net, 4096, scope='fc6')
                    net = layers_lib.dropout(
                        net, dropout_keep_prob, is_training=is_training, scope='dropout6')
                    net = layers.fully_connected(net, emb_size, scope='fc7')

        return net
Example 11
def truncated_vgg_16(inputs, is_training=True, scope="vgg_16"):
    """Oxford Net VGG 16-Layers version D Example.

    For use in SSD object detection network, which has this particular
    truncated version of VGG16 detailed in its paper.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      scope: Optional scope for the variables.

    Returns:
      the last op containing the conv5 tensor and end_points dict.
    """
    with variable_scope.variable_scope(scope, "vgg_16", [inputs]) as sc:
        end_points_collection = sc.original_name_scope + "_end_points"
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection,
        ):
            net = layers_lib.repeat(inputs,
                                    2,
                                    layers.conv2d,
                                    64, [3, 3],
                                    scope="conv1")
            net = layers_lib.max_pool2d(net, [2, 2], scope="pool1")
            net = layers_lib.repeat(net,
                                    2,
                                    layers.conv2d,
                                    128, [3, 3],
                                    scope="conv2")
            net = layers_lib.max_pool2d(net, [2, 2], scope="pool2")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    256, [3, 3],
                                    scope="conv3")
            net = layers_lib.max_pool2d(net, [2, 2], scope="pool3")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope="conv4")
            net = layers_lib.max_pool2d(net, [2, 2], scope="pool4")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope="conv5")
            # Convert end_points_collection into an end_points dict.
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            return net, end_points
Example 12
def alexnet_v2(inputs,
               num_classes=2,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
    inputs = tf.reshape(inputs, (-1, 80, 80, 1))
    with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        debug_shape(inputs)
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=[end_points_collection]):
            net = layers.conv2d(inputs,
                                64, [3, 3],
                                padding='VALID',
                                scope='conv1')
            debug_shape(net)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
            debug_shape(net)
            net = layers.conv2d(net, 192, [5, 5], scope='conv2')
            debug_shape(net)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
            debug_shape(net)
            net = layers.conv2d(net, 384, [3, 3], scope='conv3')
            net = layers.conv2d(net, 384, [3, 3], scope='conv4')
            net = layers.conv2d(net, 256, [3, 3], scope='conv5')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            debug_shape(net)
            with arg_scope(
                [layers.conv2d],
                    weights_initializer=trunc_normal(0.005),
                    biases_initializer=init_ops.constant_initializer(0.1)):
                net = layers.conv2d(net,
                                    256, [7, 7],
                                    2,
                                    padding='VALID',
                                    scope='fc6')
                debug_shape(net)
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout6')
                net = layers.conv2d(net, 256, [1, 1], scope='fc7')
                debug_shape(net)
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            if spatial_squeeze:
                net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
            end_points[sc.name + '/fc8'] = net
            return net, end_points
Example 13
def vgg16_base(inputs):
    with tf.variable_scope('vgg_16'):
        net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], padding='SAME', scope='conv1')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
        net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], padding='SAME', scope='conv2')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
        net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], padding='SAME', scope='conv3')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], padding='SAME', scope='conv4')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
        net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], padding='SAME', scope='conv5')
        net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
    return net
Example 14
def vgg_16_tcorr(inputs,
           corr_features,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16_tcorr'):

  with variable_scope.variable_scope(scope, 'vgg_16_tcorr', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      forder = corr_features['vgg_16/conv1/conv1_1']
      net, forder = init_conv_corr(inputs, 2, 64, [3, 3], 'conv1', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')

      net, forder = repeat_conv_corr(net, 2, 128, [3, 3], 'conv2', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')

      net, forder = repeat_conv_corr(net, 3, 256, [3, 3], 'conv3', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')

      net, forder = repeat_conv_corr(net, 3, 512, [3, 3], 'conv4', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')

      net, _ = repeat_conv_corr(net, 3, 512, [3, 3], 'conv5', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')

      net = layers.conv2d(net, 512, [1, 1], scope='fuse5')

      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
Example 15
def gazenet(inputs,
            head_inputs,
            head_loc,
            num_classes=1000,
            is_training=True,
            dropout_keep_prob=0.5,
            spatial_squeeze=True,
            scope='gazenet'):
    # inputs is a tensor of [batch_size, height, width, channels]
    with variable_scope.variable_scope(scope, 'gazenet', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=[end_points_collection]):
            saliency_net = layers.conv2d(inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
            # 56x56, 52x52
            saliency_net = layers_lib.max_pool2d(saliency_net, [3, 3], 2, scope='pool1')
            # 28x28, 26x26
            saliency_net = layers.conv2d(saliency_net, 192, [5, 5], scope='conv2')
            saliency_net = layers_lib.max_pool2d(saliency_net, [3, 3], 2, scope='pool2')
            # 14x14
            saliency_net = layers.conv2d(saliency_net, 384, [3, 3], scope='conv3')
            saliency_net = layers.conv2d(saliency_net, 384, [3, 3], scope='conv4')
            saliency_net = layers.conv2d(saliency_net, 256, [3, 3], scope='conv5')
            # net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            # last layer is a 1x1x256 conv layer
            saliency_net = layers.conv2d(saliency_net, 1, [1, 1], scope='conv6')
            # 14x14, 13x13

            gaze_net = layers.conv2d(head_inputs, 64, [11, 11], 4, padding='VALID', scope='gaze_conv1')
            gaze_net = layers_lib.max_pool2d(gaze_net, [3, 3], 2, scope='gaze_pool1')
            gaze_net = layers.conv2d(gaze_net, 192, [5, 5], scope='gaze_conv2')
            gaze_net = layers_lib.max_pool2d(gaze_net, [3, 3], 2, scope='gaze_pool2')
            gaze_net = layers.conv2d(gaze_net, 384, [3, 3], scope='gaze_conv3')
            gaze_net = layers.conv2d(gaze_net, 384, [3, 3], scope='gaze_conv4')
            gaze_net = layers.conv2d(gaze_net, 256, [3, 3], scope='gaze_conv5')

            # Flatten the conv features before the fully connected head.
            gaze_net = layers.flatten(gaze_net)
            gaze_net = layers_lib.fully_connected(gaze_net, 100, scope='fc1')
            # Concatenate the location of the head along the feature axis.
            head_loc = layers.flatten(head_loc)
            gaze_net = tf.concat([gaze_net, head_loc], 1)
            # to do: use tf.contrib.stack
            gaze_net = layers_lib.fully_connected(gaze_net, 375, scope='fc2')
            gaze_net = layers_lib.fully_connected(gaze_net, 200, scope='fc3')
            gaze_net = layers_lib.fully_connected(gaze_net, 169, scope='fc4')

            product_mask = tf.multiply(gaze_net, layers.flatten(saliency_net))

            prediction_mask = layers_lib.fully_connected(product_mask, 25, scope='prediction')
            return prediction_mask
Example 16
def max_pool2d(inputs,
               kernel_size=2,
               stride=2,
               padding='SAME',
               explicit_padding=True,
               outputs_collections=None,
               scope=None):
    """
    eqaully same padding for max_pool2d
    """
    kernel_size = [kernel_size, kernel_size
                   ] if type(kernel_size) is int else kernel_size
    stride = [stride, stride] if type(stride) is int else stride

    if padding == 'SAME' and explicit_padding:
        inputs = same_padding(inputs, kernel_size, [1, 1])
        padding = 'VALID'

    pool = layers_lib.max_pool2d(inputs,
                                 kernel_size,
                                 stride=stride,
                                 padding=padding,
                                 data_format='NHWC',
                                 outputs_collections=outputs_collections,
                                 scope=scope)
    return pool
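The same_padding helper is not shown in this snippet; a plausible sketch, assuming it pads explicitly so that a subsequent 'VALID' op produces the same output shape 'SAME' padding would (the third argument is taken to be a dilation rate):

import tensorflow as tf

def same_padding(inputs, kernel_size, rate=(1, 1)):
    """Pad NHWC inputs explicitly; a VALID pool/conv on the result matches
    the output shape of 'SAME' padding for any stride."""
    # Effective kernel extent once dilated.
    kh = kernel_size[0] + (kernel_size[0] - 1) * (rate[0] - 1)
    kw = kernel_size[1] + (kernel_size[1] - 1) * (rate[1] - 1)
    pad_h, pad_w = kh - 1, kw - 1
    # 'SAME' puts the extra pixel (if any) on the bottom/right.
    return tf.pad(inputs, [[0, 0],
                           [pad_h // 2, pad_h - pad_h // 2],
                           [pad_w // 2, pad_w - pad_w // 2],
                           [0, 0]])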
Example 17
def dense_block(inputs, depth, depth_bottleneck, stride, name, rate=1):
    depth_in = inputs.get_shape()[3]
    if depth == depth_in:
        if stride == 1:
            shortcut = inputs
        else:
            shortcut = layers.max_pool2d(inputs, [1, 1], stride=stride, scope=name+'_shortcut')
    else:
        shortcut = layers.conv2d(
            inputs,
            depth, [1, 1],
            stride=stride,
            activation_fn=None,
            scope=name+'_shortcut')
    if PRINT_LAYER_LOG:
        print(name+'_shortcut', shortcut.get_shape())

    residual = layers.conv2d(
        inputs, depth_bottleneck, [1, 1], stride=1, scope=name+'_conv1')
    if PRINT_LAYER_LOG:
        print(name+'_conv1', residual.get_shape())
    residual = resnet_utils.conv2d_same(
        residual, depth_bottleneck, 3, stride, rate=rate, scope=name+'_conv2')
    if PRINT_LAYER_LOG:
        print(name+'_conv2', residual.get_shape())
    residual = layers.conv2d(
        residual, depth, [1, 1], stride=1, activation_fn=None, scope=name+'_conv3')
    if PRINT_LAYER_LOG:
        print(name+'_conv3', residual.get_shape())
    output = nn_ops.relu(shortcut + residual)
    return output
Example 18
    def suanet_v2(inputs, is_training=True, emb_size=256, scope='suanet_v2'):

        inputs = tf.cast(inputs, tf.float32)

        net = inputs
        mean = tf.reduce_mean(net, [1, 2], True)
        std = tf.reduce_mean(tf.square(net - mean), [1, 2], True)
        net = (net - mean) / (std + 1e-5)
        inputs = net

        with variable_scope.variable_scope(scope, 'suanet_v2', [inputs]) as sc:
            end_points_collection = sc.original_name_scope + '_end_points'
            end_points = {}
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with arg_scope([
                    layers.conv2d, layers_lib.fully_connected,
                    layers_lib.max_pool2d
            ],
                           outputs_collections=[end_points_collection]):
                end_points['conv1'] = layers.conv2d(inputs,
                                                    96, [11, 11],
                                                    4,
                                                    scope='conv1')
                end_points['pool1'] = layers_lib.max_pool2d(
                    end_points['conv1'], [3, 3], 2, scope='pool1')
                end_points['conv2'] = layers.conv2d(end_points['pool1'],
                                                    256, [5, 5],
                                                    scope='conv2')
                end_points['pool2'] = layers_lib.max_pool2d(
                    end_points['conv2'], [3, 3], 2, scope='pool2')
                end_points['conv3'] = layers.conv2d(end_points['pool2'],
                                                    emb_size, [3, 3],
                                                    scope='conv3')
                filter_n_stride_height = end_points['conv3'].get_shape()[1]
                filter_n_stride_width = end_points['conv3'].get_shape()[2]
                end_points['pool3'] = layers_lib.max_pool2d(
                    end_points['conv3'],
                    [filter_n_stride_height, filter_n_stride_width],
                    [filter_n_stride_height, filter_n_stride_width],
                    scope='pool3')
                end_points['flatten'] = slim.flatten(end_points['pool3'],
                                                     scope='flatten')
        return end_points
Example 19
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the pool5 tensor (this variant stops after conv5/pool5 and omits the
    fully connected head).
  """
  with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):

      net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1'); print(net)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1'); print(net)
      net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2'); print(net)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2'); print(net)
      net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3'); print(net)
      net = layers_lib.max_pool2d(net, [2, 2], padding='SAME', scope='pool3'); print(net)
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4'); conv4_3 = net
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4'); print(net)
      net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5'); conv5_3 = net
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      return net
Example 20
def vgg16(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          reuse=tf.AUTO_REUSE,
          scope='vgg_16'):
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs],
                                       reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers_lib.repeat(inputs,
                                    2,
                                    layers.conv2d,
                                    64, [3, 3],
                                    scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = layers_lib.repeat(net,
                                    2,
                                    layers.conv2d,
                                    128, [3, 3],
                                    scope='conv2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    256, [3, 3],
                                    scope='conv3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv4')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')

        # Convert end_points_collection into a end_point dict.
        end_points = utils.convert_collection_to_dict(end_points_collection)
        return net, end_points
Example 21
def max_pool(inputs, kernel_size, stride=1, scope=None):
    if kernel_size == 1 and stride == 1:
        outputs = inputs
    else:
        outputs = layers.max_pool2d(inputs=inputs,
                                    kernel_size=kernel_size,
                                    stride=1,
                                    scope=scope)
        if stride > 1:
            outputs = blur_pool(outputs, stride=stride)

    return outputs
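The blur_pool helper above is not defined in this snippet; it presumably implements anti-aliased downsampling (Zhang, 2019: dense pooling followed by a low-pass filter and subsampling). A minimal sketch under that assumption, using a fixed 3x3 binomial filter applied depthwise:

import numpy as np
import tensorflow as tf

def blur_pool(inputs, stride=2):
    """Hypothetical anti-aliased downsampling: blur with a fixed binomial
    kernel, then subsample by `stride` (assumes NHWC inputs)."""
    channels = inputs.get_shape().as_list()[-1]
    base = np.array([1., 2., 1.])
    kern = np.outer(base, base)
    kern /= kern.sum()                          # normalize the low-pass filter
    kern = np.tile(kern[:, :, None, None], (1, 1, channels, 1))
    kern = tf.constant(kern, dtype=inputs.dtype)
    return tf.nn.depthwise_conv2d(inputs, kern,
                                  strides=[1, stride, stride, 1],
                                  padding='SAME')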
Example 22
def resnet50(image_input,
             is_training,
             embedding_dim,
             scope='resnet_v2_50',
             before_pool=False):
    with tf.variable_scope(scope):
        with arg_scope([layers.batch_norm],
                       is_training=is_training,
                       scale=True):
            with arg_scope([layers_lib.conv2d],
                           activation_fn=None,
                           normalizer_fn=None):
                out = conv2d_same(inputs=image_input,
                                  num_outputs=64,
                                  kernel_size=7,
                                  stride=2,
                                  scope='conv1')
            out = layers.max_pool2d(out, [3, 3], stride=2, scope='pool1')
            with arg_scope([layers_lib.conv2d],
                           activation_fn=nn_ops.relu,
                           normalizer_fn=layers.batch_norm):
                out = resnet_v2_block(inputs=out,
                                      base_depth=64,
                                      num_units=3,
                                      stride=2,
                                      scope='block1')
                out = resnet_v2_block(inputs=out,
                                      base_depth=128,
                                      num_units=4,
                                      stride=2,
                                      scope='block2')
                out = resnet_v2_block(inputs=out,
                                      base_depth=256,
                                      num_units=6,
                                      stride=2,
                                      scope='block3')
                out = resnet_v2_block(inputs=out,
                                      base_depth=512,
                                      num_units=3,
                                      stride=1,
                                      scope='block4')
            out = layers.batch_norm(out,
                                    activation_fn=nn_ops.relu,
                                    scope='postnorm')
            avg_out = tf.reduce_mean(out, axis=[1, 2], name='pool5')

    fc_out = tf.layers.dense(inputs=avg_out, units=embedding_dim)

    if before_pool:
        return out, fc_out
    else:
        return fc_out
Example 23
def resnet_v1(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
    with variable_scope.variable_scope(scope,
                                       'resnet_v1', [inputs],
                                       reuse=reuse) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        with arg_scope([layers.conv2d, naive, resnet_utils.stack_blocks_dense],
                       outputs_collections=end_points_collection):
            with arg_scope([layers.batch_norm], is_training=is_training):
                net = inputs
                if include_root_block:
                    if output_stride is not None:
                        if output_stride % 4 != 0:
                            raise ValueError(
                                'The output_stride needs to be a multiple of 4.'
                            )
                        output_stride /= 4
                    net = resnet_utils.conv2d_same(net,
                                                   64,
                                                   7,
                                                   stride=2,
                                                   scope='conv1')
                    net = layers_lib.max_pool2d(net, [3, 3],
                                                stride=2,
                                                scope='pool1')
                net = resnet_utils.stack_blocks_dense(net, blocks,
                                                      output_stride)
                if global_pool:
                    # Global average pooling.
                    net = math_ops.reduce_mean(net, [1, 2],
                                               name='pool5',
                                               keep_dims=True)
                if num_classes is not None:
                    net = layers.conv2d(net,
                                        num_classes, [1, 1],
                                        activation_fn=None,
                                        normalizer_fn=None,
                                        scope='logits')
                end_points = utils.convert_collection_to_dict(
                    end_points_collection)
                if num_classes is not None:
                    end_points['predictions'] = layers_lib.softmax(
                        net, scope='predictions')
                return net, end_points
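A short note on the output_stride arithmetic above, with a hedged usage sketch (images and blocks are assumed to be defined elsewhere, as in resnet_utils): the root conv1 + pool1 already downsample by a factor of 4, which is why the requested output_stride must be a multiple of 4 and is divided by 4 before being handed to stack_blocks_dense.

# Hypothetical usage: a network whose final feature map is input_size / 16.
# stack_blocks_dense receives output_stride = 16 / 4 = 4 and switches the
# later stride-2 units to dilated convolutions once that factor is reached.
net, end_points = resnet_v1(images, blocks, num_classes=None,
                            global_pool=False, output_stride=16)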
Example 24
def subsample(inputs, factor, scope=None):
    """Subsamples the input along the spatial dimensions.
  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.
  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
      input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
    if factor == 1:
        return inputs
    else:
        return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)
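A quick sanity sketch of the trick: a 1x1 max pool with stride factor keeps every factor-th row and column, so it is exactly a strided slice.

import tensorflow as tf

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
pooled = tf.contrib.layers.max_pool2d(x, [1, 1], stride=2)  # [1, 2, 2, 1]
sliced = x[:, ::2, ::2, :]                                  # identical values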
Example 25
def _dimension_reduction(net,
                         branch_0_depth=224,
                         branch_1_depth=96,
                         use_deform_conv=False,
                         scope='dimension_reduction'):
    """
    Dimension reduction module of ldnet-v1.
    :param net: the net input.
    :param branch_0_depth: the depth of branch_0.
    :param branch_1_depth: the depth of branch_1.
    :param scope: optional scope.
    :return:
        the size of returned net: [batch_size, height, width, channel], which
        channel = (branch_0_depth + branch_1_depth) + last_net_depth
    """
    with variable_scope.variable_scope(scope):
        with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net,
                                     branch_0_depth, [3, 3],
                                     stride=2,
                                     scope='Conv2d_1a_1x1')

        with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
            if use_deform_conv:
                branch_1 = ConvOffset2D(64, name='conv3_offset')(
                    branch_1)  # net offset
            branch_1 = layers.conv2d(branch_1,
                                     96, [3, 3],
                                     scope='Conv2d_0b_3x3')
            if use_deform_conv:
                branch_1 = ConvOffset2D(96, name='conv3_offset')(
                    branch_1)  # net offset
            branch_1 = layers.conv2d(branch_1,
                                     branch_1_depth, [3, 3],
                                     stride=2,
                                     scope='Conv2d_1c_1x1')

        with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers_lib.max_pool2d(net, [3, 3],
                                             stride=2,
                                             scope='MaxPool_1a_3x3')

        net = array_ops.concat([branch_0, branch_1, branch_2], 3)

    return net
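A quick check of the channel bookkeeping promised in the docstring, as a sketch (spatial agreement between the stride-2 conv branches and the VALID max pool additionally assumes a shared padding convention, e.g. set by an outer arg_scope):

# With the defaults and a 256-channel input:
input_depth = 256                      # example input channel count
out_depth = 224 + 96 + input_depth     # branch_0 + branch_1 + branch_2
assert out_depth == 576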
Example 26
def main(_):

    dropout_on = tf.placeholder(tf.float32)
    # A placeholder is never None, so the original `if dropout_on is not None`
    # test was always true; both branches assigned the same value anyway.
    conv_keep_prob = 1.0

    x = tf.placeholder(tf.float32, shape=[None, 14 * 4])
    y_ = tf.placeholder(tf.float32, shape=[None, 2])

    x_image = tf.reshape(x, [-1, 14, 4, 1])

    n_conv1 = 384  # TBD
    L_conv1 = 9  # TBD
    maxpool_len1 = 2
    conv1 = convolution2d(x_image,
                          n_conv1, [L_conv1, 4],
                          padding="VALID",
                          normalizer_fn=None)
    conv1_pool = max_pool2d(conv1, [maxpool_len1, 1], [maxpool_len1, 1])
    conv1_pool_len = int((14 - L_conv1 + 1) / maxpool_len1)

    n_conv2 = n_conv1
    L_conv2 = 5
    maxpool_len2 = int(
        conv1_pool_len - L_conv2 +
        1)  # global maxpooling (max-pool across temporal domain)
    conv2 = convolution2d(conv1_pool,
                          n_conv2, [L_conv2, 1],
                          padding='VALID',
                          normalizer_fn=None)
    conv2_pool = max_pool2d(conv2, [maxpool_len2, 1], [maxpool_len2, 1])
    # conv2_drop = tf.nn.dropout(conv2_pool, conv_keep_prob)

    # LINEAR FC LAYER
    y_conv = fully_connected(flatten(conv2_pool), 2, activation_fn=None)
    y_conv_softmax = tf.nn.softmax(y_conv)

    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))
    train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
Example 27
def max_pool2d(inputs,
               kernel_size=2,
               stride=2,
               padding='SAME',
               outputs_collections=None,
               scope=None):
    """
    eqaully same padding for max_pool2d
    """
    if padding == 'SAME':
        inputs = same_padding(inputs, [kernel_size, kernel_size], [1, 1])

    pool = layers_lib.max_pool2d(inputs,
                                 kernel_size,
                                 stride=stride,
                                 padding='VALID',
                                 data_format='NHWC',
                                 outputs_collections=outputs_collections,
                                 scope=scope)
    return pool
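A quick usage sketch of the wrapper (hypothetical; same_padding is assumed to be defined alongside it and to pad the input symmetrically):

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
pool = max_pool2d(x, kernel_size=3, stride=2, scope='pool_same')
# output spatial size is ceil(32 / 2) = 16, matching built-in 'SAME' semantics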
Example no. 31
    def __init__(self, sequence_length, num_classes):

        # placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.float32, [None, sequence_length],
                                      name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes],
                                      name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name="dropout_keep_prob")

        # the reshape assumes sequence_length == 14 * 4
        x_image = tf.reshape(self.input_x, shape=[-1, 14, 4, 1])

        n_conv1 = 44
        L_conv1 = 5
        maxpool_len1 = 2
        conv1 = convolution2d(x_image,
                              n_conv1, [L_conv1, 4],
                              padding='VALID',
                              normalizer_fn=None)
        conv1_pool = max_pool2d(conv1, [maxpool_len1, 1], [maxpool_len1, 1])
        # NOTE: the 101 here assumes a different input length than the
        # hard-coded 14 above; conv1_pool_len is unused in this snippet.
        conv1_pool_len = int((101 - L_conv1 + 1) / maxpool_len1)

        # n_conv2 = n_conv1
        # L_conv2 = 3
        # maxpool_len2 = int(conv1_pool_len - L_conv2 + 1)  # global maxpooling (max-pool across temporal domain)
        # conv2 = convolution2d(conv1_pool, n_conv2, [L_conv2, 1], padding='VALID', normalizer_fn=None)
        # conv2_pool = max_pool2d(conv2, [maxpool_len2, 1], [maxpool_len2, 1])

        # LINEAR FC LAYER
        y_conv = fully_connected(flatten(conv1_pool), num_classes,
                                 activation_fn=None)
        prediction = tf.nn.softmax(y_conv)

        self.cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=y_conv,
                                                    labels=self.input_y))
        # train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

        correct_prediction = tf.equal(tf.argmax(prediction, 1),
                                      tf.argmax(self.input_y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
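A hypothetical instantiation (the enclosing class name is not shown in the snippet, so SeqConvModel below is a stand-in; batch_x and batch_y are NumPy arrays):

model = SeqConvModel(sequence_length=14 * 4, num_classes=2)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    acc = sess.run(model.accuracy,
                   feed_dict={model.input_x: batch_x,
                              model.input_y: batch_y,
                              model.dropout_keep_prob: 1.0})  # no dropout at eval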
Example no. 32
def resnet50(image_input, is_training, scope='resnet_v2_50'):
    with tf.variable_scope(scope):
        with arg_scope(resnet_arg_scope(is_training=is_training)):
            with arg_scope([layers_lib.conv2d],
                           activation_fn=None,
                           normalizer_fn=None):
                out = conv2d_same(inputs=image_input,
                                  num_outputs=64,
                                  kernel_size=7,
                                  stride=2,
                                  scope='conv1')
            out = layers.max_pool2d(out, [3, 3], stride=2, scope='pool1')
            out = resnet_v2_block(inputs=out,
                                  base_depth=64,
                                  num_units=3,
                                  stride=2,
                                  scope='block1')
            out = resnet_v2_block(inputs=out,
                                  base_depth=128,
                                  num_units=4,
                                  stride=2,
                                  scope='block2')
            out = resnet_v2_block(inputs=out,
                                  base_depth=256,
                                  num_units=6,
                                  stride=2,
                                  scope='block3')
            out = resnet_v2_block(inputs=out,
                                  base_depth=512,
                                  num_units=3,
                                  stride=1,
                                  scope='block4')
            out = layers.batch_norm(out,
                                    activation_fn=nn_ops.relu,
                                    scope='postnorm')

    return out
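A usage sketch: the function returns the post-norm feature map rather than logits, so any classifier head is added by the caller (tf.layers.dense below is one plausible choice, not part of the snippet):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
features = resnet50(images, is_training=True)
# effective stride 32 on a 224x224 input gives roughly [batch, 7, 7, 2048]
logits = tf.layers.dense(tf.reduce_mean(features, axis=[1, 2]), 1000)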
Example no. 33
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              is_training=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether the model is being trained or not. If None, the value
      inherited from the resnet_arg_scope is used. Specifying None is
      deprecated.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.


  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with variable_scope.variable_scope(
      scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with arg_scope(
        [layers_lib.conv2d, bottleneck, resnet_utils.stack_blocks_dense],
        outputs_collections=end_points_collection):
      if is_training is not None:
        bn_scope = arg_scope([layers.batch_norm], is_training=is_training)
      else:
        bn_scope = arg_scope([])
      with bn_scope:
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            output_stride /= 4
          # We do not include batch normalization or activation functions in
          # conv1 because the first ResNet unit will perform these. Cf.
          # Appendix of [2].
          with arg_scope(
              [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):
            net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = layers.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = layers.batch_norm(
            net, activation_fn=nn_ops.relu, scope='postnorm')
        if global_pool:
          # Global average pooling.
          net = math_ops.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
          net = layers_lib.conv2d(
              net,
              num_classes, [1, 1],
              activation_fn=None,
              normalizer_fn=None,
              scope='logits')
        # Convert end_points_collection into a dictionary of end_points.
        end_points = utils.convert_collection_to_dict(end_points_collection)
        if num_classes is not None:
          end_points['predictions'] = layers.softmax(net, scope='predictions')
        return net, end_points
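A sketch of the FCN-mode usage the docstring recommends for dense prediction (blocks is assumed to have been built with the resnet_utils helpers, as in the resnet_v2_* wrappers):

inputs = tf.placeholder(tf.float32, [None, 321, 321, 3])  # 321 = 32 * 10 + 1
net, end_points = resnet_v2(inputs,
                            blocks,
                            num_classes=None,   # features, no logit layer
                            is_training=False,
                            global_pool=False,  # FCN mode
                            output_stride=16)   # denser output features
# spatial output size: (321 - 1) / 16 + 1 = 21 in each dimension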
Example no. 34
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function although they build the same
  network.

  Here is a mapping from the old names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
                losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='VALID'):
      # 299 x 299 x 3
      end_point = 'Conv2d_1a_3x3'
      net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 149 x 149 x 32
      end_point = 'Conv2d_2a_3x3'
      net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 147 x 147 x 32
      end_point = 'Conv2d_2b_3x3'
      net = layers.conv2d(
          net, depth(64), [3, 3], padding='SAME', scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 147 x 147 x 64
      end_point = 'MaxPool_3a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 73 x 73 x 64
      end_point = 'Conv2d_3b_1x1'
      net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 73 x 73 x 80.
      end_point = 'Conv2d_4a_3x3'
      net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 71 x 71 x 192.
      end_point = 'MaxPool_5a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 35 x 35 x 192.

      # Inception blocks
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='SAME'):
      # mixed: 35 x 35 x 256.
      end_point = 'Mixed_5b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_1: 35 x 35 x 288.
      end_point = 'Mixed_5c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_2: 35 x 35 x 288.
      end_point = 'Mixed_5d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_3: 17 x 17 x 768.
      end_point = 'Mixed_6a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net,
              depth(384), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(
              branch_1,
              depth(96), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_1x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(
              net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed4: 17 x 17 x 768.
      end_point = 'Mixed_6b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_5: 17 x 17 x 768.
      end_point = 'Mixed_6c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # mixed_6: 17 x 17 x 768.
      end_point = 'Mixed_6d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_7: 17 x 17 x 768.
      end_point = 'Mixed_6e'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_8: 8 x 8 x 1280.
      end_point = 'Mixed_7a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(
              branch_0,
              depth(320), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
          branch_1 = layers.conv2d(
              branch_1,
              depth(192), [3, 3],
              stride=2,
              padding='VALID',
              scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(
              net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # mixed_9: 8 x 8 x 2048.
      end_point = 'Mixed_7b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = array_ops.concat(
              [
                  layers.conv2d(
                      branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
                  layers.conv2d(
                      branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
              ],
              3)
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = array_ops.concat(
              [
                  layers.conv2d(
                      branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
                  layers.conv2d(
                      branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
              ],
              3)
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points

      # mixed_10: 8 x 8 x 2048.
      end_point = 'Mixed_7c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
          branch_1 = array_ops.concat(
              [
                  layers.conv2d(
                      branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
                  layers.conv2d(
                      branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
              ],
              3)
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = array_ops.concat(
              [
                  layers.conv2d(
                      branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
                  layers.conv2d(
                      branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
              ],
              3)
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
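A usage sketch showing how final_endpoint truncates the network at an intermediate block:

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
net, end_points = inception_v3_base(images, final_endpoint='Mixed_6e')
# net is the 17 x 17 x 768 Mixed_6e activation; earlier activations stay
# available through the dict, e.g. end_points['Conv2d_4a_3x3']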
Example no. 35
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.

  """
  with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers.conv2d(net, 512, [3, 3], scope='conv3')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
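Classification-mode usage per the docstring (231x231 inputs, spatial squeeze on):

images = tf.placeholder(tf.float32, [None, 231, 231, 3])
logits, end_points = overfeat(images, num_classes=1000, is_training=True)
# logits has shape [batch, 1000]; with spatial_squeeze=False it would keep
# its fully convolutional [batch, 1, 1, 1000] shape instead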
Example no. 36
def inception_v2_base(inputs,
                      final_endpoint='Mixed_5c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception v2 (6a2).

  Constructs an Inception v2 network from inputs to the given final endpoint.
  This method can construct the network up to the layer inception(5b) as
  described in http://arxiv.org/abs/1502.03167.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
      'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
      'Mixed_5c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
                losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0
  """

  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}

  # Used to find thinned depths for each layer.
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
    with arg_scope(
        [
            layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
            layers.separable_conv2d
        ],
        stride=1,
        padding='SAME'):

      # Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.

      # 224 x 224 x 3
      end_point = 'Conv2d_1a_7x7'
      # depthwise_multiplier here is different from depth_multiplier.
      # depthwise_multiplier determines the output channels of the initial
      # depthwise conv (see docs for tf.nn.separable_conv2d), while
      # depth_multiplier controls the # channels of the subsequent 1x1
      # convolution. Must have
      #   in_channels * depthwise_multiplier <= out_channels
      # so that the separable convolution is not overparameterized.
      depthwise_multiplier = min(int(depth(64) / 3), 8)
      net = layers.separable_conv2d(
          inputs,
          depth(64), [7, 7],
          depth_multiplier=depthwise_multiplier,
          stride=2,
          weights_initializer=trunc_normal(1.0),
          scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 112 x 112 x 64
      end_point = 'MaxPool_2a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2b_1x1'
      net = layers.conv2d(
          net,
          depth(64), [1, 1],
          scope=end_point,
          weights_initializer=trunc_normal(0.1))
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 56 x 56 x 64
      end_point = 'Conv2d_2c_3x3'
      net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 56 x 56 x 192
      end_point = 'MaxPool_3a_3x3'
      net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
      end_points[end_point] = net
      if end_point == final_endpoint:
        return net, end_points
      # 28 x 28 x 192
      # Inception module.
      end_point = 'Mixed_3b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(32), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 28 x 28 x 256
      end_point = 'Mixed_3c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 28 x 28 x 320
      end_point = 'Mixed_4a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(
              branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(
              branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(64), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(96), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_4d'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points

      # 14 x 14 x 576
      end_point = 'Mixed_4e'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(96), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 14 x 14 x 576
      end_point = 'Mixed_5a'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_0 = layers.conv2d(
              branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
          branch_1 = layers.conv2d(
              branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers_lib.max_pool2d(
              net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
        net = array_ops.concat([branch_0, branch_1, branch_2], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
      # 7 x 7 x 1024
      end_point = 'Mixed_5b'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(160), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points

      # 7 x 7 x 1024
      end_point = 'Mixed_5c'
      with variable_scope.variable_scope(end_point):
        with variable_scope.variable_scope('Branch_0'):
          branch_0 = layers.conv2d(
              net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
        with variable_scope.variable_scope('Branch_1'):
          branch_1 = layers.conv2d(
              net,
              depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_1 = layers.conv2d(
              branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
        with variable_scope.variable_scope('Branch_2'):
          branch_2 = layers.conv2d(
              net,
              depth(192), [1, 1],
              weights_initializer=trunc_normal(0.09),
              scope='Conv2d_0a_1x1')
          branch_2 = layers.conv2d(
              branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
          branch_2 = layers.conv2d(
              branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
        with variable_scope.variable_scope('Branch_3'):
          branch_3 = layers_lib.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
          branch_3 = layers.conv2d(
              branch_3,
              depth(128), [1, 1],
              weights_initializer=trunc_normal(0.1),
              scope='Conv2d_0b_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if end_point == final_endpoint:
          return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
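The depth lambda is the whole thinning mechanism; a small worked example with depth_multiplier=0.5 and min_depth=16:

depth_multiplier = 0.5
min_depth = 16
depth = lambda d: max(int(d * depth_multiplier), min_depth)
print(depth(64))   # 32: thinned from 64
print(depth(192))  # 96
print(depth(16))   # 16: int(8) would fall below min_depth, so it is clamped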
Example no. 37
def inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'):
  """Defines the Inception V1 base architecture.

  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
      'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
      'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
    scope: Optional variable_scope.

  Returns:
    A dictionary from components of the network to the corresponding activation.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
  """
  end_points = {}
  with variable_scope.variable_scope(scope, 'InceptionV1', [inputs]):
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected],
        weights_initializer=trunc_normal(0.01)):
      with arg_scope(
          [layers.conv2d, layers_lib.max_pool2d], stride=1, padding='SAME'):
        end_point = 'Conv2d_1a_7x7'
        net = layers.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'MaxPool_2a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'Conv2d_2b_1x1'
        net = layers.conv2d(net, 64, [1, 1], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'Conv2d_2c_3x3'
        net = layers.conv2d(net, 192, [3, 3], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
        end_point = 'MaxPool_3a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_3b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_3c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'MaxPool_4a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4d'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4e'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_4f'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'MaxPool_5a_2x2'
        net = layers_lib.max_pool2d(net, [2, 2], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_5b'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
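            # Note: the 'Conv2d_0a_3x3' scope below repeats the '0a' suffix;
            # this follows the reference slim implementation's naming (kept so
            # pretrained checkpoints load unchanged) rather than the
            # 'Conv2d_0b_3x3' pattern used elsewhere.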
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points

        end_point = 'Mixed_5c'
        with variable_scope.variable_scope(end_point):
          with variable_scope.variable_scope('Branch_0'):
            branch_0 = layers.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
          with variable_scope.variable_scope('Branch_1'):
            branch_1 = layers.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
            branch_1 = layers.conv2d(
                branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_2'):
            branch_2 = layers.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
            branch_2 = layers.conv2d(
                branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
          with variable_scope.variable_scope('Branch_3'):
            branch_3 = layers_lib.max_pool2d(
                net, [3, 3], scope='MaxPool_0a_3x3')
            branch_3 = layers.conv2d(
                branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
          net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
        if final_endpoint == end_point:
          return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
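A minimal call sketch for the final_endpoint mechanism above (a sketch, assuming the enclosing function is tf.contrib.slim's inception_v1_base(inputs, final_endpoint='Mixed_5c', scope='InceptionV1'); the placeholder shape is an illustrative choice):

import tensorflow as tf

# A batch of 224x224 RGB images in NHWC layout (illustrative size).
images = tf.placeholder(tf.float32, [None, 224, 224, 3])

# Build the trunk only up to Mixed_4f; later endpoints are never constructed.
net, end_points = inception_v1_base(images, final_endpoint='Mixed_4f')

# Every endpoint constructed so far is available by name.
for name, activation in sorted(end_points.items()):
    print(name, activation.shape)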
Example no. 38
def get_slim_arch_bn(inputs, isTrainTensor, num_classes=1000, scope='vgg_16'):
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.

        filters = 64

        # arg_scope sets default parameters for a list of ops.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers_lib.repeat(
                inputs,
                2,
                layers.conv2d,
                filters, [3, 3],
                scope='conv1',
                weights_regularizer=slim.l2_regularizer(0.01))
            bn_0 = tf.contrib.layers.batch_norm(net,
                                                center=True,
                                                scale=True,
                                                is_training=isTrainTensor,
                                                scope='bn1',
                                                decay=0.9)
            p_0 = layers_lib.max_pool2d(bn_0, [2, 2], scope='pool1')

            net = layers_lib.repeat(
                p_0,
                2,
                layers.conv2d,
                filters, [3, 3],
                scope='conv2',
                weights_regularizer=slim.l2_regularizer(0.01))
            bn_1 = tf.contrib.layers.batch_norm(net,
                                                center=True,
                                                scale=True,
                                                is_training=isTrainTensor,
                                                scope='bn2',
                                                decay=0.9)
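            # Residual skip connection: the shapes of p_0 and bn_1 match
            # because the convs use SAME padding and a constant filter count.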
            res_1 = p_0 + bn_1
            p_1 = layers_lib.max_pool2d(res_1, [2, 2], scope='pool2')

            net = layers_lib.repeat(
                p_1,
                3,
                layers.conv2d,
                filters, [4, 4],
                scope='conv3',
                weights_regularizer=slim.l2_regularizer(0.01))
            bn_2 = tf.contrib.layers.batch_norm(net,
                                                center=True,
                                                scale=True,
                                                is_training=isTrainTensor,
                                                scope='bn3',
                                                decay=0.9)
            res_2 = p_1 + bn_2
            p_2 = layers_lib.max_pool2d(res_2, [2, 2], scope='pool3')

            net = layers_lib.repeat(
                p_2,
                3,
                layers.conv2d,
                filters, [5, 5],
                scope='conv4',
                weights_regularizer=slim.l2_regularizer(0.01))
            bn_3 = tf.contrib.layers.batch_norm(net,
                                                center=True,
                                                scale=True,
                                                is_training=isTrainTensor,
                                                scope='bn4',
                                                decay=0.9)
            res_3 = p_2 + bn_3
            p_3 = layers_lib.max_pool2d(res_3, [2, 2], scope='pool4')

            last_conv = net = layers_lib.repeat(
                p_3,
                3,
                layers.conv2d,
                filters, [5, 5],
                scope='conv5',
                weights_regularizer=slim.l2_regularizer(0.01))

            # For a 224x224 input, the feature maps here are 14x14.
            net = tf.reduce_mean(net, [1, 2])  # Global average pooling

            # Add a float32 mask with the same shape as the global-average-
            # pooling output; it defaults to ones via placeholder_with_default
            # and can be overridden at feed time.

            mask = tf.placeholder_with_default(tf.ones_like(net),
                                               shape=net.shape,
                                               name='gap_mask')
            net = tf.multiply(net, mask)

            net = layers_lib.fully_connected(net,
                                             num_classes,
                                             activation_fn=None,
                                             biases_initializer=None,
                                             scope='softmax_logits')

            with tf.variable_scope("raw_CAM"):
                w_tensor_name = "vgg_16/softmax_logits/weights:0"
                s_w = tf.get_default_graph().get_tensor_by_name(w_tensor_name)
                softmax_weights = tf.expand_dims(tf.expand_dims(s_w, 0),
                                                 0)  # reshape to match 1x1xFxC
                # Contract (N x lh x lw x F) with (1 x 1 x F x C) over the F axis.
                cam = tf.tensordot(last_conv,
                                   softmax_weights, [[3], [2]],
                                   name='cam_out')

            # Convert end_points_collection into an end_point dict.
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            return net, end_points
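A usage sketch for the gap_mask placeholder and raw_CAM output above (hypothetical driver code: the fetched tensor names assume scope='vgg_16' and that the scope is not uniquified by reuse, consistent with the weight lookup inside get_slim_arch_bn; the input placeholder and batch are illustrative):

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
is_train = tf.placeholder_with_default(False, shape=(), name='is_train')
logits, end_points = get_slim_arch_bn(images, is_train, num_classes=10)

graph = tf.get_default_graph()
# Assumed full tensor names, derived from the scopes used above.
mask_t = graph.get_tensor_by_name('vgg_16/gap_mask:0')
cam_t = graph.get_tensor_by_name('vgg_16/raw_CAM/cam_out:0')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.zeros([1, 224, 224, 3], np.float32)
    # Override the default all-ones mask to silence GAP channel 0.
    mask = np.ones([1, 64], np.float32)
    mask[0, 0] = 0.0
    out_logits, cam = sess.run([logits, cam_t],
                               {images: batch, mask_t: mask})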
Example no. 39
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and the initializers changed from
        random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not the spatial dimensions of the outputs
      should be squeezed. Useful for removing unnecessary dimensions in
      classification mode.
    scope: Optional scope for the variables.

  Returns:
    A tuple (net, end_points): the last op containing the log predictions, and
    a dictionary of end point activations.
  """
  with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=[end_points_collection]):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = layers.conv2d(net, 192, [5, 5], scope='conv2')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = layers.conv2d(net, 384, [3, 3], scope='conv3')
      net = layers.conv2d(net, 384, [3, 3], scope='conv4')
      net = layers.conv2d(net, 256, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')

      # Use conv2d instead of fully_connected layers.
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')

      # Convert end_points_collection into an end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
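A minimal call sketch for alexnet_v2 (hypothetical driver code; the 224x224 input size follows the docstring's note for classification mode):

import tensorflow as tf

# Classification mode: 224x224 inputs, spatial dimensions squeezed away.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = alexnet_v2(images, num_classes=1000, is_training=False)

# logits has shape [batch_size, num_classes]; end_points maps the collected
# layer aliases (conv/pool/fc scopes) to their activation tensors.
probabilities = tf.nn.softmax(logits)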