Example #1
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'vgg_a', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers_lib.repeat(
          inputs, 1, layers.conv2d, 64, [3, 3], scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers_lib.repeat(net, 1, layers.conv2d, 128, [3, 3], scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers_lib.repeat(net, 2, layers.conv2d, 256, [3, 3], scope='conv3')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv4')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
      net = layers_lib.repeat(net, 2, layers.conv2d, 512, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
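
A minimal usage sketch for vgg_a above, assuming a TensorFlow 1.x graph and that the contrib aliases used in the example (variable_scope, arg_scope, layers, layers_lib, utils, array_ops) are already imported at module level:

import tensorflow as tf

# Hypothetical classification-mode usage; 224x224 inputs as the docstring notes,
# so the 7x7 'fc6' convolution reduces the feature map to 1x1.
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)

# end_points maps the collected op names to tensors, which is convenient for
# feature extraction or for debugging intermediate shapes.
predictions = tf.nn.softmax(logits, name='predictions')
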
Example #2
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.7,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and the initializers have been changed
        from random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=[end_points_collection]):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = layers.conv2d(net, 192, [5, 5], scope='conv2')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = layers.conv2d(net, 384, [3, 3], scope='conv3')
      net = layers.conv2d(net, 384, [3, 3], scope='conv4')
      net = layers.conv2d(net, 256, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
      # Use conv2d instead of fully_connected layers.
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = slim.batch_norm(
            net, decay=0.9999, center=True, scale=False, epsilon=0.001,
            is_training=is_training, scope='bn1')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = slim.batch_norm(
            net, decay=0.9999, center=True, scale=False, epsilon=0.001,
            is_training=is_training, scope='bn2')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')

      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
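
This variant inserts slim.batch_norm after fc6 and fc7, so if it is trained, the batch-norm moving-average update ops have to run alongside the train op. A minimal sketch, assuming TF 1.x and that slim = tf.contrib.slim as the example implies:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.int64, [None])

logits, _ = alexnet_v2(images, num_classes=1000, is_training=True)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

# slim.batch_norm registers its moving-average updates in GraphKeys.UPDATE_OPS.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
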
Example #3
def alexnet(inputs,
            is_training=True,
            dropout_keep_prob=0.5):
    with tf.variable_scope("alexnet", reuse=tf.AUTO_REUSE):
        with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d],
                data_format="NCHW"):
            x = layers.conv2d(
                inputs, 64, [3, 3], padding='VALID', scope='conv1')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool1')
            x = layers.conv2d(x, 192, [5, 5], scope='conv2')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool2')
            x = layers.conv2d(x, 384, [3, 3], scope='conv3')
            x = layers.conv2d(x, 384, [3, 3], scope='conv4')
            x = layers.conv2d(x, 256, [3, 3], scope='conv5')
            x = layers_lib.max_pool2d(x, [2, 2], 2, scope='pool5')

            with arg_scope(
                    [layers.conv2d],
                    weights_initializer=trunc_normal(0.005),
                    biases_initializer=init_ops.constant_initializer(0.1)):
                x = layers.conv2d(x, 4096, [3, 3], padding='VALID', scope='fc6')
                x = layers_lib.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout6')
                x = layers.conv2d(x, 4096, [1, 1], scope='fc7')
                x = layers_lib.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout7')
                x = layers.conv2d(x, NUM_CLASSES, [1, 1], activation_fn=None, normalizer_fn=None,
                                  biases_initializer=init_ops.zeros_initializer(), scope='fc8')
                x = tf.squeeze(x, [2, 3], name='fc8/squeezed')

        return x
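
Because this version sets data_format="NCHW" on both conv2d and max_pool2d, its inputs are laid out as [batch, channels, height, width] and the final squeeze removes axes 2 and 3. A minimal sketch, assuming a GPU (NCHW convolutions are generally GPU-only in TF 1.x) and treating NUM_CLASSES and the input size as placeholders for whatever the original module defines:

import tensorflow as tf

NUM_CLASSES = 10  # assumed; the example reads a module-level constant
# 32x32 is one input size for which the 3x3 VALID 'fc6' kernel lands on a 1x1
# feature map, so the squeeze over the spatial axes [2, 3] succeeds.
images_nchw = tf.placeholder(tf.float32, [None, 3, 32, 32])
logits = alexnet(images_nchw, is_training=False)
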
Example #5
def _embedding_alexnet(is_training, images, params):
    with tf.variable_scope('Siamese', 'CFCASiamese', [images], reuse=tf.AUTO_REUSE):
        with arg_scope(
                [layers.conv2d], activation_fn=tf.nn.relu):
            net = layers.conv2d(
                images, 96, [11, 11], 4, padding='VALID', scope='conv1')
            # net = layers.batch_norm(net, decay=0.9, epsilon=1e-06, is_training=is_training)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
            net = layers.conv2d(net, 256, [5, 5], scope='conv2')
            # net = layers.batch_norm(net, decay=0.9, epsilon=1e-06, is_training=is_training)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
            net = layers_lib.dropout(
                net, keep_prob=0.7, is_training=is_training)
            net = layers.conv2d(net, 384, [3, 3], scope='conv3')
            net = layers.conv2d(net, 256, [3, 3], scope='conv4')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            net = layers_lib.dropout(
                net, keep_prob=0.7, is_training=is_training)
            net = layers_lib.flatten(net, scope='flatten1')
            net = layers_lib.fully_connected(net, 1024, scope='fc1',
                                             weights_regularizer=layers.l2_regularizer(0.0005))
            net = layers_lib.dropout(
                net, keep_prob=0.5, is_training=is_training)
            net = layers_lib.fully_connected(net, params.embedding_size, scope='fc2',
                                             weights_regularizer=layers.l2_regularizer(0.0005))
            return net
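
Since the variable scope is opened with reuse=tf.AUTO_REUSE, calling this embedding twice shares one set of weights, which is the usual Siamese setup. A minimal sketch, assuming the contrib aliases above are in scope and that params only needs an embedding_size field:

import collections
import tensorflow as tf

Params = collections.namedtuple('Params', ['embedding_size'])  # assumed shape of params
params = Params(embedding_size=128)

left = tf.placeholder(tf.float32, [None, 227, 227, 3])
right = tf.placeholder(tf.float32, [None, 227, 227, 3])

# Both calls resolve to the same 'Siamese' variables thanks to AUTO_REUSE.
emb_left = _embedding_alexnet(True, left, params)
emb_right = _embedding_alexnet(True, right, params)

distance = tf.norm(emb_left - emb_right, axis=1)  # e.g. an L2 matching score
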
Example #6
def fully_connected_networks(net, num_classes, is_training, dropout_keep_prob):
    # Use conv2d instead of fully_connected layers.
    net = layers.conv2d(net, 1024, net.get_shape()[1:3], padding='VALID')
    net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training)
    net = layers.conv2d(net, 1024, [1, 1])
    net = layers_lib.dropout(net, dropout_keep_prob, is_training=is_training)
    net = layers.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None)
    net = array_ops.squeeze(net, [1, 2])
    net = tf.nn.softmax(net, name='predicts')
    return net
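
This head turns whatever spatial extent the backbone leaves into a 1x1 map by using the feature map's own height and width as the kernel size, then squeezes to [batch, num_classes] and applies softmax. A minimal sketch with a toy one-layer "backbone" (the backbone itself is an assumption, not part of the example):

import tensorflow as tf
from tensorflow.contrib import layers

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# A stand-in feature extractor producing a statically known 7x7 spatial size,
# which net.get_shape()[1:3] needs in order to build the first conv2d kernel.
features = layers.conv2d(images, 64, [3, 3], stride=32, scope='toy_backbone')
probs = fully_connected_networks(features, num_classes=10,
                                 is_training=False, dropout_keep_prob=0.5)
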
Example #7
def vgg_16_tcorr(inputs,
           corr_features,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16_tcorr'):

  with variable_scope.variable_scope(scope, 'vgg_16_tcorr', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      forder = corr_features['vgg_16/conv1/conv1_1']
      net, forder = init_conv_corr(inputs, 2, 64, [3, 3], 'conv1', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')

      net, forder = repeat_conv_corr(net, 2, 128, [3, 3], 'conv2', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')

      net, forder = repeat_conv_corr(net, 3, 256, [3, 3], 'conv3', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')

      net, forder = repeat_conv_corr(net, 3, 512, [3, 3], 'conv4', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')

      net, _ = repeat_conv_corr(net, 3, 512, [3, 3], 'conv5', forder, corr_features)
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')

      net = layers.conv2d(net, 512, [1, 1], scope='fuse5')

      # Use conv2d instead of fully_connected layers.
      net = layers.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout6')
      net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
      net = layers_lib.dropout(
          net, dropout_keep_prob, is_training=is_training, scope='dropout7')
      net = layers.conv2d(
          net,
          num_classes, [1, 1],
          activation_fn=None,
          normalizer_fn=None,
          scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
def slim_net_original(image, keep_prob):
    with arg_scope([layers.conv2d, layers.fully_connected], biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(image, 32, [5, 5], scope='conv1', weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(net, 64, [5, 5], scope='conv2', weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
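
A minimal usage sketch for this small conv/fc net, assuming MNIST-sized inputs and that keep_prob is fed as a placeholder so dropout can be turned off at evaluation time (keep_prob=1.0):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
keep_prob = tf.placeholder(tf.float32, shape=[])
logits = slim_net_original(images, keep_prob)  # [batch, 10] class scores
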
Example #9
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1'):
  """Defines the Inception V1 architecture.

  This architecture is defined in:

    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(
            net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
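
A minimal usage sketch for inception_v1, assuming inception_v1_base and the contrib aliases used above are importable; the 224x224 size matches the docstring, so the 7x7 average pool in the Logits branch covers the whole final feature map:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = inception_v1(images, num_classes=1000, is_training=False)

probabilities = end_points['Predictions']  # softmax output added by prediction_fn
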
Example #10
    def alexnet_v2(inputs,
                   is_training=True,
                   emb_size=4096,
                   dropout_keep_prob=0.5,
                   scope='alexnet_v2'):

        inputs = tf.cast(inputs, tf.float32)
        if new_shape is not None:
            shape = new_shape
            inputs = tf.image.resize_images(
                inputs,
                tf.constant(new_shape[:2]),
                method=tf.image.ResizeMethod.BILINEAR)
        else:
            shape = img_shape
        if is_training and augmentation_function is not None:
            inputs = augmentation_function(inputs, shape)
        if image_summary:
            tf.summary.image('Inputs', inputs, max_outputs=3)

        net = inputs
        mean = tf.reduce_mean(net, [1, 2], True)
        std = tf.reduce_mean(tf.square(net - mean), [1, 2], True)
        net = (net - mean) / (std + 1e-5)
        inputs = net

        with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
            end_points_collection = sc.original_name_scope + '_end_points'

            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with arg_scope(
                    [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                    outputs_collections=[end_points_collection]):
                net = layers.conv2d(
                    inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
                net = layers.conv2d(net, 192, [5, 5], scope='conv2')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
                net = layers.conv2d(net, 384, [3, 3], scope='conv3')
                net = layers.conv2d(net, 384, [3, 3], scope='conv4')
                net = layers.conv2d(net, 256, [3, 3], scope='conv5')
                net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')

                net = slim.flatten(net, scope='flatten')

                # Use conv2d instead of fully_connected layers.
                with arg_scope(
                        [slim.fully_connected],
                        weights_initializer=trunc_normal(0.005),
                        biases_initializer=init_ops.constant_initializer(0.1)):
                    net = layers.fully_connected(net, 4096, scope='fc6')
                    net = layers_lib.dropout(
                        net, dropout_keep_prob, is_training=is_training, scope='dropout6')
                    net = layers.fully_connected(net, emb_size, scope='fc7')

        return net
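
The nested alexnet_v2 above reads new_shape, img_shape, augmentation_function and image_summary from an enclosing scope, so it is presumably defined inside a builder along these lines (the builder name and argument list here are assumptions, not part of the example):

def make_alexnet_embedder(img_shape, new_shape=None,
                          augmentation_function=None, image_summary=False):
    """Returns an embedding function that closes over the arguments above."""

    def alexnet_v2(inputs, is_training=True, emb_size=4096,
                   dropout_keep_prob=0.5, scope='alexnet_v2'):
        ...  # the example body above goes here unchanged

    return alexnet_v2
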
Example #11
def alexnet_v2(inputs,
               num_classes=2,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
    inputs = tf.reshape(inputs, (-1, 80, 80, 1))
    with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        debug_shape(inputs)
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=[end_points_collection]):
            net = layers.conv2d(inputs,
                                64, [3, 3],
                                padding='VALID',
                                scope='conv1')
            debug_shape(net)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
            debug_shape(net)
            net = layers.conv2d(net, 192, [5, 5], scope='conv2')
            debug_shape(net)
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
            debug_shape(net)
            net = layers.conv2d(net, 384, [3, 3], scope='conv3')
            net = layers.conv2d(net, 384, [3, 3], scope='conv4')
            net = layers.conv2d(net, 256, [3, 3], scope='conv5')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            debug_shape(net)
            with arg_scope(
                [layers.conv2d],
                    weights_initializer=trunc_normal(0.005),
                    biases_initializer=init_ops.constant_initializer(0.1)):
                net = layers.conv2d(net,
                                    256, [7, 7],
                                    2,
                                    padding='VALID',
                                    scope='fc6')
                debug_shape(net)
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout6')
                net = layers.conv2d(net, 256, [1, 1], scope='fc7')
                debug_shape(net)
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            if spatial_squeeze:
                net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
            end_points[sc.name + '/fc8'] = net
            return net, end_points
Example #12
def roi_fc(inputs, boxes, box_idx, scope='vgg_16'):
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            # Use conv2d instead of fully_connected layers.
            net = roi_pooling(inputs, boxes, box_idx)
            net = layers.conv2d(net,
                                4096, [7, 7],
                                padding='VALID',
                                scope='fc6')
            net = layers_lib.dropout(net,
                                     0.5,
                                     is_training=True,
                                     scope='dropout6')
            net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
            net = layers_lib.dropout(net,
                                     0.5,
                                     is_training=True,
                                     scope='dropout7')

            return net
def inception_2d_fields(img,
                        fields,
                        num_classes=30,
                        is_training=True,
                        dropout_keep_prob=0.6,
                        prediction_fn=layers_lib.softmax,
                        spatial_squeeze=True,
                        reuse=None,
                        scope='InceptionV1_Fields'
                        ):
    with arg_scope([layers.conv2d, layers_lib.fully_connected],
                   weights_initializer=tf.contrib.layers.xavier_initializer(),
                   biases_initializer=tf.constant_initializer(0.2),
                   weights_regularizer=regularizers.l2_regularizer(0.0002),
                   biases_regularizer=regularizers.l2_regularizer(0.0002)):
        net, end_points = inception_2d.inception_v1_base(img, scope=scope, final_endpoint='Mixed_4b')
        with variable_scope.variable_scope('Logits'):
            net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope='AvgPool_0a_5x5')
            net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
            net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
            net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')
            net = tf.concat([net, fields], axis=1)
            net = layers.fully_connected(inputs=net, num_outputs=1024)
            net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
            logits = layers.fully_connected(inputs=net,
                                            num_outputs=num_classes,
                                            activation_fn=None,
                                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                                            biases_initializer=tf.constant_initializer(0.0),
                                            weights_regularizer=regularizers.l2_regularizer(0.0002),
                                            biases_regularizer=regularizers.l2_regularizer(0.0002),
                                            scope='InnerProduct')
            # logits = layers.conv2d(
            #     net,
            #     num_classes, [1, 1],
            #     activation_fn=None,
            #     normalizer_fn=None,
            #     scope='Conv2d_0c_1x1')
            # The fully_connected head above already yields logits of shape
            # [batch_size, num_classes]; a spatial squeeze only applies when a
            # conv2d head (like the commented-out one) leaves singleton dims.
            if spatial_squeeze and logits.shape.ndims == 4:
                logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')


    return logits, end_points
def slim_net_original(image, keep_prob):
    with arg_scope(
        [layers.conv2d, layers.fully_connected],
            biases_initializer=tf.random_normal_initializer(stddev=0.1)):

        # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
        # activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
        # weights_initializer=initializers.xavier_initializer(), weights_regularizer=None,
        # biases_initializer=init_ops.zeros_initializer, biases_regularizer=None, scope=None):
        net = layers.conv2d(
            image,
            32, [5, 5],
            scope='conv1',
            weights_regularizer=regularizers.l1_regularizer(0.5))

        # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
        net = layers.max_pool2d(net, 2, scope='pool1')

        net = layers.conv2d(
            net,
            64, [5, 5],
            scope='conv2',
            weights_regularizer=regularizers.l2_regularizer(0.5))
        summaries.summarize_tensor(net, tag='conv2')

        net = layers.max_pool2d(net, 2, scope='pool2')

        net = layers.flatten(net, scope='flatten1')

        # fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None,
        # normalizer_params=None, weights_initializer=initializers.xavier_initializer(),
        # weights_regularizer=None, biases_initializer=init_ops.zeros_initializer,
        # biases_regularizer=None, scope=None):
        net = layers.fully_connected(net, 1024, scope='fc1')

        # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
        net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')

        net = layers.fully_connected(net, 10, scope='fc2')
    return net
Example #15
def alexnet_v2(inputs,
               num_classes=1000,
               is_training=True,
               dropout_keep_prob=0.5,
               spatial_squeeze=True,
               scope='alexnet_v2'):
  """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and the initializers have been changed
        from random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'alexnet_v2', [inputs]) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=[end_points_collection]):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
      net = layers.conv2d(net, 192, [5, 5], scope='conv2')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
      net = layers.conv2d(net, 384, [3, 3], scope='conv3')
      net = layers.conv2d(net, 384, [3, 3], scope='conv4')
      net = layers.conv2d(net, 256, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')

      # Use conv2d instead of fully_connected layers.
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')

      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
Example #16
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.

  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v2_base(
          inputs,
          scope=scope,
          min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with variable_scope.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = layers_lib.avg_pool2d(
            net,
            kernel_size,
            padding='VALID',
            scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
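
A minimal usage sketch for inception_v2; depth_multiplier scales every convolution's channel count (with min_depth as the floor), so values below 1.0 shrink the model. It assumes inception_v2_base, _reduced_kernel_size_for_small_input and the contrib aliases above are in scope:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
logits, end_points = inception_v2(images, num_classes=1000,
                                  is_training=False, depth_multiplier=0.5)
probabilities = end_points['Predictions']
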
Example #17
def vgg_19_base(inputs, dropout_keep_prob=0.5, scope=None):
    """Oxford Net VGG 19-Layers version E Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
    end_points = {}
    with variable_scope.variable_scope(scope, 'vgg_19', [inputs]) as sc:
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected,
             layers_lib.max_pool2d]):
            net = layers_lib.repeat(inputs,
                                    2,
                                    layers.conv2d,
                                    64, [3, 3],
                                    scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = layers_lib.repeat(net,
                                    2,
                                    layers.conv2d,
                                    128, [3, 3],
                                    scope='conv2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = layers_lib.repeat(net,
                                    4,
                                    layers.conv2d,
                                    256, [3, 3],
                                    scope='conv3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            net = layers_lib.repeat(net,
                                    4,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv4')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
            net = layers_lib.repeat(net,
                                    4,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv5')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
            # Use conv2d instead of fully_connected layers.
            net = layers.conv2d(net,
                                4096, [7, 7],
                                padding='VALID',
                                scope='fc6')
            net = layers_lib.dropout(net, dropout_keep_prob, scope='dropout6')
            net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
            end_point = 'VGG19_fc7'
            end_points[end_point] = net
            return net, end_points
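
vgg_19_base returns fc7 features rather than class logits, so it is mainly useful as a feature extractor. A minimal sketch; since this variant has no is_training switch, passing dropout_keep_prob=1.0 effectively disables dropout at evaluation time:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
features, end_points = vgg_19_base(images, dropout_keep_prob=1.0)
fc7 = end_points['VGG19_fc7']  # [batch, 1, 1, 4096] for 224x224 inputs
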
Example #18
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.

  """
  with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers.conv2d(net, 512, [3, 3], scope='conv3')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
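
A minimal usage sketch for overfeat with the 231x231 input size the docstring calls for; at that size pool5 leaves a 6x6 feature map, so the 6x6 VALID 'fc6' kernel reduces it to 1x1 and the spatial squeeze yields [batch, num_classes]. Larger inputs with spatial_squeeze=False instead produce a spatial map of class scores:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 231, 231, 3])
logits, end_points = overfeat(images, num_classes=1000, is_training=False)
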
Example #19
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
    """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.

  """
    with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
        end_points_collection = sc.name + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers.conv2d(inputs,
                                64, [11, 11],
                                4,
                                padding='VALID',
                                scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = layers.conv2d(net,
                                256, [5, 5],
                                padding='VALID',
                                scope='conv2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = layers.conv2d(net, 512, [3, 3], scope='conv3')
            net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
            net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
            with arg_scope(
                [layers.conv2d],
                    weights_initializer=trunc_normal(0.005),
                    biases_initializer=init_ops.constant_initializer(0.1)):
                # Use conv2d instead of fully_connected layers.
                net = layers.conv2d(net,
                                    3072, [6, 6],
                                    padding='VALID',
                                    scope='fc6')
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout6')
                net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout7')
                net = layers.conv2d(
                    net,
                    num_classes, [1, 1],
                    activation_fn=None,
                    normalizer_fn=None,
                    biases_initializer=init_ops.zeros_initializer(),
                    scope='fc8')
            # Convert end_points_collection into an end_points dict.
            end_points = utils.convert_collection_to_dict(
                end_points_collection)
            if spatial_squeeze:
                net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[sc.name + '/fc8'] = net
            return net, end_points
Example #20
def ldnet_v1(inputs,
             num_classes=3,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope="ldnet",
             use_deform_conv=True,
             print_current_tensor=False):
    """
    ldnet architecture:
        input: 32*32*3
                             input   depth   kernel   stride   padding
        conv0:   net = conv(input,   32,   [3, 3],    1,     "same")
                 --> 32*32*32
        conv1:    net = conv(net,    32,   [3, 3],    1,     "same")
                 --> 32*32*32
        conv2:   net = conv(net,    64,   [3, 3],    1,     "same")
                 --> 32*32*64
        maxpool1: net = pool(net,          [3, 3],    1,     "same")
                 --> 32*32*64
        conv3:    net = conv(net,    192,  [3, 3],    1,     "same")
                 --> 32*32*192
        maxpool2: net = pool(net,          [3, 3],    1,     "same")
                 --> 32*32*192

        ldnet blocks:
        mixed_1: 32 x 32 x 320 Feature extraction module
        mixed_2: 32 x 32 x 320 Feature extraction module
        mixed_res1: 32 x 32 x 320 Feature extraction module
        mixed_3: 16 x 16 x 640 Dimension reduction module
        mixed_4: 16 x 16 x 640 Feature extraction module
        mixed_res2: 16 x 16 x 640 Feature extraction module
        mixed_5: 8 x 8 x 1280 Dimension reduction module
        mixed_6: 8 x 8 x 1280 Feature extraction module
        mixed_res3: 8 x 8 x 1280 Feature extraction module
        Final pooling and prediction -> 3

    :param inputs: a tensor of size [batch_num, width, height, channel].
    :param num_classes: number of predicted classes.
    :param dropout_keep_prob: dropout keep probability.
    :param spatial_squeeze: whether or not to squeeze the spatial dimensions.
    :param scope: optional scope.
    :param use_deform_conv: whether to use deformable convolution.
    :param print_current_tensor: whether or not to print the current tensor's shape, name and type.

    :return:
        logits: [batch_size, num_classes]
    """

    # end_points will collect relevant activations for the computation
    # of shortcuts.
    end_points = []

    with variable_scope.variable_scope(scope, "ldnet_v1", [inputs]):
        with arg_scope([layers.conv2d, layers_lib.max_pool2d],
                       kernel_size=[3, 3],
                       stride=1,
                       padding='SAME'):
            # input: 32 * 32 * 3
            net = inputs

            end_point = "conv0"
            # if use_deform_conv:
            #     net = ConvOffset2D(3, name='conv0_offset')(net)  # net offset
            net = layers.conv2d(net, 32, scope=end_point)
            if print_current_tensor:
                print(net)
            # --> 32 * 32 * 32

            end_point = "conv1"
            if use_deform_conv:
                net = ConvOffset2D(32, name='conv1_offset')(net)  # net offset
            net = layers.conv2d(net, 32, scope=end_point)
            if print_current_tensor: print(net)
            # --> 32 * 32 * 32

            end_point = "conv2"
            if use_deform_conv:
                net = ConvOffset2D(32, name='conv2_offset')(net)  # net offset
            net = layers.conv2d(net, 64, scope=end_point)
            if print_current_tensor: print(net)
            # --> 32 * 32 * 64

            end_point = "maxpool0"
            net = layers_lib.max_pool2d(net,
                                        kernel_size=[2, 2],
                                        scope=end_point)
            if print_current_tensor: print(net)
            # --> 32 * 32 * 64

            end_point = 'conv3'
            if use_deform_conv:
                net = ConvOffset2D(64, name='conv3_offset')(net)  # net offset
            net = layers.conv2d(net, 192, scope=end_point)
            if print_current_tensor: print(net)
            # end_points.append(net)
            # --> 32 * 32 * 192

            end_point = 'maxpool1'
            net = layers_lib.max_pool2d(net,
                                        kernel_size=[2, 2],
                                        scope=end_point)
            if print_current_tensor: print(net)
            # net.alias = end_point
            # end_points.append(net)
            # --> 32 * 32 * 192

        # ldnet blocks
        with arg_scope(
            [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                stride=1,
                padding='SAME'):
            # mixed_1: 32 x 32 x 320 Feature extraction module
            end_point = 'mixed_1'
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(net,
                                                   first_layer_depth=48,
                                                   second_layer_depth=64,
                                                   last_layer_depth=96,
                                                   scope='feature_extraction')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_2: 32 x 32 x 320 Feature extraction module
            end_point = 'mixed_2'
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48,
                    second_layer_depth=64,
                    last_layer_depth=96,
                    scope='feature_extraction_residual')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_res1: 32 x 32 x 320 Feature extraction module
            end_point = 'mixed_res1'
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48,
                    second_layer_depth=64,
                    last_layer_depth=96,
                    scope='feature_extraction_residual')
                net_linear = layers.conv2d(net,
                                           int(net.shape[3]), [1, 1],
                                           activation_fn=None,
                                           scope='net_linear_projection')
                shortcuts = _shortcuts_addition(net.shape,
                                                end_points[-1],
                                                end_points[-2],
                                                scope="shortcuts_addition")
                net = nn_ops.relu(net_linear + shortcuts)
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_3: 16 x 16 x 640 Dimension reduction module
            end_point = "mixed_3"
            with variable_scope.variable_scope(end_point):
                net = _dimension_reduction(net,
                                           branch_0_depth=224,
                                           branch_1_depth=96,
                                           scope='dimension_reduction')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_4: 16 x 16 x 640 Feature extraction module
            end_point = "mixed_4"
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48 * 2,
                    second_layer_depth=64 * 2,
                    last_layer_depth=96 * 2,
                    scope='feature_extraction_residual')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_res2: 16 x 16 x 640 Feature extraction module
            end_point = "mixed_res2"
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48 * 2,
                    second_layer_depth=64 * 2,
                    last_layer_depth=96 * 2,
                    scope='feature_extraction_residual')
                net_linear = layers.conv2d(net,
                                           int(net.shape[3]), [1, 1],
                                           activation_fn=None,
                                           scope='net_linear_projection')
                shortcuts = _shortcuts_addition(net.shape,
                                                end_points[-1],
                                                end_points[-2],
                                                scope="shortcuts_addition")
                net = nn_ops.relu(net_linear + shortcuts)
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_5: 8 x 8 x 1280 Dimension reduction module
            end_point = "mixed_5"
            with variable_scope.variable_scope(end_point):
                net = _dimension_reduction(net,
                                           branch_0_depth=224 * 2,
                                           branch_1_depth=96 * 2,
                                           scope='dimension_reduction')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_6: 8 x 8 x 1280 Feature extraction module
            end_point = "mixed_6"
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48 * 4,
                    second_layer_depth=64 * 4,
                    last_layer_depth=96 * 4,
                    scope='feature_extraction_residual')
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

            # mixed_res3: 8 x 8 x 1280 Feature extraction module
            end_point = "mixed_res3"
            with variable_scope.variable_scope(end_point):
                net = _feature_extraction_residual(
                    net,
                    first_layer_depth=48 * 4,
                    second_layer_depth=64 * 4,
                    last_layer_depth=96 * 4,
                    scope='feature_extraction_residual')
                net_linear = layers.conv2d(net,
                                           int(net.shape[3]), [1, 1],
                                           activation_fn=None,
                                           scope='net_linear_projection')
                shortcuts = _shortcuts_addition(net.shape,
                                                end_points[-1],
                                                end_points[-2],
                                                scope="shortcuts_addition")
                net = nn_ops.relu(net_linear + shortcuts)
                end_points.append(net)
                if print_current_tensor: print(net, len(end_points))

        # Final pooling and prediction
        with variable_scope.variable_scope('Logits'):
            with arg_scope([layers.conv2d],
                           normalizer_fn=None,
                           normalizer_params=None):
                net = layers.conv2d(net,
                                    int(net.shape[3]), [3, 3],
                                    stride=2,
                                    scope='conv2d_1a_3x3')
                # 4 x 4 x 1280
                net = layers_lib.avg_pool2d(net, [4, 4],
                                            padding='VALID',
                                            scope='AvgPool_1b_4x4')
                # 1 x 1 x 1280

                # net = layers.conv2d(net, 640, [1, 1], scope='Conv2d_0c_1x1')
                # local1
                with variable_scope.variable_scope('local1') as scope:
                    # Move everything into depth so we can perform a single matrix multiply.
                    reshape = tf.reshape(net, [-1, 1280])
                    weights = _variable_with_weight_decay('weights',
                                                          shape=[1280, 640],
                                                          stddev=0.04,
                                                          wd=0.0001)
                    biases = _variable_on_cpu('biases', [640],
                                              tf.constant_initializer(0.1))
                    net = tf.nn.relu(tf.matmul(reshape, weights) + biases,
                                     name=scope.name)
                # 1 x 1 x 640

                net = layers_lib.dropout(net,
                                         keep_prob=dropout_keep_prob,
                                         scope='Dropout_0c')

                # net = layers.conv2d(net, 320, [1, 1], scope='Conv2d_0d_1x1')
                # local2
                with variable_scope.variable_scope('local2') as scope:
                    weights = _variable_with_weight_decay('weights',
                                                          shape=[640, 320],
                                                          stddev=0.04,
                                                          wd=0.0001)
                    biases = _variable_on_cpu('biases', [320],
                                              tf.constant_initializer(0.1))
                    net = tf.nn.relu(tf.matmul(net, weights) + biases,
                                     name=scope.name)
                # 1 x 1 x 320

                net = layers_lib.dropout(net,
                                         keep_prob=dropout_keep_prob,
                                         scope='Dropout_0d')
                net = tf.expand_dims(net, 1)
                net = tf.expand_dims(net, 1)

                logits = layers.conv2d(net,
                                       num_classes, [1, 1],
                                       activation_fn=None,
                                       normalizer_fn=None,
                                       scope='Conv2d_0e_1x1')
                # 1 x 1 x 3
                if spatial_squeeze:
                    logits = array_ops.squeeze(logits, [1, 2],
                                               name='SpatialSqueeze')
                    # 3

    return logits
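
The local1/local2 blocks above build fully connected layers by hand (reshape plus matmul). As a point of comparison only, here is a minimal sketch of the same two-layer head written with the contrib layers already used in this listing; the placeholder name feat is illustrative, and the weight-decay term added by _variable_with_weight_decay is not reproduced:

import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers

# Assumed input: a [batch, 1, 1, 1280] feature map, as produced by the average
# pool in the Logits block above.
feat = tf.placeholder(tf.float32, [None, 1, 1, 1280])
flat = tf.reshape(feat, [-1, 1280])                                   # [batch, 1280]
fc1 = contrib_layers.fully_connected(flat, 640, scope='local1_alt')   # ReLU by default
fc2 = contrib_layers.fully_connected(fc1, 320, scope='local2_alt')    # [batch, 320]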
Example #21
def inference(image_input):
    #scope=alexnet_v2_arg_scope()
    num_classes = 20
    is_training = True
    dropout_keep_prob = 0.5
    spatial_squeeze = True
    scope = 'alexnet_v2'
    #with slim.arg_scope(alexnet_v2_arg_scope()):
    with variable_scope.variable_scope(scope, 'alexnet_v2',
                                       [image_input]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers.conv2d(
                image_input,
                64, [11, 11],
                4,
                weights_initializer=tf.constant_initializer(W1),
                biases_initializer=tf.constant_initializer(b1),
                activation_fn=nn.sigmoid,
                padding='VALID',
                scope='conv1')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
            net = layers.conv2d(
                net,
                192, [5, 5],
                weights_initializer=tf.constant_initializer(W2),
                biases_initializer=tf.constant_initializer(b2),
                activation_fn=nn.sigmoid,
                scope='conv2')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
            net = layers.conv2d(
                net,
                384, [3, 3],
                weights_initializer=tf.constant_initializer(W3),
                biases_initializer=tf.constant_initializer(b3),
                activation_fn=nn.sigmoid,
                scope='conv3')
            net = layers.conv2d(
                net,
                384, [3, 3],
                weights_initializer=tf.constant_initializer(W4),
                biases_initializer=tf.constant_initializer(b4),
                activation_fn=nn.sigmoid,
                scope='conv4')
            net = layers.conv2d(
                net,
                256, [3, 3],
                weights_initializer=tf.constant_initializer(W5),
                biases_initializer=tf.constant_initializer(b5),
                activation_fn=nn.sigmoid,
                scope='conv5')
            net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
            # Use conv2d instead of fully_connected layers.
            with arg_scope([layers.conv2d],
                           weights_initializer=trunc_normal(0.005),
                           biases_initializer=init_ops.constant_initializer(
                               0.1)):
                net = layers.conv2d(net,
                                    4096, [5, 5],
                                    padding='VALID',
                                    scope='fc6')
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout6')
                net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout7')
                net = layers.conv2d(
                    net,
                    num_classes, [1, 1],
                    activation_fn=None,
                    normalizer_fn=None,
                    biases_initializer=init_ops.zeros_initializer(),
                    scope='fc8')

                # Convert end_points_collection into a end_point dict.
                end_points = utils.convert_collection_to_dict(
                    end_points_collection)
                if spatial_squeeze:
                    net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
                    end_points[sc.name + '/fc8'] = net
                return net, end_points
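
A minimal usage sketch for the inference function above, assuming the pre-loaded constant arrays W1..W5 / b1..b5 are already defined in scope and the input follows the 224x224 AlexNet convention (the placeholder name is hypothetical):

import tensorflow as tf

image_input = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image_input')
net, end_points = inference(image_input)
# With a 224x224 input, fc8 is 1x1 spatially, so the squeeze yields
# net of shape [batch, 20] (num_classes is hard-coded to 20 above).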
Example #22
def inception_v3(inputs,
                 num_classes=12,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 reuse=None,
                 hparam_string=None,
                 global_pool=True,
                 scope='InceptionV3'):
    """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"

  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing dropout_keep_prob or the min_depth / depth_multiplier
  hyperparameters supplied through hparam_string.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the percentage of activation values that are retained.
    prediction_fn: a function to get predictions out of logits.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    hparam_string: string consumed by create_hparams to override the default
      hyperparameters (including min_depth and depth_multiplier) that are
      forwarded to inception_v3_base.
    global_pool: if True, apply global average and max pooling to the final
      feature map and concatenate the results; otherwise use a fixed-size
      average pool.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """

    hparams = create_hparams(hparam_string=hparam_string)

    if hparams.depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * hparams.depth_multiplier), hparams.min_depth)

    with variable_scope.variable_scope(scope,
                                       'InceptionV3', [inputs, num_classes],
                                       reuse=reuse) as scope:
        with arg_scope([layers_lib.batch_norm, layers_lib.dropout],
                       is_training=is_training):
            net, end_points = inception_v3_base(inputs,
                                                scope=scope,
                                                **hparams.values())

            # Final pooling and prediction
            with variable_scope.variable_scope('Logits'):
                if global_pool:
                    # Global average pooling.
                    net_avg = math_ops.reduce_mean(net, [1, 2],
                                                   keep_dims=True,
                                                   name='global_avg_pool')
                    net_max = math_ops.reduce_max(net, [1, 2],
                                                  keep_dims=True,
                                                  name='global_max_pool')
                    net = array_ops.concat([net_avg, net_max], -1)
                else:
                    kernel_size = _reduced_kernel_size_for_small_input(
                        net, [8, 8])
                    net = layers_lib.avg_pool2d(
                        net,
                        kernel_size,
                        padding='VALID',
                        scope='AvgPool_1a_{}x{}'.format(*kernel_size))

                # 1 x 1 x 2048
                net = layers_lib.dropout(net,
                                         keep_prob=dropout_keep_prob,
                                         scope='Dropout_1b')
                end_points['PreLogits'] = net
                # 2048
                logits = layers.conv2d(net,
                                       num_classes, [1, 1],
                                       activation_fn=None,
                                       normalizer_fn=None,
                                       scope='Conv2d_1c_1x1')
                logits = layers_core.dense(array_ops.squeeze(
                    logits, [1, 2], name='SpatialSqueeze'),
                                           num_classes,
                                           name='logits')

            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits,
                                                      scope='Predictions')
    return logits, end_points
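
A minimal usage sketch for this inception_v3 variant, assuming create_hparams and inception_v3_base are defined elsewhere in the same module and supply hyperparameters compatible with the base network; the 299x299 input size follows the docstring:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
logits, end_points = inception_v3(images, num_classes=12, is_training=False)
probabilities = end_points['Predictions']   # softmax over the 12 classes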
Example #23
def vgg_16_small_img(inputs,
                     num_classes=1000,
                     is_training=True,
                     dropout_keep_prob=0.5,
                     spatial_squeeze=True,
                     scope='vgg_16'):
    # VGG-16-style convolutional stack sized for small input images; no
    # end_points collection is used in this variant.

    net = layers_lib.conv2d(inputs,
                            64, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv1")
    net = tf.nn.relu(net, name="relu_conv1")
    net = layers_lib.conv2d(net,
                            64, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv2")
    net = tf.nn.relu(net, name="relu_conv2")
    net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
    net = layers_lib.conv2d(net,
                            128, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv3")
    net = tf.nn.relu(net, name="relu_conv3")
    net = layers_lib.conv2d(net,
                            128, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv4")
    net = tf.nn.relu(net, name="relu_conv4")
    net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
    net = layers_lib.conv2d(net,
                            256, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv5")
    net = tf.nn.relu(net, name="relu_conv5")
    net = layers_lib.conv2d(net,
                            256, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv6")
    net = tf.nn.relu(net, name="relu_conv6")
    net = layers_lib.conv2d(net,
                            256, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv7")
    net = tf.nn.relu(net, name="relu_conv7")
    net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv8")
    net = tf.nn.relu(net, name="relu_conv8")
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv9")
    net = tf.nn.relu(net, name="relu_conv9")
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv10")
    net = tf.nn.relu(net, name="relu_conv10")
    net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv11")
    net = tf.nn.relu(net, name="relu_conv11")
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv12")
    net = tf.nn.relu(net, name="relu_conv12")
    net = layers_lib.conv2d(net,
                            512, [3, 3],
                            padding="SAME",
                            data_format="NHWC",
                            scope="conv13")
    net = tf.nn.relu(net, name="relu_conv13")
    net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')

    # Use conv2d instead of fully_connected layers.
    net = layers.conv2d(net, 512, [1, 1], padding='VALID', scope='fc6')
    net = tf.nn.relu(net, name="relu_fc6")
    net = layers_lib.dropout(net,
                             dropout_keep_prob,
                             is_training=is_training,
                             scope='dropout6')
    net = layers.conv2d(net,
                        num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
    # Remove the spatial dimensions for classification.
    if spatial_squeeze:
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
    return net
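
A minimal usage sketch, assuming small inputs such as CIFAR-sized 32x32 images; the five 2x2 max pools then leave a 1x1 feature map, so the final squeeze is valid:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits = vgg_16_small_img(images, num_classes=10, is_training=False)
# logits has shape [batch, 10] when spatial_squeeze=True.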
Example #24
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dataset='cifar',
           scope='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    Note: The classifier head is implemented with flatten / relu / linear layers
          and the dropout rate is fixed at 0.5. Intended for small inputs such
          as 32x32 (CIFAR, SVHN or MNIST).

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dataset: one of 'cifar', 'svhn' or 'mnist'; selects the filter widths and
        whether the final max pool is applied.
      scope: Optional scope for the variables.

    Returns:
      the last op containing the log predictions and end_points dict.
    """
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
                [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            def ConvBatchRelu(layer_input, n_output_plane, name):
                with variable_scope.variable_scope(name):
                    output = layers.conv2d(layer_input, n_output_plane, [3, 3], scope='conv')
                    output = layers.batch_norm(output, center=True, scale=True, activation_fn=tf.nn.relu,
                                               is_training=is_training)
                return output

            filters = [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512]
            if dataset == 'mnist':
                filters = [_ // 4 for _ in filters]
            elif dataset not in ('cifar', 'svhn'):
                raise NotImplementedError("Dataset {} is not supported!".format(dataset))

            net = ConvBatchRelu(inputs, filters[0], 'conv1_1')
            net = ConvBatchRelu(net, filters[1], 'conv1_2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            net = ConvBatchRelu(net, filters[2], 'conv2_1')
            net = ConvBatchRelu(net, filters[3], 'conv2_2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            net = ConvBatchRelu(net, filters[4], 'conv3_1')
            net = ConvBatchRelu(net, filters[5], 'conv3_2')
            net = ConvBatchRelu(net, filters[6], 'conv3_3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            net = ConvBatchRelu(net, filters[7], 'conv4_1')
            net = ConvBatchRelu(net, filters[8], 'conv4_2')
            net = ConvBatchRelu(net, filters[9], 'conv4_3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
            net = ConvBatchRelu(net, filters[10], 'conv5_1')
            net = ConvBatchRelu(net, filters[11], 'conv5_2')
            net = ConvBatchRelu(net, filters[12], 'conv5_3')
            if dataset == 'cifar':
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
            # Classifier head: flatten, dropout, fully connected (relu), dropout, linear.
            net = layers.flatten(net, scope='flatten6')
            net = layers_lib.dropout(net, 0.5, is_training=is_training, scope='dropout6')
            net = layers.relu(net, filters[13])
            net = layers_lib.dropout(net, 0.5, is_training=is_training, scope='dropout7')
            net = layers.linear(net, num_classes)
            # Convert end_points_collection into a end_point dict.
            end_points = utils.convert_collection_to_dict(end_points_collection)
            end_points[sc.name + '/fc8'] = net
            return net, end_points
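
A minimal usage sketch for this vgg_16 variant, assuming CIFAR-style 32x32 inputs; passing dataset='mnist' would shrink every filter bank by a factor of four, as the code above shows:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits, end_points = vgg_16(images, num_classes=10, is_training=True, dataset='cifar')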
Example #25
def ldnet(inputs, num_classes=3, dropout_keep_prob=0.5, spatial_squeeze=True, scope="ldnet",
          print_current_tensor=False):
    """
    ldnet architecture:
        input: 32*32*3
                             input   depth   kernel   stride   padding
        conv0:   net = conv(input,   32,   [3, 3],    1,     "same")
                 --> 32*32*32
        conv1:    net = conv(net,    32,   [3, 3],    1,     "same")
                 --> 32*32*32
        conv2:   net = conv(net,    64,   [3, 3],    1,     "same")
                 --> 32*32*64
        maxpool0: net = pool(net,          [2, 2],    1,     "same")
                 --> 32*32*64
        conv3:    net = conv(net,    192,  [3, 3],    1,     "same")
                 --> 32*32*192
        maxpool1: net = pool(net,          [2, 2],    1,     "same")
                 --> 32*32*192

        ldnet blocks:
        mixed_1: 32 x 32 x 320 Feature extraction module
        mixed_2: 16 x 16 x 640 Dimension reduction module
        mixed_3: 16 x 16 x 640 Feature extraction module
        mixed_4: 8 x 8 x 1280 Dimension reduction module
        mixed_5: 4 x 4 x 1280 Dimension reduction module
        Final pooling and prediction -> 3

    :param inputs: a tensor of size [batch_num, height, width, channel].
    :param num_classes: number of predicted classes.
    :param dropout_keep_prob: the probability that activations are kept in the dropout layers.
    :param spatial_squeeze: whether or not to squeeze the spatial dimensions of the logits.
    :param scope: optional scope for the variables.
    :param print_current_tensor: whether to print the current tensor shape, name and type.

    :return:
        logits: [batch, num_classes]
    """

    with variable_scope.variable_scope(scope, "ldnet", [inputs]):
        with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                kernel_size=[3, 3],
                stride=1,
                padding='SAME'):
            # input: 32 * 32 * 3

            net = layers.conv2d(inputs, 32, scope="conv0")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 32

            net = layers.conv2d(net, 32, scope="conv1")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 32

            net = layers.conv2d(net, 64, scope="conv2")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 64

            net = layers_lib.max_pool2d(net, kernel_size=[2, 2], scope="maxpool0")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 64

            net = layers.conv2d(net, 192, scope="conv3")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 192

            net = layers_lib.max_pool2d(net, kernel_size=[2, 2], scope="maxpool1")
            if print_current_tensor: print(net)
            # --> 32 * 32 * 192

        # ldnet blocks
        with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                stride=1,
                padding='SAME'):
            # mixed_1: 32 x 32 x 320 Feature extraction module
            with variable_scope.variable_scope("mixed_1"):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(
                        net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = layers.conv2d(
                        branch_0, 64, [3, 3], scope='Conv2d_0b_3x3')

                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(
                        net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(
                        branch_1, 64, [5, 5], scope='Conv2d_0b_5x5')
                    branch_1 = layers.conv2d(
                        branch_1, 96, [5, 5], scope='Conv2d_0c_5x5')

                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(
                        net, 48, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(
                        branch_2, 64, [7, 7], scope='Conv2d_0b_7x7')

                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [5, 5], scope='AvgPool_0a_5x5')
                    branch_3 = layers.conv2d(
                        branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')

                net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
                if print_current_tensor: print(net)

            # mixed_2: 16 x 16 x 640 Dimension reduction module
            with variable_scope.variable_scope("mixed_2"):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(
                        net,
                        224, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(
                        net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(
                        branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = layers.conv2d(
                        branch_1,
                        96, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers_lib.max_pool2d(
                        net, [3, 3], stride=2, scope='MaxPool_1a_3x3')

                net = array_ops.concat([branch_0, branch_1, branch_2], 3)
                if print_current_tensor: print(net)

            # mixed_3: 16 x 16 x 640 Feature extraction module
            with variable_scope.variable_scope("mixed_3"):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(
                        net, 96, [1, 1], scope='Conv2d_0a_1x1')
                    branch_0 = layers.conv2d(
                        branch_0, 128, [3, 3], scope='Conv2d_0b_3x3')

                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(
                        net, 96, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(
                        branch_1, 128, [5, 5], scope='Conv2d_0b_5x5')
                    branch_1 = layers.conv2d(
                        branch_1, 192, [5, 5], scope='Conv2d_0c_5x5')

                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(
                        net, 96, [1, 1], scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(
                        branch_2, 128, [7, 7], scope='Conv2d_0b_7x7')

                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [5, 5], scope='AvgPool_0a_5x5')
                    branch_3 = layers.conv2d(
                        branch_3, 192, [1, 1], scope='Conv2d_0b_1x1')

                net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
                if print_current_tensor: print(net)

            # mixed_4: 8 x 8 x 1280 Dimension reduction module
            with variable_scope.variable_scope("mixed_4"):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(
                        net,
                        448, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(
                        net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(
                        branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = layers.conv2d(
                        branch_1,
                        192, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers_lib.max_pool2d(
                        net, [3, 3], stride=2, scope='MaxPool_1a_3x3')

                net = array_ops.concat([branch_0, branch_1, branch_2], 3)
                if print_current_tensor: print(net)

            # mixed_5: 4 x 4 x 1280 Dimension reduction module
            with variable_scope.variable_scope("mixed_5"):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(
                        net,
                        448, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(
                        net, 64, [1, 1], scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(
                        branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
                    branch_1 = layers.conv2d(
                        branch_1,
                        192, [3, 3],
                        stride=2,
                        scope='Conv2d_1a_1x1')

                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers_lib.max_pool2d(
                        net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                    branch_2 = layers.conv2d(
                        branch_2, 640, [1, 1], scope='Conv2d_0b_1x1')

                net = array_ops.concat([branch_0, branch_1, branch_2], 3)
                if print_current_tensor: print(net)

            # Final pooling and prediction
            with variable_scope.variable_scope('Logits'):
                net = layers_lib.avg_pool2d(
                    net,
                    [4, 4],
                    padding='VALID',
                    scope='AvgPool_1a_4x4')
                # 1 x 1 x 1280

                # net = layers.conv2d(net, 640, [1, 1], scope='Conv2d_0b_1x1')
                # local1
                with variable_scope.variable_scope('local1') as scope:
                    # Move everything into depth so we can perform a single matrix multiply.
                    reshape = tf.reshape(net, [-1, 1280])
                    weights = _variable_with_weight_decay('weights', shape=[1280, 640],
                                                          stddev=0.04, wd=0.0001)
                    biases = _variable_on_cpu('biases', [640], tf.constant_initializer(0.1))
                    net = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
                # 1 x 1 x 640

                net = layers_lib.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_0c')

                # net = layers.conv2d(net, 320, [1, 1], scope='Conv2d_0d_1x1')
                # local2
                with variable_scope.variable_scope('local2') as scope:
                    weights = _variable_with_weight_decay('weights', shape=[640, 320],
                                                          stddev=0.04, wd=0.0001)
                    biases = _variable_on_cpu('biases', [320], tf.constant_initializer(0.1))
                    net = tf.nn.relu(tf.matmul(net, weights) + biases, name=scope.name)
                # 1 x 1 x 320

                net = layers_lib.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_0e')
                net = tf.expand_dims(net, 1)
                net = tf.expand_dims(net, 1)

                logits = layers.conv2d(
                    net,
                    num_classes, [1, 1],
                    activation_fn=None,
                    normalizer_fn=None,
                    scope='Conv2d_0f_1x1')
                # 1 x 1 x 3
                if spatial_squeeze:
                    logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
                    # 3

    return logits
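
A minimal usage sketch for ldnet, assuming the 32x32x3 input size described in the docstring and that the helpers _variable_with_weight_decay and _variable_on_cpu are defined elsewhere in the file:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
logits = ldnet(images, num_classes=3, print_current_tensor=False)
# logits has shape [batch, 3] when spatial_squeeze=True.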
Example #26
def alexnet_v2(inputs,
               is_training=True,
               dropout_keep_prob=0.5,
               scope='alexnet_v2'):
    """Modified version of AlexNet version 2 with a deconvolutional expanding
  path for semantic segmentation.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf

  Note: All the fully_connected layers have been transformed to conv2d layers.

  Args:
    inputs: a tensor of size [batch_size, 227, 227, 3].
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    scope: Optional scope for the variables.

  Returns:
    The last layer containing a segmentation map of an image.
  """

    net = layers.conv2d(inputs,
                        96, [11, 11],
                        4,
                        padding='VALID',
                        scope='conv1')
    net = layers.conv2d(net, 192, 3, 2, padding='VALID', scope='pconv1')
    net = layers.conv2d(net, 192, [5, 5], padding='VALID', scope='conv2')
    net = layers.conv2d(net, 384, 3, 2, padding='VALID', scope='pconv2')
    net = layers.conv2d(net, 384, [3, 3], padding='VALID', scope='conv3')
    net = layers.conv2d(net, 384, [3, 3], padding='VALID', scope='conv4')
    net = layers.conv2d(net, 256, [3, 3], padding='VALID', scope='conv5')

    # Convolution net
    with arg_scope([layers.conv2d],
                   weights_initializer=trunc_normal(0.005),
                   biases_initializer=init_ops.constant_initializer(0.1)):

        net = layers.conv2d(net, 4096, [5, 5], padding='VALID', scope='fc6')
        net = layers_lib.dropout(net,
                                 dropout_keep_prob,
                                 is_training=is_training,
                                 scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(net,
                                 dropout_keep_prob,
                                 is_training=is_training,
                                 scope='dropout7')
        net = layers.conv2d(
            net,
            2,
            [1, 1],  # Prediction is either 'car' or 'background' for Carvana.
            padding='VALID',
            activation_fn=tf.nn.sigmoid,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')

    # Deconvolution net
    with arg_scope([layers.conv2d_transpose],
                   padding='VALID',
                   activation_fn=nn_ops.relu):
        net = layers.conv2d_transpose(net, 4096, 1, scope='convt9')
        net = layers.conv2d_transpose(net, 4096, 1, scope='convt10')
        net = layers.conv2d_transpose(net, 256, 5, scope='convt11')
        net = layers.conv2d_transpose(net, 384, 3, scope='convt12')
        net = layers.conv2d_transpose(net, 384, 3, scope='convt13')
        net = layers.conv2d_transpose(net, 384, 3, scope='convt14')
        net = layers.conv2d_transpose(net, 192, 3, 2, scope='convt15')
        net = layers.conv2d_transpose(net, 192, 5, scope='convt16')
        net = layers.conv2d_transpose(net, 96, 3, 2, scope='convt17')
        net = layers.conv2d_transpose(net,
                                      2,
                                      11,
                                      4,
                                      activation_fn=tf.nn.sigmoid,
                                      scope='convt18')

    return net
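
A minimal usage sketch for the segmentation variant above, assuming 227x227 inputs; with that size the encoder bottoms out at a 1x1 map and the transposed convolutions expand it back to 227x227:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 227, 227, 3], name='images')
seg_map = alexnet_v2(images, is_training=False)
# seg_map has shape [batch, 227, 227, 2]: per-pixel 'car' vs 'background' scores.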
Example #27
def vgg_16(inputs, is_training=True, dropout_keep_prob=0.5, scope='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    Note: All the fully_connected layers have been transformed to conv2d layers.
    To use in classification mode, resize input to 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      scope: Optional scope for the variables.

    Returns:
      the fc7 activations, a tensor of size [batch_size, 4096].
    """
    with slim.arg_scope(nets.vgg.vgg_arg_scope()):
        with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with arg_scope([
                    layers.conv2d, layers_lib.fully_connected,
                    layers_lib.max_pool2d
            ],
                           outputs_collections=end_points_collection):
                net = layers_lib.repeat(inputs,
                                        2,
                                        layers.conv2d,
                                        64, [3, 3],
                                        scope='conv1')
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
                net = layers_lib.repeat(net,
                                        2,
                                        layers.conv2d,
                                        128, [3, 3],
                                        scope='conv2')
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
                net = layers_lib.repeat(net,
                                        3,
                                        layers.conv2d,
                                        256, [3, 3],
                                        scope='conv3')
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
                net = layers_lib.repeat(net,
                                        3,
                                        layers.conv2d,
                                        512, [3, 3],
                                        scope='conv4')
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
                net = layers_lib.repeat(net,
                                        3,
                                        layers.conv2d,
                                        512, [3, 3],
                                        scope='conv5')
                net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
                # Use conv2d instead of fully_connected layers.
                net = layers.conv2d(net,
                                    4096, [7, 7],
                                    padding='VALID',
                                    scope='fc6')
                net = layers_lib.dropout(net,
                                         dropout_keep_prob,
                                         is_training=is_training,
                                         scope='dropout6')
                fc7 = layers.conv2d(net, 4096, [1, 1], scope='fc7')
                fc7 = tf.squeeze(fc7, axis=(1, 2))
                # fc8 = layers.conv2d(
                #     fc7,
                #     num_classes, [1, 1],
                #     activation_fn=None,
                #     normalizer_fn=None,
                #     scope='fc8')
                return fc7
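
A minimal usage sketch, assuming 224x224 inputs and that slim / nets (tf.contrib.slim and its nets package) are imported as used above; this function returns the fc7 activations rather than class logits:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
fc7_features = vgg_16(images, is_training=False)   # shape [batch, 4096]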
Example #28
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
  """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"

  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    dropout_keep_prob: the percentage of activation values that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller
      than 300x300 pixels, in which case the output logit layer
      does not contain spatial information and can be removed.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  depth = lambda d: max(int(d * depth_multiplier), min_depth)

  with variable_scope.variable_scope(
      scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v3_base(
          inputs,
          scope=scope,
          min_depth=min_depth,
          depth_multiplier=depth_multiplier)

      # Auxiliary Head logits
      with arg_scope(
          [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
          stride=1,
          padding='SAME'):
        aux_logits = end_points['Mixed_6e']
        with variable_scope.variable_scope('AuxLogits'):
          aux_logits = layers_lib.avg_pool2d(
              aux_logits, [5, 5],
              stride=3,
              padding='VALID',
              scope='AvgPool_1a_5x5')
          aux_logits = layers.conv2d(
              aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')

          # Shape of feature map before the final layer.
          kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
          aux_logits = layers.conv2d(
              aux_logits,
              depth(768),
              kernel_size,
              weights_initializer=trunc_normal(0.01),
              padding='VALID',
              scope='Conv2d_2a_{}x{}'.format(*kernel_size))
          aux_logits = layers.conv2d(
              aux_logits,
              num_classes, [1, 1],
              activation_fn=None,
              normalizer_fn=None,
              weights_initializer=trunc_normal(0.001),
              scope='Conv2d_2b_1x1')
          if spatial_squeeze:
            aux_logits = array_ops.squeeze(
                aux_logits, [1, 2], name='SpatialSqueeze')
          end_points['AuxLogits'] = aux_logits

      # Final pooling and prediction
      with variable_scope.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
        net = layers_lib.avg_pool2d(
            net,
            kernel_size,
            padding='VALID',
            scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 2048
        net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        end_points['PreLogits'] = net
        # 2048
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
        # 1000
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
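
A minimal usage sketch for this inception_v3, assuming the 299x299 default input size and that inception_v3_base and the other helpers above are in scope:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='images')
logits, end_points = inception_v3(images, num_classes=1000, is_training=False)
aux_logits = end_points['AuxLogits']         # auxiliary classifier head
predictions = end_points['Predictions']      # softmax probabilities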
Example #29
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7a',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None,
                      is_training=False):
    """Inception model from http://arxiv.org/abs/1512.00567.

    Constructs an Inception v3 network from inputs to the given final endpoint.
    This method can construct the network up to the final inception block
    Mixed_7c.

    Note that the names of the layers in the paper do not correspond to the names
    of the endpoints registered by this function although they build the same
    network.

    Here is a mapping from the old_names to the new names:
    Old name          | New name
    =======================================
    conv0             | Conv2d_1a_3x3
    conv1             | Conv2d_2a_3x3
    conv2             | Conv2d_2b_3x3
    pool1             | MaxPool_3a_3x3
    conv3             | Conv2d_3b_1x1
    conv4             | Conv2d_4a_3x3
    pool2             | MaxPool_5a_3x3
    mixed_35x35x256a  | Mixed_5b
    mixed_35x35x288a  | Mixed_5c
    mixed_35x35x288b  | Mixed_5d
    mixed_17x17x768a  | Mixed_6a
    mixed_17x17x768b  | Mixed_6b
    mixed_17x17x768c  | Mixed_6c
    mixed_17x17x768d  | Mixed_6d
    mixed_17x17x768e  | Mixed_6e
    mixed_8x8x1280a   | Mixed_7a
    mixed_8x8x2048a   | Mixed_7b
    mixed_8x8x2048b   | Mixed_7c

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      final_endpoint: specifies the endpoint to construct the network up to. It
        can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
        'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
        'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
        'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
      min_depth: Minimum depth value (number of channels) for all convolution ops.
        Enforced when depth_multiplier < 1, and not an active constraint when
        depth_multiplier >= 1.
      depth_multiplier: Float multiplier for the depth (number of channels)
        for all convolution ops. The value must be greater than zero. Typical
        usage will be to set this value in (0, 1) to reduce the number of
        parameters or computation cost of the model.
      scope: Optional variable_scope.
      is_training: whether or not the model is being trained; used by the
        dropout layers this variant inserts into the base network.

    Returns:
      tensor_out: output tensor corresponding to the final_endpoint.
      end_points: a set of activations for external use, for example summaries or
                  losses.

    Raises:
      ValueError: if final_endpoint is not set to one of the predefined values,
                  or depth_multiplier <= 0.
    """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}

    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * depth_multiplier), min_depth)

    with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
        with arg_scope(
            [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                stride=1,
                padding='VALID'):
            # 299 x 299 x 3
            end_point = 'Conv2d_1a_3x3'
            net = layers.conv2d(inputs,
                                depth(32), [3, 3],
                                stride=2,
                                scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 149 x 149 x 32
            end_point = 'Conv2d_2a_3x3'
            net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 147 x 147 x 32
            end_point = 'Conv2d_2b_3x3'
            net = layers.conv2d(net,
                                depth(64), [3, 3],
                                padding='SAME',
                                scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 147 x 147 x 64
            end_point = 'MaxPool_3a_3x3'
            net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 73 x 73 x 64
            end_point = 'Conv2d_3b_1x1'
            net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
            net = layers_lib.dropout(net,
                                     keep_prob=0.9,
                                     is_training=is_training)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 73 x 73 x 80.
            end_point = 'Conv2d_4a_3x3'
            net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 71 x 71 x 192.
            end_point = 'MaxPool_5a_3x3'
            net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # 35 x 35 x 192.

        # Inception blocks
        with arg_scope(
            [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                stride=1,
                padding='SAME'):
            # mixed: 35 x 35 x 256.
            end_point = 'Mixed_5b'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(48), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(64), [5, 5],
                                             scope='Conv2d_0b_5x5')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0c_3x3')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(32), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed_1: 35 x 35 x 288.
            end_point = 'Mixed_5c'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(48), [1, 1],
                                             scope='Conv2d_0b_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(64), [5, 5],
                                             scope='Conv_1_0c_5x5')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0c_3x3')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed_2: 35 x 35 x 288.
            end_point = 'Mixed_5d'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(48), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(64), [5, 5],
                                             scope='Conv2d_0b_5x5')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0c_3x3')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            net = layers_lib.dropout(net,
                                     keep_prob=0.8,
                                     is_training=is_training)
            # mixed_3: 17 x 17 x 768.
            end_point = 'Mixed_6a'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(384), [3, 3],
                                             stride=2,
                                             padding='VALID',
                                             scope='Conv2d_1a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(64), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(96), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(96), [3, 3],
                                             stride=2,
                                             padding='VALID',
                                             scope='Conv2d_1a_1x1')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers_lib.max_pool2d(net, [3, 3],
                                                     stride=2,
                                                     padding='VALID',
                                                     scope='MaxPool_1a_3x3')
                net = array_ops.concat([branch_0, branch_1, branch_2], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed4: 17 x 17 x 768.
            end_point = 'Mixed_6b'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(128), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(128), [1, 7],
                                             scope='Conv2d_0b_1x7')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0c_7x1')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(128), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(128), [7, 1],
                                             scope='Conv2d_0b_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(128), [1, 7],
                                             scope='Conv2d_0c_1x7')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(128), [7, 1],
                                             scope='Conv2d_0d_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0e_1x7')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed_5: 17 x 17 x 768.
            end_point = 'Mixed_6c'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(160), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(160), [1, 7],
                                             scope='Conv2d_0b_1x7')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0c_7x1')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(160), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [7, 1],
                                             scope='Conv2d_0b_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [1, 7],
                                             scope='Conv2d_0c_1x7')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [7, 1],
                                             scope='Conv2d_0d_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0e_1x7')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # mixed_6: 17 x 17 x 768.
            end_point = 'Mixed_6d'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(160), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(160), [1, 7],
                                             scope='Conv2d_0b_1x7')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0c_7x1')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(160), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [7, 1],
                                             scope='Conv2d_0b_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [1, 7],
                                             scope='Conv2d_0c_1x7')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(160), [7, 1],
                                             scope='Conv2d_0d_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0e_1x7')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed_7: 17 x 17 x 768.
            end_point = 'Mixed_6e'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0b_1x7')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0c_7x1')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0b_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0c_1x7')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0d_7x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0e_1x7')
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            net = layers_lib.dropout(net,
                                     keep_prob=0.8,
                                     is_training=is_training)
            # mixed_8: 8 x 8 x 1280.
            end_point = 'Mixed_7a'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_0 = layers.conv2d(branch_0,
                                             depth(320), [3, 3],
                                             stride=2,
                                             padding='VALID',
                                             scope='Conv2d_1a_3x3')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [1, 7],
                                             scope='Conv2d_0b_1x7')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [7, 1],
                                             scope='Conv2d_0c_7x1')
                    branch_1 = layers.conv2d(branch_1,
                                             depth(192), [3, 3],
                                             stride=2,
                                             padding='VALID',
                                             scope='Conv2d_1a_3x3')
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers_lib.max_pool2d(net, [3, 3],
                                                     stride=2,
                                                     padding='VALID',
                                                     scope='MaxPool_1a_3x3')
                net = array_ops.concat([branch_0, branch_1, branch_2], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
            # mixed_9: 8 x 8 x 2048.
            end_point = 'Mixed_7b'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(320), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(384), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = array_ops.concat([
                        layers.conv2d(branch_1,
                                      depth(384), [1, 3],
                                      scope='Conv2d_0b_1x3'),
                        layers.conv2d(branch_1,
                                      depth(384), [3, 1],
                                      scope='Conv2d_0b_3x1')
                    ], 3)
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(448), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(384), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_2 = array_ops.concat([
                        layers.conv2d(branch_2,
                                      depth(384), [1, 3],
                                      scope='Conv2d_0c_1x3'),
                        layers.conv2d(branch_2,
                                      depth(384), [3, 1],
                                      scope='Conv2d_0d_3x1')
                    ], 3)
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points

            # mixed_10: 8 x 8 x 2048.
            end_point = 'Mixed_7c'
            with variable_scope.variable_scope(end_point):
                with variable_scope.variable_scope('Branch_0'):
                    branch_0 = layers.conv2d(net,
                                             depth(320), [1, 1],
                                             scope='Conv2d_0a_1x1')
                with variable_scope.variable_scope('Branch_1'):
                    branch_1 = layers.conv2d(net,
                                             depth(384), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_1 = array_ops.concat([
                        layers.conv2d(branch_1,
                                      depth(384), [1, 3],
                                      scope='Conv2d_0b_1x3'),
                        layers.conv2d(branch_1,
                                      depth(384), [3, 1],
                                      scope='Conv2d_0c_3x1')
                    ], 3)
                with variable_scope.variable_scope('Branch_2'):
                    branch_2 = layers.conv2d(net,
                                             depth(448), [1, 1],
                                             scope='Conv2d_0a_1x1')
                    branch_2 = layers.conv2d(branch_2,
                                             depth(384), [3, 3],
                                             scope='Conv2d_0b_3x3')
                    branch_2 = array_ops.concat([
                        layers.conv2d(branch_2,
                                      depth(384), [1, 3],
                                      scope='Conv2d_0c_1x3'),
                        layers.conv2d(branch_2,
                                      depth(384), [3, 1],
                                      scope='Conv2d_0d_3x1')
                    ], 3)
                with variable_scope.variable_scope('Branch_3'):
                    branch_3 = layers_lib.avg_pool2d(net, [3, 3],
                                                     scope='AvgPool_0a_3x3')
                    branch_3 = layers.conv2d(branch_3,
                                             depth(192), [1, 1],
                                             scope='Conv2d_0b_1x1')
                net = array_ops.concat(
                    [branch_0, branch_1, branch_2, branch_3], 3)
            end_points[end_point] = net
            if end_point == final_endpoint:
                return net, end_points
        raise ValueError('Unknown final endpoint %s' % final_endpoint)
示例#30
0
def vgg_16(inputs, boxes, box_idx, scope='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
    inputs -= tf.constant([123.68, 116.779, 103.939])
    inputs /= 255
    with variable_scope.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.original_name_scope + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with arg_scope(
            [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
                outputs_collections=end_points_collection):
            net = layers_lib.repeat(inputs,
                                    2,
                                    layers.conv2d,
                                    64, [3, 3],
                                    scope='conv1',
                                    trainable=False)
            net = layers_lib.max_pool2d(net, [2, 2],
                                        scope='pool1',
                                        padding="SAME")
            net = layers_lib.repeat(net,
                                    2,
                                    layers.conv2d,
                                    128, [3, 3],
                                    scope='conv2',
                                    trainable=False)
            net = layers_lib.max_pool2d(net, [2, 2],
                                        scope='pool2',
                                        padding="SAME")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    256, [3, 3],
                                    scope='conv3')
            net = layers_lib.max_pool2d(net, [2, 2],
                                        scope='pool3',
                                        padding="SAME")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv4')
            net = layers_lib.max_pool2d(net, [2, 2],
                                        scope='pool4',
                                        padding="SAME")
            net = layers_lib.repeat(net,
                                    3,
                                    layers.conv2d,
                                    512, [3, 3],
                                    scope='conv5')
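            # roi_pooling is defined elsewhere in this example's codebase; it pools a
            # fixed-size feature map from conv5 for each box (here 7x7, to match the
            # VALID 7x7 fc6 conv below), with box_idx mapping each box to its image.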
            net = roi_pooling(net, boxes, box_idx)
            # Use conv2d instead of fully_connected layers.
            net = layers.conv2d(net,
                                4096, [7, 7],
                                padding='VALID',
                                scope='fc6')
            net = layers_lib.dropout(net,
                                     keep_prob=0.5,
                                     is_training=True,
                                     scope='dropout6')
            net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
            net = layers_lib.dropout(net,
                                     keep_prob=0.5,
                                     is_training=True,
                                     scope='dropout7')
            net = tf.squeeze(net, axis=[1, 2])
            return net
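
# A minimal usage sketch for the vgg_16 feature extractor above. The placeholder
# shapes, tensor names and the [y1, x1, y2, x2] box layout are assumptions; the
# contrib-era imports and roi_pooling are taken from the surrounding examples.
def extract_roi_features_example():
    images = tf.placeholder(tf.float32, [None, None, None, 3], name='images')
    boxes = tf.placeholder(tf.float32, [None, 4], name='boxes')    # one row per region of interest
    box_idx = tf.placeholder(tf.int32, [None], name='box_idx')     # image index for each box
    # Returns one fc7 feature vector per box.
    return vgg_16(images, boxes, box_idx)
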
def stack_blocks_dense(net,
                       blocks,
                       output_stride=None,
                       outputs_collections=None,
                       dropout=False):
    """Stacks ResNet `Blocks` and controls output feature density.

  First, this function creates scopes for the ResNet in the form of
  'block_name/unit_1', 'block_name/unit_2', etc.

  Second, this function allows the user to explicitly control the ResNet
  output_stride, which is the ratio of the input to output spatial resolution.
  This is useful for dense prediction tasks such as semantic segmentation or
  object detection.

  Most ResNets consist of 4 ResNet blocks and subsample the activations by a
  factor of 2 when transitioning between consecutive ResNet blocks. This results
  in a nominal ResNet output_stride equal to 8. If we set the output_stride to
  half the nominal network stride (e.g., output_stride=4), then we compute
  responses twice.

  Control of the output feature density is implemented by atrous convolution.

  Args:
    net: A `Tensor` of size [batch, height, width, channels].
    blocks: A list of length equal to the number of ResNet `Blocks`. Each
      element is a ResNet `Block` object describing the units in the `Block`.
    output_stride: If `None`, then the output will be computed at the nominal
      network stride. If output_stride is not `None`, it specifies the requested
      ratio of input to output spatial resolution, which needs to be equal to
      the product of unit strides from the start up to some level of the ResNet.
      For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1,
      then valid values for the output_stride are 1, 2, 6, 24 or None (which
      is equivalent to output_stride=24).
    outputs_collections: Collection to add the ResNet block outputs.
    dropout: Whether to apply dropout (keep_prob=0.5) after the units of each
      block that contains more than one unit.

  Returns:
    net: Output tensor with stride equal to the specified output_stride.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
    # The current_stride variable keeps track of the effective stride of the
    # activations. This allows us to invoke atrous convolution whenever applying
    # the next residual unit would result in the activations having stride larger
    # than the target output_stride.
    current_stride = 1

    # The atrous convolution rate parameter.
    rate = 1
    for block in blocks:
        with variable_scope.variable_scope(block.scope, 'block', [net]) as sc:
            for i, unit in enumerate(block.args):
                if output_stride is not None and current_stride > output_stride:
                    raise ValueError(
                        'The target output_stride cannot be reached.')

                with variable_scope.variable_scope('unit_%d' % (i + 1),
                                                   values=[net]):
                    # If we have reached the target output_stride, then we need to employ
                    # atrous convolution with stride=1 and multiply the atrous rate by the
                    # current unit's stride for use in subsequent layers.
                    if output_stride is not None and current_stride == output_stride:
                        net = block.unit_fn(net,
                                            rate=rate,
                                            **dict(unit, stride=1))
                        rate *= unit.get('stride', 1)

                    else:
                        net = block.unit_fn(net, rate=1, **unit)
                        current_stride *= unit.get('stride', 1)
            # Optionally apply dropout once after the block's units (only when the
            # block contains more than one unit).
            if dropout and i > 0:
                net = layers.dropout(net, keep_prob=0.5)
            net = utils.collect_named_outputs(outputs_collections, sc.name,
                                              net)

    if output_stride is not None and current_stride != output_stride:
        raise ValueError('The target output_stride cannot be reached.')

    return net
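
# A small standalone sketch (not part of the example above) of the stride/rate
# bookkeeping that stack_blocks_dense performs: given per-unit strides, it reports
# the final effective stride and the atrous rate each unit would run with. For
# strides (1, 2, 1, 3, 4, 1) the reachable output_stride values are 1, 2, 6 and 24,
# matching the docstring.
def sketch_output_stride(unit_strides, output_stride=None):
    current_stride, rate, rates = 1, 1, []
    for s in unit_strides:
        if output_stride is not None and current_stride == output_stride:
            rates.append(rate)        # unit runs with stride 1, dilated by `rate`
            rate *= s
        else:
            rates.append(1)
            current_stride *= s
    return current_stride, rates

# sketch_output_stride((1, 2, 1, 3, 4, 1))                   -> (24, [1, 1, 1, 1, 1, 1])
# sketch_output_stride((1, 2, 1, 3, 4, 1), output_stride=6)  -> (6, [1, 1, 1, 1, 1, 4])
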
示例#32
0
def inception_g(inputs,
                num_classes=5,
                dropout_keep_prob=0.8,
                prediction_fn=layers_lib.softmax,
                scope='InceptionG'):
    end_points = {}

    # conv2d
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='VALID'):
        # 48 x 48 x 1 
        end_point = 'Conv2d_1a_3x3'
        net = layers.conv2d(inputs, 32, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        # 23 x 23 x 32
        end_point = 'Conv2d_2a_3x3'
        net = layers.conv2d(net, 64, [3, 3], scope=end_point)
        end_points[end_point] = net

        # 21 x 21 x 64
        end_point = 'MaxPool_3a_3x3'
        net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net

        # 10 x 10 x 64
        end_point = 'Conv2d_3b_1x1'
        net = layers.conv2d(net, 80, [1, 1], scope=end_point)
        end_points[end_point] = net

        # 10 x 10 x 80
        end_point = 'Conv2d_4a_3x3'
        net = layers.conv2d(net, 128, [3, 3], scope=end_point)
        end_points[end_point] = net
    
    # Inception blocks
    with arg_scope(
        [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
        stride=1,
        padding='SAME'):
        # 8 x 8 x 128
        end_point = 'Mixed_5a'
        branch_0 = layers.conv2d(
            net, 64, [1, 1], scope='Conv2d_0a_1x1')
        branch_1 = layers.conv2d(
            net, 48, [1, 1], scope='Conv2d_1a_1x1')
        branch_1 = layers.conv2d(
            branch_1, 64, [5, 5], scope='Conv2d_1b_5x5')
        branch_2 = layers.conv2d(
            net, 64, [1, 1], scope='Conv2d_2a_1x1')
        branch_2 = layers.conv2d(
            branch_2, 96, [3, 3], scope='Conv2d_2b_3x3')
        branch_2 = layers.conv2d(
            branch_2, 96, [3, 3], scope='Conv2d_2c_3x3')
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_3a_3x3')
        branch_3 = layers.conv2d(
            branch_3, 64, [1, 1], scope='Conv2d_3c_1x1')
        net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
        end_points[end_point] = net
    
    # average pooling, dropout and softmax
    net = layers_lib.avg_pool2d(
            net,
            [8,8],
            padding='VALID',
            scope='AvgPool_0a_7x7')
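    # Note: no is_training flag is threaded through here, so dropout is applied
    # unconditionally (layers_lib.dropout defaults to is_training=True).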
    net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
    end_points['PreLogits'] = net
    logits = layers.conv2d(
            net,
            num_classes,    # num classes
            [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_1c_1x1')
    logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

    end_points['Logits'] = logits
    end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points
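
# A brief usage sketch for inception_g. The 48 x 48 x 1 input shape comes from
# the comments above; the placeholder name and the assumption that `tf` is
# imported follow the other examples in this document.
def inception_g_usage_example():
    images = tf.placeholder(tf.float32, [None, 48, 48, 1], name='gray_images')
    logits, end_points = inception_g(images, num_classes=5)
    return logits, end_points['Predictions']   # raw logits and softmax probabilities
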
示例#33
0
def model_fn(features, labels, mode, params):
  """
  Based on https://github.com/tensorflow/tpu/blob/master/models/experimental/inception/inception_v2_tpu_model.py
  :param features:
  :param labels:
  :param mode:
  :param params:
  :return:
  """
  tf.summary.image('0_input', features, max_outputs=4)
  training = mode == tf.estimator.ModeKeys.TRAIN

  # 224 x 224 x 3
  end_point = 'Conv2d_1a_7x7'
  net = layers.conv2d(features, 64, [7, 7], stride=2, weights_initializer=trunc_normal(1.0), activation_fn=None, scope=end_point)
  net = tf.layers.batch_normalization(net, training=training, name='{}_bn'.format(end_point))
  net = tf.nn.relu(net, name='{}_act'.format(end_point))
  tf.summary.image('1_{}'.format(end_point), net[:, :, :, 0:3], max_outputs=4)

  # 112 x 112 x 64
  end_point = 'MaxPool_2a_3x3'
  net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2, padding='SAME')
  tf.summary.image('2_{}'.format(end_point), net[:, :, :, 0:3], max_outputs=4)

  # 56 x 56 x 64
  end_point = 'Conv2d_2b_1x1'
  net = layers.conv2d(net, 64, [1, 1], activation_fn=None, scope=end_point, weights_initializer=trunc_normal(0.1))
  net = tf.layers.batch_normalization(net, training=training, name='{}_bn'.format(end_point))
  net = tf.nn.relu(net, name='{}_act'.format(end_point))
  tf.summary.image('3_{}'.format(end_point), net[:, :, :, 0:3], max_outputs=4)

  # 56 x 56 x 64
  end_point = 'Conv2d_2c_3x3'
  net = layers.conv2d(net, 192, [3, 3], activation_fn=None, scope=end_point)
  net = tf.layers.batch_normalization(net, training=training, name='{}_bn'.format(end_point))
  net = tf.nn.relu(net, name='{}_act'.format(end_point))
  tf.summary.image('4_{}'.format(end_point), net[:, :, :, 0:3], max_outputs=4)

  # 56 x 56 x 192
  end_point = 'MaxPool_3a_3x3'
  net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2, padding='SAME')
  tf.summary.image('5_{}'.format(end_point), net[:, :, :, 0:3], max_outputs=4)

  # 28 x 28 x 192
  # Inception module.
  end_point = 'Mixed_3b'
  with variable_scope.variable_scope(end_point):
    with variable_scope.variable_scope('Branch_0'):
      branch_0 = layers.conv2d(net, 64, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
      branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
      branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
    with variable_scope.variable_scope('Branch_1'):
      branch_1 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
      branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
      branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

      branch_1 = layers.conv2d(branch_1, 64, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
      branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
      branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
    with variable_scope.variable_scope('Branch_2'):
      branch_2 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
      branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
      branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

      branch_2 = layers.conv2d(branch_2, 96, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
      branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
      branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

      branch_2 = layers.conv2d(branch_2, 96, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
      branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
      branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
    with variable_scope.variable_scope('Branch_3'):
      branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
      branch_3 = layers.conv2d(branch_3, 32, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
      branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
      branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
    net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 28 x 28 x 256
    end_point = 'Mixed_3c'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 64, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 96, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 96, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 96, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 64, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 28 x 28 x 320
    end_point = 'Mixed_4a'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 128, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_0 = layers.conv2d(branch_0, 160, [3, 3], stride=2, activation_fn=None, scope='Conv2d_1a_3x3')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_1a_3x3'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_1a_3x3'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 96, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_1 = layers.conv2d(branch_1, 96, [3, 3], stride=2, activation_fn=None, scope='Conv2d_1a_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_1a_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_1a_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2, padding='SAME', scope='MaxPool_1a_3x3')
      net = array_ops.concat([branch_0, branch_1, branch_2], 3)

    # 14 x 14 x 576
    end_point = 'Mixed_4b'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 224, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 64, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 96, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 96, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 128, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 128, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 128, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 14 x 14 x 576
    end_point = 'Mixed_4c'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 192, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 96, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 128, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 96, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 128, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 128, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 128, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 14 x 14 x 576
    end_point = 'Mixed_4d'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 160, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 128, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 160, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 128, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 160, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 160, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 96, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 14 x 14 x 576
    end_point = 'Mixed_4e'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 96, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 128, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 192, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 160, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 192, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 192, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 96, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 14 x 14 x 576
    end_point = 'Mixed_5a'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 128, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_0 = layers.conv2d(branch_0, 192, [3, 3], stride=2, activation_fn=None, scope='Conv2d_1a_3x3')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_1a_3x3'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_1a_3x3'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 192, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 256, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_1 = layers.conv2d(branch_1, 256, [3, 3], stride=2, activation_fn=None, scope='Conv2d_1a_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_1a_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_1a_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers_lib.max_pool2d(net, [3, 3], stride=2, padding='SAME', scope='MaxPool_1a_3x3')
      net = array_ops.concat([branch_0, branch_1, branch_2], 3)

    # 7 x 7 x 1024
    end_point = 'Mixed_5b'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 352, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 192, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 320, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 160, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 224, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 224, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.avg_pool2d(net, [3, 3], padding='SAME', stride=1, scope='AvgPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 128, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

    # 7 x 7 x 1024
    end_point = 'Mixed_5c'
    with variable_scope.variable_scope(end_point):
      with variable_scope.variable_scope('Branch_0'):
        branch_0 = layers.conv2d(net, 352, [1, 1], activation_fn=None, scope='Conv2d_0a_1x1')
        branch_0 = tf.layers.batch_normalization(branch_0, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_0 = tf.nn.relu(branch_0, name='{}_act'.format('Conv2d_0a_1x1'))
      with variable_scope.variable_scope('Branch_1'):
        branch_1 = layers.conv2d(net, 192, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_1 = layers.conv2d(branch_1, 320, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_1 = tf.layers.batch_normalization(branch_1, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_1 = tf.nn.relu(branch_1, name='{}_act'.format('Conv2d_0b_3x3'))
      with variable_scope.variable_scope('Branch_2'):
        branch_2 = layers.conv2d(net, 192, [1, 1], weights_initializer=trunc_normal(0.09), activation_fn=None, scope='Conv2d_0a_1x1')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0a_1x1'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0a_1x1'))

        branch_2 = layers.conv2d(branch_2, 224, [3, 3], activation_fn=None, scope='Conv2d_0b_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0b_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0b_3x3'))

        branch_2 = layers.conv2d(branch_2, 224, [3, 3], activation_fn=None, scope='Conv2d_0c_3x3')
        branch_2 = tf.layers.batch_normalization(branch_2, training=training, name='{}_bn'.format('Conv2d_0c_3x3'))
        branch_2 = tf.nn.relu(branch_2, name='{}_act'.format('Conv2d_0c_3x3'))
      with variable_scope.variable_scope('Branch_3'):
        branch_3 = layers_lib.max_pool2d(net, [3, 3], padding='SAME', stride=1, scope='MaxPool_0a_3x3')
        branch_3 = layers.conv2d(branch_3, 128, [1, 1], weights_initializer=trunc_normal(0.1), activation_fn=None, scope='Conv2d_0b_1x1')
        branch_3 = tf.layers.batch_normalization(branch_3, training=training, name='{}_bn'.format('Conv2d_0b_1x1'))
        branch_3 = tf.nn.relu(branch_3, name='{}_act'.format('Conv2d_0b_1x1'))
      net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)

  with variable_scope.variable_scope('Logits'):
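    # Shrink the 7x7 pooling kernel when the incoming feature map is smaller
    # than 7x7 (e.g. for input images smaller than 224x224).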
    kernel_size = util._reduced_kernel_size_for_small_input(net, [7, 7])
    net = layers_lib.avg_pool2d(net, kernel_size, stride=1, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size))

    # 1 x 1 x 1024
    net = layers_lib.dropout(net, keep_prob=params['dropout_keep_prob'], is_training=training, scope='Dropout_1b')
    logits = layers.conv2d(net, params['num_classes'], [1, 1], normalizer_fn=None, activation_fn=None, scope='Conv2d_1c_1x1')
    if params['spatial_squeeze']:
      logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

  predictions = {
    'argmax': tf.argmax(logits, axis=1, name='prediction_classes'),
    'predictions': layers_lib.softmax(logits, scope='Predictions'),
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

  loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels)
  tf.summary.scalar('loss', loss)

  eval_metric_ops = {
    'accuracy_val': tf.metrics.accuracy(labels=labels, predictions=predictions['argmax'])
  }

  if mode == tf.estimator.ModeKeys.EVAL:
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

  optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])
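  # tf.layers.batch_normalization registers its moving-mean/variance update ops
  # in the UPDATE_OPS collection; they must run alongside the train op, hence
  # the control dependency below.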
  extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  with tf.control_dependencies(extra_update_ops):
    train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

  tf.summary.scalar('accuracy_train', eval_metric_ops['accuracy_val'][1])
  tf.summary.histogram('labels', labels)
  tf.summary.histogram('predictions', predictions['argmax'])

  return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
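
# Usage sketch (an assumption, not part of the original example): wiring a
# model_fn with this shape -- here assumed to be named `model_fn` -- into
# tf.estimator.Estimator. The params keys mirror the ones referenced above.
def _toy_input_fn():
  # Random 224x224 RGB images and integer labels, just to exercise the graph.
  images = tf.random_uniform([8, 224, 224, 3])
  labels = tf.random_uniform([8], maxval=1000, dtype=tf.int32)
  return images, labels

estimator = tf.estimator.Estimator(
    model_fn=model_fn,  # the function this snippet belongs to (name assumed)
    params={
        'num_classes': 1000,
        'dropout_keep_prob': 0.8,
        'spatial_squeeze': True,
        'learning_rate': 0.01,
    })
estimator.train(input_fn=_toy_input_fn, steps=10)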
示例#34
0
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1',
                 use_5x5=None):
  """Defines the Inception V1 architecture.

  This architecture is defined in:

    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  The default image size used to train this network is 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the fraction of activations that are retained.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if False logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.
    use_5x5: If True, the Inception filter sizes are set to 5x5, as specified in
      Figure 2(b) of the paper. The default is 3x3 filter sizes.
      Note: changing this invalidates all existing checkpoints and saved models;
      the network has to be retrained from scratch.
  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.
  """
  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV1', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      # Use the 5x5 filter size from the original paper if requested; the default
      # is 3x3. Note that filter_size is only recorded here and is not explicitly
      # forwarded to inception_v1_base below.
      if use_5x5:
        filter_size = [5, 5]
        filter_size_str = '{}x{}'.format(*filter_size)
      net, end_points = inception_v1_base(inputs, scope=scope)
      with variable_scope.variable_scope('Logits'):
        net = layers_lib.avg_pool2d(
            net, [7, 7], stride=1, scope='MaxPool_0a_7x7')
        net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_0c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')

        end_points['Logits'] = logits
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
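
# Usage sketch (an assumption, not part of the original example): run the
# network on a batch of 224x224 RGB images and read out the softmax output
# that the function stores under end_points['Predictions'].
images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
logits, end_points = inception_v1(images, num_classes=1000, is_training=False)
probabilities = end_points['Predictions']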
示例#35
0
def inception_v2(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV2'):
  """Inception v2 model for classification.

  Constructs an Inception v2 network for classification as described in
  http://arxiv.org/abs/1502.03167.

  The recommended image size used to train this network is 224x224. For image
  sizes that differ substantially, it is recommended to use inception_v2_base()
  and connect custom final layers to the output.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the fraction of activations that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if False logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
        Note that input image sizes other than 224x224 might lead to different
        spatial dimensions, and hence cannot be squeezed. In this event,
        it is best to set spatial_squeeze as False, and perform a reduce_mean
        over the resulting spatial dimensions with sizes exceeding 1.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if depth_multiplier <= 0.
  """
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')

  # Final pooling and prediction
  with variable_scope.variable_scope(
      scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
    with arg_scope(
        [layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
      net, end_points = inception_v2_base(
          inputs,
          scope=scope,
          min_depth=min_depth,
          depth_multiplier=depth_multiplier)
      with variable_scope.variable_scope('Logits'):
        kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
        net = layers_lib.avg_pool2d(
            net,
            kernel_size,
            padding='VALID',
            scope='AvgPool_1a_{}x{}'.format(*kernel_size))
        # 1 x 1 x 1024
        net = layers_lib.dropout(
            net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
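
# Sketch of how min_depth and depth_multiplier interact inside the base network.
# The clamping rule below is the one spelled out in the inception_v3 example that
# follows; inception_v2_base applies the same idea to each convolution's depth.
depth_multiplier, min_depth = 0.5, 16
depth = lambda d: max(int(d * depth_multiplier), min_depth)
print(depth(64), depth(32), depth(16))  # 32 16 16 -- small layers hit the floor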
示例#36
0
def inception_v3(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 min_depth=16,
                 depth_multiplier=1.0,
                 prediction_fn=layers_lib.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV3'):
    """Inception model from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"

  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna.

  With the default arguments this method constructs the exact model defined in
  the paper. However, one can experiment with variations of the inception_v3
  network by changing arguments dropout_keep_prob, min_depth and
  depth_multiplier.

  The default image size used to train this network is 299x299.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the fraction of activations that are retained.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C], if False logits is
        of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse, 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    logits: the pre-softmax activations, a tensor of size
      [batch_size, num_classes]
    end_points: a dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: if 'depth_multiplier' is less than or equal to zero.
  """
    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * depth_multiplier), min_depth)

    with variable_scope.variable_scope(scope,
                                       'InceptionV3', [inputs, num_classes],
                                       reuse=reuse) as scope:
        with arg_scope([layers_lib.batch_norm, layers_lib.dropout],
                       is_training=is_training):
            net, end_points = inception_v3_base(
                inputs,
                scope=scope,
                min_depth=min_depth,
                depth_multiplier=depth_multiplier)

            # Auxiliary Head logits
            with arg_scope(
                [layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
                    stride=1,
                    padding='SAME'):
                aux_logits = end_points['Mixed_6e']
                with variable_scope.variable_scope('AuxLogits'):
                    aux_logits = layers_lib.avg_pool2d(aux_logits, [5, 5],
                                                       stride=3,
                                                       padding='VALID',
                                                       scope='AvgPool_1a_5x5')
                    aux_logits = layers.conv2d(aux_logits,
                                               depth(128), [1, 1],
                                               scope='Conv2d_1b_1x1')

                    # Shape of feature map before the final layer.
                    kernel_size = _reduced_kernel_size_for_small_input(
                        aux_logits, [5, 5])
                    aux_logits = layers.conv2d(
                        aux_logits,
                        depth(768),
                        kernel_size,
                        weights_initializer=trunc_normal(0.01),
                        padding='VALID',
                        scope='Conv2d_2a_{}x{}'.format(*kernel_size))
                    aux_logits = layers.conv2d(
                        aux_logits,
                        num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        weights_initializer=trunc_normal(0.001),
                        scope='Conv2d_2b_1x1')
                    if spatial_squeeze:
                        aux_logits = array_ops.squeeze(aux_logits, [1, 2],
                                                       name='SpatialSqueeze')
                    end_points['AuxLogits'] = aux_logits

            # Final pooling and prediction
            with variable_scope.variable_scope('Logits'):
                kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
                net = layers_lib.avg_pool2d(
                    net,
                    kernel_size,
                    padding='VALID',
                    scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                # 1 x 1 x 2048
                net = layers_lib.dropout(net,
                                         keep_prob=dropout_keep_prob,
                                         scope='Dropout_1b')
                end_points['PreLogits'] = net
                # 2048
                logits = layers.conv2d(net,
                                       num_classes, [1, 1],
                                       activation_fn=None,
                                       normalizer_fn=None,
                                       scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = array_ops.squeeze(logits, [1, 2],
                                               name='SpatialSqueeze')
                # 1000
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits,
                                                      scope='Predictions')
    return logits, end_points
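
# Usage sketch (an assumption, not part of the original example): 299x299 inputs
# plus a down-weighted auxiliary loss read from end_points['AuxLogits'], a common
# recipe when training Inception v3 from scratch. The 0.4 weight is conventional,
# not taken from this snippet.
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
labels = tf.placeholder(tf.int32, [None])
logits, end_points = inception_v3(images, num_classes=1000, is_training=True)
main_loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
aux_loss = 0.4 * tf.losses.sparse_softmax_cross_entropy(
    labels=labels, logits=end_points['AuxLogits'])
total_loss = main_loss + aux_loss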
示例#37
0
def create_model(input_tensor, mode, hyper_params):
    """
    An alexnet network.

    :param input_tensor: The input tensor dict containing a "image" rgb tensor.
    :param mode: Execution mode as a tf.estimator.ModeKeys
    :param hyper_params: The hyper param file.
    :return: A dictionary containing all output tensors.
    """
    model = {}

    spatial_squeeze = False
    dropout_keep_prob = 0.5
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    num_classes = 10

    with tf.variable_scope('alexnet_v2') as scope:
        if mode == tf.estimator.ModeKeys.EVAL:
            scope.reuse_variables()

        net = layers.conv2d(input_tensor,
                            64, [11, 11],
                            4,
                            padding='VALID',
                            scope='conv1')
        model["conv1"] = net
        net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool1')
        model["pool1"] = net
        net = layers.conv2d(net, 192, [5, 5], scope='conv2')
        model["conv2"] = net
        net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool2')
        model["pool2"] = net
        net = layers.conv2d(net, 384, [3, 3], scope='conv3')
        model["conv3"] = net
        net = layers.conv2d(net, 384, [3, 3], scope='conv4')
        model["conv4"] = net
        net = layers.conv2d(net, 256, [3, 3], scope='conv5')
        model["conv5"] = net
        net = layers_lib.max_pool2d(net, [3, 3], 2, scope='pool5')
        model["pool5"] = net

        # Use conv2d instead of fully_connected layers.
        with arg_scope([layers.conv2d],
                       weights_initializer=trunc_normal(0.005),
                       biases_initializer=init_ops.constant_initializer(0.1)):
            net = layers.conv2d(net,
                                4096, [5, 5],
                                padding='VALID',
                                scope='fc6')
            model["fc6"] = net
            net = layers_lib.dropout(net,
                                     dropout_keep_prob,
                                     is_training=is_training,
                                     scope='dropout6')
            net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
            model["fc7"] = net
            net = layers_lib.dropout(net,
                                     dropout_keep_prob,
                                     is_training=is_training,
                                     scope='dropout7')
            net = layers.conv2d(
                net,
                num_classes, [1, 1],
                activation_fn=None,
                normalizer_fn=None,
                biases_initializer=init_ops.zeros_initializer(),
                scope='fc8')

        # Optionally squeeze the spatial dimensions of the logits.
        if spatial_squeeze:
            net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
            model['fc8'] = net

        # Collect the outputs exposed through the network's API.
        model["logits"] = net
        model["probs"] = tf.nn.softmax(net)
    return model
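
# Usage sketch (an assumption, not part of the original snippet): build the model
# in training mode and attach a loss to the logits it returns.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.int32, [None])
model = create_model(images, tf.estimator.ModeKeys.TRAIN, hyper_params=None)
# With spatial_squeeze=False the logits keep shape [batch, 1, 1, 10], so flatten
# them before computing the loss.
logits = tf.reshape(model['logits'], [-1, 10])
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)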