Code Example #1
File: squeezenet_v1.py  Project: liuwang1/tefla
def model(is_training, reuse, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu, w_init=initz.he_normal(
        scale=1), untie_biases=False, **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args)
        net = fire_module(net, 16, 64, name='fire3', **conv_args)
        net = fire_module(net, 32, 128, name='fire4', **conv_args)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = fire_module(net, 32, 128, name='fire5', **conv_args)
        net = fire_module(net, 48, 192, name='fire6', **conv_args)
        net = fire_module(net, 48, 192, name='fire7', **conv_args)
        net = fire_module(net, 64, 256, name='fire8', **conv_args)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = fire_module(net, 64, 256, name='fire9', **conv_args)
        # Reversed avg and conv layers per 'Network in Network'
        net = dropout(net, drop_p=1 - dropout_keep_prob,
                      name='dropout6', **common_args)
        net = conv2d(net, 10, filter_size=(1, 1), name='conv10', **conv_args)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
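fire_module above is tefla's SqueezeNet fire unit: a 1x1 squeeze convolution followed by parallel 1x1 and 3x3 expand convolutions whose outputs are concatenated. As a hedged reference, a minimal sketch of what such a unit computes in plain TF1, assuming the two positional arguments are the squeeze width and the per-branch expand width (the helper name is illustrative, not tefla's API):

import tensorflow as tf

def fire_module_sketch(x, squeeze_depth, expand_depth, name='fire'):
    # Squeeze: 1x1 conv reduces channels; expand: parallel 1x1 and 3x3
    # convs whose outputs are concatenated along the channel axis.
    with tf.variable_scope(name):
        s = tf.layers.conv2d(x, squeeze_depth, 1, activation=tf.nn.relu,
                             name='squeeze1x1')
        e1 = tf.layers.conv2d(s, expand_depth, 1, activation=tf.nn.relu,
                              name='expand1x1')
        e3 = tf.layers.conv2d(s, expand_depth, 3, padding='same',
                              activation=tf.nn.relu, name='expand3x3')
        return tf.concat([e1, e3], axis=3)

Under that reading, fire_module(net, 16, 64) squeezes to 16 channels and emits 64 + 64 = 128, consistent with the widths growing from fire2 through fire9.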
Code Example #2
File: cifar10.py  Project: souravroy0708/tefla
def model(inputs, is_training, reuse, num_classes=10, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    conv_args_fm = make_args(w_init=initz.he_normal(scale=1),
                             untie_biases=False,
                             **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = separable_conv2d(inputs,
                               256,
                               stride=(2, 2),
                               name='conv1',
                               **conv_args)
        # net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args_fm)
        net = bottleneck_simple(net, 16, 64, name='fire3', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire3_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 32, 128, name='fire4', **conv_args_fm)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = bottleneck_simple(net, 32, 128, name='fire5', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire5_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 48, 192, name='fire6', **conv_args_fm)
        net = bottleneck_simple(net, 48, 192, name='fire7', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire7_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 64, 256, name='fire8', **conv_args_fm)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout6',
                      **common_args)
        net = conv2d(net,
                     num_classes,
                     filter_size=(1, 1),
                     name='conv10',
                     **conv_args_fm)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
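Both of these models thread is_training and reuse through common_layer_args, which is how tefla builds a training graph and a weight-sharing evaluation graph from one definition. A minimal usage sketch, assuming CIFAR-10-sized inputs (the placeholder shape is illustrative):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3], name='inputs')
# First call creates the variables (reuse=None / False).
train_end_points = model(inputs, is_training=True, reuse=None)
# Second call shares those variables for evaluation.
eval_end_points = model(inputs, is_training=False, reuse=True)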
Code Example #3
File: aegan.py  Project: souravroy0708/tefla
def encoder(inputs, is_training, reuse, z_dim=512):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=lrelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    conv_args_1st = make_args(batch_norm=None,
                              activation=lrelu,
                              w_init=initz.he_normal(scale=1),
                              untie_biases=False,
                              **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    end_points = {}
    x = inputs
    end_points['inputs'] = x
    x = dropout(x, drop_p=0.2, name="input_dropout1", **common_args)
    x = conv2d(x,
               96,
               filter_size=(5, 5),
               stride=(2, 2),
               name="e_conv1_1",
               **conv_args_1st)
    end_points['e_conv1_1'] = x
    x = conv2d(x, 96, name="e_conv1_2", **conv_args)
    end_points['e_conv1_2'] = x
    x = conv2d(x, 96, stride=(2, 2), name="e_conv1_3", **conv_args)
    end_points['e_conv1_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout1", **common_args)
    x = conv2d(x, 192, name="e_conv2_1", **conv_args)
    end_points['e_conv2_1'] = x
    x = conv2d(x, 192, name="e_conv2_2", **conv_args)
    end_points['e_conv2_2'] = x
    # x = conv2d(x, 192, stride=(2, 2), name="e_conv2_3", **conv_args)
    # end_points['e_conv2_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout2", **common_args)
    # x = conv2d(x, 192, stride=(2, 2), name="e_conv3_1", **conv_args)
    # end_points['e_conv3_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="e_conv4_1", **conv_args)
    end_points['e_conv4_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="e_conv4_2", **conv_args)
    end_points['e_conv4_2'] = x
    x = global_avg_pool(x, name="global_pool")
    end_points['global_pool'] = x
    logits1 = fully_connected(x, z_dim, name="e_logits1", **logits_args)
    logits2 = fully_connected(x, z_dim, name="e_logits2", **logits_args)
    logits2 = tf.tanh(logits2, name='e_logits2_tanh')
    end_points['e_logits1'] = logits1
    end_points['e_logits2'] = logits2
    return end_points
Code Example #4
File: aegan.py  Project: souravroy0708/tefla
def discriminator(inputs, is_training, reuse, num_classes=1):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=lrelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    conv_args_1st = make_args(batch_norm=None,
                              activation=lrelu,
                              w_init=initz.he_normal(scale=1),
                              untie_biases=False,
                              **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    end_points = {}
    x = inputs
    end_points['inputs'] = x
    x = dropout(x, drop_p=0.2, name="input_dropout1", **common_args)
    x = conv2d(x,
               96,
               filter_size=(5, 5),
               stride=(2, 2),
               name="d_conv1_1",
               **conv_args_1st)
    end_points['d_conv1_1'] = x
    x = conv2d(x, 96, name="d_conv1_2", **conv_args)
    end_points['d_conv1_2'] = x
    x = conv2d(x, 96, stride=(2, 2), name="d_conv1_3", **conv_args)
    end_points['d_conv1_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout1", **common_args)
    x = conv2d(x, 192, name="d_conv2_1", **conv_args)
    end_points['d_conv2_1'] = x
    x = conv2d(x, 192, name="d_conv2_2", **conv_args)
    end_points['d_conv2_2'] = x
    # x = conv2d(x, 192, stride=(2, 2), name="d_conv2_3", **conv_args)
    # end_points['d_conv2_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout2", **common_args)
    # x = conv2d(x, 192, stride=(2, 2), name="d_conv3_1", **conv_args)
    # end_points['d_conv3_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_1", **conv_args)
    end_points['d_conv4_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_2", **conv_args)
    end_points['d_conv4_2'] = x
    x = global_avg_pool(x, name="global_pool")
    end_points['global_pool'] = x
    logits = fully_connected(x, num_classes, name="d_logits", **logits_args)
    end_points['logits'] = logits
    end_points['predictions'] = softmax(logits,
                                        name='predictions',
                                        **common_args)
    return end_points
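In a GAN training step this discriminator is usually applied twice, once to real data and once to generator samples, with the second call sharing weights via reuse. A hedged sketch of that wiring (real_images and fake_images stand in for a data batch and generator output not shown here):

d_real = discriminator(real_images, is_training=True, reuse=None)
d_fake = discriminator(fake_images, is_training=True, reuse=True)
# Each call returns an end_points dict, e.g. d_real['logits'] for the loss.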
Code Example #5
def model(inputs,
          is_training,
          reuse,
          dropout_keep_prob=1,
          num_classes=5,
          **kwargs):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(untie_biases=True,
                          batch_norm=batch_norm,
                          **common_args)
    logit_args = make_args(activation=prelu, **common_args)

    net = conv2d(inputs,
                 32,
                 filter_size=(3, 3),
                 stride=(2, 2),
                 name='conv1',
                 **conv_args)
    net = conv2d(net, 64, name='conv2', **conv_args)
    net = bottleneck_v1(net, num_unit=128, name='block_v1_1', **conv_args)
    net = bottleneck_v1(net, num_unit=256, name='block_v1_2', **conv_args)
    net = bottleneck_v1(net, num_unit=728, name='block_v1_3', **conv_args)

    for i in range(8):
        prefix = 'block_v2_' + str(i + 5)
        net = bottleneck_v2(net, num_unit=728, name=prefix, **conv_args)

    net = bottleneck_v1(net, num_unit=1024, name='block_v1_4', **conv_args)
    net = separable_conv2d(net,
                           1536,
                           filter_size=(3, 3),
                           stride=(1, 1),
                           name='sconv1',
                           **conv_args)
    net = separable_conv2d(net,
                           2048,
                           filter_size=(3, 3),
                           stride=(1, 1),
                           name='sconv2',
                           **conv_args)
    with tf.variable_scope('Logits'):
        net = global_avg_pool(net, name='AvgPool_1a')
        net = dropout(net,
                      is_training,
                      drop_p=1 - dropout_keep_prob,
                      name='Dropout_1b')
        logits = fully_connected(net, num_classes, name='logits', **logit_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
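separable_conv2d in this Xception-style model factorizes a convolution into a per-channel (depthwise) spatial filter followed by a 1x1 pointwise projection, which is what makes the 1536- and 2048-wide layers affordable. A rough equivalent in raw TF1, with illustrative names and a channel multiplier of 1:

import tensorflow as tf

def separable_conv_sketch(x, out_channels, name='sconv'):
    in_channels = x.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        # One 3x3 spatial filter per input channel.
        dw = tf.get_variable('depthwise', [3, 3, in_channels, 1])
        # 1x1 pointwise filter mixes channels up to out_channels.
        pw = tf.get_variable('pointwise', [1, 1, in_channels, out_channels])
        return tf.nn.separable_conv2d(x, dw, pw,
                                      strides=[1, 1, 1, 1], padding='SAME')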
Code Example #6
File: densenet.py  Project: openAGI/models
def model(inputs,
          is_training,
          reuse,
          input_size=image_size[0],
          drop_p_conv=0.0,
          drop_p_trans=0.0,
          n_filters=64,
          n_layers=[1, 2, 2, 3],
          num_classes=5, **kwargs):
  common_args = common_layer_args(is_training, reuse)
  conv_args = make_args(
      batch_norm=True,
      activation=prelu,
      w_init=initz.he_normal(scale=1),
      untie_biases=True,
      **common_args)
  fc_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
  logit_args = make_args(activation=None, w_init=initz.he_normal(scale=1), **common_args)
  pred_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
  pool_args = make_args(padding='SAME', filter_size=(2, 2), stride=(2, 2), **common_args)

  x = conv2d(inputs, 48, filter_size=(7, 7), name="conv1", **conv_args)
  x = max_pool(x, name='pool1', **pool_args)
  x = conv2d(x, 64, name="conv2_1", **conv_args)
  x = conv2d(x, 64, name="conv2_2", **conv_args)
  x = max_pool(x, name='pool2', **pool_args)

  # 112
  for block_idx in range(3):
    x, n_filters = dense_block(
        x,
        n_filters,
        num_layers=n_layers[block_idx],
        drop_p=drop_p_conv,
        block_name='dense_' + str(block_idx),
        **conv_args)
    x = trans_block(
        x, n_filters, drop_p=drop_p_trans, block_name='trans_' + str(block_idx), **conv_args)

  x, n_filters = dense_block(
      x, n_filters, num_layers=n_layers[3], drop_p=drop_p_trans, block_name='dense_3', **conv_args)
  # 8
  x = global_avg_pool(x, name='avgpool_1a_8x8')
  logits = fully_connected(x, n_output=num_classes, name="logits", **logit_args)

  predictions = softmax(logits, name='predictions', **common_args)
  return end_points(is_training)
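dense_block returns both the tensor and an updated n_filters because each layer of a DenseNet block concatenates its output onto its input, so the channel count grows layer by layer. A minimal sketch of that growth pattern, assuming a fixed growth rate (the helper and growth_rate are illustrative, not tefla's dense_block signature):

import tensorflow as tf

def dense_block_sketch(x, n_filters, num_layers, growth_rate=32, name='dense'):
    with tf.variable_scope(name):
        for i in range(num_layers):
            # Each layer adds growth_rate new feature maps...
            y = tf.layers.conv2d(x, growth_rate, 3, padding='same',
                                 activation=tf.nn.relu, name='conv%d' % i)
            # ...concatenated onto everything produced so far.
            x = tf.concat([x, y], axis=3)
            n_filters += growth_rate
    return x, n_filters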
Code Example #7
def discriminator(inputs, is_training, reuse, num_classes=11, batch_size=32):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=lrelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    conv_args_1st = make_args(batch_norm=None,
                              activation=lrelu,
                              w_init=initz.he_normal(scale=1),
                              untie_biases=False,
                              **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    end_points = {}
    x = inputs
    end_points['inputs'] = x
    x = dropout(x, drop_p=0.2, name="input_dropout1", **common_args)
    x = conv2d(x,
               96,
               filter_size=(5, 5),
               stride=(2, 2),
               name="d_conv1_1",
               **conv_args_1st)
    end_points['d_conv1_1'] = x
    x = conv2d(x, 96, name="d_conv1_2", **conv_args)
    end_points['d_conv1_2'] = x
    x = conv2d(x, 96, stride=(2, 2), name="d_conv1_3", **conv_args)
    end_points['d_conv1_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout1", **common_args)
    x = conv2d(x, 192, name="d_conv2_1", **conv_args)
    end_points['d_conv2_1'] = x
    x = conv2d(x, 192, name="d_conv2_2", **conv_args)
    end_points['d_conv2_2'] = x
    x = conv2d(x, 192, stride=(2, 2), name="d_conv2_3", **conv_args)
    end_points['d_conv2_3'] = x
    x = dropout(x, drop_p=0.2, name="dropout2", **common_args)
    x = conv2d(x, 192, stride=(2, 2), name="d_conv3_1", **conv_args)
    end_points['d_conv3_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_1", **conv_args)
    end_points['d_conv4_1'] = x
    x = conv2d(x, 192, filter_size=(1, 1), name="d_conv4_2", **conv_args)
    end_points['d_conv4_2'] = x
    x = global_avg_pool(x, name="global_pool")
    end_points['global_pool'] = x
    logits = fully_connected(x, num_classes, name="d_logits", **logits_args)
    end_points['logits'] = logits
    end_points['predictions'] = softmax(logits,
                                        name='predictions',
                                        **common_args)

    if is_training:
        batch_size = 2 * batch_size
        generated_class_logits = tf.squeeze(
            tf.slice(logits, [0, num_classes - 1], [batch_size, 1]))
        end_points['generated_class_logits'] = generated_class_logits
        positive_class_logits = tf.slice(logits, [0, 0],
                                         [batch_size, num_classes - 1])
        end_points['positive_class_logits'] = positive_class_logits

        max_ = tf.reduce_max(positive_class_logits, 1, keep_dims=True)
        safe_pos_class_logits = positive_class_logits - max_
        end_points['safe_pos_class_logits'] = safe_pos_class_logits

        gan_logits = tf.log(
            tf.reduce_sum(tf.exp(safe_pos_class_logits),
                          1)) + tf.squeeze(max_) - generated_class_logits
        end_points['gan_logits'] = gan_logits
        assert len(gan_logits.get_shape()) == 1

        probs = tf.nn.sigmoid(gan_logits)
        end_points['probs'] = probs
        # Integer division: tf.slice sizes must be ints (batch_size was doubled above).
        class_logits = tf.slice(logits, [0, 0], [batch_size // 2, num_classes])
        end_points['class_logits'] = class_logits
        D_on_data = tf.slice(probs, [0], [batch_size // 2])
        end_points['D_on_data'] = D_on_data
        D_on_data_logits = tf.slice(gan_logits, [0], [batch_size // 2])
        end_points['D_on_data_logits'] = D_on_data_logits
        D_on_G = tf.slice(probs, [batch_size // 2], [batch_size // 2])
        end_points['D_on_G'] = D_on_G
        D_on_G_logits = tf.slice(gan_logits, [batch_size // 2],
                                 [batch_size // 2])
        end_points['D_on_G_logits'] = D_on_G_logits

    return end_points
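The gan_logits computation above is a numerically stable log-sum-exp: log(sum_i exp(l_i)) over the real-class logits, minus the generated-class logit, with the per-row max subtracted before exponentiation and added back afterwards so exp never overflows. A small NumPy check of the identity:

import numpy as np

logits = np.array([2.0, 5.0, 3.0])  # positive (real) class logits
fake = 1.5                          # generated class logit
m = logits.max()
stable = np.log(np.exp(logits - m).sum()) + m - fake
naive = np.log(np.exp(logits).sum()) - fake
assert np.isclose(stable, naive)    # same value; 'stable' avoids overflow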
Code Example #8
File: alexnet.py  Project: openAGI/models
def model(inputs,
          is_training,
          reuse,
          num_classes=5,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          name='alexnet_v2',
          **kwargs):
    """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed, and the initializers have been
        changed from random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful for removing unnecessary dimensions in classification.
    name: Optional name for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logit_args = make_args(activation=None,
                           w_init=initz.he_normal(scale=1),
                           **common_args)
    pred_args = make_args(activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)

    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'alexnet_v2', [inputs]):
        net = conv2d(inputs,
                     64,
                     filter_size=(11, 11),
                     stride=(4, 4),
                     name='conv1',
                     **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool1', **pool_args)
        net = conv2d(net, 192, filter_size=(5, 5), name='conv2', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool2', **pool_args)
        net = conv2d(net, 384, name='conv3', **conv_args)
        net = conv2d(net, 384, name='conv4', **conv_args)
        net = conv2d(net, 256, name='conv5', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool5', **pool_args)

        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(5, 5), name='fc6', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout6',
                      **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout7',
                      **common_args)
        net = global_avg_pool(net)
        logits = fc(net, num_classes, name='logits', **logit_args)

        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
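The docstring mentions spatial_squeeze, although this variant resolves the spatial dimensions with global_avg_pool instead, so the flag goes unused. For the fully convolutional path the docstring describes, the squeeze would look roughly like this sketch (not part of the code above):

# logits from a 1x1 conv head have shape [batch, h, w, num_classes];
# in classification mode h == w == 1, so the unit dims can be dropped.
if spatial_squeeze:
    logits = tf.squeeze(logits, [1, 2], name='logits/squeezed')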
Code Example #9
File: resnet_v2.py  Project: openAGI/models
def resnet_v2(inputs,
              is_training,
              reuse,
              blocks,
              num_classes=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              name=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    is_training: whether the model is being trained.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse them, 'name' must be given.
    name: Optional variable_scope.


  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  common_args = common_layer_args(is_training, reuse)
  conv_args = make_args(
      batch_norm=True,
      activation=prelu,
      w_init=initz.he_normal(scale=1),
      untie_biases=False,
      **common_args)
  logits_args = make_args(activation=None, w_init=initz.he_normal(scale=1), **common_args)
  pred_args = make_args(activation=None, **common_args)
  pool_args = make_args(padding='SAME', **common_args)

  with tf.variable_scope(name, 'resnet_v2', [inputs], reuse=reuse):
    net = inputs
    if include_root_block:
      if output_stride is not None:
        if output_stride % 4 != 0:
          raise ValueError('The output_stride needs to be a multiple of 4.')
        output_stride //= 4  # keep it an integer for the block strides
      # We do not include batch normalization or activation functions in
      # conv1 because the first ResNet unit will perform these. Cf.
      # Appendix of [2].
      net = resnet_utils.conv2d_same(net, 64, 7, 1, rate=2, name='conv1', **common_args)
      net = max_pool(net, name='pool1', **pool_args)
    net = resnet_utils.stack_blocks_dense(net, blocks, output_stride, **conv_args)
    # This is needed because the pre-activation variant does not have batch
    # normalization or activation functions in the residual unit output. See
    # Appendix of [2].
    net = batch_norm(net, name='postnorm', is_training=is_training, reuse=reuse)
    net = tf.nn.relu(net, name='postnorm_activation')
    if num_classes is not None:
      net = conv2d(net, num_classes, filter_size=(1, 1), name='num_classes_conv2d', **logits_args)
    if global_pool:
      # Global average pooling.
      net = global_avg_pool(net, name='logits', **common_args)
    if num_classes is not None:
      predictions = softmax(net, name='predictions', **common_args)

    return end_points(is_training)
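The sizing rule in the docstring is easy to verify: for inputs sized as a multiple of 32 plus 1, the output feature map has spatial extent (size - 1) / output_stride + 1. A quick check of the two cases mentioned:

def output_size(input_size, output_stride):
    # Valid for inputs sized as a multiple of 32 plus 1, per the docstring.
    return (input_size - 1) // output_stride + 1

print(output_size(321, 16))  # 21, the dense-prediction setting
print(output_size(225, 32))  # 8, matching the [8, 8] feature maps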
Code Example #10
def bottleneck_se(inputs,
                  depth,
                  depth_bottleneck,
                  stride,
                  rate=1,
                  name=None,
                  **kwargs):
    """SE Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    name: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
    is_training = kwargs.get('is_training')
    reuse = kwargs.get('reuse')
    with tf.variable_scope(name, 'bottleneck_se', [inputs]):
        depth_in = util.last_dimension(inputs.get_shape(), min_rank=4)
        preact = batch_norm(inputs,
                            activation_fn=tf.nn.relu,
                            name='preact',
                            is_training=is_training,
                            reuse=reuse)
        if depth * 4 == depth_in:
            shortcut = subsample(preact, stride, 'shortcut')
        else:
            shortcut = conv2d(preact,
                              depth * 4,
                              is_training,
                              reuse,
                              filter_size=(1, 1),
                              stride=(stride, stride),
                              batch_norm=None,
                              activation=None,
                              name='shortcut')

        residual = conv2d(preact,
                          depth_bottleneck,
                          filter_size=(1, 1),
                          stride=(1, 1),
                          name='conv1',
                          **kwargs)
        residual = conv2d(residual,
                          depth_bottleneck,
                          filter_size=(3, 3),
                          stride=(stride, stride),
                          name='conv2',
                          **kwargs)
        residual = conv2d(residual,
                          depth * 4,
                          is_training,
                          reuse,
                          filter_size=(1, 1),
                          stride=(1, 1),
                          batch_norm=None,
                          activation=None,
                          name='conv3')

        squeeze = global_avg_pool(residual, name='se_global_avg_pool')
        squeeze = fully_connected(squeeze,
                                  depth // 4,
                                  is_training,
                                  reuse,
                                  name='fc1')
        squeeze = fully_connected(squeeze,
                                  depth * 4,
                                  is_training,
                                  reuse,
                                  name='fc2')
        squeeze = tf.nn.sigmoid(squeeze, name='se_fc_sigmoid')
        residual = residual * tf.reshape(squeeze, [-1, 1, 1, depth * 4])
        return residual + shortcut
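Per the docstring, when two consecutive blocks of these units are stacked, the stride-2 downsampling goes on the last unit of the first block. A hedged wiring sketch, picking up from some feature map net (the unit widths are illustrative):

net = bottleneck_se(net, depth=64, depth_bottleneck=64, stride=1,
                    name='block1_unit1', is_training=is_training, reuse=reuse)
net = bottleneck_se(net, depth=64, depth_bottleneck=64, stride=2,
                    name='block1_unit2', is_training=is_training, reuse=reuse)
# The next block starts at the reduced resolution.
net = bottleneck_se(net, depth=128, depth_bottleneck=128, stride=1,
                    name='block2_unit1', is_training=is_training, reuse=reuse)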
Code Example #11
def model(inputs,
          is_training,
          reuse,
          num_classes=5,
          drop_prob=0.2,
          name='InceptionResnetV2'):
    common_args = common_layer_args(is_training, reuse)
    rest_conv_params = make_args(use_bias=False,
                                 batch_norm=batch_norm,
                                 activation=relu,
                                 **common_args)
    conv_params_no_bias = make_args(use_bias=False,
                                    batch_norm=batch_norm,
                                    activation=relu,
                                    **common_args)
    conv_params = make_args(use_bias=True,
                            batch_norm=batch_norm,
                            activation=None,
                            **common_args)
    rest_logit_params = make_args(activation=None, **common_args)
    rest_pool_params = make_args(padding='SAME', **common_args)
    rest_dropout_params = make_args(drop_p=drop_prob, **common_args)

    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)

    with tf.variable_scope(name, 'InceptionResnetV2'):
        net = conv2d(inputs,
                     32,
                     stride=(2, 2),
                     name='Conv2d_1a_3x3',
                     **conv_params_no_bias)
        net = conv2d(net, 32, name='Conv2d_2a_3x3', **conv_params_no_bias)
        # 112 x 112
        net = conv2d(net, 64, name='Conv2d_2b_3x3', **rest_conv_params)
        # 112 x 112
        net = max_pool(net, name='MaxPool_3a_3x3', **rest_pool_params)
        # 64 x 64
        net = conv2d(net,
                     80,
                     filter_size=(1, 1),
                     name='Conv2d_3b_1x1',
                     **rest_conv_params)
        # 64 x 64
        net = conv2d(net, 192, name='Conv2d_4a_3x3', **rest_conv_params)
        # 64 x 64
        net = max_pool(net,
                       stride=(2, 2),
                       name='maxpool_5a_3x3',
                       **rest_pool_params)

        # 32 x 32
        with tf.variable_scope('Mixed_5b'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    96,
                                    filter_size=(1, 1),
                                    name='Conv2d_1x1',
                                    **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net,
                                       48,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0,
                                       64,
                                       filter_size=(5, 5),
                                       name='Conv2d_0b_5x5',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2_0 = conv2d(net,
                                       64,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2_0,
                                       96,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1,
                                       96,
                                       name='Conv2d_0c_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = avg_pool_2d(net,
                                         stride=(1, 1),
                                         name='avgpool_0a_3x3',
                                         **rest_pool_params)
                tower_pool_1 = conv2d(tower_pool,
                                      64,
                                      filter_size=(1, 1),
                                      name='Conv2d_0b_1x1',
                                      **rest_conv_params)
            net = tf.concat(
                [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
        with tf.variable_scope('Repeat'):
            for i in range(1, 11):
                net = block35(net,
                              name='block35_' + str(i),
                              scale=0.17,
                              **conv_params_no_bias)

        # 32 x 32
        with tf.variable_scope('Mixed_6a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    384,
                                    stride=(2, 2),
                                    name='Conv2d_1a_3x3',
                                    **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net,
                                       256,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0,
                                       256,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv1_2 = conv2d(tower_conv1_1,
                                       384,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_pool = max_pool(net,
                                      name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)

        with tf.variable_scope('Repeat_1'):
            for i in range(1, 21):
                net = block17(net,
                              name='block17_' + str(i),
                              scale=0.10,
                              **conv_params_no_bias)

        with tf.variable_scope('Mixed_7a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    256,
                                    filter_size=(1, 1),
                                    name='Conv2d_0a_1x1',
                                    **rest_conv_params)
                tower_conv_1 = conv2d(tower_conv,
                                      384,
                                      stride=(2, 2),
                                      name='Conv2d_1a_3x3',
                                      **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1 = conv2d(net,
                                     256,
                                     filter_size=(1, 1),
                                     name='Conv2d_0a_1x1',
                                     **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1,
                                       288,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2 = conv2d(net,
                                     256,
                                     filter_size=(1, 1),
                                     name='Conv2d_0a_1x1',
                                     **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2,
                                       288,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1,
                                       320,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = max_pool(net,
                                      name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat(
                [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
        # 8 x 8
        with tf.variable_scope('Repeat_2'):
            for i in range(1, 10):
                net = block8(net,
                             name='block8_' + str(i),
                             scale=0.20,
                             **conv_params_no_bias)
        net = block8(net, name='Block8', **conv_params_no_bias)

        net = conv2d(net,
                     1536,
                     filter_size=(1, 1),
                     name='Conv2d_7b_1x1',
                     **rest_conv_params)

        with tf.variable_scope('Logits'):
            net = global_avg_pool(net, name='avgpool_1a_8x8')
            net = dropout(net, name='dropout', **rest_dropout_params)
            logits = fully_connected(net,
                                     num_classes,
                                     name='Logits',
                                     **rest_logit_params)
            predictions = softmax(logits, name='Predictions', **common_args)

    return end_points(is_training)
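The scale arguments passed to block35, block17, and block8 (0.17, 0.10, 0.20) implement the residual scaling recommended in the Inception-ResNet paper: the tower output is damped before the residual addition to keep activations stable in deep stacks. Schematically, each such block reduces to this sketch (not tefla's implementation):

def scaled_residual_sketch(net, branch_fn, scale):
    # branch_fn builds the inception-style tower; its output is scaled
    # down before being added back onto the shortcut path.
    return net + scale * branch_fn(net)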