Example #1
def model(inputs, is_training, reuse, num_classes=2):
    common_args = common_layer_args(is_training, reuse)
    conv1 = conv2d(inputs, 32, name='conv1', activation=prelu, **common_args)
    conv1 = conv2d(conv1, 32, name='conv2', activation=prelu, **common_args)
    fc1 = fc(conv1, num_classes, name='logits', **common_args)
    prediction = softmax(fc1, name='prediction', **common_args)
    return end_points(is_training)
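
All of these snippets follow the same contract: the function wires up a graph for a given is_training/reuse pair and returns the library's collected end points. A minimal driver sketch, assuming train_images and eval_images tensors already exist (names are hypothetical):

# Build the training graph first so variables are created (reuse=None),
# then build the evaluation graph sharing those variables (reuse=True).
train_end_points = model(train_images, is_training=True, reuse=None)
eval_end_points = model(eval_images, is_training=False, reuse=True)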
Example #2
def model(is_training, reuse, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu, w_init=initz.he_normal(
        scale=1), untie_biases=False, **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args)
        net = fire_module(net, 16, 64, name='fire3', **conv_args)
        net = fire_module(net, 32, 128, name='fire4', **conv_args)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = fire_module(net, 32, 128, name='fire5', **conv_args)
        net = fire_module(net, 48, 192, name='fire6', **conv_args)
        net = fire_module(net, 48, 192, name='fire7', **conv_args)
        net = fire_module(net, 64, 256, name='fire8', **conv_args)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = fire_module(net, 64, 256, name='fire9', **conv_args)
        # Reversed avg and conv layers per 'Network in Network'
        net = dropout(net, drop_p=1 - dropout_keep_prob,
                      name='dropout6', **common_args)
        net = conv2d(net, 10, filter_size=(1, 1), name='conv10', **conv_args)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
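
fire_module itself is not shown in these snippets. For orientation, here is a sketch of a SqueezeNet fire module written with the same helpers (squeeze with a 1x1 convolution, then concatenate parallel 1x1 and 3x3 expand convolutions); the exact signature is an assumption modeled on the calls above:

def fire_module(inputs, squeeze_filters, expand_filters, name='fire', **conv_args):
    with tf.variable_scope(name):
        # Squeeze: cut channel depth with 1x1 convolutions.
        squeezed = conv2d(inputs, squeeze_filters, filter_size=(1, 1),
                          name='squeeze1x1', **conv_args)
        # Expand: parallel 1x1 and 3x3 convolutions, concatenated on channels.
        expand1x1 = conv2d(squeezed, expand_filters, filter_size=(1, 1),
                           name='expand1x1', **conv_args)
        expand3x3 = conv2d(squeezed, expand_filters, filter_size=(3, 3),
                           name='expand3x3', **conv_args)
        return tf.concat([expand1x1, expand3x3], axis=3)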
Example #3
def model(is_training, reuse, num_classes=5, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(
        untie_biases=True, batch_norm=batch_norm, **common_args)
    logit_args = make_args(activation=prelu, **common_args)

    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)

    net = conv2d(inputs, 32, filter_size=(3, 3), stride=(2, 2),
                 name='conv1', **conv_args)
    net = conv2d(net, 64, name='conv2', **conv_args)
    net = bottleneck_v1(net, num_unit=128, name='block_v1_1', **conv_args)
    net = bottleneck_v1(net, num_unit=256, name='block_v1_2', **conv_args)
    net = bottleneck_v1(net, num_unit=728, name='block_v1_3', **conv_args)

    for i in range(8):
        prefix = 'block_v2_' + str(i + 5)
        net = bottleneck_v2(net, num_unit=728, name=prefix, **conv_args)

    net = bottleneck_v1(net, num_unit=1024, name='block_v1_4', **conv_args)
    net = separable_conv2d(net, 1536, filter_size=(3, 3), stride=(1, 1),
                           name='sconv1', **conv_args)
    net = separable_conv2d(net, 2048, filter_size=(3, 3), stride=(1, 1),
                           name='sconv2', **conv_args)
    with tf.variable_scope('Logits'):
        net = avg_pool_2d(net, net.get_shape()[1:3], name='AvgPool_1a')
        net = dropout(net, drop_p=1 - dropout_keep_prob,
                      name='Dropout_1b', **common_args)
        logits = fully_connected(net, num_classes,
                                 name='logits', **logit_args)
        predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #4
def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = lstm(x, 34, reuse, is_training)
    logits = fc(x, 2, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #5
def model(inputs, is_training, reuse, num_classes=10, dropout_keep_prob=0.5):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    conv_args_fm = make_args(w_init=initz.he_normal(scale=1),
                             untie_biases=False,
                             **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope('squeezenet', values=[inputs]):
        net = separable_conv2d(inputs,
                               256,
                               stride=(2, 2),
                               name='conv1',
                               **conv_args)
        # net = conv2d(inputs, 96, stride=(2, 2), name='conv1', **conv_args)
        net = max_pool(net, name='maxpool1', **pool_args)
        net = fire_module(net, 16, 64, name='fire2', **conv_args_fm)
        net = bottleneck_simple(net, 16, 64, name='fire3', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire3_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 32, 128, name='fire4', **conv_args_fm)
        net = max_pool(net, name='maxpool4', **pool_args)
        net = bottleneck_simple(net, 32, 128, name='fire5', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire5_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 48, 192, name='fire6', **conv_args_fm)
        net = bottleneck_simple(net, 48, 192, name='fire7', **conv_args_fm)
        net = batch_norm(net,
                         activation_fn=tf.nn.relu,
                         name='fire7_bn',
                         is_training=is_training,
                         reuse=reuse)
        net = fire_module(net, 64, 256, name='fire8', **conv_args_fm)
        net = max_pool(net, name='maxpool8', **pool_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout6',
                      **common_args)
        net = conv2d(net,
                     num_classes,
                     filter_size=(1, 1),
                     name='conv10',
                     **conv_args_fm)
        logits = global_avg_pool(net, name='logits', **pool_args)
        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
Example #6
def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x = bidirectional_rnn(x, LSTMCell(128, reuse), LSTMCell(128, reuse),
                          **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #7
def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)

    x = input((None, height * width), **common_args)
    x = fully_connected(x, n_output=100, name='fc1', **fc_args)
    logits = fully_connected(x, n_output=10, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)

    return end_points(is_training)
Example #8
def model(x, is_training, reuse, num_classes=10, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)

    x = fully_connected(x, n_output=100, name='fc1', **fc_args)
    logits = fully_connected(x,
                             n_output=num_classes,
                             name="logits",
                             **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)

    return end_points(is_training)
Example #9
def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)

    x = input((None, 7, 7, 512), **common_args)
    # x = batch_norm_tf(x, **common_args)
    x = fully_connected(x, 512, activation=relu, name='fc1', **common_args)
    x = dropout(x, drop_p=0.5, name='dropout1', **common_args)
    logits = fully_connected(x,
                             6,
                             activation=None,
                             name='logits',
                             **common_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #10
def model(x, is_training, reuse, num_classes=2, **kwargs):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = lstm(x, 384, reuse, is_training)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, num_classes, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #11
def model(x, is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)
    x = embedding(x, 10000, 128, reuse)
    x1 = conv1d(x, 128, name='conv1_1', **common_args)
    x2 = conv1d(x, 128, filter_size=4, name='conv1_2', **common_args)
    x3 = conv1d(x, 128, filter_size=5, name='conv1_3', **common_args)
    x = merge([x1, x2, x3], 'concat', axis=1)
    x = tf.expand_dims(x, 2)
    x = global_max_pool(x)
    x = dropout(x, drop_p=0.3, **common_args)
    logits = fc(x, 2, name='logits', **logit_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #12
def model(inputs,
          is_training,
          reuse,
          input_size=image_size[0],
          drop_p_conv=0.0,
          drop_p_trans=0.0,
          n_filters=64,
          n_layers=[1, 2, 2, 3],
          num_classes=5, **kwargs):
  common_args = common_layer_args(is_training, reuse)
  conv_args = make_args(
      batch_norm=True,
      activation=prelu,
      w_init=initz.he_normal(scale=1),
      untie_biases=True,
      **common_args)
  fc_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
  logit_args = make_args(activation=None, w_init=initz.he_normal(scale=1), **common_args)
  pred_args = make_args(activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
  pool_args = make_args(padding='SAME', filter_size=(2, 2), stride=(2, 2), **common_args)

  x = conv2d(inputs, 48, filter_size=(7, 7), name="conv1", **conv_args)
  x = max_pool(x, name='pool1', **pool_args)
  x = conv2d(x, 64, name="conv2_1", **conv_args)
  x = conv2d(x, 64, name="conv2_2", **conv_args)
  x = max_pool(x, name='pool2', **pool_args)

  # 112
  for block_idx in range(3):
    x, n_filters = dense_block(
        x,
        n_filters,
        num_layers=n_layers[block_idx],
        drop_p=drop_p_conv,
        block_name='dense_' + str(block_idx),
        **conv_args)
    x = trans_block(
        x, n_filters, drop_p=drop_p_trans, block_name='trans_' + str(block_idx), **conv_args)

  x, n_filters = dense_block(
      x, n_filters, num_layers=n_layers[3], drop_p=drop_p_trans, block_name='dense_3', **conv_args)
  # 8
  x = global_avg_pool(x, name='avgpool_1a_8x8')
  logits = fully_connected(x, n_output=num_classes, name="logits", **logit_args)

  predictions = softmax(logits, name='predictions', **common_args)
  return end_points(is_training)
Example #13
def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=None, activation=prelu, **common_args)
    fc_args = make_args(activation=prelu, **common_args)
    logit_args = make_args(activation=None, **common_args)

    x = input((None, crop_size[1], crop_size[0], 1), **common_args)
    x = conv2d(x, 32, name='conv1_1', **conv_args)
    x = conv2d(x, 32, name='conv1_2', **conv_args)
    x = max_pool(x, name='pool1', **common_args)
    x = dropout(x, drop_p=0.25, name='dropout1', **common_args)
    x = fully_connected(x, n_output=128, name='fc1', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout2', **common_args)
    logits = fully_connected(x, n_output=36, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)

    return end_points(is_training)
Example #14
def model(is_training, reuse):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(activation=relu, **common_args)
    pool_args = make_args(filter_size=(2, 2), **common_args)
    fc_args = make_args(activation=relu, **common_args)
    logit_args = make_args(activation=None, **common_args)

    x = input((None, crop_size[1], crop_size[0], 3), **common_args)

    x = conv2d(x, 64, name='conv1_1', **conv_args)
    x = conv2d(x, 64, name='conv1_2', **conv_args)
    x = max_pool(x, name='maxpool1', **pool_args)

    x = conv2d(x, 128, name='conv2_1', **conv_args)
    x = conv2d(x, 128, name='conv2_2', **conv_args)
    x = max_pool(x, name='maxpool2', **pool_args)

    x = conv2d(x, 256, name='conv3_1', **conv_args)
    x = conv2d(x, 256, name='conv3_2', **conv_args)
    x = conv2d(x, 256, name='conv3_3', **conv_args)
    x = max_pool(x, name='maxpool3', **pool_args)

    x = conv2d(x, 512, name='conv4_1', **conv_args)
    x = conv2d(x, 512, name='conv4_2', **conv_args)
    x = conv2d(x, 512, name='conv4_3', **conv_args)
    x = max_pool(x, name='maxpool4', **pool_args)

    x = conv2d(x, 512, name='conv5_1', **conv_args)
    x = conv2d(x, 512, name='conv5_2', **conv_args)
    x = conv2d(x, 512, name='conv5_3', **conv_args)
    x = max_pool(x, name='maxpool5', **pool_args)

    x = fully_connected(x, n_output=4096, name='fc6', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout1', **common_args)

    x = fully_connected(x, n_output=4096, name='fc7', **fc_args)
    x = dropout(x, drop_p=0.5, name='dropout2', **common_args)

    logits = fully_connected(x, n_output=1000, name="logits", **logit_args)
    predictions = softmax(logits, name='predictions', **common_args)

    return end_points(is_training)
Example #15
def resnet_v1(inputs,
              blocks,
              num_classes=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              scope=None,
              **common_args):
    conv_args = make_args(use_bias=False,
                          activation=relu,
                          batch_norm=batch_norm_tf,
                          batch_norm_args=batch_norm_params,
                          **common_args)
    with tf.variable_scope(scope,
                           'resnet_v1', [inputs],
                           reuse=common_args['reuse']) as sc:
        net = inputs
        if include_root_block:
            if output_stride is not None:
                if output_stride % 4 != 0:
                    raise ValueError(
                        'The output_stride needs to be a multiple of 4.')
                output_stride /= 4
            net = conv2d_same(net, 64, 7, stride=2, scope='conv1', **conv_args)
            net = max_pool(net,
                           filter_size=(3, 3),
                           stride=(2, 2),
                           padding='SAME',
                           name='pool1')
        net = stack_blocks_dense(net, blocks, output_stride, **common_args)
        if global_pool:
            # Global average pooling.
            net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
            net = conv2d(net,
                         num_classes,
                         filter_size=(1, 1),
                         activation=None,
                         name='logits',
                         **common_args)
            predictions = softmax(net, name='predictions', **common_args)
        return end_points(common_args['is_training'])
Example #16
def model(x, is_training, reuse, num_classes=10, **config):
  common_args = common_layer_args(is_training, reuse)
  logit_args = make_args(activation=None, **common_args)

  if config['max_conv_layers'] > 0:
    for i in range(1, config['n_conv_layers'] + 1):
      activation, size, maxpool = layer_config(config, i, layer_type='conv')
      conv_args = make_args(batch_norm=bool(config['batch_norm']), activation=prelu, **common_args)
      x = conv2d(x, size, name='conv{}'.format(i), **conv_args)
      if maxpool:
        x = max_pool(x, name='pool{}'.format(i), **common_args)

  if config['max_fc_layers'] > 0:
    for i in range(1, config['n_fc_layers'] + 1):
      activation, size, _dropout = layer_config(config, i, layer_type='fc')
      fc_args = make_args(activation=prelu, **common_args)
      x = fully_connected(x, n_output=size, name='fc{}'.format(i), **fc_args)
      x = dropout(x, drop_p=np.round(_dropout, 2), name='dropout{}'.format(i), **common_args)

  logits = fully_connected(x, n_output=num_classes, name="logits", **logit_args)
  predictions = softmax(logits, name='predictions', **common_args)
  return end_points(is_training)
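
The config dict drives this search-style builder. The keys read directly above are max_conv_layers, n_conv_layers, batch_norm, max_fc_layers and n_fc_layers; per-layer activation, size, pooling and dropout come from layer_config, whose key names are not shown here. A hypothetical config, for illustration only:

config = {
    'max_conv_layers': 3, 'n_conv_layers': 2,  # build conv1, conv2
    'max_fc_layers': 2, 'n_fc_layers': 1,      # build fc1 (+ dropout1)
    'batch_norm': 1,                           # coerced to bool for conv_args
    # ...plus whatever per-layer entries layer_config(config, i, layer_type)
    # expects for activation, size, maxpool and dropout.
}
end_points = model(x, is_training=True, reuse=None, num_classes=10, **config)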
Example #17
def model(inputs,
          is_training,
          reuse,
          num_classes=5,
          drop_prob=0.2,
          name='InceptionResnetV2'):
    common_args = common_layer_args(is_training, reuse)
    rest_conv_params = make_args(use_bias=False,
                                 batch_norm=batch_norm,
                                 activation=relu,
                                 **common_args)
    conv_params_no_bias = make_args(use_bias=False,
                                    batch_norm=batch_norm,
                                    activation=relu,
                                    **common_args)
    conv_params = make_args(use_bias=True,
                            batch_norm=batch_norm,
                            activation=None,
                            **common_args)
    rest_logit_params = make_args(activation=None, **common_args)
    rest_pool_params = make_args(padding='SAME', **common_args)
    rest_dropout_params = make_args(drop_p=drop_prob, **common_args)

    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)

    with tf.variable_scope(name, 'InceptionResnetV2'):
        net = conv2d(inputs,
                     32,
                     stride=(2, 2),
                     name='Conv2d_1a_3x3',
                     **conv_params_no_bias)
        net = conv2d(net, 32, name='Conv2d_2a_3x3', **conv_params_no_bias)
        # 112 x 112
        net = conv2d(net, 64, name='Conv2d_2b_3x3', **rest_conv_params)
        # 112 x 112
        net = max_pool(net, name='MaxPool_3a_3x3', **rest_pool_params)
        # 64 x 64
        net = conv2d(net,
                     80,
                     filter_size=(1, 1),
                     name='Conv2d_3b_1x1',
                     **rest_conv_params)
        # 64 x 64
        net = conv2d(net, 192, name='Conv2d_4a_3x3', **rest_conv_params)
        # 64 x 64
        net = max_pool(net,
                       stride=(2, 2),
                       name='maxpool_5a_3x3',
                       **rest_pool_params)

        # 32 x 32
        with tf.variable_scope('Mixed_5b'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    96,
                                    filter_size=(1, 1),
                                    name='Conv2d_1x1',
                                    **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net,
                                       48,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0,
                                       64,
                                       filter_size=(5, 5),
                                       name='Conv2d_0b_5x5',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2_0 = conv2d(net,
                                       64,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2_0,
                                       96,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1,
                                       96,
                                       name='Conv2d_0c_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = avg_pool_2d(net,
                                         stride=(1, 1),
                                         name='avgpool_0a_3x3',
                                         **rest_pool_params)
                tower_pool_1 = conv2d(tower_pool,
                                      64,
                                      filter_size=(1, 1),
                                      name='Conv2d_0b_1x1',
                                      **rest_conv_params)
            net = tf.concat(
                [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
        with tf.variable_scope('Repeat'):
            for i in range(1, 11):
                net = block35(net,
                              name='block35_' + str(i),
                              scale=0.17,
                              **conv_params_no_bias)

        # 32 x 32
        with tf.variable_scope('Mixed_6a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    384,
                                    stride=(2, 2),
                                    name='Conv2d_1a_3x3',
                                    **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1_0 = conv2d(net,
                                       256,
                                       filter_size=(1, 1),
                                       name='Conv2d_0a_1x1',
                                       **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1_0,
                                       256,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv1_2 = conv2d(tower_conv1_1,
                                       384,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_pool = max_pool(net,
                                      name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)

        with tf.variable_scope('Repeat_1'):
            for i in range(1, 21):
                net = block17(net,
                              name='block17_' + str(i),
                              scale=0.10,
                              **conv_params_no_bias)

        with tf.variable_scope('Mixed_7a'):
            with tf.variable_scope('Branch_0'):
                tower_conv = conv2d(net,
                                    256,
                                    filter_size=(1, 1),
                                    name='Conv2d_0a_1x1',
                                    **rest_conv_params)
                tower_conv_1 = conv2d(tower_conv,
                                      384,
                                      stride=(2, 2),
                                      name='Conv2d_1a_3x3',
                                      **rest_conv_params)
            with tf.variable_scope('Branch_1'):
                tower_conv1 = conv2d(net,
                                     256,
                                     filter_size=(1, 1),
                                     name='Conv2d_0a_1x1',
                                     **rest_conv_params)
                tower_conv1_1 = conv2d(tower_conv1,
                                       288,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_2'):
                tower_conv2 = conv2d(net,
                                     256,
                                     filter_size=(1, 1),
                                     name='Conv2d_0a_1x1',
                                     **rest_conv_params)
                tower_conv2_1 = conv2d(tower_conv2,
                                       288,
                                       name='Conv2d_0b_3x3',
                                       **rest_conv_params)
                tower_conv2_2 = conv2d(tower_conv2_1,
                                       320,
                                       stride=(2, 2),
                                       name='Conv2d_1a_3x3',
                                       **rest_conv_params)
            with tf.variable_scope('Branch_3'):
                tower_pool = max_pool(net,
                                      name='maxpool_1a_3x3',
                                      **rest_pool_params)
            net = tf.concat(
                [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
        # 8 x 8
        with tf.variable_scope('Repeat_2'):
            for i in range(1, 10):
                net = block8(net,
                             name='block8_' + str(i),
                             scale=0.20,
                             **conv_params_no_bias)
        net = block8(net, name='Block8', **conv_params_no_bias)

        net = conv2d(net,
                     1536,
                     filter_size=(1, 1),
                     name='Conv2d_7b_1x1',
                     **rest_conv_params)

        with tf.variable_scope('Logits'):
            net = global_avg_pool(net, name='avgpool_1a_8x8')
            net = dropout(net, name='dropout', **rest_dropout_params)
            logits = fully_connected(net,
                                     num_classes,
                                     name='Logits',
                                     **rest_logit_params)
            predictions = softmax(logits, name='Predictions', **common_args)

    return end_points(is_training)
Example #18
def model(inputs, is_training, reuse, num_classes=21, batch_size=1):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=lrelu, w_init=initz.he_normal(
        scale=1), untie_biases=False, **common_args)
    upsample_args = make_args(
        batch_norm=False, activation=lrelu, use_bias=False, **common_args)
    logits_args = make_args(
        activation=None, **common_args)
    pool_args = make_args(padding='SAME', **common_args)

    conv1_1 = conv2d(inputs, 64, name="vgg_19/conv1/conv1_1", **conv_args)
    conv1_2 = conv2d(conv1_1, 64, name="vgg_19/conv1/conv1_2", **conv_args)
    pool1 = max_pool(conv1_2, stride=2, name='pool1', **pool_args)
    conv2_1 = conv2d(pool1, 128, name="vgg_19/conv2/conv2_1", **conv_args)
    conv2_2 = conv2d(conv2_1, 128, name="vgg_19/conv2/conv2_2", **conv_args)
    pool2 = max_pool(conv2_2, stride=2, name='pool2', **pool_args)
    conv3_1 = conv2d(pool2, 256, name="vgg_19/conv3/conv3_1", **conv_args)
    conv3_2 = conv2d(conv3_1, 256, name="vgg_19/conv3/conv3_2", **conv_args)
    conv3_3 = conv2d(conv3_2, 256, name="vgg_19/conv3/conv3_3", **conv_args)
    conv3_4 = conv2d(conv3_3, 256, name="vgg_19/conv3/conv3_4", **conv_args)
    pool3 = max_pool(conv3_4, stride=2, name='pool3', **pool_args)
    conv4_1 = conv2d(pool3, 512, name="vgg_19/conv4/conv4_1", **conv_args)
    conv4_2 = conv2d(conv4_1, 512, name="vgg_19/conv4/conv4_2", **conv_args)
    conv4_3 = conv2d(conv4_2, 512, name="vgg_19/conv4/conv4_3", **conv_args)
    conv4_4 = conv2d(conv4_3, 512, name="vgg_19/conv4/conv4_4", **conv_args)
    pool4 = max_pool(conv4_4, stride=2, name='pool4', **pool_args)
    conv5_1 = conv2d(pool4, 512, name="vgg_19/conv5/conv5_1", **conv_args)
    conv5_2 = conv2d(conv5_1, 512, name="vgg_19/conv5/conv5_2", **conv_args)
    conv5_3 = conv2d(conv5_2, 512, name="vgg_19/conv5/conv5_3", **conv_args)
    conv5_4 = conv2d(conv5_3, 512, name="vgg_19/conv5/conv5_4", **conv_args)
    pool5 = max_pool(conv5_4, stride=2, name='pool5', **pool_args)

    fc6 = conv2d(pool5, 4096, filter_size=(7, 7),
                 name="vgg_19/fc6", **conv_args)
    fc6 = dropout(fc6, **common_args)
    fc7 = conv2d(fc6, 4096, filter_size=(1, 1), name="vgg_19/fc7", **conv_args)
    fc7 = dropout(fc7, **common_args)
    score_fr = conv2d(fc7, num_classes, filter_size=(1, 1),
                      name="score_fr", **conv_args)

    pred = tf.argmax(score_fr, axis=3)
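    # The upsampling path below follows the FCN skip architecture: transposed
    # convolutions initialized to bilinear interpolation kernels
    # (initz.bilinear) upsample x2 to fuse with pool4 scores, x2 again to
    # fuse with pool3, then x8 back to the input resolution.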
    pool4_shape = pool4.get_shape().as_list()
    upscore2 = upsample2d(score_fr, [batch_size, pool4_shape[1], pool4_shape[2], num_classes], filter_size=(4, 4), stride=(2, 2),
                          name="deconv2d_1", w_init=initz.bilinear((4, 4, num_classes, num_classes)), **upsample_args)
    score_pool4 = conv2d(pool4, num_classes, filter_size=(1, 1),
                         name="score_pool4", **conv_args)
    fuse_pool4 = tf.add(upscore2, score_pool4)

    pool3_shape = pool3.get_shape().as_list()
    upscore4 = upsample2d(fuse_pool4, [batch_size, pool3_shape[1], pool3_shape[2], num_classes], filter_size=(4, 4), stride=(2, 2),
                          name="deconv2d_2", w_init=initz.bilinear((4, 4, num_classes, num_classes)), **upsample_args)
    score_pool3 = conv2d(pool3, num_classes, filter_size=(1, 1),
                         name="score_pool3", **conv_args)
    fuse_pool3 = tf.add(upscore4, score_pool3)
    input_shape = inputs.get_shape().as_list()
    upscore32 = upsample2d(fuse_pool3, [batch_size, input_shape[1], input_shape[2], num_classes], filter_size=(16, 16), stride=(8, 8),
                           name="deconv2d_3", w_init=initz.bilinear((16, 16, num_classes, num_classes)), **logits_args)
    logits = register_to_collections(tf.reshape(
        upscore32, shape=(-1, num_classes)), name='logits', **common_args)
    pred_up = tf.argmax(upscore32, axis=3)
    pred_up = register_to_collections(
        pred_up, name='final_prediction_map', **common_args)
    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #19
def model(height,
          width,
          num_actions,
          is_training=False,
          reuse=None,
          name=None):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    fc_args = make_args(activation=prelu,
                        w_init=initz.he_normal(scale=1),
                        **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    with tf.variable_scope(name):
        state = register_to_collections(tf.placeholder(
            shape=[None, 4, height, width], dtype=tf.float32, name='state'),
                                        name='state',
                                        **common_args)
        state_perm = tf.transpose(state, perm=[0, 2, 3, 1])
        summary_ops = [
            tf.summary.image("states",
                             state[:, 0, :, :][..., tf.newaxis],
                             max_outputs=10,
                             collections='train')
        ]
        conv1_0 = conv2d(state_perm,
                         32,
                         filter_size=8,
                         stride=(1, 1),
                         name="conv1_0",
                         **conv_args)
        conv1_1 = conv2d(conv1_0,
                         64,
                         filter_size=8,
                         stride=(2, 2),
                         name="conv1_1",
                         **conv_args)
        pool = max_pool(conv1_1, filter_size=2, name="maxpool", **pool_args)
        conv2_0 = conv2d(pool,
                         128,
                         filter_size=4,
                         stride=2,
                         name="conv2_0",
                         **conv_args)
        conv2_1 = conv2d(conv2_0,
                         256,
                         filter_size=3,
                         stride=(2, 2),
                         name="conv2_1",
                         **conv_args)
        conv3_0 = conv2d(conv2_1,
                         256,
                         filter_size=4,
                         stride=1,
                         name="conv3_0",
                         **conv_args)
        conv3_1 = conv2d(conv3_0,
                         512,
                         filter_size=4,
                         stride=2,
                         name="conv3_1",
                         **conv_args)
        # Dueling
        value_hid = fc(conv3_1, 512, name="value_hid", **fc_args)
        adv_hid = fc(conv3_1, 512, name="adv_hid", **fc_args)

        value = fc(value_hid, 1, name="value", **logits_args)
        advantage = fc(adv_hid, num_actions, name="advantage", **logits_args)

        # Average Dueling
        Qs = value + (advantage -
                      tf.reduce_mean(advantage, axis=1, keep_dims=True))
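        # Subtracting the mean advantage keeps the two streams identifiable:
        # shifting a constant from the value stream to the advantage stream
        # would otherwise leave Q unchanged (the standard dueling-DQN trick).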

        # action with highest Q values
        a = register_to_collections(tf.argmax(Qs, 1), name='a', **common_args)
        # Q value belonging to selected action
        Q = register_to_collections(tf.reduce_max(Qs, 1),
                                    name='Q',
                                    **common_args)
        summary_ops.append(tf.summary.histogram("Q", Q, collections='train'))
        # For training
        Q_target = register_to_collections(tf.placeholder(shape=[None],
                                                          dtype=tf.float32),
                                           name='Q_target',
                                           **common_args)
        actions = register_to_collections(tf.placeholder(shape=[None],
                                                         dtype=tf.int32),
                                          name='actions',
                                          **common_args)
        actions_onehot = tf.one_hot(actions,
                                    num_actions,
                                    on_value=1.,
                                    off_value=0.,
                                    axis=1,
                                    dtype=tf.float32)

        Q_tmp = tf.reduce_sum(tf.multiply(Qs, actions_onehot), axis=1)
        loss = register_to_collections(tf.reduce_mean(
            tf.square(Q_target - Q_tmp)),
                                       name='loss',
                                       **common_args)
        summary_ops.append(tf.summary.scalar("loss", loss,
                                             collections='train'))
        register_to_collections(summary_ops, name='summary_ops', **common_args)
        return end_points(is_training)
Example #20
def resnet_v1(inputs,
              is_training,
              reuse,
              blocks,
              num_classes=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              name=None):
    """Generator for v2 (preactivation) ResNet models.

    This function generates a family of ResNet v2 models. See the resnet_v2_*()
    methods for specific model instantiations, obtained by selecting different
    block instantiations that produce ResNets of various depths.

    Training for image classification on Imagenet is usually done with [224, 224]
    inputs, resulting in [7, 7] feature maps at the output of the last ResNet
    block for the ResNets defined in [1] that have nominal stride equal to 32.
    However, for dense prediction tasks we advise that one uses inputs with
    spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
    this case the feature maps at the ResNet output will have spatial shape
    [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
    and corners exactly aligned with the input image corners, which greatly
    facilitates alignment of the features to the image. Using as input [225, 225]
    images results in [8, 8] feature maps at the output of the last ResNet block.

    For dense prediction tasks, the ResNet needs to run in fully-convolutional
    (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
    have nominal stride equal to 32 and a good choice in FCN mode is to use
    output_stride=16 in order to increase the density of the computed features at
    small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

    Args:
      inputs: A tensor of size [batch, height_in, width_in, channels].
      blocks: A list of length equal to the number of ResNet blocks. Each element
        is a resnet_utils.Block object describing the units in the block.
      num_classes: Number of predicted classes for classification tasks. If None
        we return the features before the logit layer.
      is_training: whether or not the model is being trained.
      global_pool: If True, we perform global average pooling before computing the
        logits. Set to True for image classification, False for dense prediction.
      output_stride: If None, then the output will be computed at the nominal
        network stride. If output_stride is not None, it specifies the requested
        ratio of input to output spatial resolution.
      include_root_block: If True, include the initial convolution followed by
        max-pooling, if False excludes it. If excluded, `inputs` should be the
        results of an activation-less convolution.
      reuse: whether or not the network and its variables should be reused. To
        be able to reuse, 'name' must be given.
      name: Optional variable_scope.


    Returns:
      net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
        If global_pool is False, then height_out and width_out are reduced by a
        factor of output_stride compared to the respective height_in and width_in,
        else both height_out and width_out equal one. If num_classes is None, then
        net is the output of the last ResNet block, potentially after global
        average pooling. If num_classes is not None, net contains the pre-softmax
        activations.
      end_points: A dictionary from components of the network to the corresponding
        activation.

    Raises:
      ValueError: If the target output_stride is not valid.
    """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logits_args = make_args(activation=None,
                            w_init=initz.he_normal(scale=1),
                            **common_args)
    pred_args = make_args(activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)

    with tf.variable_scope(name, 'resnet_v2', [inputs], reuse=reuse):
        net = inputs
        if include_root_block:
            if output_stride is not None:
                if output_stride % 4 != 0:
                    raise ValueError(
                        'The output_stride needs to be a multiple of 4.')
                output_stride /= 4
            # We do not include batch normalization or activation functions in
            # conv1 because the first ResNet unit will perform these. Cf.
            # Appendix of [2].
            net = resnet_utils.conv2d_same(net,
                                           64,
                                           7,
                                           stride=2,
                                           name='conv1',
                                           **common_args)
            net = max_pool(net, name='pool1', **pool_args)
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
                                              **conv_args)
        # This is needed because the pre-activation variant does not have batch
        # normalization or activation functions in the residual unit output. See
        # Appendix of [2].
        net = batch_norm(net,
                         activation=tf.nn.relu,
                         name='postnorm',
                         is_training=is_training,
                         reuse=reuse)
        if global_pool:
            # Global average pooling.
            net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        if num_classes is not None:
            net = conv2d(net,
                         num_classes,
                         filter_size=(1, 1),
                         name='logits',
                         **logits_args)
        if num_classes is not None:
            predictions = softmax(net, name='predictions', **pred_args)

        return end_points(is_training)
Example #21
def model(is_training, reuse, flexi_inputs=False):
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(activation=relu, **common_args)
    pool_args = make_args(filter_size=(2, 2), **common_args)
    logit_args = make_args(activation=None, **common_args)

    if flexi_inputs:
        inputs_shape = (None, None, None, 3)
    else:
        inputs_shape = (None, crop_size[1], crop_size[0], 3)

    net_inputs = input(inputs_shape, **common_args)
    x = net_inputs
    with tf.variable_scope('vgg_16', reuse=reuse):
        mean_rgb = tf.get_variable(name='mean_rgb',
                                   initializer=tf.truncated_normal(shape=[3]),
                                   trainable=False)
        x = x - mean_rgb
        with tf.variable_scope('conv1'):
            x = conv2d(x, 64, name='conv1_1', **conv_args)
            x = conv2d(x, 64, name='conv1_2', **conv_args)
            x = max_pool(x, name='maxpool1', **pool_args)

        with tf.variable_scope('conv2'):
            x = conv2d(x, 128, name='conv2_1', **conv_args)
            x = conv2d(x, 128, name='conv2_2', **conv_args)
            x = max_pool(x, name='maxpool2', **pool_args)

        with tf.variable_scope('conv3'):
            x = conv2d(x, 256, name='conv3_1', **conv_args)
            x = conv2d(x, 256, name='conv3_2', **conv_args)
            x = conv2d(x, 256, name='conv3_3', **conv_args)
            x = max_pool(x, name='maxpool3', **pool_args)

        with tf.variable_scope('conv4'):
            x = conv2d(x, 512, name='conv4_1', **conv_args)
            x = conv2d(x, 512, name='conv4_2', **conv_args)
            x = conv2d(x, 512, name='conv4_3', **conv_args)
            x = max_pool(x, name='maxpool4', **pool_args)

        with tf.variable_scope('conv5'):
            x = conv2d(x, 512, name='conv5_1', **conv_args)
            x = conv2d(x, 512, name='conv5_2', **conv_args)
            x = conv2d(x, 512, name='conv5_3', **conv_args)
            x = max_pool(x, name='maxpool5', **pool_args)

        x = conv2d(x,
                   4096,
                   name='fc6',
                   filter_size=(7, 7),
                   padding='VALID',
                   **conv_args)
        x = dropout(x, drop_p=0.5, name='dropout6', **common_args)

        x = conv2d(x, 4096, name='fc7', filter_size=(1, 1), **conv_args)
        x = dropout(x, drop_p=0.5, name='dropout7', **common_args)

        x = conv2d(x, 1000, name='fc8', filter_size=(1, 1), **logit_args)

    if flexi_inputs:
        logits = alias(x, name='logits', **common_args)
    else:
        logits = squeeze(x, axis=[1, 2], name='logits', **common_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(is_training)
Example #22
def vgg_16(is_training, reuse,
           num_classes=1000,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           name='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    Note: All the fully_connected layers have been transformed to conv2d layers.
          To use in classification mode, resize input to 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      spatial_squeeze: whether or not to squeeze the spatial dimensions of the
        outputs. Useful to remove unnecessary dimensions for classification.
      name: Optional name for the variables.

    Returns:
      the last op containing the log predictions and end_points dict.
    """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True, activation=prelu, w_init=initz.he_normal(
        scale=1), untie_biases=False, **common_args)
    logit_args = make_args(
        activation=None, w_init=initz.he_normal(scale=1), **common_args)
    pred_args = make_args(
        activation=prelu, w_init=initz.he_normal(scale=1), **common_args)
    pool_args = make_args(padding='SAME', **common_args)
    inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'vgg_16', [inputs]):
        net = repeat(inputs, 2, conv2d,
                     64, filter_size=(3, 3), name='conv1', **conv_args)
        net = max_pool(net, name='pool1', **pool_args)
        net = repeat(net, 2, conv2d, 128, filter_size=(
            3, 3), name='conv2', **conv_args)
        net = max_pool(net, name='pool2', **pool_args)
        net = repeat(net, 3, conv2d, 256, filter_size=(
            3, 3), name='conv3', **conv_args)
        net = max_pool(net, name='pool3', **pool_args)
        net = repeat(net, 3, conv2d, 512, filter_size=(
            3, 3), name='conv4', **conv_args)
        net = max_pool(net, name='pool4', **pool_args)
        net = repeat(net, 3, conv2d, 512, filter_size=(
            3, 3), name='conv5', **conv_args)
        net = max_pool(net, name='pool5', **pool_args)
        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(7, 7), name='fc6', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob,
                      name='dropout6', **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net, drop_p=1 - dropout_keep_prob,
                      name='dropout7', **common_args)
        logits = conv2d(net, num_classes, filter_size=(1, 1),
                        name='logits', **logit_args)
        # Convert end_points_collection into a end_point dict.
        if spatial_squeeze:
            logits = tf.squeeze(logits, [1, 2], name='logits/squeezed')
        predictions = softmax(logits, name='predictions', **pred_args)
        return end_points(is_training)
Example #23
def model(inputs,
          is_training,
          reuse,
          num_classes=5,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          name='alexnet_v2',
          **kwargs):
    """AlexNet version 2.

  Described in: http://arxiv.org/pdf/1404.5997v2.pdf
  Parameters from:
  github.com/akrizhevsky/cuda-convnet2/blob/master/layers/
  layers-imagenet-1gpu.cfg

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224. To use in fully
        convolutional mode, set spatial_squeeze to false.
        The LRN layers have been removed and the initializers changed from
        random_normal_initializer to xavier_initializer.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    name: Optional name for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
    common_args = common_layer_args(is_training, reuse)
    conv_args = make_args(batch_norm=True,
                          activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          untie_biases=False,
                          **common_args)
    logit_args = make_args(activation=None,
                           w_init=initz.he_normal(scale=1),
                           **common_args)
    pred_args = make_args(activation=prelu,
                          w_init=initz.he_normal(scale=1),
                          **common_args)
    pool_args = make_args(padding='SAME', **common_args)

    # inputs = input((None, crop_size[1], crop_size[0], 3), **common_args)
    with tf.variable_scope(name, 'alexnet_v2', [inputs]):
        net = conv2d(inputs,
                     64,
                     filter_size=(11, 11),
                     stride=(4, 4),
                     name='conv1',
                     **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool1', **pool_args)
        net = conv2d(net, 192, filter_size=(5, 5), name='conv2', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool2', **pool_args)
        net = conv2d(net, 384, name='conv3', **conv_args)
        net = conv2d(net, 384, name='conv4', **conv_args)
        net = conv2d(net, 256, name='conv5', **conv_args)
        net = max_pool(net, stride=(2, 2), name='pool5', **pool_args)

        # Use conv2d instead of fully_connected layers.
        net = conv2d(net, 4096, filter_size=(5, 5), name='fc6', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout6',
                      **common_args)
        net = conv2d(net, 4096, filter_size=(1, 1), name='fc7', **conv_args)
        net = dropout(net,
                      drop_p=1 - dropout_keep_prob,
                      name='dropout7',
                      **common_args)
        net = global_avg_pool(net)
        logits = fc(net, num_classes, name='logits', **logit_args)

        predictions = softmax(logits, name='predictions', **common_args)
        return end_points(is_training)
Example #24
def model(is_training, reuse, inputs=None):
    common_trainable_args = common_layer_args(is_training,
                                              reuse,
                                              trainable=True)
    common_frozen_args = common_layer_args(is_training, reuse, trainable=False)
    conv_args = make_args(activation=relu, **common_frozen_args)
    logit_args = make_args(activation=None, **common_trainable_args)

    common_args = common_frozen_args
    # move this down to train only a few layers
    common_args = common_trainable_args
    if inputs is None:
        net = input((None, crop_size[1], crop_size[0], 3), **common_args)
    else:
        net = inputs
    with tf.variable_scope('resnet_v1_50', reuse=reuse):
        mean_rgb = tf.get_variable(name='mean_rgb',
                                   initializer=tf.truncated_normal(shape=[3]),
                                   trainable=False)
        net = net - mean_rgb
        net = conv2d_same(net,
                          64,
                          filter_size=(7, 7),
                          stride=(2, 2),
                          name='conv1',
                          **conv_args)
        net = max_pool(net,
                       filter_size=(3, 3),
                       stride=(2, 2),
                       padding='SAME',
                       name='pool1')

        with tf.variable_scope('block1') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 256, 64, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 256, 64, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 256, 64, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)

        with tf.variable_scope('block2') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 512, 128, 1, **common_args)
            with tf.variable_scope('unit_4'):
                net = bottleneck(net, 512, 128, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)

        with tf.variable_scope('block3') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_4'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_5'):
                net = bottleneck(net, 1024, 256, 1, **common_args)
            with tf.variable_scope('unit_6'):
                net = bottleneck(net, 1024, 256, 2, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)

        with tf.variable_scope('block4') as sc:
            with tf.variable_scope('unit_1'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            with tf.variable_scope('unit_2'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            with tf.variable_scope('unit_3'):
                net = bottleneck(net, 2048, 512, 1, **common_args)
            net = collect_named_outputs(common_args['outputs_collections'],
                                        sc.name, net)

        net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
        net = conv2d(net,
                     1000,
                     filter_size=(1, 1),
                     name='logits',
                     **logit_args)
        logits = squeeze(net, axis=[1, 2], name='logits', **common_args)

    predictions = softmax(logits, name='predictions', **common_args)
    return end_points(common_args['is_training'])