Example #1
def nin(inputs,
        num_classes=10,
        is_training=True,
        restore_logits=True,
        scope=''):
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.op_scope([inputs], scope, 'nin'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm],
                          is_training=is_training):
        # conv1
        end_points['conv1'] = ops.conv2d(inputs, 192, [5, 5], scope='conv1')
        end_points['conv1_1'] = ops.conv2d(end_points['conv1'], 160, [1, 1], scope='conv1_1')
        end_points['conv1_2'] = ops.conv2d(end_points['conv1_1'], 96, [1, 1], scope='conv1_2')
        end_points['pool1'] = ops.max_pool(end_points['conv1_2'], [3, 3], stride=2,
                                           padding='SAME', scope='pool1')
        net = ops.dropout(end_points['pool1'], 0.5)
        # conv2
        end_points['conv2'] = ops.conv2d(net, 192, [5, 5], scope='conv2')
        end_points['conv2_1'] = ops.conv2d(end_points['conv2'], 192, [1, 1], scope='conv2_1')
        end_points['conv2_2'] = ops.conv2d(end_points['conv2_1'], 192, [1, 1], scope='conv2_2')
        end_points['pool2'] = ops.max_pool(end_points['conv2_2'], [3, 3], stride=2,
                                           padding='SAME', scope='pool2')
        net = ops.dropout(end_points['pool2'], 0.5)
        # conv3
        end_points['conv3'] = ops.conv2d(net, 192, [3, 3], scope='conv3')
        end_points['conv3_1'] = ops.conv2d(end_points['conv3'], 192, [1, 1], scope='conv3_1')
        end_points['conv3_2'] = ops.conv2d(end_points['conv3_1'], 10, [1, 1], scope='conv3_2')
        net = ops.avg_pool(end_points['conv3_2'], [8, 8], scope='avg_pool')
        flatten = ops.flatten(net, scope='flatten')
        # TODO: conv3_2 hard-codes 10 output channels; keep it consistent with num_classes.
        end_points['logits'] = ops.fc(flatten, num_classes, activation=None, scope='fc')

    return end_points['logits'], end_points
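
The end_points dictionary returned above is meant for external consumers such as summaries or losses. A minimal sketch of that usage, assuming a TF 1.x-style API; the images and labels tensors are hypothetical placeholders:

# Hedged usage sketch for (logits, end_points); `images` and `labels` are
# hypothetical inputs, and TF 1.x loss/summary APIs are assumed.
import tensorflow as tf

logits, end_points = nin(images, num_classes=10, is_training=True)

# Cross-entropy loss computed from the returned logits.
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

# Activation summaries built from the collected end points.
for name, activation in end_points.items():
    tf.summary.histogram(name, activation)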
Example #2
def inference(images):
  """Build the CIFAR-10 model.

  Args:
    images: Images returned from distorted_inputs() or inputs().

  Returns:
    Logits.
  """
  # We instantiate all variables using tf.get_variable() instead of
  # tf.Variable() in order to share variables across multiple GPU training runs.
  # If we only ran this model on a single GPU, we could simplify this function
  # by replacing all instances of tf.get_variable() with tf.Variable().
  #
  with scopes.arg_scope([ops.conv2d, ops.fc], stddev=0.1, bias=0.1, batch_norm_params={}):
  # with scopes.arg_scope([ops.conv2d, ops.fc], stddev=0.1, bias=0.1):
      with scopes.arg_scope([ops.conv2d], kernel_size=[3,3], padding='SAME'):
          with scopes.arg_scope([ops.max_pool], kernel_size=[3,3], padding='SAME'):
            net = ops.conv2d(images, num_filters_out=64)
            net = ops.conv2d(net, num_filters_out=64)
            net = ops.max_pool(net)
            net = ops.conv2d(net, num_filters_out=128)
            net = ops.conv2d(net, num_filters_out=128)
            net = ops.max_pool(net)
            net = ops.conv2d(net, num_filters_out=256)
            net = ops.conv2d(net, num_filters_out=256)
            net = ops.max_pool(net)
            net = ops.conv2d(net, num_filters_out=512)
            net = ops.conv2d(net, num_filters_out=512)
            net = ops.avg_pool(net, kernel_size=[3,3], padding='SAME')
            net = ops.flatten(net)
            # net = ops.fc(net, num_units_out=1024)
            # net = ops.fc(net, num_units_out=256)
            net = ops.fc(net, num_units_out=10)
            return net
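
The nested arg_scope calls above set cascading keyword-argument defaults: inner scopes inherit and can override what outer scopes provide, and explicit call-site arguments always win. Below is a minimal, self-contained sketch of that idea; it is not the slim scopes.arg_scope implementation, just an illustration of the pattern:

# Illustrative stand-in for the cascading-defaults pattern; this is not the
# slim `scopes.arg_scope` implementation, only a minimal sketch of the idea.
import contextlib
import functools

_DEFAULTS = {}

@contextlib.contextmanager
def arg_scope(funcs, **kwargs):
    saved = {}
    for f in funcs:
        saved[f] = dict(_DEFAULTS.get(f, {}))
        merged = dict(saved[f])
        merged.update(kwargs)           # inner scope overrides outer scope
        _DEFAULTS[f] = merged
    try:
        yield
    finally:
        for f, old in saved.items():
            _DEFAULTS[f] = old

def with_scoped_defaults(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        merged = dict(_DEFAULTS.get(wrapper, {}))
        merged.update(kwargs)           # explicit call-site arguments win
        return f(*args, **merged)
    return wrapper

@with_scoped_defaults
def conv2d(x, num_filters_out, kernel_size=None, padding='VALID'):
    return ('conv2d', num_filters_out, kernel_size, padding)

with arg_scope([conv2d], kernel_size=[3, 3], padding='SAME'):
    print(conv2d('images', 64))         # ('conv2d', 64, [3, 3], 'SAME')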
Example #3
def lenet(inputs,
                 dropout_keep_prob=1.0,
                 num_classes=10,
                 is_training=True,
                 restore_logits=True,
                 weight_decay=0.0005,
                 seed=1,
                 scope=''):
  """LeNet in Caffe https://github.com/BVLC/caffe/blob/master/examples/mnist/lenet_train_test.prototxt

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  print ("Warning: batch_norm_params is always None in lenet")
  end_points = {}
  with tf.name_scope(scope, 'lenet', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.fc],
                            bias=0.0, batch_norm_params=None, seed=seed):
        with scopes.arg_scope([ops.conv2d], stride=1, padding='SAME'):
          with scopes.arg_scope([ops.max_pool], stride=2, padding='SAME'):
            # 32 x 32 x 3
            end_points['conv1'] = ops.conv2d(inputs, 20, [5, 5], stride=1, stddev=0.05,
                                             weight_decay=weight_decay, seed=seed+1, scope='conv1')
            end_points['pool1'] = ops.max_pool(end_points['conv1'], [2, 2], scope='pool1')

            end_points['conv2'] = ops.conv2d(end_points['pool1'], 50, [5, 5], stride=1, stddev=0.05,
                                             weight_decay=weight_decay, seed=seed+2, scope='conv2')
            end_points['pool2'] = ops.max_pool(end_points['conv2'], [2, 2], scope='pool2')


            end_points['pool2'] = ops.flatten(end_points['pool2'], scope='flatten')
            net = ops.fc(end_points['pool2'], 500, stddev=0.048, weight_decay=weight_decay,
                                       seed = seed +3, scope='fc3')

            # Final pooling and prediction
            with tf.variable_scope('logits'):
              logits = ops.fc(net, num_classes, activation=None, stddev=0.0767, weight_decay=weight_decay,
                              scope='logits', seed = seed +5, restore=restore_logits)
              # 10
              end_points['logits'] = logits
              end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
  # There is no aux_logits for LeNet
  end_points['aux_logits'] = tf.constant(0)
  return logits, end_points
Example #4
def nin_dssm(inputs,
        num_classes,
        num_of_exs,
        is_training=True,
        restore_logits=True,
        scope=''):
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.op_scope([inputs], scope, 'nin'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm],
                          is_training=is_training):
        # conv1
        end_points['conv1'] = ops.conv2d(inputs, 192, [5, 5], scope='conv1')
        end_points['conv1_1'] = ops.conv2d(end_points['conv1'], 160, [1, 1], scope='conv1_1')
        end_points['conv1_2'] = ops.conv2d(end_points['conv1_1'], 96, [1, 1], scope='conv1_2')
        end_points['pool1'] = ops.max_pool(end_points['conv1_2'], [3, 3], stride=2,
                                           padding='SAME', scope='pool1')
        net = ops.dropout(end_points['pool1'], 0.5)
        # conv2: 96 x 16 x 16
        end_points['conv2'] = ops.conv2d(net, 192, [5, 5], scope='conv2')
        end_points['conv2_1'] = ops.conv2d(end_points['conv2'], 192, [1, 1], scope='conv2_1')
        end_points['conv2_2'] = ops.conv2d(end_points['conv2_1'], 192, [1, 1], scope='conv2_2')
        end_points['pool2'] = ops.max_pool(end_points['conv2_2'], [3, 3], stride=2,
                                           padding='SAME', scope='pool2')
        net = ops.dropout(end_points['pool2'], 0.5)
        # conv3: 192 x 8 x 8
        end_points['conv3'] = ops.conv2d(net, 192, [3, 3], scope='conv3')
        # 192 x 8 x 8
        end_points['conv3_1'] = ops.conv2d(end_points['conv3'], 192, [1, 1], scope='conv3_1')
        # 192 x 8 x 8
        # TODO: decide which layer's features to use; start with conv3_1, then
        # consider fusing conv3 and conv3_1.
        end_points['max_pool'] = ops.max_pool(end_points['conv3_1'], [8, 8], scope='max_pool')
        end_points['avg_pool'] = ops.avg_pool(end_points['conv3_1'], [8, 8], scope='avg_pool')
        end_points['hybrid_pool'] = 0.9 * end_points['max_pool'] + 0.1 * end_points['avg_pool']
        end_points['feature'] = tf.nn.l2_normalize(tf.squeeze(end_points['hybrid_pool']), dim=1)
        # Output shape: (batch_size * num_of_exs) x 192, e.g. batch_size * 3 x 192.
        imgs = tf.split(0, num_of_exs, end_points['feature'])
        anchors = imgs[0]
        positives = imgs[1]

        rst = [tf.reduce_sum(tf.mul(anchors, positives), 1)]
        for k in xrange(2, num_of_exs):
            rst.append(tf.reduce_sum(tf.mul(anchors, imgs[k]), 1))
        # batch_size x (num_of_exs - 1)

        end_points['dssm'] = tf.concat(1, [tf.expand_dims(_, -1) for _ in rst])

        end_points['conv3_2'] = ops.conv2d(end_points['conv3_1'], 10, [1, 1], scope='conv3_2')
        net = ops.avg_pool(end_points['conv3_2'], [8, 8], scope='avg_pool')
        flatten = ops.flatten(net, scope='flatten')
        # TODO: conv3_2 hard-codes 10 output channels; keep it consistent with num_classes.
        end_points['logits'] = ops.fc(flatten, num_classes, activation=None, scope='fc')

    return end_points['logits'], end_points['dssm'], end_points
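
The DSSM head above L2-normalizes the pooled features, splits them along the batch axis into anchors, positives, and negatives, and takes row-wise dot products, i.e. cosine similarities. A NumPy mirror of that computation, handy for checking shapes offline (the sizes below are made-up examples):

# NumPy mirror of the dssm similarity computation; purely illustrative.
import numpy as np

batch_size, num_of_exs, dim = 4, 3, 192          # assumed example sizes
feature = np.random.randn(batch_size * num_of_exs, dim)
feature /= np.linalg.norm(feature, axis=1, keepdims=True)   # like tf.nn.l2_normalize

imgs = np.split(feature, num_of_exs, axis=0)     # like tf.split(0, num_of_exs, ...)
anchors, positives, negatives = imgs[0], imgs[1], imgs[2:]

rst = [np.sum(anchors * positives, axis=1)]      # cosine(anchor, positive)
for neg in negatives:
    rst.append(np.sum(anchors * neg, axis=1))    # cosine(anchor, negative_k)

dssm = np.stack(rst, axis=1)                     # shape: (batch_size, num_of_exs - 1)
print(dssm.shape)                                # (4, 2)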
Example #5
 def testCreateSquareMaxPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, 3)
         self.assertEqual(output.op.name, 'MaxPool/MaxPool')
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
Example #6
 def testCreateSquareMaxPool(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.max_pool(images, 3)
     self.assertEquals(output.op.name, 'MaxPool/MaxPool')
     self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
Example #7
 def testCreateMaxPoolStrideSAME(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
         self.assertListEqual(output.get_shape().as_list(),
                              [5, height, width, 3])
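
The expected shapes in these pooling tests follow from TensorFlow's padding arithmetic: with 'SAME' padding the spatial output size is ceil(input / stride), and with 'VALID' it is ceil((input - kernel + 1) / stride). A small check of the assertions above (the earlier 1 x 1 outputs assume the op defaults of stride 2 and 'VALID' padding):

# Output-size helpers matching TensorFlow's 'SAME'/'VALID' padding arithmetic.
def out_size(in_size, kernel, stride, padding):
    if padding == 'SAME':
        return -(-in_size // stride)                     # ceil(in_size / stride)
    return -(-(in_size - kernel + 1) // stride)          # 'VALID'

print(out_size(3, 3, 1, 'SAME'))    # 3 -> the [5, height, width, 3] assertion above
print(out_size(3, 3, 2, 'VALID'))   # 1 -> consistent with the earlier [5, 1, 1, 3] tests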
Example #8
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
    """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.name_scope(scope, 'inception_v3', [inputs]):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
                # 299 x 299 x 3
                end_points['conv0'] = ops.conv2d(inputs,
                                                 32, [3, 3],
                                                 stride=2,
                                                 scope='conv0')
                # 149 x 149 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'],
                                                 32, [3, 3],
                                                 scope='conv1')
                # 147 x 147 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'],
                                                 64, [3, 3],
                                                 padding='SAME',
                                                 scope='conv2')
                # 147 x 147 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2,
                                                   scope='pool1')
                # 73 x 73 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'],
                                                 80, [1, 1],
                                                 scope='conv3')
                # 73 x 73 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'],
                                                 192, [3, 3],
                                                 scope='conv4')
                # 71 x 71 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2,
                                                   scope='pool2')
                # 35 x 35 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 35 x 35 x 256.
                with tf.variable_scope('mixed_35x35x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_35x35x256a'] = net
                # mixed_1: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_35x35x288a'] = net
                # mixed_2: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_35x35x288b'] = net
                # mixed_3: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net,
                                               384, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl,
                                                  96, [3, 3],
                                                  stride=2,
                                                  padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_17x17x768a'] = net
                # mixed4: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_17x17x768b'] = net
                # mixed_5: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_17x17x768c'] = net
                # mixed_6: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_17x17x768d'] = net
                # mixed_7: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_17x17x768e'] = net
                # Auxiliary Head logits
                aux_logits = tf.identity(end_points['mixed_17x17x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5],
                                              stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits,
                                            128, [1, 1],
                                            scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
                    aux_logits = ops.conv2d(aux_logits,
                                            768,
                                            shape[1:3],
                                            stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits,
                                        num_classes,
                                        activation=None,
                                        stddev=0.001,
                                        restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
                # Note: the scope below is kept unchanged so as not to
                # invalidate previously saved checkpoints.
                # TODO: fix the scope name when appropriate.
                with tf.variable_scope('mixed_17x17x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3,
                                               320, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3,
                                                 192, [3, 3],
                                                 stride=2,
                                                 padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
                    end_points['mixed_17x17x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat([
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat([
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat([
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat([
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(
                        [branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net,
                                       shape[1:3],
                                       padding='VALID',
                                       scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits_2048 = net
                    logits = ops.fc(net,
                                    num_classes,
                                    activation=None,
                                    scope='logits',
                                    restore=restore_logits)
                    # 1000
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(
                        logits, name='predictions')
            return logits, end_points, logits_2048
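
The names of the mixed blocks above encode their expected output shapes, and the channel counts can be checked by summing the branch depths, since the branches are concatenated along the channel axis:

# Channel-depth check for a few concatenated Inception blocks defined above;
# concatenation along axis 3 sums the branch depths.
blocks = [
    ('mixed_35x35x256a', [64, 64, 96, 32]),              # 1x1, 5x5, 3x3dbl, pool
    ('mixed_35x35x288a', [64, 64, 96, 64]),
    ('mixed_17x17x768b', [192, 192, 192, 192]),          # 1x1, 7x7, 7x7dbl, pool
    ('mixed_8x8x2048a', [320, 2 * 384, 2 * 384, 192]),   # both 3x3 branches are concats of two 384s
]
for name, depths in blocks:
    print(name, sum(depths))   # 256, 288, 768, 2048 -- matching the block names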
Example #9
def vgg(inputs,
        num_classes=1000,
        is_training=True,
        restore_logits=True,
        scope=''):
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.op_scope([inputs], scope, 'vgg'):
        with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm],
                              is_training=is_training):
            # conv1
            end_points['conv1'] = ops.repeat_op(2,
                                                inputs,
                                                ops.conv2d,
                                                64, [3, 3],
                                                scope='conv1')
            end_points['pool1'] = ops.max_pool(end_points['conv1'], [2, 2],
                                               scope='pool1')
            # conv2
            end_points['conv2'] = ops.repeat_op(2,
                                                end_points['pool1'],
                                                ops.conv2d,
                                                128, [3, 3],
                                                scope='conv2')
            end_points['pool2'] = ops.max_pool(end_points['conv2'], [2, 2],
                                               scope='pool2')
            # conv3
            end_points['conv3'] = ops.repeat_op(2,
                                                end_points['pool2'],
                                                ops.conv2d,
                                                256, [3, 3],
                                                scope='conv3')
            end_points['pool3'] = ops.max_pool(end_points['conv3'], [2, 2],
                                               scope='pool3')
            # conv4
            end_points['conv4'] = ops.repeat_op(2,
                                                end_points['pool3'],
                                                ops.conv2d,
                                                512, [3, 3],
                                                scope='conv4')
            end_points['pool4'] = ops.max_pool(end_points['conv4'], [2, 2],
                                               scope='pool4')
            # conv5
            end_points['conv5'] = ops.repeat_op(2,
                                                end_points['pool4'],
                                                ops.conv2d,
                                                512, [3, 3],
                                                scope='conv5')
            end_points['pool5'] = ops.max_pool(end_points['conv5'], [2, 2],
                                               scope='pool5')

            end_points['flatten5'] = ops.flatten(end_points['pool5'],
                                                 scope='flatten5')
            end_points['fc6'] = ops.fc(end_points['flatten5'],
                                       4096,
                                       scope='fc6')
            end_points['dropout6'] = ops.dropout(end_points['fc6'],
                                                 0.5,
                                                 scope='dropout6')
            end_points['fc7'] = ops.fc(end_points['dropout6'],
                                       4096,
                                       scope='fc7')
            end_points['dropout7'] = ops.dropout(end_points['fc7'],
                                                 0.5,
                                                 scope='dropout7')

            # Feed the dropout output into the final layer; the original code
            # passed fc7 directly, bypassing the dropout7 layer computed above.
            logits = ops.fc(end_points['dropout7'],
                            num_classes,
                            activation=None,
                            scope='fc8')
        return logits, end_points
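
ops.repeat_op applies the given op a fixed number of times, feeding each output into the next call, so end_points['conv1'] above is two stacked 3x3/64 convolutions. A minimal stand-in with the signature implied by the call sites, for illustration only (the real slim helper presumably also handles per-repetition variable scoping):

# Illustrative stand-in for ops.repeat_op; the (repetitions, inputs, op, *args,
# **kwargs) signature is inferred from the call sites above, not from the library.
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    net = inputs
    for _ in range(repetitions):
        net = op(net, *args, **kwargs)
    return net

# i.e. the conv1 block above is roughly two chained ops.conv2d(..., 64, [3, 3]) calls.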
Example #10
def resnet34(inputs,
             num_classes=1000,
             is_training=True,
             restore_logits=True,
             scope=''):
    end_points = {}
    with tf.op_scope([inputs], scope, 'resnet'):
        with scopes.arg_scope([ops.conv2d, ops.batch_norm],
                              is_training=is_training):
            # 224 x 224 x 3
            end_points['conv1'] = ops.conv2d(inputs,
                                             64, [7, 7],
                                             stride=2,
                                             scope='conv1')
            end_points['pool1'] = ops.max_pool(end_points['conv1'], [3, 3],
                                               stride=2,
                                               padding='SAME',
                                               scope='pool1')
            # 56 * 56
            # TODO: build these repeated stages with a loop (see the sketch
            # after this example).
            end_points['conv2_1'] = block34(end_points['pool1'], 64, 3,
                                            'res2_1')
            end_points['conv2_2'] = block34(end_points['conv2_1'], 64, 3,
                                            'res2_2')
            end_points['conv2_3'] = block34(end_points['conv2_2'], 64, 3,
                                            'res2_3')
            # 56 * 56
            end_points['conv3_1'] = block34(end_points['conv2_3'],
                                            128,
                                            3,
                                            'res3_1',
                                            stride=2,
                                            ex=True)
            end_points['conv3_2'] = block34(end_points['conv3_1'], 128, 3,
                                            'res3_2')
            end_points['conv3_3'] = block34(end_points['conv3_2'], 128, 3,
                                            'res3_3')
            end_points['conv3_4'] = block34(end_points['conv3_3'], 128, 3,
                                            'res3_4')
            # 28 * 28
            end_points['conv4_1'] = block34(end_points['conv3_4'],
                                            256,
                                            3,
                                            'res4_1',
                                            stride=2,
                                            ex=True)
            end_points['conv4_2'] = block34(end_points['conv4_1'], 256, 3,
                                            'res4_2')
            end_points['conv4_3'] = block34(end_points['conv4_2'], 256, 3,
                                            'res4_3')
            end_points['conv4_4'] = block34(end_points['conv4_3'], 256, 3,
                                            'res4_4')
            end_points['conv4_5'] = block34(end_points['conv4_4'], 256, 3,
                                            'res4_5')
            end_points['conv4_6'] = block34(end_points['conv4_5'], 256, 3,
                                            'res4_6')
            # 14 * 14
            end_points['conv5_1'] = block34(end_points['conv4_6'],
                                            512,
                                            3,
                                            'res5_1',
                                            stride=2,
                                            ex=True)
            end_points['conv5_2'] = block34(end_points['conv5_1'], 512, 3,
                                            'res5_2')
            end_points['conv5_3'] = block34(end_points['conv5_2'], 512, 3,
                                            'res5_3')
            #7 * 7 * 512
            end_points['avg'] = ops.avg_pool(end_points['conv5_3'], [7, 7],
                                             stride=1,
                                             padding='SAME',
                                             scope='avg_pooling')
            end_points['flatten'] = ops.flatten(end_points['avg'],
                                                scope='flatten')
            end_points['logits'] = ops.fc(end_points['flatten'],
                                          num_classes,
                                          scope='logits')

            return end_points['logits'], end_points
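
The TODO in resnet34 about building the repeated stages with a loop could look roughly like the sketch below; block34's (inputs, depth, kernel_size, scope, stride, ex) signature is inferred from the calls above, and the stage and block counts mirror them:

# Loop-based construction of the four residual stages; block34's signature is
# inferred from the calls above (inputs, depth, kernel_size, scope, stride, ex).
def resnet34_stages(net, end_points):
    stages = [(64, 3, 2), (128, 4, 3), (256, 6, 4), (512, 3, 5)]
    for i, (depth, num_blocks, stage_id) in enumerate(stages):
        for j in range(1, num_blocks + 1):
            scope = 'res%d_%d' % (stage_id, j)
            if i > 0 and j == 1:
                # First block of stages 3-5 downsamples and expands the channels.
                net = block34(net, depth, 3, scope, stride=2, ex=True)
            else:
                net = block34(net, depth, 3, scope)
            end_points['conv%d_%d' % (stage_id, j)] = net
    return net, end_points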
Example #11
 def testCreateMaxPoolStrideSAME(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
     self.assertListEqual(output.get_shape().as_list(), [5, height, width, 3])
Example #12
 def testGlobalMaxPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
Example #13
def cifar10_alexnet(inputs,
                 dropout_keep_prob=0.5,
                 num_classes=10,
                 is_training=True,
                 restore_logits=True,
                 weight_decay=0.004,
                 seed=1,
                 scope=''):
  """AlexNet on cifar10 from https://www.tensorflow.org/tutorials/deep_cnn

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  print ("Warning: batch_norm_params is always None in cifar10_alexnet")
  end_points = {}
  with tf.name_scope(scope, 'cifar10_alexnet', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.fc],
                            bias=0.0, batch_norm_params=None, seed=seed):
        with scopes.arg_scope([ops.conv2d], stride=1, padding='SAME'):
          with scopes.arg_scope([ops.max_pool], stride=2, padding='SAME'):
            # 32 x 32 x 3
            end_points['conv1'] = ops.conv2d(inputs, 64, [5, 5], stride=1, stddev=0.05,
                                             weight_decay=0.0, seed=seed+1, scope='conv1')
            end_points['pool1'] = ops.max_pool(end_points['conv1'], [3, 3], scope='pool1')
            end_points['lrn1'] = ops.lrn(end_points['pool1'], depth_radius=4, bias=1.0, alpha=0.001/9.0, beta=0.75, scope='lrn1')

            end_points['conv2'] = ops.conv2d(end_points['lrn1'], 64, [5, 5], stride=1, stddev=0.05, bias=0.1,
                                             weight_decay=0.0, seed=seed+2, scope='conv2')
            end_points['lrn2'] = ops.lrn(end_points['conv2'], depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, scope='lrn2')
            end_points['pool2'] = ops.max_pool(end_points['lrn2'], [3, 3], scope='pool2')


            end_points['pool2'] = ops.flatten(end_points['pool2'], scope='flatten')
            end_points['fc3'] = ops.fc(end_points['pool2'], 384, stddev=0.04, weight_decay=weight_decay, bias=0.1,
                                       seed = seed +3, scope='fc3')
            net = ops.fc(end_points['fc3'], 192, stddev=0.04, weight_decay=weight_decay, bias=0.1,
                         seed=seed + 4, scope='fc4')

            # Final pooling and prediction
            with tf.variable_scope('logits'):
              logits = ops.fc(net, num_classes, activation=None, stddev=1/192.0, weight_decay=0.0,
                              bias=0.0, scope='logits', seed = seed +5, restore=restore_logits)
              # 10
              end_points['logits'] = logits
              end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
  # There is no aux_logits for AlexNet
  end_points['aux_logits'] = tf.constant(0)
  return logits, end_points
Example #14
def alexnet(inputs,
                 dropout_keep_prob=0.5,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 seed=1,
                 weight_decay=0.0005,
                 scope=''):
  """AlexNet from https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  #print ("INFO: batch norm in alexnet is disabled")
  end_points = {}
  with tf.name_scope(scope, 'alexnet', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.fc],
                            weight_decay=weight_decay, stddev=0.01, bias=0.1,
                            #batch_norm_params=None,
                            weights_initializer=tf.truncated_normal_initializer):
        with scopes.arg_scope([ops.conv2d],
                              stride=1, padding='SAME'):
          with scopes.arg_scope([ops.max_pool],
                                stride=2, padding='VALID'):
            # 224 x 224 x 3
            end_points['conv1_1'] = ops.conv2d(inputs, 48, [11, 11], stride=4, bias=0.0, seed = seed +1, scope='conv1_1')
            end_points['conv1_2'] = ops.conv2d(inputs, 48, [11, 11], stride=4, bias=0.0, seed = seed +2, scope='conv1_2')
            end_points['lrn1_1'] = ops.lrn(end_points['conv1_1'], scope='lrn1_1')
            end_points['lrn1_2'] = ops.lrn(end_points['conv1_2'], scope='lrn1_2')
            end_points['pool1_1'] = ops.max_pool(end_points['lrn1_1'], [3, 3], scope='pool1_1')
            end_points['pool1_2'] = ops.max_pool(end_points['lrn1_2'], [3, 3], scope='pool1_2')

            # 27 x 27 x 48 x 2
            end_points['conv2_1'] = ops.conv2d(end_points['pool1_1'], 128, [5, 5], seed = seed +3, scope='conv2_1')
            end_points['conv2_2'] = ops.conv2d(end_points['pool1_2'], 128, [5, 5], seed = seed +4, scope='conv2_2')
            end_points['lrn2_1'] = ops.lrn(end_points['conv2_1'], scope='lrn2_1')
            end_points['lrn2_2'] = ops.lrn(end_points['conv2_2'], scope='lrn2_2')
            end_points['pool2_1'] = ops.max_pool(end_points['lrn2_1'], [3, 3], scope='pool2_1')
            end_points['pool2_2'] = ops.max_pool(end_points['lrn2_2'], [3, 3], scope='pool2_2')
            end_points['pool2'] = tf.concat([end_points['pool2_1'],end_points['pool2_2']],3)

            # 13 x 13 x 256
            end_points['conv3_1'] = ops.conv2d(end_points['pool2'], 192, [3, 3], bias=0.0, seed = seed +5, scope='conv3_1')
            end_points['conv3_2'] = ops.conv2d(end_points['pool2'], 192, [3, 3], bias=0.0, seed = seed +6, scope='conv3_2')

            # 13 x 13 x 192 x 2
            end_points['conv4_1'] = ops.conv2d(end_points['conv3_1'], 192, [3, 3], seed = seed +7, scope='conv4_1')
            end_points['conv4_2'] = ops.conv2d(end_points['conv3_2'], 192, [3, 3], seed = seed +8, scope='conv4_2')

            # 13 x 13 x 192 x 2
            end_points['conv5_1'] = ops.conv2d(end_points['conv4_1'], 128, [3, 3], seed = seed +9, scope='conv5_1')
            end_points['conv5_2'] = ops.conv2d(end_points['conv4_2'], 128, [3, 3], seed = seed +10, scope='conv5_2')
            end_points['pool5_1'] = ops.max_pool(end_points['conv5_1'], [3, 3], scope='pool5_1')
            end_points['pool5_2'] = ops.max_pool(end_points['conv5_2'], [3, 3], scope='pool5_2')
            end_points['pool5'] = tf.concat([end_points['pool5_1'], end_points['pool5_2']], 3)

            end_points['pool5'] = ops.flatten(end_points['pool5'], scope='flatten')
            end_points['fc6'] = ops.fc(end_points['pool5'], 4096, stddev=0.005, seed = seed +11, scope='fc6')
            end_points['dropout6'] = ops.dropout(end_points['fc6'], dropout_keep_prob, scope='dropout6')
            end_points['fc7'] = ops.fc(end_points['dropout6'], 4096, stddev=0.005, seed = seed +12, scope='fc7')
            net = ops.dropout(end_points['fc7'], dropout_keep_prob, scope='dropout7')

            # Final pooling and prediction
            with tf.variable_scope('logits'):
              # 4096
              logits = ops.fc(net, num_classes, activation=None, bias=0.0, seed = seed +13, scope='logits',
                              restore=restore_logits)
              # 1000
              end_points['logits'] = logits
              end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
  # There is no aux_logits for AlexNet
  end_points['aux_logits'] = tf.constant(0)
  return logits, end_points
Example #15
 def testCreateMaxPoolWithScope(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, [3, 3], scope='pool1')
         self.assertEqual(output.op.name, 'pool1/MaxPool')
Example #16
def alexnet(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for op_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.op_scope([inputs], scope, 'alexnet'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      # conv and pool will do padding
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            padding='SAME'):
        # define the initial distribution of filter weight
        with scopes.arg_scope([ops.conv2d], stddev=0.01):
          end_points['conv1'] = ops.conv2d(inputs, 96, [11, 11], stride=4,
                                           scope='conv1')
          end_points['pool1'] = ops.max_pool(end_points['conv1'], [3, 3],
                                             stride=2, scope='pool1')
          end_points['conv2'] = ops.conv2d(end_points['pool1'], 256, [5, 5],
                                           bias=1.0, scope='conv2')
          end_points['pool2'] = ops.max_pool(end_points['conv2'], [3, 3],
                                             stride=2, scope='pool2')
          end_points['conv3'] = ops.conv2d(end_points['pool2'], 384, [3, 3],
                                           scope='conv3')
          end_points['conv4'] = ops.conv2d(end_points['conv3'], 384, [3, 3],
                                           bias=1.0, scope='conv4')
          end_points['conv5'] = ops.conv2d(end_points['conv4'], 256, [3, 3],
                                           bias=1.0, scope='conv5')
          end_points['pool5'] = ops.max_pool(end_points['conv5'], [3, 3],
                                             stride=2, scope='pool5')

      # reshape the 4d tensor into 2d
      end_points['flatten'] = ops.flatten(end_points['pool5'], scope='flatten')

      # define the initial distribution of fc weight
      with scopes.arg_scope([ops.fc], stddev=0.005, bias=1.0):
        # define the dropout ratio
        with scopes.arg_scope([ops.dropout], keep_prob=dropout_keep_prob):
          end_points['fc6'] = ops.fc(end_points['flatten'], 4096, scope='fc6')
          end_points['drop6'] = ops.dropout(end_points['fc6'], scope='drop6')
          end_points['fc7'] = ops.fc(end_points['drop6'], 4096, scope='fc7')
          end_points['drop7'] = ops.dropout(end_points['fc7'], scope='drop7')
          end_points['fc8'] = ops.fc(end_points['drop7'], num_classes,
                                     activation=None,
                                     scope='fc8', restore=restore_logits)
      return end_points['fc8'], end_points
Example #17
def vgg_16(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=10,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for op_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  # Note: this overrides the dropout_keep_prob argument above.
  dropout_keep_prob = 0.4 if is_training else 1.0
  
  end_points = {}
  with tf.op_scope([inputs], scope, 'vgg_16'):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # assume input_op shape is 224x224x3
        # block 1 -- outputs 112x112x64
        end_points['conv1_1'] = ops.conv2d(inputs, 64, [3, 3], stride=1,
                                         scope='conv1_1')
        end_points['conv1_2'] = ops.conv2d(end_points['conv1_1'], 64, [3, 3],
                                         scope='conv1_2')
        end_points['pool1'] = ops.max_pool(end_points['conv1_2'], [2, 2],
                                          stride=2, scope='pool1')

        # block 2 -- outputs 56x56x128
        end_points['conv2_1'] = ops.conv2d(end_points['pool1'], 128, [3, 3],
                                         scope='conv2_1')
        end_points['conv2_2'] = ops.conv2d(end_points['conv2_1'], 128, [3, 3],
                                         scope='conv2_2')
        end_points['pool2'] = ops.max_pool(end_points['conv2_2'], [2, 2],
                                           stride=2, scope='pool2')
        # block 3 -- outputs 28x28x256
        end_points['conv3_1'] = ops.conv2d(end_points['pool2'], 256, [3, 3],
                                         scope='conv3_1')
        end_points['conv3_2'] = ops.conv2d(end_points['conv3_1'], 256, [3, 3],
                                         scope='conv3_2')
        end_points['pool3'] = ops.max_pool(end_points['conv3_2'], [2, 2],
                                           stride=2, scope='pool3')

        # block 4 -- outputs 14x14x512
        end_points['conv4_1'] = ops.conv2d(end_points['pool3'], 512, [3, 3],
                                         scope='conv4_1')
        end_points['conv4_2'] = ops.conv2d(end_points['conv4_1'], 512, [3, 3],
                                         scope='conv4_2')
        end_points['pool4'] = ops.max_pool(end_points['conv4_2'], [2, 2],
                                           stride=2, scope='pool4')

        # block 5 -- outputs 7x7x512
        end_points['conv5_1'] = ops.conv2d(end_points['pool4'], 512, [3, 3],
                                         scope='conv5_1')
        end_points['conv5_2'] = ops.conv2d(end_points['conv5_1'], 512, [3, 3],
                                         scope='conv5_2')
        end_points['pool5'] = ops.max_pool(end_points['conv5_2'], [2, 2],
                                           stride=2, scope='pool5')

        net = end_points['pool5']

        # Final pooling and prediction
        with tf.variable_scope('logits'):
          # flatten
          net = ops.flatten(net, scope='flatten')

          # fully connected
          end_points['fc6'] = ops.fc(net, 1000, activation=None, scope='fc6',
                          restore=restore_logits)
          end_points['fc6_drop'] = ops.dropout(end_points['fc6'], dropout_keep_prob, scope='fc6_drop')

          end_points['fc7'] = ops.fc(end_points['fc6_drop'], 50, activation=None, scope='fc7',
                          restore=restore_logits)
          end_points['fc7_drop'] = ops.dropout(end_points['fc7'], dropout_keep_prob, scope='fc7_drop')

          end_points['fc8'] = ops.fc(end_points['fc7_drop'], num_classes, activation=None, scope='fc8',
                          restore=restore_logits)
          end_points['fc8_drop'] = ops.dropout(end_points['fc8'], dropout_keep_prob, scope='fc8_drop')

          logits = end_points['fc8_drop']
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
Example #18
def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None,
                      is_training=True):
    """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function although they build the same
  network.

  Here is a mapping from the old_names to the new names:
  Old name          | New name
  =======================================
  conv0             | Conv2d_1a_3x3
  conv1             | Conv2d_2a_3x3
  conv2             | Conv2d_2b_3x3
  pool1             | MaxPool_3a_3x3
  conv3             | Conv2d_3b_1x1
  conv4             | Conv2d_4a_3x3
  pool2             | MaxPool_5a_3x3
  mixed_35x35x256a  | Mixed_5b
  mixed_35x35x288a  | Mixed_5c
  mixed_35x35x288b  | Mixed_5d
  mixed_17x17x768a  | Mixed_6a
  mixed_17x17x768b  | Mixed_6b
  mixed_17x17x768c  | Mixed_6c
  mixed_17x17x768d  | Mixed_6d
  mixed_17x17x768e  | Mixed_6e
  mixed_8x8x1280a   | Mixed_7a
  mixed_8x8x2048a   | Mixed_7b
  mixed_8x8x2048b   | Mixed_7c

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
      'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    scope: Optional variable_scope.
    is_training: whether the network is being built for training; forwarded to
      batch_norm and dropout via arg_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
                losses.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0
  """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}

    if depth_multiplier <= 0:
        raise ValueError('depth_multiplier is not greater than zero.')
    depth = lambda d: max(int(d * depth_multiplier), min_depth)
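    # For example, with depth_multiplier=0.5 and min_depth=16:
    # depth(32) -> 16 and depth(192) -> 96; with depth_multiplier=1.0,
    # depth(d) is simply d.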

    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
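                # With 'VALID' as the default padding here, each stride-2 or
                # un-padded 3x3 layer below shrinks the spatial size
                # (299 -> 149 -> 147 -> ... -> 35); 'SAME' padding is
                # requested per-layer where needed.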
                # 299 x 299 x 3
                end_point = 'Conv2d_1a_3x3'
                net = ops.conv2d(inputs,
                                 depth(32), [3, 3],
                                 stride=2,
                                 scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 149 x 149 x 32
                end_point = 'Conv2d_2a_3x3'
                net = ops.conv2d(net, depth(32), [3, 3], scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 147 x 147 x 32
                end_point = 'Conv2d_2b_3x3'
                net = ops.conv2d(net,
                                 depth(64), [3, 3],
                                 padding='SAME',
                                 scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 147 x 147 x 64
                end_point = 'MaxPool_3a_3x3'
                net = ops.max_pool(net, [3, 3], stride=2, scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 73 x 73 x 64
                end_point = 'Conv2d_3b_1x1'
                net = ops.conv2d(net, depth(80), [1, 1], scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 73 x 73 x 80.
                end_point = 'Conv2d_4a_3x3'
                net = ops.conv2d(net, depth(192), [3, 3], scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 71 x 71 x 192.
                end_point = 'MaxPool_5a_3x3'
                net = ops.max_pool(net, [3, 3], stride=2, scope=end_point)
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # 35 x 35 x 192.
                # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 35 x 35 x 256.
                end_point = 'Mixed_5b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(48), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(64), [5, 5],
                                              scope='Conv2d_0b_5x5')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0c_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(32), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
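                # All four branches keep the 35x35 spatial size (stride 1,
                # SAME padding), so they concatenate along the channel axis:
                # with depth_multiplier=1 that is 64 + 64 + 96 + 32 = 256
                # channels.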
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_1: 35 x 35 x 288.
                end_point = 'Mixed_5c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(48), [1, 1],
                                              scope='Conv2d_0b_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(64), [5, 5],
                                              scope='Conv_1_0c_5x5')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0c_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_2: 35 x 35 x 288.
                end_point = 'Mixed_5d'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(48), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(64), [5, 5],
                                              scope='Conv2d_0b_5x5')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0c_3x3')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_3: 17 x 17 x 768.
                end_point = 'Mixed_6a'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(384), [3, 3],
                                              stride=2,
                                              padding='VALID',
                                              scope='Conv2d_1a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(64), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(96), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(96), [3, 3],
                                              stride=2,
                                              padding='VALID',
                                              scope='Conv2d_1a_1x1')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.max_pool(net, [3, 3],
                                                stride=2,
                                                padding='VALID',
                                                scope='MaxPool_1a_3x3')
                    net = tf.concat(axis=3,
                                    values=[branch_0, branch_1, branch_2])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed4: 17 x 17 x 768.
                end_point = 'Mixed_6b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(128), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(128), [1, 7],
                                              scope='Conv2d_0b_1x7')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0c_7x1')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(128), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(128), [7, 1],
                                              scope='Conv2d_0b_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(128), [1, 7],
                                              scope='Conv2d_0c_1x7')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(128), [7, 1],
                                              scope='Conv2d_0d_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0e_1x7')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_5: 17 x 17 x 768.
                end_point = 'Mixed_6c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(160), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(160), [1, 7],
                                              scope='Conv2d_0b_1x7')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0c_7x1')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(160), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [7, 1],
                                              scope='Conv2d_0b_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [1, 7],
                                              scope='Conv2d_0c_1x7')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [7, 1],
                                              scope='Conv2d_0d_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0e_1x7')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # mixed_6: 17 x 17 x 768.
                end_point = 'Mixed_6d'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(160), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(160), [1, 7],
                                              scope='Conv2d_0b_1x7')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0c_7x1')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(160), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [7, 1],
                                              scope='Conv2d_0b_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [1, 7],
                                              scope='Conv2d_0c_1x7')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(160), [7, 1],
                                              scope='Conv2d_0d_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0e_1x7')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_7: 17 x 17 x 768.
                end_point = 'Mixed_6e'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0b_1x7')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0c_7x1')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0b_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0c_1x7')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0d_7x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0e_1x7')
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_8: 8 x 8 x 1280.
                end_point = 'Mixed_7a'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_0 = ops.conv2d(branch_0,
                                              depth(320), [3, 3],
                                              stride=2,
                                              padding='VALID',
                                              scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [1, 7],
                                              scope='Conv2d_0b_1x7')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [7, 1],
                                              scope='Conv2d_0c_7x1')
                        branch_1 = ops.conv2d(branch_1,
                                              depth(192), [3, 3],
                                              stride=2,
                                              padding='VALID',
                                              scope='Conv2d_1a_3x3')
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.max_pool(net, [3, 3],
                                                stride=2,
                                                padding='VALID',
                                                scope='MaxPool_1a_3x3')
                    net = tf.concat(axis=3,
                                    values=[branch_0, branch_1, branch_2])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points
                # mixed_9: 8 x 8 x 2048.
                end_point = 'Mixed_7b'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(320), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(384), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch_1,
                                           depth(384), [1, 3],
                                           scope='Conv2d_0b_1x3'),
                                ops.conv2d(branch_1,
                                           depth(384), [3, 1],
                                           scope='Conv2d_0b_3x1')
                            ])
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(448), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(384), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_2 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch_2,
                                           depth(384), [1, 3],
                                           scope='Conv2d_0c_1x3'),
                                ops.conv2d(branch_2,
                                           depth(384), [3, 1],
                                           scope='Conv2d_0d_3x1')
                            ])
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint: return net, end_points

                # mixed_10: 8 x 8 x 2048.
                end_point = 'Mixed_7c'
                with tf.variable_scope(end_point):
                    with tf.variable_scope('Branch_0'):
                        branch_0 = ops.conv2d(net,
                                              depth(320), [1, 1],
                                              scope='Conv2d_0a_1x1')
                    with tf.variable_scope('Branch_1'):
                        branch_1 = ops.conv2d(net,
                                              depth(384), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_1 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch_1,
                                           depth(384), [1, 3],
                                           scope='Conv2d_0b_1x3'),
                                ops.conv2d(branch_1,
                                           depth(384), [3, 1],
                                           scope='Conv2d_0c_3x1')
                            ])
                    with tf.variable_scope('Branch_2'):
                        branch_2 = ops.conv2d(net,
                                              depth(448), [1, 1],
                                              scope='Conv2d_0a_1x1')
                        branch_2 = ops.conv2d(branch_2,
                                              depth(384), [3, 3],
                                              scope='Conv2d_0b_3x3')
                        branch_2 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch_2,
                                           depth(384), [1, 3],
                                           scope='Conv2d_0c_1x3'),
                                ops.conv2d(branch_2,
                                           depth(384), [3, 1],
                                           scope='Conv2d_0d_3x1')
                            ])
                    with tf.variable_scope('Branch_3'):
                        branch_3 = ops.avg_pool(net, [3, 3],
                                                scope='AvgPool_0a_3x3')
                        branch_3 = ops.conv2d(branch_3,
                                              depth(192), [1, 1],
                                              scope='Conv2d_0b_1x1')
                    net = tf.concat(
                        axis=3,
                        values=[branch_0, branch_1, branch_2, branch_3])
                end_points[end_point] = net
                if end_point == final_endpoint:
                    return net, end_points

        raise ValueError('Unknown final endpoint %s' % final_endpoint)
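
A minimal usage sketch for inception_v3_base (assuming the same slim-style
`ops`/`scopes` modules as above and a TF1-style graph; the batch size and the
depth_multiplier value are illustrative):

# Build the network only up to 'Mixed_6e', halving most channel counts.
images = tf.random_uniform([8, 299, 299, 3])
net, end_points = inception_v3_base(images,
                                    final_endpoint='Mixed_6e',
                                    depth_multiplier=0.5,
                                    min_depth=16,
                                    is_training=False)
# `net` is the 'Mixed_6e' activation; `end_points` also holds every earlier
# endpoint, e.g. end_points['Conv2d_1a_3x3'].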
Example #19
0
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    logits: the pre-softmax output of the final fully connected layer.
    end_points: a dictionary of activations for external use, including
      'aux_logits' and 'predictions'.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
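        # The auxiliary head above is typically used only as an extra,
        # down-weighted loss during training and is discarded at inference.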
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is left unchanged so as not to invalidate
        # previously trained checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
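
A short sketch of how the returned values above are typically consumed
(hypothetical training snippet; the label tensor and the 0.4 auxiliary-loss
weight are illustrative, not part of the example):

images = tf.random_uniform([32, 299, 299, 3])
labels = tf.one_hot(tf.zeros([32], dtype=tf.int32), depth=1000)
logits, end_points = inception_v3(images, num_classes=1000, is_training=True)
main_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
# The auxiliary classifier is usually added as a down-weighted extra loss.
aux_loss = 0.4 * tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        labels=labels, logits=end_points['aux_logits']))
total_loss = main_loss + aux_loss
probabilities = end_points['predictions']  # softmax over the main logits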
Example #20
0
def testCreateMaxPoolWithScope(self):
  height, width = 3, 3
  with self.test_session():
    images = tf.random_uniform((5, height, width, 3), seed=1)
    output = ops.max_pool(images, [3, 3], scope='pool1')
    self.assertEqual(output.op.name, 'pool1/MaxPool')
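
A companion sketch in the same style (hypothetical test name; it assumes the
same ops module and test harness, where ops.max_pool accepts the stride and
padding arguments used in the examples above):

def testCreateMaxPoolOutputShape(self):
  height, width = 6, 6
  with self.test_session():
    images = tf.random_uniform((5, height, width, 3), seed=1)
    output = ops.max_pool(images, [3, 3], stride=2, padding='SAME')
    self.assertListEqual(output.get_shape().as_list(), [5, 3, 3, 3])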
Example #21
0
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):

    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.name_scope(scope, 'inception_v3', [inputs]):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
                # 299 x 299 x 3
                end_points['conv0'] = ops.conv2d(inputs,
                                                 32, [3, 3],
                                                 stride=2,
                                                 scope='conv0')
                # 149 x 149 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'],
                                                 32, [3, 3],
                                                 scope='conv1')
                # 147 x 147 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'],
                                                 64, [3, 3],
                                                 padding='SAME',
                                                 scope='conv2')
                # 147 x 147 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2,
                                                   scope='pool1')
                # 73 x 73 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'],
                                                 80, [1, 1],
                                                 scope='conv3')
                # 73 x 73 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'],
                                                 192, [3, 3],
                                                 scope='conv4')
                # 71 x 71 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2,
                                                   scope='pool2')
                # 35 x 35 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 35 x 35 x 256.
                with tf.variable_scope('mixed_35x35x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x256a'] = net
                # mixed_1: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x288a'] = net
                # mixed_2: 35 x 35 x 288.
                with tf.variable_scope('mixed_35x35x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch5x5, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_35x35x288b'] = net
                # mixed_3: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net,
                                               384, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl,
                                                  96, [3, 3],
                                                  stride=2,
                                                  padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(
                        axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
                    end_points['mixed_17x17x768a'] = net
                # mixed4: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768b'] = net
                # mixed_5: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768c'] = net
                # mixed_6: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768d'] = net
                # mixed_7: 17 x 17 x 768.
                with tf.variable_scope('mixed_17x17x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch7x7, branch7x7dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_17x17x768e'] = net
                # Auxiliary Head logits
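                # The auxiliary classifier branches off the last 17x17 block and
                # only matters at training time: its softmax loss is typically
                # added to the main loss with a small weight to improve gradient
                # flow and act as a regularizer.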
                aux_logits = tf.identity(end_points['mixed_17x17x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5],
                                              stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits,
                                            128, [1, 1],
                                            scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
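                    # Using the remaining spatial dims (5x5 for the standard
                    # 299x299 input) as the kernel with VALID padding makes this
                    # conv behave like a fully connected layer over the pooled
                    # feature map.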
                    aux_logits = ops.conv2d(aux_logits,
                                            768,
                                            shape[1:3],
                                            stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits,
                                        num_classes,
                                        activation=None,
                                        stddev=0.001,
                                        restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
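                # Grid-size reduction: both conv branches and the max pool use
                # stride 2 with VALID padding, so concatenating them takes the
                # grid from 17x17 down to 8x8 while widening the channels.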
                # Note: the scope name below is left unchanged (it still says
                # 17x17 even though the output grid is 8x8) so as not to
                # invalidate previously saved checkpoints.
                # (TODO) Fix the scope when appropriate.
                with tf.variable_scope('mixed_17x17x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3,
                                               320, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3,
                                                 192, [3, 3],
                                                 stride=2,
                                                 padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat(
                        axis=3, values=[branch3x3, branch7x7x3, branch_pool])
                    end_points['mixed_17x17x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
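                # In the two 8x8 blocks below, each 3x3 branch is split into
                # parallel 1x3 and 3x1 convolutions whose outputs are
                # concatenated, widening the filter bank on the coarsest grid.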
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3, 384, [1, 3]),
                                ops.conv2d(branch3x3, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                ops.conv2d(branch3x3dbl, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch3x3, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3, 384, [1, 3]),
                                ops.conv2d(branch3x3, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat(
                            axis=3,
                            values=[
                                ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                ops.conv2d(branch3x3dbl, 384, [3, 1])
                            ])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat(axis=3,
                                    values=[
                                        branch1x1, branch3x3, branch3x3dbl,
                                        branch_pool
                                    ])
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net,
                                       shape[1:3],
                                       padding='VALID',
                                       scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits = ops.fc(net,
                                    num_classes,
                                    activation=None,
                                    scope='logits',
                                    restore=restore_logits)
                    # 1000
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(
                        logits, name='predictions')
            return logits, end_points
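
# A minimal usage sketch, not part of the original example. Assumptions: the
# builder above is available here as `inception_v3` (its def is outside this
# excerpt) and this runs under the same TF 1.x environment as the example.
# It shows the usual training-time combination of the two heads: the auxiliary
# softmax loss is added with a small weight (0.4, following the Inception v3
# paper).
import tensorflow as tf

images = tf.random_uniform((8, 299, 299, 3))
labels = tf.one_hot([0] * 8, depth=1000)
logits, end_points = inception_v3(images, num_classes=1000, is_training=True)
main_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
aux_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                            logits=end_points['aux_logits']))
total_loss = main_loss + 0.4 * aux_loss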
 def testGlobalMaxPool(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
     self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
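
  # A companion sketch, not part of the original example: global *average*
  # pooling works the same way, assuming ops.avg_pool shares max_pool's
  # (inputs, kernel_size, stride) interface as it does elsewhere in these
  # examples -- using the spatial dims as the kernel collapses each map to 1x1.
  def testGlobalAvgPool(self):
    height, width = 3, 3
    with self.test_session():
      images = tf.random_uniform((5, height, width, 3), seed=1)
      output = ops.avg_pool(images, images.get_shape()[1:3], stride=1)
      self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])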
Example #23
0
def inception_v1(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
    """inception v1
  Returns:
    a list like ('logits', ) of Tensors.
  """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.op_scope([inputs], scope, 'InceptionV1'):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            # These ops take stddev rather than a weights_initializer argument;
            # stddev=0.01 plays the role of a trunc_normal(0.01) initializer.
            with scopes.arg_scope([ops.conv2d, ops.fc], stddev=0.01):
                with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                      stride=1,
                                      padding='SAME'):
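                    # Inside this arg_scope every conv2d/max_pool/avg_pool call
                    # defaults to stride 1 and SAME padding unless a call
                    # overrides them explicitly (as the stride-2 stem layers
                    # below do).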
                    end_point = 'Conv2d_1a_7x7'
                    net = ops.conv2d(inputs,
                                     64, [7, 7],
                                     stride=2,
                                     scope=end_point)
                    end_points[end_point] = net
                    end_point = 'MaxPool_2a_3x3'
                    net = ops.max_pool(net, [3, 3], stride=2, scope=end_point)
                    end_points[end_point] = net
                    end_point = 'Conv2d_2b_1x1'
                    net = ops.conv2d(net, 64, [1, 1], scope=end_point)
                    end_points[end_point] = net
                    end_point = 'Conv2d_2c_3x3'
                    net = ops.conv2d(net, 192, [3, 3], scope=end_point)
                    end_points[end_point] = net
                    end_point = 'MaxPool_3a_3x3'
                    net = ops.max_pool(net, [3, 3], stride=2, scope=end_point)
                    end_points[end_point] = net

                    end_point = 'Mixed_3b'
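                    # Each Mixed_* block below is a four-branch Inception module:
                    # a 1x1 conv, a 1x1 -> 3x3 stack, a narrower 1x1 -> 3x3
                    # stack, and a 3x3 max pool followed by a 1x1 projection;
                    # the branch outputs are concatenated along the channel axis.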
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  64, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  96, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  128, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  16, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  32, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            print('net shape before 3x3 max pool is: ' +
                                  str(net.get_shape()))
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            print('net shape after max pool is: ' +
                                  str(branch_3.get_shape()))
                            branch_3 = ops.conv2d(branch_3,
                                                  32, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_3c'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  128, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  128, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  192, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  32, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  96, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  64, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'MaxPool_4a_3x3'
                    net = ops.max_pool(net, [3, 3], stride=2, scope=end_point)
                    end_points[end_point] = net

                    end_point = 'Mixed_4b'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  192, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  96, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  208, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  16, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  48, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  64, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_4c'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  160, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  112, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  224, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  24, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  64, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  64, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_4d'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  128, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  128, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  256, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  24, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  64, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  64, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_4e'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  112, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  144, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  288, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  32, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  64, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  64, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_4f'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  256, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  160, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  320, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  32, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  128, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  128, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'MaxPool_5a_2x2'
                    net = ops.max_pool(net, [2, 2], stride=2, scope=end_point)
                    end_points[end_point] = net

                    end_point = 'Mixed_5b'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  256, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  160, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  320, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  32, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  128, [3, 3],
                                                  scope='Conv2d_0a_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  128, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    end_point = 'Mixed_5c'
                    with tf.variable_scope(end_point):
                        with tf.variable_scope('Branch_0'):
                            branch_0 = ops.conv2d(net,
                                                  384, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                        with tf.variable_scope('Branch_1'):
                            branch_1 = ops.conv2d(net,
                                                  192, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_1 = ops.conv2d(branch_1,
                                                  384, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_2'):
                            branch_2 = ops.conv2d(net,
                                                  48, [1, 1],
                                                  scope='Conv2d_0a_1x1')
                            branch_2 = ops.conv2d(branch_2,
                                                  128, [3, 3],
                                                  scope='Conv2d_0b_3x3')
                        with tf.variable_scope('Branch_3'):
                            branch_3 = ops.max_pool(net, [3, 3],
                                                    scope='MaxPool_0a_3x3')
                            branch_3 = ops.conv2d(branch_3,
                                                  128, [1, 1],
                                                  scope='Conv2d_0b_1x1')
                        net = tf.concat(
                            3, [branch_0, branch_1, branch_2, branch_3])
                    end_points[end_point] = net

                    # Final pooling and prediction
                    with tf.variable_scope('Logits'):
                        # TODO: stop relying on the arg_scope defaults here and
                        # pass an explicit weights_initializer (most likely
                        # xavier_initializer()) instead.
                        net = ops.avg_pool(net, [7, 7],
                                           stride=1,
                                           scope='AvgPool_0a_7x7',
                                           padding='VALID')
                        net = ops.dropout(net,
                                          dropout_keep_prob,
                                          scope='Dropout_0b')
                        logits = ops.conv2d(net,
                                            num_classes, [1, 1],
                                            activation=None,
                                            batch_norm_params=None,
                                            scope='Conv2d_0c_1x1')
                        logits = tf.squeeze(logits, [1, 2],
                                            name='SpatialSqueeze')
                        end_points['Logits'] = logits
                        end_points['Predictions'] = tf.nn.softmax(
                            logits, name='Predictions')
                for v in tf.all_variables():
                    print(v.name)
                return logits, end_points
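
# A minimal usage sketch, not part of the original example. Assumptions: the
# function above is importable as `inception_v1` and runs under the same
# TF 1.x environment. It builds the graph for a batch of 224x224 images and
# reads both the logits and a named intermediate activation from end_points.
import tensorflow as tf

images = tf.random_uniform((4, 224, 224, 3))
logits, end_points = inception_v1(images, num_classes=1000, is_training=False)
print(logits.get_shape())                  # (4, 1000)
print(end_points['Mixed_4c'].get_shape())  # 14x14 spatial grid for 224x224 inputs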