Example #1
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithoutWD(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     ops.conv2d(images, 32, [3, 3], weight_decay=0)
     self.assertEquals(
         tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES), [])
Example #2
File: ops_test.py  Project: trigrass2/Note
 def testNonReuseVars(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     ops.conv2d(images, 32, [3, 3])
     self.assertEquals(len(variables.get_variables()), 2)
     ops.conv2d(images, 32, [3, 3])
     self.assertEquals(len(variables.get_variables()), 4)
Example #3
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvCreatesWeightsAndBiasesVars(self):
   height, width = 3, 3
   images = tf.random_uniform((5, height, width, 3), seed=1)
   with self.test_session():
     self.assertFalse(variables.get_variables('conv1/weights'))
     self.assertFalse(variables.get_variables('conv1/biases'))
     ops.conv2d(images, 32, [3, 3], scope='conv1')
     self.assertTrue(variables.get_variables('conv1/weights'))
     self.assertTrue(variables.get_variables('conv1/biases'))
Example #4
File: ops_test.py  Project: trigrass2/Note
 def testReuseConvWithBatchNorm(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 32), seed=1)
     with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
       net = ops.conv2d(images, 32, [3, 3], scope='Conv')
       net = ops.conv2d(net, 32, [3, 3], scope='Conv', reuse=True)
     self.assertEquals(len(variables.get_variables()), 4)
     self.assertEquals(len(variables.get_variables('Conv/BatchNorm')), 3)
     self.assertEquals(len(variables.get_variables('Conv_1/BatchNorm')), 0)
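A hedged note on the counts asserted above: in this slim-style library, passing batch_norm_params to conv2d appears to drop the conv bias and instead create the batch-norm statistics under Conv/BatchNorm, so one weights variable plus three batch-norm variables give four in total. The sketch below, which assumes the same ops, scopes and variables modules as the tests, simply lists the created variable names so the count can be checked by eye:

def list_conv_batchnorm_variables():
  """Hypothetical helper (not part of ops_test.py): print the variables
  created by a single conv2d call wrapped in batch_norm_params."""
  images = tf.random_uniform((5, 3, 3, 32), seed=1)
  with scopes.arg_scope([ops.conv2d], batch_norm_params={'decay': 0.9}):
    ops.conv2d(images, 32, [3, 3], scope='Conv')
  for var in variables.get_variables():
    # Expected, assuming the usual slim conventions: Conv/weights plus
    # Conv/BatchNorm/{beta, moving_mean, moving_variance}; no Conv/biases.
    print(var.op.name)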
Example #5
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithWD(self):
   height, width = 3, 3
   with self.test_session() as sess:
     images = tf.random_uniform((5, height, width, 3), seed=1)
     ops.conv2d(images, 32, [3, 3], weight_decay=0.01)
     wd = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)[0]
     self.assertEquals(wd.op.name,
                       'Conv/weights/Regularizer/L2Regularizer/value')
     sess.run(tf.initialize_all_variables())
     self.assertTrue(sess.run(wd) <= 0.01)
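The op name checked above suggests the collected term is an L2 regularizer on the conv weights. Assuming it follows the usual TensorFlow convention (weight_decay * sum(w ** 2) / 2, i.e. weight_decay * tf.nn.l2_loss(weights)), small initial weights keep the value below the 0.01 bound asserted at the end. A small reference sketch of that arithmetic:

import numpy as np

def l2_regularizer_value(weights, weight_decay=0.01):
  # Hypothetical reference computation, assuming the regularizer is
  # weight_decay * sum(w ** 2) / 2 (the tf.nn.l2_loss convention).
  return weight_decay * np.sum(np.square(weights)) / 2.0

# 3x3x3 kernels for 32 output channels, initialized with a small stddev,
# give a penalty far below the 0.01 bound used in the test above.
w = np.random.normal(0.0, 0.01, size=(3, 3, 3, 32))
print(l2_regularizer_value(w))  # on the order of 1e-4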
Example #6
File: ops_test.py  Project: trigrass2/Note
 def testReuseConvWithWD(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1')
     self.assertEquals(len(variables.get_variables()), 2)
     self.assertEquals(
         len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
     ops.conv2d(images, 32, [3, 3], weight_decay=0.01, scope='conv1',
                reuse=True)
     self.assertEquals(len(variables.get_variables()), 2)
     self.assertEquals(
         len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)), 1)
Example #7
File: ops_test.py  Project: trigrass2/Note
 def testCreateFullyConv(self):
   height, width = 6, 6
   with self.test_session():
     images = tf.random_uniform((5, height, width, 32), seed=1)
     output = ops.conv2d(images, 64, images.get_shape()[1:3], padding='VALID')
     self.assertEquals(output.op.name, 'Conv/Relu')
     self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 64])
Example #8
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithTensorShape(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.conv2d(images, 32, images.get_shape()[1:3])
     self.assertEquals(output.op.name, 'Conv/Relu')
     self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
Example #9
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithStride(self):
   height, width = 6, 6
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.conv2d(images, 32, [3, 3], stride=2)
     self.assertEquals(output.op.name, 'Conv/Relu')
     self.assertListEqual(output.get_shape().as_list(),
                          [5, height/2, width/2, 32])
Example #10
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvValid(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.conv2d(images, 32, [3, 3], padding='VALID')
     self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 32])
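The expected shapes in the stride and VALID tests above follow the standard convolution output-size rules, stated here as an assumption about how this conv2d pads: SAME padding gives ceil(input / stride) per spatial dimension, while VALID gives floor((input - kernel) / stride) + 1. A tiny sketch of that arithmetic:

import math

def same_output_size(size, stride=1):
  # SAME padding: output spatial size is ceil(input / stride).
  return int(math.ceil(size / float(stride)))

def valid_output_size(size, kernel, stride=1):
  # VALID padding: output spatial size is floor((input - kernel) / stride) + 1.
  return (size - kernel) // stride + 1

print(same_output_size(6, stride=2))    # 3, as in Example #9
print(valid_output_size(3, kernel=3))   # 1, as in Example #10
print(valid_output_size(6, kernel=6))   # 1, as in Example #7 (fully convolutional)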
Example #11
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithoutActivation(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.conv2d(images, 32, [3, 3], activation=None)
     self.assertEquals(output.op.name, 'Conv/BiasAdd')
Example #12
File: ops_test.py  Project: trigrass2/Note
 def testCreateConvWithScope(self):
   height, width = 3, 3
   with self.test_session():
     images = tf.random_uniform((5, height, width, 3), seed=1)
     output = ops.conv2d(images, 32, [3, 3], scope='conv1')
     self.assertEquals(output.op.name, 'conv1/Relu')
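Taken together, the twelve test snippets above cover most of the keyword arguments this slim-style conv2d accepts. A single hedged call (not from ops_test.py) collecting the options the tests exercise:

images = tf.random_uniform((5, 6, 6, 3), seed=1)
net = ops.conv2d(images,
                 32, [3, 3],          # output depth and kernel size
                 stride=2,            # Example #9
                 padding='VALID',     # Examples #7 and #10
                 weight_decay=0.01,   # Examples #5 and #6
                 activation=None,     # Example #11
                 scope='conv_demo')   # Examples #3 and #12
# reuse=True (Examples #4 and #6) and batch_norm_params via arg_scope
# (Example #4) can be combined with a call like this in the same way.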
Example #13
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope='',
                 reuse=None):
    """Latest Inception from http://arxiv.org/abs/1512.00567.

      "Rethinking the Inception Architecture for Computer Vision"

      Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
      Zbigniew Wojna

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      dropout_keep_prob: dropout keep_prob.
      num_classes: number of predicted classes.
      is_training: whether the model is being trained or not.
      restore_logits: whether or not the logits layers should be restored.
        Useful for fine-tuning a model with different num_classes.
      scope: Optional scope for op_scope.
      reuse: whether to reuse weights or not (used for evaluation).

    Returns:
      a tuple of the 'logits' Tensor and a dictionary of end point activations
      (including 'aux_logits' and 'predictions').
    """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.name_scope(scope, 'inception_v3', [inputs]):
        with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                              is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1, padding='VALID'):
                # 256 x 256 x 3
                end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                                 scope='conv0')
                # 128 x 128 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                                 scope='conv1')
                # 128 x 128 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                                 padding='SAME', scope='conv2')
                # 128 x 128 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2, scope='pool1')
                # 64 x 64 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                                 scope='conv3')
                # 64 x 64 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                                 scope='conv4')
                # 64 x 64 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2, scope='pool2')
                # 32 x 32 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1, padding='SAME'):
                # mixed: 32 x 32 x 256.
                with tf.variable_scope('mixed_32x32x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat_v2([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x256a'] = net
                # mixed_1: 32 x 32 x 288.
                with tf.variable_scope('mixed_32x32x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat_v2([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x288a'] = net
                # mixed_2: 32 x 32 x 288.
                with tf.variable_scope('mixed_32x32x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat_v2([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x288b'] = net
                # mixed_3: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                                  stride=2, padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
                    net = tf.concat_v2([branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_16x16x768a'] = net
                # mixed4: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768b'] = net
                # mixed_5: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768c'] = net
                # mixed_6: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768d'] = net
                # mixed_7: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768e'] = net
                # Auxiliary Head logits
                aux_logits = tf.identity(end_points['mixed_16x16x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
                    aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                                        stddev=0.001, restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
                # Note that the scope below is left unchanged so as not to
                # invalidate previously saved checkpoints.
                # (TODO) Fix the scope when appropriate.
                with tf.variable_scope('mixed_16x16x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                                 stride=2, padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
                    net = tf.concat_v2([branch3x3, branch7x7x3, branch_pool], 3)
                    end_points['mixed_16x16x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat_v2([ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat_v2([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat_v2([ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat_v2([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits = ops.fc(net, num_classes, activation=None, scope='logits',
                                    restore=restore_logits)
                    # 2
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
            return logits, end_points
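A hedged usage sketch for the function above, assuming the session-style workflow the tests use (tf.initialize_all_variables matches the TensorFlow version implied by Example #5); the batch size and the 256 x 256 input, which matches the size comments in the body, are illustrative only:

# Build the graph and run one forward pass (illustrative only).
images = tf.random_uniform((8, 256, 256, 3), seed=1)
logits, end_points = inception_v3(images, num_classes=1000, is_training=False)
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    predictions = sess.run(end_points['predictions'])
    print(predictions.shape)  # (8, 1000)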
Example #14
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope='',
                 reuse=None):
    """Latest Inception from http://arxiv.org/abs/1512.00567.

      "Rethinking the Inception Architecture for Computer Vision"

      Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
      Zbigniew Wojna

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      dropout_keep_prob: dropout keep_prob.
      num_classes: number of predicted classes.
      is_training: whether the model is being trained or not.
      restore_logits: whether or not the logits layers should be restored.
        Useful for fine-tuning a model with different num_classes.
      scope: Optional scope for op_scope.
      reuse: whether to reuse weights or not (used for evaluation).

    Returns:
      a tuple of the 'logits' Tensor and a dictionary of end point activations
      (including 'aux_logits' and 'predictions').
    """
    # end_points will collect relevant activations for external use, for example
    # summaries or losses.
    end_points = {}
    with tf.name_scope(scope, 'inception_v3', [inputs]):
        with scopes.arg_scope(
            [ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                is_training=is_training):
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='VALID'):
                # 256 x 256 x 3
                end_points['conv0'] = ops.conv2d(inputs,
                                                 32, [3, 3],
                                                 stride=2,
                                                 scope='conv0')
                # 128 x 128 x 32
                end_points['conv1'] = ops.conv2d(end_points['conv0'],
                                                 32, [3, 3],
                                                 scope='conv1')
                # 128 x 128 x 32
                end_points['conv2'] = ops.conv2d(end_points['conv1'],
                                                 64, [3, 3],
                                                 padding='SAME',
                                                 scope='conv2')
                # 128 x 128 x 64
                end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                                   stride=2,
                                                   scope='pool1')
                # 64 x 64 x 64
                end_points['conv3'] = ops.conv2d(end_points['pool1'],
                                                 80, [1, 1],
                                                 scope='conv3')
                # 64 x 64 x 80.
                end_points['conv4'] = ops.conv2d(end_points['conv3'],
                                                 192, [3, 3],
                                                 scope='conv4')
                # 64 x 64 x 192.
                end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                                   stride=2,
                                                   scope='pool2')
                # 32 x 32 x 192.
                net = end_points['pool2']
            # Inception blocks
            with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                                  stride=1,
                                  padding='SAME'):
                # mixed: 32 x 32 x 256.
                with tf.variable_scope('mixed_32x32x256a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x256a'] = net
                # mixed_1: 32 x 32 x 288.
                with tf.variable_scope('mixed_32x32x288a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x288a'] = net
                # mixed_2: 32 x 32 x 288.
                with tf.variable_scope('mixed_32x32x288b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 64, [1, 1])
                    with tf.variable_scope('branch5x5'):
                        branch5x5 = ops.conv2d(net, 48, [1, 1])
                        branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_32x32x288b'] = net
                # mixed_3: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net,
                                               384, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 64, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
                        branch3x3dbl = ops.conv2d(branch3x3dbl,
                                                  96, [3, 3],
                                                  stride=2,
                                                  padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat_v2([branch3x3, branch3x3dbl, branch_pool],
                                       3)
                    end_points['mixed_16x16x768a'] = net
                # mixed4: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 128, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 128, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768b'] = net
                # mixed_5: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768c'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768c'] = net
                # mixed_6: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768d'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 160, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 160, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768d'] = net
                # mixed_7: 16 x 16 x 768.
                with tf.variable_scope('mixed_16x16x768e'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 192, [1, 1])
                    with tf.variable_scope('branch7x7'):
                        branch7x7 = ops.conv2d(net, 192, [1, 1])
                        branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
                        branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
                    with tf.variable_scope('branch7x7dbl'):
                        branch7x7dbl = ops.conv2d(net, 192, [1, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
                        branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
                    end_points['mixed_16x16x768e'] = net
                # Auxiliary Head logits
                aux_logits = tf.identity(end_points['mixed_16x16x768e'])
                with tf.variable_scope('aux_logits'):
                    aux_logits = ops.avg_pool(aux_logits, [5, 5],
                                              stride=3,
                                              padding='VALID')
                    aux_logits = ops.conv2d(aux_logits,
                                            128, [1, 1],
                                            scope='proj')
                    # Shape of feature map before the final layer.
                    shape = aux_logits.get_shape()
                    aux_logits = ops.conv2d(aux_logits,
                                            768,
                                            shape[1:3],
                                            stddev=0.01,
                                            padding='VALID')
                    aux_logits = ops.flatten(aux_logits)
                    aux_logits = ops.fc(aux_logits,
                                        num_classes,
                                        activation=None,
                                        stddev=0.001,
                                        restore=restore_logits)
                    end_points['aux_logits'] = aux_logits
                # mixed_8: 8 x 8 x 1280.
                # Note that the scope below is left unchanged so as not to
                # invalidate previously saved checkpoints.
                # (TODO) Fix the scope when appropriate.
                with tf.variable_scope('mixed_16x16x1280a'):
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 192, [1, 1])
                        branch3x3 = ops.conv2d(branch3x3,
                                               320, [3, 3],
                                               stride=2,
                                               padding='VALID')
                    with tf.variable_scope('branch7x7x3'):
                        branch7x7x3 = ops.conv2d(net, 192, [1, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
                        branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
                        branch7x7x3 = ops.conv2d(branch7x7x3,
                                                 192, [3, 3],
                                                 stride=2,
                                                 padding='VALID')
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.max_pool(net, [3, 3],
                                                   stride=2,
                                                   padding='VALID')
                    net = tf.concat_v2([branch3x3, branch7x7x3, branch_pool],
                                       3)
                    end_points['mixed_16x16x1280a'] = net
                # mixed_9: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048a'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat_v2([
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat_v2([
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048a'] = net
                # mixed_10: 8 x 8 x 2048.
                with tf.variable_scope('mixed_8x8x2048b'):
                    with tf.variable_scope('branch1x1'):
                        branch1x1 = ops.conv2d(net, 320, [1, 1])
                    with tf.variable_scope('branch3x3'):
                        branch3x3 = ops.conv2d(net, 384, [1, 1])
                        branch3x3 = tf.concat_v2([
                            ops.conv2d(branch3x3, 384, [1, 3]),
                            ops.conv2d(branch3x3, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch3x3dbl'):
                        branch3x3dbl = ops.conv2d(net, 448, [1, 1])
                        branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
                        branch3x3dbl = tf.concat_v2([
                            ops.conv2d(branch3x3dbl, 384, [1, 3]),
                            ops.conv2d(branch3x3dbl, 384, [3, 1])
                        ], 3)
                    with tf.variable_scope('branch_pool'):
                        branch_pool = ops.avg_pool(net, [3, 3])
                        branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
                    net = tf.concat_v2(
                        [branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
                    end_points['mixed_8x8x2048b'] = net
                # Final pooling and prediction
                with tf.variable_scope('logits'):
                    shape = net.get_shape()
                    net = ops.avg_pool(net,
                                       shape[1:3],
                                       padding='VALID',
                                       scope='pool')
                    # 1 x 1 x 2048
                    net = ops.dropout(net, dropout_keep_prob, scope='dropout')
                    net = ops.flatten(net, scope='flatten')
                    # 2048
                    logits = ops.fc(net,
                                    num_classes,
                                    activation=None,
                                    scope='logits',
                                    restore=restore_logits)
                    # 2
                    end_points['logits'] = logits
                    end_points['predictions'] = tf.nn.softmax(
                        logits, name='predictions')
            return logits, end_points
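As the comment at the top of inception_v3 notes, end_points collects intermediate activations for external use such as summaries or losses. A hedged sketch of inspecting a few of them by the scope names used in the body above:

images = tf.random_uniform((8, 256, 256, 3), seed=1)
logits, end_points = inception_v3(images, num_classes=1000, is_training=True)
# Each entry is keyed by the scope name used when it was created.
for name in ('conv0', 'pool2', 'mixed_16x16x768e', 'aux_logits', 'logits'):
    print(name, end_points[name].get_shape().as_list())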