Code Example #1
File: ops_test.py  Project: chrifer7/inf659-inception
 def testCreateAvgPoolStrideSAME(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.avg_pool(images, [3, 3], stride=1, padding='SAME')
         self.assertListEqual(output.get_shape().as_list(),
                              [5, height, width, 3])
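For reference, ops.avg_pool in these tests is a thin slim-style wrapper; a rough TF 1.x equivalent of the call above (an approximation for illustration, not the wrapper's actual source) is:

import tensorflow as tf

# Approximate stand-in for ops.avg_pool(images, [3, 3], stride=1, padding='SAME').
images = tf.random_uniform((5, 3, 3, 3), seed=1)
output = tf.nn.avg_pool(images,
                        ksize=[1, 3, 3, 1],    # [batch, height, width, channels]
                        strides=[1, 1, 1, 1],
                        padding='SAME')
# 'SAME' padding with stride 1 preserves the spatial size, so output is (5, 3, 3, 3).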
Code Example #2
File: ops_test.py  Project: chrifer7/inf659-inception
 def testCreateSquareAvgPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.avg_pool(images, 3)
         self.assertEqual(output.op.name, 'AvgPool/AvgPool')
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
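The asserted [5, 1, 1, 3] shape follows directly from the 'VALID' pooling arithmetic; a quick sanity check in plain Python, independent of the ops module:

# Output size under 'VALID' padding: out = floor((in_size - kernel) / stride) + 1.
# Here in_size == kernel == 3, so the spatial output is 1 for any positive stride,
# matching the asserted shape [5, 1, 1, 3].
def valid_pool_out(in_size, kernel, stride):
    return (in_size - kernel) // stride + 1

assert valid_pool_out(3, 3, 1) == 1
assert valid_pool_out(3, 3, 2) == 1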
Code Example #3
 def _pool_layer(self, inputs, scope):
     # self.layer_list.append("pool")
     return ops.avg_pool(  # try avg pool
         inputs=inputs,
         pool_size=self.pool_size,
         padding=self.padding,
         scope=scope + '/pool',
         data_format=self.data_format)
Code Example #4
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether the network is being trained.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a tuple of the 'logits' Tensor and the end_points dict, which also contains
    the 'aux_logits' and 'predictions' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is not changed, so as not to invalidate
        # previously saved checkpoints.
        # TODO: Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool])
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
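A minimal usage sketch for inception_v3 as defined above (the import path is an assumption based on the usual inception/slim layout and may differ in this project):

import tensorflow as tf
# Assumed import; adjust to the actual module that defines inception_v3.
from inception.slim import inception_model as inception

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
logits, end_points = inception.inception_v3(images,
                                            num_classes=1000,
                                            is_training=False)
# end_points exposes intermediate activations (e.g. 'mixed_17x17x768e'),
# the auxiliary classifier under 'aux_logits', and the softmax under 'predictions'.
predictions = end_points['predictions']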
Code Example #5
def create_network(X, h, keep_prob, numClasses):
    """
    Builds the residual network used in this example.
    :param X: input tensor of shape [batch_size, height, width, channels]
    :param h: number of units in the first fully connected layer (FC1)
    :param keep_prob: dropout keep probability
    :param numClasses: number of output classes
    :return: the output of the final fully connected layer (FC2)
    """
    num_channels = X.get_shape().as_list()[-1]
    res1 = new_conv_layer(inputs=X,
                          layer_name='res1',
                          stride=2,
                          num_inChannel=num_channels,
                          filter_size=4,
                          num_filters=32,
                          batch_norm=True,
                          use_relu=True)

    #res1 = max_pool(res1, ksize=2, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 32,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 64,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 64,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 64,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=48,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 128,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 128,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 128,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 128,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=64,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 256,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 256,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 256,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 256,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 256,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 256,
                                 block_name='res5a',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 512,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 512,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        print('Res5c')
        print(res5c.get_shape())

        res5c = avg_pool(res5c, ksize=4, stride=1, name='res5_avg_pool')
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c.get_shape())
        print('---------------------')

    net_flatten, _ = flatten_layer(res5c)
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net = fc_layer(net_flatten,
                   h,
                   'FC1',
                   batch_norm=True,
                   add_reg=True,
                   use_relu=True)
    net = dropout(net, keep_prob)
    net = fc_layer(net,
                   numClasses,
                   'FC2',
                   batch_norm=True,
                   add_reg=True,
                   use_relu=False)

    return net
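A hypothetical call for this network; the input resolution and hyper-parameters below are illustrative assumptions, not values taken from the original project:

# Illustrative shapes and values (assumptions).
X = tf.placeholder(tf.float32, [None, 64, 64, 3])
keep_prob = tf.placeholder(tf.float32)   # dropout keep probability
logits = create_network(X, h=128, keep_prob=keep_prob, numClasses=10)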
Code Example #6
def create_network(X, numClasses, is_train):
    """
    Building the Residual Network with 50 layer
    :param X: input
    :param h: number of units in the fully connected layer
    :param keep_prob: dropout rate
    :param numClasses: number of classes
    :param is_train: to be used by batch normalization
    :return:
    """
    res1 = conv_2d(X,
                   layer_name='res1',
                   stride=2,
                   filter_size=7,
                   num_filters=64,
                   is_train=is_train,
                   batch_norm=True,
                   use_relu=True)
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    res1 = max_pool(res1, ksize=3, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 is_train,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 is_train,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 is_train,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 is_train,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=128,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 is_train,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 is_train,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 is_train,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 is_train,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=256,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 is_train,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 is_train,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 is_train,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 is_train,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 is_train,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 is_train,
                                 block_name='res5a',
                                 s1=2,
                                 k1=1,
                                 nf1=512,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 is_train,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 is_train,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        # res5c: [batch_size, 8, 8, 2048]
        print('Res5c')
        print(res5c.get_shape())
        k_size = res5c.get_shape().as_list()[1]
        num_filters = res5c.get_shape().as_list()[-1]

        f_map = tf.reshape(res5c, [-1, k_size * k_size, num_filters],
                           name='reshape_fmaps')
        # [batch_size, 64, 2048]

        res5c_gap = avg_pool(res5c,
                             ksize=k_size,
                             stride=1,
                             name='res5_avg_pool')
        # [batch_size, 1, 1, 2048]
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c_gap.get_shape())
        print('---------------------')

    net_flatten = flatten_layer(res5c_gap)
    # [batch_size, 2048]
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net, W = fc_layer(net_flatten,
                      numClasses,
                      'FC1',
                      is_train=is_train,
                      batch_norm=True,
                      add_reg=True,
                      use_relu=False)
    # W: [2048, 14]
    W_tiled = tf.tile(tf.expand_dims(W, axis=0), [args.val_batch_size, 1, 1])

    # [2048, 14] -> [1, 2048, 14] -> [batch_size, 2048, 14]

    heat_map_list = tf.unstack(tf.matmul(f_map, W_tiled), axis=0)
    # [batch_size, 64, 14]
    # list of heat-maps of length batch_size, each element: [64, 14]

    cls_act_map_list = [
        tf.nn.softmax(heat_map, dim=0) for heat_map in heat_map_list
    ]
    cls_act_map = tf.stack(cls_act_map_list, axis=0)
    # [batch_size, 64, 14]

    return net, net_flatten, res5c, cls_act_map
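A sketch of how the four return values can be unpacked (the shape comments assume 256x256 inputs, consistent with the [batch_size, 8, 8, 2048] annotation above; note the function also relies on a module-level args.val_batch_size when tiling W):

# Illustrative unpacking of create_network's outputs (assumed input size).
X = tf.placeholder(tf.float32, [None, 256, 256, 3])
is_train = tf.placeholder(tf.bool)
logits, features, res5c, cam = create_network(X, numClasses=14, is_train=is_train)
# logits:   [batch_size, 14]           class scores
# features: [batch_size, 2048]         globally average-pooled features
# res5c:    [batch_size, 8, 8, 2048]   final convolutional feature map
# cam:      [batch_size, 64, 14]       per-location class activation maps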
Code Example #7
File: ops_test.py  Project: chrifer7/inf659-inception
 def testGlobalAvgPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.avg_pool(images, images.get_shape()[1:3], stride=1)
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
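Pooling over the entire spatial extent, as this test does, amounts to global average pooling; an equivalent without the ops wrapper (assuming a TF 1.x release where reduce_mean accepts keepdims) is:

# Global average pooling: average over the height and width axes.
global_avg = tf.reduce_mean(images, axis=[1, 2], keepdims=True)
# Shape: [5, 1, 1, 3], matching the assertion above.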
Code Example #8
File: ops_test.py  Project: chrifer7/inf659-inception
 def testCreateAvgPoolWithScope(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.avg_pool(images, [3, 3], scope='pool1')
         self.assertEqual(output.op.name, 'pool1/AvgPool')
Code Example #9
    def densenet(self, x, is_training, no_of_blocks=3, block_layers=7, first_conv_op_channel=16, block_op_channel=12,
                 kernal_size=3):

        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1], '8': [1, 8, 8, 1]}
        pool_win_size = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1], '8': [1, 8, 8, 1]}
        x_shape = x.get_shape().as_list()[1:]

        kernel = [kernal_size, kernal_size, x_shape[2], first_conv_op_channel]
        conv = ops.conv2d(x, kernel, strides['1'], 'SAME', initial='xavier', with_bias=False)
        if isinstance(block_layers, int):
            with tf.variable_scope('Dense_Block_1') as scope:
                kernel = [kernal_size, kernal_size, first_conv_op_channel, block_op_channel]
                conv = ops.conv2d_dense_block(conv, 'Dense_Block_1', is_training, kernel,
                                              layers=block_layers, dropout_rate=self.dropout_rate)
                op_channel = first_conv_op_channel + block_layers * block_op_channel
            for _ in range(1, no_of_blocks):
                with tf.variable_scope('transition_layer_' + str(_ - 1)) as scope:
                    conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training,
                                                        updates_collections=None)
                    conv = tf.nn.relu(conv)
                    kernel = [kernal_size, kernal_size, op_channel, op_channel]
                    conv = ops.conv2d(conv, 'transition_layer_' + str(_ - 1), kernel,
                                      strides=[1, 1, 1, 1], padding='SAME',
                                      initial='xavier', with_bias=False)
                    conv = tf.nn.dropout(conv, self.dropout_rate)
                    conv = ops.avg_pool(conv, pool_win_size['2'], strides['2'], 'VALID')
                with tf.variable_scope('Dense_Block_' + str(_ + 1)) as scope:
                    kernel = [kernal_size, kernal_size, op_channel, block_op_channel]
                    conv = ops.conv2d_dense_block(conv, 'Dense_Block_' + str(_ + 1), is_training, kernel,
                                                  layers=block_layers, dropout_rate=self.dropout_rate)
                    op_channel += block_layers * block_op_channel
        elif isinstance(block_layers, list):
            no_of_blocks = len(block_layers)

            with tf.variable_scope('Dense_Block_1') as scope:
                kernel = [kernal_size, kernal_size, first_conv_op_channel, block_op_channel]
                conv = ops.conv2d_dense_block(conv, 'Dense_Block_1', is_training, kernel,
                                              layers=block_layers[0], dropout_rate=self.dropout_rate)
                op_channel = first_conv_op_channel + block_layers[0] * block_op_channel

            for _ in range(1, no_of_blocks):
                with tf.variable_scope('transition_layer_' + str(_)) as scope:
                    conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training,
                                                        updates_collections=None)
                    conv = tf.nn.relu(conv)
                    kernel = [kernal_size, kernal_size, op_channel, op_channel]
                    conv = ops.conv2d(conv, 'transition_layer_' + str(_), kernel,
                                      strides=[1, 1, 1, 1], padding='SAME',
                                      initial='xavier', with_bias=False)
                    conv = tf.nn.dropout(conv, self.dropout_rate)
                    conv = ops.avg_pool(conv, pool_win_size['2'], strides['2'], 'VALID')
                with tf.variable_scope('Dense_Block_' + str(_ + 1)) as scope:
                    kernel = [kernal_size, kernal_size, op_channel, block_op_channel]
                    conv = ops.conv2d_dense_block(conv, 'Dense_Block_' + str(_ + 1), is_training, kernel,
                                                  layers=block_layers[_], dropout_rate=self.dropout_rate)
                    op_channel += block_layers[_] * block_op_channel
        with tf.variable_scope('Global_Average_Pooling') as scope:
            conv = tf.contrib.layers.batch_norm(conv, scale=True, is_training=is_training, updates_collections=None)
            conv = tf.nn.relu(conv)
            conv = ops.avg_pool(conv, pool_win_size['8'], strides['8'], 'VALID')

        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)

        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv, 'output_layer', self.no_of_classes,
                                        activation='none', initializer='xavier')

        return conv
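The op_channel bookkeeping in this method grows linearly with the number of layers per dense block; for the default arguments it works out as follows (plain arithmetic, no TensorFlow required):

# Channel growth for the defaults no_of_blocks=3, block_layers=7,
# first_conv_op_channel=16, block_op_channel=12.
channels = 16
for block in range(3):
    channels += 7 * 12               # each dense layer adds block_op_channel maps
    print('after dense block', block + 1, ':', channels)
# -> 100, 184, 268. The transition layers keep the channel count unchanged
#    (kernel = [k, k, op_channel, op_channel]) and halve the spatial resolution
#    with the stride-2 average pool.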