Example #1
 def _vgg_max_pool(self, x, scope, pool5=False):
   with tf.variable_scope(scope):
     if not pool5:
       pool = ops.max_pool(x, 2, 2, 'SAME')
     else:
       pool = ops.max_pool(x, 3, 1, 'SAME')
   return pool
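The pool5 branch above uses a 3x3 window with stride 1, which leaves the spatial dimensions unchanged; that is the usual trick when repurposing VGG's pool5 for dense prediction. A minimal sketch of the two cases with the stock op, assuming the ops wrapper simply forwards kernel size, stride, and padding to tf.nn.max_pool:

import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 14, 14, 512])
# regular VGG pooling: 2x2 window, stride 2 -> halves height and width
pool = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')    # (1, 7, 7, 512)
# pool5 variant: 3x3 window, stride 1 -> spatial size preserved
pool5 = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')   # (1, 14, 14, 512)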
Example #2
    def vgg16(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {
            'c1_1': [3, 3, x_shape[2], 64], 'c1_2': [3, 3, 64, 64],
            'c2_1': [3, 3, 64, 128], 'c2_2': [3, 3, 128, 128],
            'c3_1': [3, 3, 128, 256], 'c3_2': [3, 3, 256, 256],
            'c3_3': [3, 3, 256, 256],
            'c4_1': [3, 3, 256, 512], 'c4_2': [3, 3, 512, 512],
            'c4_3': [3, 3, 512, 512],
            'c5_1': [3, 3, 512, 512], 'c5_2': [3, 3, 512, 512],
            'c5_3': [3, 3, 512, 512]}
        strides = {'c': [1, 1, 1, 1], 'p': [1, 2, 2, 1]}
        pool_win_size = [1, 2, 2, 1]
        conv = x

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(conv,'Conv_1_1', kernel['c1_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_1_2', kernel['c1_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'Conv_2_1', kernel['c2_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_2_2', kernel['c2_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv,'Conv_3_1', kernel['c3_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_3_2', kernel['c3_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_3_3', kernel['c3_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv,'Conv_4_1', kernel['c4_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_4_2', kernel['c4_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_4_3', kernel['c4_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv,'Conv_5_1', kernel['c5_1'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_5_2', kernel['c5_2'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.conv2d(conv,'Conv_5_3', kernel['c5_3'], strides['c'], 'SAME')
            conv = tf.nn.relu(conv)
            conv = ops.max_pool(conv, pool_win_size, strides['p'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 4096, activation='relu', initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer', self.no_of_classes, activation="none", initializer='xavier')
        return conv
Example #3
    def alexnet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [11, 11, x_shape[2], 96], 'c2': [5, 5, 96, 256],
                  'c3': [3, 3, 256, 384], 'c4': [3, 3, 384, 384],
                  'c5': [3, 3, 384, 256]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}
        pool_win_size = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1], '3': [1, 3, 3, 1], '4': [1, 4, 4, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x,'conv_1', kernel['c1'], strides['4'], 'VALID')
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], "VALID")
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'conv_2', kernel['c2'], strides['1'], padding='SAME', groups=2)
            conv = tf.nn.lrn(conv, depth_radius=2, bias=1.0, alpha=1e-05, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Conv_3') as scope:
            conv = ops.conv2d(conv,'conv_3', kernel['c3'], strides['1'], 'SAME')
        with tf.variable_scope('Conv_4') as scope:
            conv = ops.conv2d(conv,'conv_4', kernel['c4'], strides['1'], 'SAME', groups=2)
        with tf.variable_scope('Conv_5') as scope:
            conv = ops.conv2d(conv,'conv_5', kernel['c5'], strides['1'], 'SAME', groups=2)
            conv = ops.max_pool(conv, pool_win_size['3'], strides['2'], 'VALID')
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 4096, activation=['relu', 'dropout'], initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer',self.no_of_classes, activation='none', initializer='xavier')
        return conv
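The groups=2 argument in Conv_2, Conv_4, and Conv_5 mirrors AlexNet's original two-GPU channel split. The ops.conv2d implementation is not shown here, but a grouped convolution reduces to split/convolve/concatenate; a minimal TF1-style sketch under that assumption (grouped_conv2d is an illustrative name, not part of ops):

import tensorflow as tf

def grouped_conv2d(x, kernel, groups):
    # kernel: [kh, kw, in_channels // groups, out_channels]
    x_groups = tf.split(x, groups, axis=3)       # split input channels
    k_groups = tf.split(kernel, groups, axis=3)  # split output channels
    outputs = [tf.nn.conv2d(xi, ki, [1, 1, 1, 1], 'SAME')
               for xi, ki in zip(x_groups, k_groups)]
    return tf.concat(outputs, axis=3)            # stack the group outputs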
Example #4
 def build_network(self, x):
     # Building network...
     with tf.variable_scope('LeNet'):
         x = conv_2d(x,
                     filter_size=5,
                     num_filters=6,
                     name='conv_1',
                     keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = max_pool(x, 2, 2, 'pool_1')
         x = conv_2d(x,
                     filter_size=5,
                     num_filters=16,
                     name='conv_2',
                     keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = max_pool(x, 2, 2, 'pool_2')
         x = flatten_layer(x)
         x = drop_out(x, self.keep_prob_pl)
         x = fc_layer(x, 120, name='fc_1', keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = fc_layer(x, 84, name='fc_2', keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         self.logits = fc_layer(x,
                                self.conf.num_cls,
                                name='fc_3',
                                use_relu=False,
                                keep_prob=1)
Example #5
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('FCNet'):
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_1',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_2',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_1')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_3',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_4',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_2')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_5',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_6',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_3')

            x = flatten_layer(x)
            self.logits = fc_layer(x,
                                   self.conf.num_cls,
                                   name='fc_3',
                                   use_relu=False,
                                   keep_prob=1)
Example #6
        def Counter(img, reuse=True, scope='Counter'):
            with tf.variable_scope(scope, reuse=reuse) as scope:
                if not reuse: log.warn(scope.name)

                _ = conv2d(img, 64, is_train, info=not reuse, name='conv1_1')
                _ = conv2d(_, 64, is_train, info=not reuse, name='conv1_2')
                conv1 = max_pool(_, name='conv1')

                _ = conv2d(conv1,
                           128,
                           is_train,
                           info=not reuse,
                           name='conv2_1')
                _ = conv2d(_, 128, is_train, info=not reuse, name='conv2_2')
                conv2 = max_pool(_, name='conv2')

                _ = conv2d(conv2,
                           256,
                           is_train,
                           info=not reuse,
                           name='conv3_1')
                _ = conv2d(_, 256, is_train, info=not reuse, name='conv3_2')
                _ = conv2d(_, 256, is_train, info=not reuse, name='conv3_3')
                conv3 = max_pool(_, name='conv3')

                _ = conv2d(conv3,
                           512,
                           is_train,
                           info=not reuse,
                           name='conv4_1')
                _ = conv2d(_, 512, is_train, info=not reuse, name='conv4_2')
                _ = conv2d(_, 512, is_train, info=not reuse, name='conv4_3')
                conv4 = max_pool(_, name='conv4')

                _ = conv2d(conv4,
                           512,
                           is_train,
                           info=not reuse,
                           name='conv5_1')
                _ = conv2d(_, 512, is_train, info=not reuse, name='conv5_2')
                _ = conv2d(_, 512, is_train, info=not reuse, name='conv5_3')
                conv5 = max_pool(_, name='conv5')

                fc1 = fc(tf.reshape(conv5, [self.batch_size, -1]),
                         4096,
                         is_train,
                         info=not reuse,
                         name='fc_1')
                fc2 = fc(fc1, 4096, is_train, info=not reuse, name='fc_2')
                fc3 = fc(fc2, 1000, is_train, info=not reuse, name='fc_3')
                fc4 = fc(fc3,
                         1000,
                         is_train,
                         info=not reuse,
                         batch_norm=False,
                         name='fc_4')
                return [conv1, conv2, conv3, conv4, conv5, fc1, fc2, fc3, fc4]
Example #7
def AlexNet(X, keep_prob, is_train):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=True)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=True)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=True)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    return net
Example #8
 def testCreateSquareMaxPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, 3)
          self.assertEqual(output.op.name, 'MaxPool/MaxPool')
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
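The asserted [5, 1, 1, 3] shape is just the 'VALID' pooling arithmetic, out = floor((in - k) / s) + 1 = floor((3 - 3) / 2) + 1 = 1, given the wrapper's apparent defaults of stride 2 and 'VALID' padding. The same check against the stock op:

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 3), seed=1)
output = tf.nn.max_pool(images, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
print(output.get_shape().as_list())  # [5, 1, 1, 3]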
Example #9
    def resnet_with_bottleneck(self, input, is_training, layer_from_2=[3, 4, 6, 3],
                               first_kernel=7, first_stride=2, first_pool=True, stride=2):

        input_shape = input.get_shape().as_list()[1:]
        conv = ops.conv2d(input, 'initial_conv',
                          [first_kernel, first_kernel, input_shape[2], 64],
                          [1, first_stride, first_stride, 1])
        if first_pool:
            conv = ops.max_pool(conv, [1, 3, 3, 1], [1, 2, 2, 1])

        for i in range(layer_from_2[0]):
            conv = ops.residual_bottleneck_block(conv, 'Block_1_' + str(i), is_training, 256,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[1]):
            conv = ops.residual_bottleneck_block(conv, 'Block_2_' + str(i), is_training, 512,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[2]):
            conv = ops.residual_bottleneck_block(conv, 'Block_3_' + str(i), is_training, 1024,
                                                 kernel=3, first_block=True, stride=stride)

        for i in range(layer_from_2[3]):
            conv = ops.residual_bottleneck_block(conv, 'Block_4_' + str(i), is_training, 2048,
                                                 kernel=3, first_block=True, stride=stride)
        with tf.variable_scope('unit'):
            conv = ops.batch_normalization(conv, is_training)
            conv = tf.nn.relu(conv)
            conv = ops.global_avg_pool(conv)
            conv = ops.flatten(conv)
        with tf.variable_scope('logit'):
            conv = ops.get_hidden_layer(conv, 'output', self.no_of_classes, 'none')
        return conv
Example #10
 def testCreateMaxPoolStrideSAME(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, [3, 3], stride=1, padding='SAME')
         self.assertListEqual(output.get_shape().as_list(),
                              [5, height, width, 3])
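With 'SAME' padding the output size is ceil(in / stride), so a stride-1 pool preserves height and width regardless of window size, which is what the assertion verifies. Equivalent stock call:

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 3), seed=1)
output = tf.nn.max_pool(images, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
print(output.get_shape().as_list())  # [5, 3, 3, 3]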
Example #11
def AlexNet_target_task(X, keep_prob, num_cls):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=False)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=False)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=False)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC_1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    net = fc_layer(net, num_cls, 'FC_2', trainable=True, use_relu=False)
    return net
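Compared with Example #7, AlexNet_target_task freezes the convolutional trunk (trainable=False) and trains only the new fully connected head, the usual transfer-learning split. In TF1, trainable=False simply keeps a variable out of the collection that optimizers minimize over; a minimal sketch with illustrative variable names:

import tensorflow as tf

trunk_w = tf.get_variable('conv_w', [3, 3, 3, 16], trainable=False)  # frozen
head_w = tf.get_variable('fc_w', [256, 10], trainable=True)          # fine-tuned
# only 'fc_w' appears, so an optimizer built on this list never touches the trunk
print([v.name for v in tf.trainable_variables()])  # ['fc_w:0']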
Example #12
def inference(input_tensor_batch, n):
    """
    The main function that defines the ResNet. total layers = 1 + 2n + 2n + 2n +1 = 6n + 2
    :param input_tensor_batch: 4D tensor
    :param n: num_residual_blocks
    :return: last layer in the network. Not softmax-ed
    """
    tensor = vgg_block(input_tensor_batch, 2, 16)
    tensor = ops.max_pool(tensor, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')

    tensor = vgg_block(tensor, 2, 32)
    tensor = ops.max_pool(tensor, [1, 3, 3, 1], [1, 2, 2, 1], 'SAME')

    tensor = vgg_block(tensor, 1, 128)
    tensor = tf.reduce_mean(tensor, [1, 2])

    with tf.name_scope('fc'):
        logits = fc_layer(tensor, 10)
    return logits
Example #13
def max_pool_test():
    with tf.device('/' + FLAGS.device + ":0"):
        input_tensor = tf.Variable(initial_value=tf.truncated_normal([128, 11, 11, 64], mean=0.1, dtype=dtype))
        param = tf.constant(np.random.rand(128, 2, 2, 64), dtype=dtype)

        output_tensor = ops.max_pool(input_tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
        output_tensor = ops.max_pool(output_tensor, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding="VALID")
        recv = tf.reduce_mean(output_tensor * param)
        grads = tf.gradients(recv, [input_tensor])

    with tf.device("/cpu:0"):
        output_tensor_ = tf.nn.max_pool(input_tensor, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
        output_tensor_ = tf.nn.max_pool(output_tensor_, ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1], padding="VALID")
        recv_ = tf.reduce_mean(output_tensor_ * param)
        grads_ = tf.gradients(recv_, [input_tensor])

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        assert np.max(np.abs(sess.run(output_tensor) - sess.run(output_tensor_))) < 1e-5
        for g, g_ in zip(grads, grads_):
            assert np.max(np.abs(sess.run(g) - sess.run(g_))) < 1e-5
Example #14
    def lenet(self, x, is_training):
        x_shape = x.get_shape().as_list()[1:]
        kernel = {'c1': [5, 5, x_shape[2], 20], 'c2': [5, 5, 20, 50]}
        strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
        pool_win_size = {'2': [1, 2, 2, 1]}

        with tf.variable_scope('Conv_1') as scope:
            conv = ops.conv2d(x,'conv1', kernel['c1'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Conv_2') as scope:
            conv = ops.conv2d(conv,'conv2', kernel['c2'], strides['1'], 'SAME')
            conv = tf.nn.lrn(conv, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
            conv = ops.max_pool(conv, pool_win_size['2'], strides['2'])
        with tf.variable_scope('Flatten_layer') as scope:
            conv = ops.flatten(conv)
        with tf.variable_scope('Hidden_layer_1') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_1',120, initializer='xavier')
        with tf.variable_scope('Hidden_layer_2') as scope:
            conv = ops.get_hidden_layer(conv,'Hidden_layer_2', 84, initializer='xavier')
        with tf.variable_scope('Output_layer') as scope:
            conv = ops.get_hidden_layer(conv,'output_layer', self.no_of_classes, activation="none", initializer='xavier')
        return conv
Example #15
def model_fun(x, is_training):
    x_shape = x.get_shape().as_list()[1:]
    kernel = {'c1': [5, 5, x_shape[2], 64], 'c2': [5, 5, 20, 50]}
    strides = {'1': [1, 1, 1, 1], '2': [1, 2, 2, 1]}
    pool_win_size = {'2': [1, 2, 2, 1]}

    conv = ops.conv2d(x, 'conv1', kernel['c1'], strides['1'], 'SAME')

    conv = ops.max_pool(conv, [1, 3, 3, 1], [1, 1, 1, 1])

    conv = ops.residual_bottleneck_block(conv, 'ins_block', is_training, 64)

    with tf.variable_scope('Flatten_layer') as scope:
        conv = ops.flatten(conv)
    with tf.variable_scope('Output_layer') as scope:
        conv = ops.get_hidden_layer(conv,
                                    'output_layer',
                                    5,
                                    activation="none",
                                    initializer='xavier')
    return conv
Example #16
    def inception_v2(self, input, is_training):
        input_shape = input.get_shape().as_list()[1:]
        conv = ops.conv2d(input,'conv1',kernel_size=[7, 7, input_shape[2], 64], strides=[1, 2, 2, 1])
        conv = tf.nn.relu(conv)
        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])
        conv = tf.nn.local_response_normalization(conv, depth_radius=2, alpha=2e-05, beta=0.75)

        conv = ops.conv2d(conv,'conv2', kernel_size=[1, 1, 64, 64], strides=[1, 1, 1, 1], padding='VALID')
        conv = tf.nn.relu(conv)

        conv_shape = conv.get_shape().as_list()[1:]
        conv = ops.conv2d(conv,'conv3', kernel_size=[3, 3, conv_shape[2], 192], strides=[1, 1, 1, 1])
        conv = tf.nn.relu(conv)

        conv = tf.nn.local_response_normalization(conv, depth_radius=2, alpha=2e-05, beta=0.75)
        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv,'Block_1',is_training, out_channel={'1': 64, '3': 128, '5': 32},
                                      reduced_out_channel={'3': 96, '5': 16, 'p': 32})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_2', is_training, out_channel={'1': 128, '3': 192, '5': 96},
                                      reduced_out_channel={'3': 128, '5': 32, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv,'Block_3', is_training, out_channel={'1': 192, '3': 208, '5': 48},
                                      reduced_out_channel={'3': 96, '5': 16, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_4', is_training, out_channel={'1': 160, '3': 224, '5': 64},
                                      reduced_out_channel={'3': 112, '5': 24, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_5', is_training, out_channel={'1': 128, '3': 256, '5': 64},
                                      reduced_out_channel={'3': 128, '5': 24, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_6', is_training, out_channel={'1': 112, '3': 288, '5': 64},
                                      reduced_out_channel={'3': 144, '5': 32, 'p': 64})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_7', is_training, out_channel={'1': 256, '3': 320, '5': 128},
                                      reduced_out_channel={'3': 160, '5': 32, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.max_pool(conv, size=[1, 3, 3, 1], strides=[1, 2, 2, 1])

        conv = ops.inception_v2_block(conv,'Block_8', is_training, out_channel={'1': 256, '3': 320, '5': 128},
                                      reduced_out_channel={'3': 160, '5': 32, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.inception_v2_block(conv,'Block_9', is_training, out_channel={'1': 384, '3': 384, '5': 128},
                                      reduced_out_channel={'3': 192, '5': 48, 'p': 128})
        conv = ops.batch_normalization(conv, is_training)
        conv = tf.nn.relu(conv)

        conv = ops.global_avg_pool(conv)
        conv = ops.flatten(conv)

        conv = tf.nn.dropout(conv, 0.4)
        conv = ops.get_hidden_layer(conv,'output_layer',1000, 'none', 'xavier')
        return conv
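The internals of ops.inception_v2_block are not shown; assuming the GoogLeNet-style layout the dictionary keys imply ('1' = 1x1 branch, '3' = 3x3 branch behind a 1x1 reduction, '5' = 5x5 branch behind a 1x1 reduction, 'p' = 1x1 projection of a pooled branch), a block is four parallel paths concatenated along channels. A compact sketch:

import tensorflow as tf

def inception_block_sketch(x, out_channel, reduced_out_channel):
    def conv(h, k, n):
        return tf.layers.conv2d(h, n, k, padding='same', activation=tf.nn.relu)
    b1 = conv(x, 1, out_channel['1'])
    b3 = conv(conv(x, 1, reduced_out_channel['3']), 3, out_channel['3'])
    b5 = conv(conv(x, 1, reduced_out_channel['5']), 5, out_channel['5'])
    bp = conv(tf.layers.max_pooling2d(x, 3, 1, padding='same'), 1,
              reduced_out_channel['p'])
    return tf.concat([b1, b3, b5, bp], axis=3)  # channel counts add up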
Example #17
    def model(self, x):

        # model should take training input and produce transformed as output

        e1_a = conv2d(x, 64, name='g_e1_conv_a', k_h=3, k_w=3)
        e1_b = self.g_bn_e1a(
            conv2d(lrelu(e1_a), 64, name='g_e1_conv_b', k_h=3, k_w=3))
        e1_c = self.g_bn_e1b(
            conv2d(lrelu(e1_b), 64, name='g_e1_conv_c', k_h=3, k_w=3))

        m1 = max_pool(e1_c)

        e2_a = self.g_bn_e2a(
            conv2d(lrelu(m1), 128, name='g_e2_conv_a', k_h=3, k_w=3))
        e2_b = self.g_bn_e2b(
            conv2d(lrelu(e2_a), 128, name='g_e2_conv_b', k_h=3, k_w=3))

        m2 = max_pool(e2_b)

        e3_a = self.g_bn_e3a(
            conv2d(lrelu(m2), 256, name='g_e3_conv_a', k_h=3, k_w=3))
        e3_b = self.g_bn_e3b(
            conv2d(lrelu(e3_a), 256, name='g_e3_conv_b', k_h=3, k_w=3))

        m3 = max_pool(e3_b)

        e4_a = self.g_bn_e4a(
            conv2d(lrelu(m3), 512, name='g_e4_conv_a', k_h=3, k_w=3))
        e4_b = self.g_bn_e4b(
            conv2d(lrelu(e4_a), 512, name='g_e4_conv_b', k_h=3, k_w=3))

        m4 = max_pool(e4_b)

        e5_a = self.g_bn_e5a(
            conv2d(lrelu(m4), 1024, name='g_e5_conv_a', k_h=3, k_w=3))
        e5_b = self.g_bn_e5b(
            conv2d(lrelu(e5_a), 1024, name='g_e5_conv_b', k_h=3, k_w=3))

        d1, d1_w, d1_b = deconv2d(lrelu(e5_b), [self.batch_size, 32, 32, 1024],
                                  name='g_d1',
                                  with_w=True)
        d1 = self.g_bn_d1a(d1)
        d1 = tf.concat([d1, e4_b], 3)
        d1_a = self.g_bn_d1b(
            conv2d(lrelu(d1), 512, name='g_d1_conv_a', k_h=3, k_w=3))
        d1_b = self.g_bn_d1c(
            conv2d(lrelu(d1_a), 512, name='g_d1_conv_b', k_h=3, k_w=3))

        d2, d2_w, d2_b = deconv2d(lrelu(d1_b), [self.batch_size, 64, 64, 512],
                                  name='g_d2',
                                  with_w=True)
        d2 = self.g_bn_d2a(d2)
        d2 = tf.concat([d2, e3_b], 3)

        d2_a = self.g_bn_d2b(
            conv2d(lrelu(d2), 256, name='g_d2_conv_a', k_h=3, k_w=3))
        d2_b = self.g_bn_d2c(
            conv2d(lrelu(d2_a), 256, name='g_d2_conv_b', k_h=3, k_w=3))

        d3, d3_w, d3_b = deconv2d(lrelu(d2_b),
                                  [self.batch_size, 128, 128, 256],
                                  name='g_d3',
                                  with_w=True)
        d3 = self.g_bn_d3a(d3)
        d3 = tf.concat([d3, e2_b], 3)

        d3_a = self.g_bn_d3b(
            conv2d(lrelu(d3), 128, name='g_d3_conv_a', k_h=3, k_w=3))
        d3_b = self.g_bn_d3c(
            conv2d(lrelu(d3_a), 128, name='g_d3_conv_b', k_h=3, k_w=3))

        d4, d4_w, d4_b = deconv2d(lrelu(d3_b),
                                  [self.batch_size, 256, 256, 128],
                                  name='g_d4',
                                  with_w=True)
        d4 = self.g_bn_d4a(d4)
        d4 = tf.concat([d4, e1_b], 3)

        d4_a = self.g_bn_d4b(
            conv2d(lrelu(d4), 64, name='g_d4_conv_a', k_h=3, k_w=3))
        d4_b = self.g_bn_d4c(
            conv2d(lrelu(d4_a), 64, name='g_d4_conv_b', k_h=3, k_w=3))

        resid = conv2d(d4_b, 1, k_h=1, k_w=1, name='residual')

        out = tf.add(resid, x, name='out')

        return out
Example #18
def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

    "Rethinking the Inception Architecture for Computer Vision"

    Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
    Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether the network is being trained.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a tuple (logits, end_points); end_points also holds the 'aux_logits'
    and 'predictions' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch5x5, branch3x3dbl, branch_pool])
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch7x7, branch7x7dbl, branch_pool])
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is left unchanged so as not to
        # invalidate previously trained checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat(axis=3, values=[branch3x3, branch7x7x3, branch_pool])
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat(axis=3, values=[ops.conv2d(branch3x3, 384, [1, 3]),
                                                  ops.conv2d(branch3x3, 384, [3, 1])])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat(axis=3, values=[ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                                     ops.conv2d(branch3x3dbl, 384, [3, 1])])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat(axis=3, values=[branch1x1, branch3x3, branch3x3dbl, branch_pool])
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
      return logits, end_points
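A minimal usage sketch for the function above, assuming the slim-style ops and scopes modules it depends on are importable; the shape comments show the network expects 299x299 inputs:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
logits, end_points = inception_v3(images, num_classes=1000, is_training=False)
probs = end_points['predictions']  # softmax over the num_classes outputs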
Example #19
 def testCreateMaxPoolWithScope(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, [3, 3], scope='pool1')
          self.assertEqual(output.op.name, 'pool1/MaxPool')
Example #20
def create_network(X, numClasses, is_train):
    """
    Building the Residual Network with 50 layer
    :param X: input
    :param h: number of units in the fully connected layer
    :param keep_prob: dropout rate
    :param numClasses: number of classes
    :param is_train: to be used by batch normalization
    :return:
    """
    res1 = conv_2d(X,
                   layer_name='res1',
                   stride=2,
                   filter_size=7,
                   num_filters=64,
                   is_train=is_train,
                   batch_norm=True,
                   use_relu=True)
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    res1 = max_pool(res1, ksize=3, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1 after max_pool')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 is_train,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 is_train,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 is_train,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 is_train,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=128,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 is_train,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 is_train,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 is_train,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 is_train,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=256,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 is_train,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 is_train,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 is_train,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 is_train,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 is_train,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 is_train,
                                 block_name='res5a',
                                 s1=2,
                                 k1=1,
                                 nf1=512,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 is_train,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 is_train,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        # res5c: [batch_size, 8, 8, 2048]
        print('Res5c')
        print(res5c.get_shape())
        k_size = res5c.get_shape().as_list()[1]
        num_filters = res5c.get_shape().as_list()[-1]

        f_map = tf.reshape(res5c, [-1, k_size * k_size, num_filters],
                           name='reshape_fmaps')
        # [batch_size, 64, 2048]

        res5c_gap = avg_pool(res5c,
                             ksize=k_size,
                             stride=1,
                             name='res5_avg_pool')
        # [batch_size, 1, 1, 2048]
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c_gap.get_shape())
        print('---------------------')

    net_flatten = flatten_layer(res5c_gap)
    # [batch_size, 2048]
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net, W = fc_layer(net_flatten,
                      numClasses,
                      'FC1',
                      is_train=is_train,
                      batch_norm=True,
                      add_reg=True,
                      use_relu=False)
    # W: [2048, 14]
    W_tiled = tf.tile(tf.expand_dims(W, axis=0), [args.val_batch_size, 1, 1])

    # [2048, 14] -> [1, 2048, 14] -> [batch_size, 2048, 14]

    heat_map_list = tf.unstack(tf.matmul(f_map, W_tiled), axis=0)
    # matmul result: [batch_size, 64, 14]; unstacked into a list of
    # batch_size heat-maps, each of shape [64, 14]

    cls_act_map_list = [
        tf.nn.softmax(heat_map, dim=0) for heat_map in heat_map_list
    ]
    cls_act_map = tf.stack(cls_act_map_list, axis=0)
    # [batch_size, 64, 14]

    return net, net_flatten, res5c, cls_act_map
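
The return values above implement the class-activation-map (CAM) idea: the same FC weights W that produce the logits are reused to weight every spatial position of the final feature map. A minimal NumPy sketch of just that step (all shapes are assumptions taken from the comments above: 8x8 maps, 2048 channels, 14 classes):

import numpy as np

batch_size, k, c, n_cls = 2, 8, 2048, 14
f_map = np.random.rand(batch_size, k * k, c)   # flattened conv features
W = np.random.rand(c, n_cls)                   # final FC-layer weights

# Project every spatial position through the FC weights: one coarse
# heat map per class, shape [batch_size, 64, 14].
heat_maps = f_map @ W

# Softmax over the 64 spatial positions normalizes each class map,
# mirroring the per-example tf.nn.softmax(heat_map, dim=0) above.
e = np.exp(heat_maps - heat_maps.max(axis=1, keepdims=True))
cls_act_map = e / e.sum(axis=1, keepdims=True)
print(cls_act_map.shape)   # (2, 64, 14)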
Example #21
def get_vgg16_pool5(input, params):
    layers = get_vgg16_conv5(input, params)
    layers.pool5 = ops.max_pool(input=layers.conv5_3_relu, name='pool5')

    return layers
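
A hypothetical usage sketch (the 224x224 placeholder shape and the vgg_params weight container are assumptions; ops.max_pool is taken to be the usual 2x2, stride-2 pooling):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
layers = get_vgg16_pool5(images, vgg_params)  # vgg_params: assumed pretrained-weight dict
# With five 2x downsamplings, pool5 should come out as [None, 7, 7, 512].
print(layers.pool5.get_shape())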
Example #22
def get_vgg16_conv5(input, params):
    layers = edict()
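    # VGG-16 conv stack: five blocks of 3x3 convolutions (2, 2, 3, 3, 3 layers
    # with 64, 128, 256, 512, 512 filters respectively); each of the first four
    # blocks ends in max pooling, while pool5 is added by get_vgg16_pool5 above.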

    layers.conv1_1 = ops.conv2D(input=input,
                                shape=(3, 3, 64),
                                name='conv1_1',
                                params=params)
    layers.conv1_1_relu = ops.activate(input=layers.conv1_1,
                                       name='conv1_1_relu',
                                       act_type='relu')
    layers.conv1_2 = ops.conv2D(input=layers.conv1_1_relu,
                                shape=(3, 3, 64),
                                name='conv1_2',
                                params=params)
    layers.conv1_2_relu = ops.activate(input=layers.conv1_2,
                                       name='conv1_2_relu',
                                       act_type='relu')
    layers.pool1 = ops.max_pool(input=layers.conv1_2_relu, name='pool1')

    layers.conv2_1 = ops.conv2D(input=layers.pool1,
                                shape=(3, 3, 128),
                                name='conv2_1',
                                params=params)
    layers.conv2_1_relu = ops.activate(input=layers.conv2_1,
                                       name='conv2_1_relu',
                                       act_type='relu')
    layers.conv2_2 = ops.conv2D(input=layers.conv2_1_relu,
                                shape=(3, 3, 128),
                                name='conv2_2',
                                params=params)
    layers.conv2_2_relu = ops.activate(input=layers.conv2_2,
                                       name='conv2_2_relu',
                                       act_type='relu')
    layers.pool2 = ops.max_pool(input=layers.conv2_2_relu, name='pool2')

    layers.conv3_1 = ops.conv2D(input=layers.pool2,
                                shape=(3, 3, 256),
                                name='conv3_1',
                                params=params)
    layers.conv3_1_relu = ops.activate(input=layers.conv3_1,
                                       name='conv3_1_relu',
                                       act_type='relu')
    layers.conv3_2 = ops.conv2D(input=layers.conv3_1_relu,
                                shape=(3, 3, 256),
                                name='conv3_2',
                                params=params)
    layers.conv3_2_relu = ops.activate(input=layers.conv3_2,
                                       name='conv3_2_relu',
                                       act_type='relu')
    layers.conv3_3 = ops.conv2D(input=layers.conv3_2_relu,
                                shape=(3, 3, 256),
                                name='conv3_3',
                                params=params)
    layers.conv3_3_relu = ops.activate(input=layers.conv3_3,
                                       name='conv3_3_relu',
                                       act_type='relu')
    layers.pool3 = ops.max_pool(input=layers.conv3_3_relu, name='pool3')

    layers.conv4_1 = ops.conv2D(input=layers.pool3,
                                shape=(3, 3, 512),
                                name='conv4_1',
                                params=params)
    layers.conv4_1_relu = ops.activate(input=layers.conv4_1,
                                       name='conv4_1_relu',
                                       act_type='relu')
    layers.conv4_2 = ops.conv2D(input=layers.conv4_1_relu,
                                shape=(3, 3, 512),
                                name='conv4_2',
                                params=params)
    layers.conv4_2_relu = ops.activate(input=layers.conv4_2,
                                       name='conv4_2_relu',
                                       act_type='relu')
    layers.conv4_3 = ops.conv2D(input=layers.conv4_2_relu,
                                shape=(3, 3, 512),
                                name='conv4_3',
                                params=params)
    layers.conv4_3_relu = ops.activate(input=layers.conv4_3,
                                       name='conv4_3_relu',
                                       act_type='relu')
    layers.pool4 = ops.max_pool(input=layers.conv4_3_relu, name='pool4')

    layers.conv5_1 = ops.conv2D(input=layers.pool4,
                                shape=(3, 3, 512),
                                name='conv5_1',
                                params=params)
    layers.conv5_1_relu = ops.activate(input=layers.conv5_1,
                                       name='conv5_1_relu',
                                       act_type='relu')
    layers.conv5_2 = ops.conv2D(input=layers.conv5_1_relu,
                                shape=(3, 3, 512),
                                name='conv5_2',
                                params=params)
    layers.conv5_2_relu = ops.activate(input=layers.conv5_2,
                                       name='conv5_2_relu',
                                       act_type='relu')
    layers.conv5_3 = ops.conv2D(input=layers.conv5_2_relu,
                                shape=(3, 3, 512),
                                name='conv5_3',
                                params=params)
    layers.conv5_3_relu = ops.activate(input=layers.conv5_3,
                                       name='conv5_3_relu',
                                       act_type='relu')

    return layers
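
The five blocks differ only in depth and channel count, so the stack can be generated in a loop. A hedged refactor sketch, not part of the original code (build_vgg16_conv5 and cfg are hypothetical names; it assumes the same ops helpers and edict's dict/attribute access, and is meant to reproduce the layer names above):

def build_vgg16_conv5(input, params):
    # (num_convs, out_channels) per block, mirroring the hand-written stack.
    cfg = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]
    layers = edict()
    x = input
    for b, (n_convs, n_ch) in enumerate(cfg, start=1):
        for i in range(1, n_convs + 1):
            name = 'conv%d_%d' % (b, i)
            layers[name] = ops.conv2D(input=x, shape=(3, 3, n_ch),
                                      name=name, params=params)
            layers[name + '_relu'] = ops.activate(input=layers[name],
                                                  name=name + '_relu',
                                                  act_type='relu')
            x = layers[name + '_relu']
        if b < 5:  # the original defers pool5 to get_vgg16_pool5
            layers['pool%d' % b] = ops.max_pool(input=x, name='pool%d' % b)
            x = layers['pool%d' % b]
    return layers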
Example #23
 def testGlobalMaxPool(self):
     height, width = 3, 3
     with self.test_session():
         images = tf.random_uniform((5, height, width, 3), seed=1)
         output = ops.max_pool(images, images.get_shape()[1:3], stride=1)
         self.assertListEqual(output.get_shape().as_list(), [5, 1, 1, 3])
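
The test above turns max pooling into global max pooling by using a kernel that spans the full spatial extent. A hedged equivalence sketch in plain TF 1.x ops, independent of the ops wrapper: the same result is obtained with tf.reduce_max over the height/width axes, keeping the singleton dimensions.

import tensorflow as tf

images = tf.random_uniform((5, 3, 3, 3), seed=1)
pooled = tf.nn.max_pool(images, ksize=[1, 3, 3, 1],
                        strides=[1, 1, 1, 1], padding='VALID')
reduced = tf.reduce_max(images, axis=[1, 2], keep_dims=True)
# Both tensors have shape [5, 1, 1, 3] and identical values.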