def build_network(self, x):
    # Building network...
    with tf.variable_scope('LeNet'):
        x = conv_2d(x,
                    filter_size=5,
                    num_filters=6,
                    name='conv_1',
                    keep_prob=1)
        x = drop_out(x, self.keep_prob_pl)
        x = max_pool(x, 2, 2, 'pool_1')
        x = conv_2d(x,
                    filter_size=5,
                    num_filters=16,
                    name='conv_2',
                    keep_prob=1)
        x = drop_out(x, self.keep_prob_pl)
        x = max_pool(x, 2, 2, 'pool_2')
        x = flatten_layer(x)
        x = drop_out(x, self.keep_prob_pl)
        x = fc_layer(x, 120, name='fc_1', keep_prob=1)
        x = drop_out(x, self.keep_prob_pl)
        x = fc_layer(x, 84, name='fc_2', keep_prob=1)
        x = drop_out(x, self.keep_prob_pl)
        self.logits = fc_layer(x,
                               self.conf.num_cls,
                               name='fc_3',
                               use_relu=False,
                               keep_prob=1)
Example #2
def resblock1_D(inputs, filters, kernel_size, stride, training, norm,
                collection, scope):
    with tf.variable_scope(scope):
        outputs = ops.conv_2d(inputs,
                              filters,
                              kernel_size,
                              stride,
                              padding='SAME',
                              stddev=0.02,
                              norm=norm,
                              training=training,
                              collection=collection,
                              scope='conv_1')
        outputs = tf.nn.leaky_relu(outputs, alpha=0.2)
        outputs = ops.conv_2d(outputs,
                              filters,
                              kernel_size,
                              stride,
                              padding='SAME',
                              stddev=0.02,
                              norm=norm,
                              training=training,
                              collection=collection,
                              scope='conv_2')
        outputs = outputs + inputs
        outputs = tf.nn.leaky_relu(outputs, alpha=0.2)

    return outputs
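
Note that the identity skip `outputs = outputs + inputs` only matches shapes when `stride == 1` and `filters` equals the input channel count. A hypothetical call; the placeholder shape and the `norm`/`collection` values are illustrative guesses, not values from the original project:

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
y = resblock1_D(x, filters=64, kernel_size=3, stride=1, training=True,
                norm='batch', collection=None, scope='d_res_0')
# y keeps the input shape: [None, 32, 32, 64]
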
Example #3
def discriminator(images, labels, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        # conv1
        conv1 = ops.conv_2d(images, 64, scope="conv1")

        # leaky ReLU
        h1 = ops.leaky_relu(conv1)

        # conv2
        conv2 = ops.conv_2d(h1, 128, scope="conv2")

        # batch norm
        norm2 = ops.batch_norm(conv2, scope="batch_norm2", is_training=True)

        # leaky ReLU
        h2 = ops.leaky_relu(norm2)

        # conv3
        conv3 = ops.conv_2d(h2, 256, scope="conv3")

        # batch norm
        norm3 = ops.batch_norm(conv3, scope="batch_norm3", is_training=True)

        # leaky ReLU
        h3 = ops.leaky_relu(norm3)

        # conv4
        conv4 = ops.conv_2d(h3, 512, scope="conv4")

        # batch norm
        norm4 = ops.batch_norm(conv4, scope="batch_norm4", is_training=True)

        # leaky ReLU
        h4 = ops.leaky_relu(norm4)

        # reshape
        h4_reshape = tf.reshape(h4, [FLAGS.batch_size, -1])

        # source logits
        source_logits = ops.fc(h4_reshape, 1, scope="source_logits")

        # class logits
        class_logits = ops.fc(
            h4_reshape, FLAGS.n_classes, scope="class_logits")

        return source_logits, class_logits
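
The `labels` argument is accepted but unused in the body above. In an AC-GAN style training graph this discriminator is typically applied twice, once to real images and once to generated ones, sharing weights on the second call; a usage sketch under that assumption (`real_images` and `fake_images` are placeholders for the surrounding code):

real_src_logits, real_cls_logits = discriminator(real_images, labels)
fake_src_logits, fake_cls_logits = discriminator(fake_images, labels, reuse=True)
# reuse=True makes the second call share the variables created by the first.
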
def AlexNet(X, keep_prob, is_train):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=True)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=True)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=True)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    return net
def AlexNet_target_task(X, keep_prob, num_cls):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=False)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=False)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=False)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC_1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    net = fc_layer(net, num_cls, 'FC_2', trainable=True, use_relu=False)
    return net
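
With `trainable=False` on every conv layer, only the two new FC layers receive gradients; the frozen stack is usually restored from a pretrained checkpoint. A sketch using standard TF 1.x checkpointing, assuming the snippet's variables live at the top scope (the checkpoint path is hypothetical):

conv_vars = [v for v in tf.global_variables() if v.name.startswith('CONV')]
saver = tf.train.Saver(var_list=conv_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, './pretrained/alexnet.ckpt')  # hypothetical path
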
Example #6
    def build_encoding(self, x):
        """
        Builds graph to create encoding from input x
        :param x: input image
        :return: flat layer containing the encoding
        """

        def getVars(name, w_shape):
            """
            Helper function to reuse variables in order to create a siamese net.
            :param name: Name of the variable we want
            :param w_shape: Shape of the variable we want
            :return: Variable with the given name if it exists, otherwise a new variable with the given shape
            """
            w = tf.get_variable("W" + name, w_shape, initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1),
                                regularizer=tf.contrib.layers.l2_regularizer(0.01))
            b = tf.get_variable("b" + name, [w_shape[-1]], initializer=tf.constant_initializer(0.1),
                                regularizer=tf.contrib.layers.l2_regularizer(0.01))
            return w, b

        prev_layer = x
        img_size = self.shape[2]

        for ind in range(len(self.conv_layer_size)):
            """
            Iterate through conv layers and apply convolution, ReLU and max-pool
            """
            if ind == 0:
                w_shape = [self.conv_dim[ind], self.conv_dim[ind], self.shape[3], self.conv_layer_size[ind]]
            else:
                w_shape = [self.conv_dim[ind], self.conv_dim[ind], self.conv_layer_size[ind - 1],
                           self.conv_layer_size[ind]]
            w, b = getVars("enc%s" % ind, w_shape)
            prev_layer = ops.max_pool_2x2(tf.nn.relu(ops.conv_2d(prev_layer, w, b)))

            self.enc_weights.append(w)
            self.enc_weights.append(b)

        # Reshape for fully connected layers
        reduction = 2 ** len(self.conv_layer_size)
        # Integer division keeps next_size an int (a float size would break tf.reshape)
        next_size = self.conv_layer_size[-1] * (img_size // reduction) * (img_size // reduction)
        flat_layer = tf.reshape(prev_layer, [-1, next_size])

        for ind in range(len(self.fcl_layer_size)):
            """
            Iterate through fully connected layers and apply matmul and sigmoid
            """
            if ind == 0:
                w_shape = [next_size, self.fcl_layer_size[0]]
            else:
                w_shape = [self.fcl_layer_size[ind - 1], self.fcl_layer_size[ind]]
            w, b = getVars("enc_fcl%s" % ind, w_shape)
            flat_layer = tf.nn.sigmoid(tf.matmul(flat_layer, w) + b)
            self.enc_weights.append(w)
            self.enc_weights.append(b)

        return flat_layer
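
Because `getVars` goes through `tf.get_variable`, running `build_encoding` twice under one scope with reuse enabled yields the shared weights a siamese net needs; a hedged sketch of that wiring (`model`, `x_left`, `x_right` are placeholders for the surrounding code):

with tf.variable_scope('siamese') as scope:
    encoding_left = model.build_encoding(x_left)    # first call creates Wenc0, benc0, ...
    scope.reuse_variables()
    encoding_right = model.build_encoding(x_right)  # second call reuses the same variables
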
Example #7
    def generator(self, x, c, reuse=False):
        print("Generator ...........")

        with tf.variable_scope('generator') as scope:
            if reuse:
                scope.reuse_variables()

            inputs = tf.concat([x, c], axis=-1)

            # Dense layer 1
            dense1 = ops.dense(inputs,
                               64 * 8 * 8,
                               0.02,
                               self.training,
                               norm=self.g_norm,
                               scope="g_inputs")
            dense1 = tf.nn.relu(dense1)
            dense1 = tf.reshape(dense1, [-1, 8, 8, 64])

            outputs = dense1
            # Res Block
            for i in range(self.res_block_size):
                outputs = resblock1_G(outputs,
                                      64,
                                      3,
                                      1,
                                      self.training,
                                      norm=self.g_norm,
                                      scope='g_residual_{:d}'.format(i))

            outputs = ops.batch_norm(outputs, self.training)
            outputs = tf.nn.relu(outputs)
            outputs = outputs + dense1

            # Upscaling by pixel shuffling
            for i in range(3):
                outputs = resblock2_G(outputs,
                                      256,
                                      3,
                                      1,
                                      2,
                                      self.training,
                                      norm=None,
                                      scope='g_upscale_{:d}'.format(i))

            outputs = ops.conv_2d(outputs,
                                  3,
                                  9,
                                  1,
                                  padding='SAME',
                                  stddev=0.02,
                                  training=self.training,
                                  norm=None,
                                  scope="g_conv_last")
            outputs = tf.nn.tanh(outputs)

        return outputs
Example #8
def discriminator(image, reuse=False):
    with tf.variable_scope('discriminator', reuse=reuse):
        conv_1 = ops.conv_2d(image, 64, scope='conv_1')
        relu_1 = ops.leaky_relu(conv_1)

        conv_2 = ops.conv_2d(relu_1, 128, scope='conv_2')
        conv_2_norm = ops.batch_norm(conv_2, True, scope="batch_norm_2")
        relu_2 = ops.leaky_relu(conv_2_norm)

        conv_3 = ops.conv_2d(relu_2, 256, scope='conv_3')
        conv_3_norm = ops.batch_norm(conv_3, True, scope="batch_norm_3")
        relu_3 = ops.leaky_relu(conv_3_norm)

        conv_4 = ops.conv_2d(relu_3, 512, scope='conv_4')
        conv_4_norm = ops.batch_norm(conv_4, True, scope="batch_norm_4")
        relu_4 = ops.leaky_relu(conv_4_norm)

        relu_4_flat = tf.reshape(relu_4, [flags.batch_size, -1])

        source_logits = ops.full_connect(relu_4_flat, 1, scope='source_logits')
        class_logits = ops.full_connect(relu_4_flat, 1, scope='class_logits')

        return source_logits, class_logits
Example #9
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('FCNet'):
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_1',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_2',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_1')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_3',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_4',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_2')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_5',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_6',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_3')

            x = flatten_layer(x)
            self.logits = fc_layer(x,
                                   self.conf.num_cls,
                                   name='fc_3',
                                   use_relu=False,
                                   keep_prob=1)
Example #10
def resblock1_G(inputs, filters, kernel_size, stride, training, norm, scope):
    with tf.variable_scope(scope):
        outputs = ops.conv_2d(inputs,
                              filters,
                              kernel_size,
                              stride,
                              padding='SAME',
                              stddev=0.02,
                              norm=norm,
                              training=training,
                              scope="conv_1")
        outputs = tf.nn.relu(outputs)
        outputs = ops.conv_2d(outputs,
                              filters,
                              kernel_size,
                              stride,
                              padding='SAME',
                              stddev=0.02,
                              norm=norm,
                              training=training,
                              scope="conv_2")
        outputs = outputs + inputs

    return outputs
Example #11
def resblock2_G(inputs, filters, kernel_size, stride, scale, training, norm,
                scope):
    with tf.variable_scope(scope):
        outputs = ops.conv_2d(inputs,
                              filters,
                              kernel_size,
                              stride,
                              padding='SAME',
                              stddev=0.02,
                              norm=norm,
                              training=training,
                              scope="conv")
        outputs = ops.pixelShuffler(outputs, scale)
        outputs = ops.batch_norm(outputs, training)
        outputs = tf.nn.relu(outputs)

    return outputs
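
`ops.pixelShuffler` is not shown on this page. Sub-pixel (pixel-shuffle) upscaling is commonly a thin wrapper over `tf.depth_to_space`, which trades a `scale**2` factor of channels for spatial resolution; a minimal sketch of such a helper, as an assumption about this project's implementation:

def pixelShuffler(inputs, scale):
    # [N, H, W, C * scale**2] -> [N, H * scale, W * scale, C]
    return tf.depth_to_space(inputs, scale)
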
Example #12
    def __call__(self, x):
        # Building network...
        with tf.variable_scope('CapsNet', reuse=tf.AUTO_REUSE):
            net, summary = conv_2d(x,
                                   5,
                                   2,
                                   self.conf.A,
                                   'CONV1',
                                   add_bias=self.conf.use_bias,
                                   add_reg=self.conf.L2_reg,
                                   batch_norm=self.conf.use_BN,
                                   is_train=self.is_train)
            # [?, 14, 14, A]
            self.summary_list.append(summary)

            pose, act, summary_list = capsules_init(
                net,
                1,
                1,
                OUT=self.conf.B,
                padding='VALID',
                pose_shape=[4, 4],
                add_reg=self.conf.L2_reg,
                use_bias=self.conf.use_bias,
                name='capsule_init')
            # [?, 14, 14, B, 4, 4], [?, 14, 14, B]
            self.summary_list.extend(summary_list)

            pose, act, summary_list = capsule_conv(pose,
                                                   act,
                                                   K=3,
                                                   OUT=self.conf.C,
                                                   stride=2,
                                                   add_reg=self.conf.L2_reg,
                                                   iters=self.conf.iter,
                                                   std=1,
                                                   name='capsule_conv1')
            # [?, 6, 6, C, 4, 4], [?, 6, 6, C]
            self.summary_list.extend(summary_list)

            pose, act, summary_list = capsule_conv(pose,
                                                   act,
                                                   K=3,
                                                   OUT=self.conf.D,
                                                   stride=1,
                                                   add_reg=self.conf.L2_reg,
                                                   iters=self.conf.iter,
                                                   std=1,
                                                   name='capsule_conv2')
            # [?, 4, 4, D, 4, 4], [?, 4, 4, D]
            self.summary_list.extend(summary_list)

            if self.conf.fc:
                pose, act, summary_list = capsule_fc(
                    pose,
                    act,
                    OUT=self.conf.E,
                    add_reg=self.conf.L2_reg,
                    iters=self.conf.iter,
                    std=1,
                    add_coord=self.conf.add_coords,
                    name='capsule_fc1')
                # [?, E, 4, 4], [?, E]
                self.summary_list.extend(summary_list)
                pose = pose[:, tf.newaxis, :, :, :]
                act = act[:, tf.newaxis, :]
                # [?, 1, E, 4, 4], [?, 1, E]

            return act, pose, self.summary_list
def discriminator(images, reuse=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()

        # conv1
        conv1 = ops.conv_2d(images, 64, scope="conv1")

        # leaky ReLU
        h1 = ops.leaky_relu(conv1)

        # conv2
        conv2 = ops.conv_2d(h1, 128, scope="conv2")

        # batch norm
        norm2 = ops.batch_norm(conv2,
                               scope="batch_norm2",
                               is_training=FLAGS.is_train)

        # leaky ReLU
        h2 = ops.leaky_relu(norm2)

        # conv3
        conv3 = ops.conv_2d(h2, 256, scope="conv3")
        # batch norm
        norm3 = ops.batch_norm(conv3,
                               scope="batch_norm3",
                               is_training=FLAGS.is_train)

        # leaky ReLU
        h3 = ops.leaky_relu(norm3)
        # conv4
        conv4 = ops.conv_2d(h3, 512, scope="conv4")

        # batch norm
        norm4 = ops.batch_norm(conv4,
                               scope="batch_norm4",
                               is_training=FLAGS.is_train)

        # leaky ReLU
        h4 = ops.leaky_relu(norm4)

        conv5 = ops.conv_2d(h4, 1024, scope="conv5")

        conv5 = tf.nn.dropout(conv5, 0.5, name='conv_5_drop_out')

        norm5 = ops.batch_norm(conv5,
                               scope="batch_norm5",
                               is_training=FLAGS.is_train)

        h5 = ops.leaky_relu(norm5)
        # reshape
        h5_reshape = tf.reshape(h5, [FLAGS.batch_size, -1])

        # source logits
        source_logits = ops.fc(h5_reshape, 1, scope="source_logits")

        # class logits
        class_logits = ops.fc(h5_reshape,
                              FLAGS.num_classes,
                              scope="class_logits",
                              decay=4e-3)

        return source_logits, class_logits
Example #14
def create_network(X, numClasses, is_train):
    """
    Building the Residual Network with 50 layer
    :param X: input
    :param h: number of units in the fully connected layer
    :param keep_prob: dropout rate
    :param numClasses: number of classes
    :param is_train: to be used by batch normalization
    :return:
    """
    res1 = conv_2d(X,
                   layer_name='res1',
                   stride=2,
                   filter_size=7,
                   num_filters=64,
                   is_train=is_train,
                   batch_norm=True,
                   use_relu=True)
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    res1 = max_pool(res1, ksize=3, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1 after max pool')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 is_train,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 is_train,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 is_train,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 is_train,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=128,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 is_train,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 is_train,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 is_train,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 is_train,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=256,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 is_train,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 is_train,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 is_train,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 is_train,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 is_train,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 is_train,
                                 block_name='res5a',
                                 s1=2,
                                 k1=1,
                                 nf1=512,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 is_train,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 is_train,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        # res5c: [batch_size, 8, 8, 2048]
        print('Res5c')
        print(res5c.get_shape())
        k_size = res5c.get_shape().as_list()[1]
        num_filters = res5c.get_shape().as_list()[-1]

        f_map = tf.reshape(res5c, [-1, k_size * k_size, num_filters],
                           name='reshape_fmaps')
        # [batch_size, 64, 2048]

        res5c_gap = avg_pool(res5c,
                             ksize=k_size,
                             stride=1,
                             name='res5_avg_pool')
        # [batch_size, 1, 1, 2048]
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c_gap.get_shape())
        print('---------------------')

    net_flatten = flatten_layer(res5c_gap)
    # [batch_size, 2048]
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net, W = fc_layer(net_flatten,
                      numClasses,
                      'FC1',
                      is_train=is_train,
                      batch_norm=True,
                      add_reg=True,
                      use_relu=False)
    # W: [2048, 14]
    W_tiled = tf.tile(tf.expand_dims(W, axis=0), [args.val_batch_size, 1, 1])

    # [2048, 14] -> [1, 2048, 14] -> [batch_size, 2048, 14]

    heat_map_list = tf.unstack(tf.matmul(f_map, W_tiled), axis=0)
    # [batch_size, 64, 14]
    # list of heat-maps of length batch_size, each element: [64, 14]

    cls_act_map_list = [
        tf.nn.softmax(heat_map, dim=0) for heat_map in heat_map_list
    ]
    cls_act_map = tf.stack(cls_act_map_list, axis=0)
    # [batch_size, 64, 14]

    return net, net_flatten, res5c, cls_act_map
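
The tail of this function builds class activation maps: each of the 64 spatial positions projects its 2048-dim feature vector through the FC weights, heat_map[p, c] = sum_k f_map[p, k] * W[k, c], and the softmax over dim=0 then normalizes each class's map across positions. A NumPy sketch of the per-image computation, for reference only:

import numpy as np

def class_activation_map(f_map, W):
    # f_map: [64, 2048] spatial features; W: [2048, num_classes] FC weights
    heat = f_map @ W                                     # [64, num_classes]
    heat = np.exp(heat - heat.max(axis=0, keepdims=True))
    return heat / heat.sum(axis=0, keepdims=True)        # softmax over positions
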