Example #1
    def build_fcnet(self, pool5, dropout):
        '''
        Build the fully connected head of the network.

        :param pool5: the tensor to pass to the fully connected layers
        :param dropout: if True, dropout is applied to the fully connected layers
        :return: logits of the final fully connected layer
        '''
        pool_dim = int(np.prod(pool5.get_shape()[1:]))
        pool5_flatten = tf.reshape(pool5, [-1, pool_dim])

        fc1 = ops.fc_layer(pool5_flatten,
                           pool_dim,
                           4096,
                           scope='fc6',
                           dropout=dropout,
                           keep_prob=self.dropout_keep_prob)
        fc2 = ops.fc_layer(fc1,
                           4096,
                           4096,
                           scope='fc7',
                           dropout=dropout,
                           keep_prob=self.dropout_keep_prob)
        fc3 = ops.fc_layer(fc2,
                           4096,
                           10,
                           scope='fc8',
                           dropout=False,
                           keep_prob=1.0)
        return fc3
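The ops.fc_layer helper used above is not shown on this page. Below is a minimal TF1-style sketch consistent with the call signature above; the ReLU activation and the initializers are assumptions, not the library's actual code:

# Hypothetical sketch of an fc_layer helper matching the calls above (TF1).
import tensorflow as tf

def fc_layer(x, in_dim, out_dim, scope, dropout=False, keep_prob=1.0):
    with tf.variable_scope(scope):
        w = tf.get_variable('weights', [in_dim, out_dim],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('biases', [out_dim],
                            initializer=tf.zeros_initializer())
        out = tf.nn.relu(tf.matmul(x, w) + b)  # assumed nonlinearity
        if dropout:
            out = tf.nn.dropout(out, keep_prob=keep_prob)
        return out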
Example #2
 def build_network(self, x):
     # Building network...
     with tf.variable_scope('LeNet'):
         x = conv_2d(x,
                     filter_size=5,
                     num_filters=6,
                     name='conv_1',
                     keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = max_pool(x, 2, 2, 'pool_1')
         x = conv_2d(x,
                     filter_size=5,
                     num_filters=16,
                     name='conv_2',
                     keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = max_pool(x, 2, 2, 'pool_2')
         x = flatten_layer(x)
         x = drop_out(x, self.keep_prob_pl)
         x = fc_layer(x, 120, name='fc_1', keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         x = fc_layer(x, 84, name='fc_2', keep_prob=1)
         x = drop_out(x, self.keep_prob_pl)
         self.logits = fc_layer(x,
                                self.conf.num_cls,
                                name='fc_3',
                                use_relu=False,
                                keep_prob=1)
Example #3
def AlexNet_target_task(X, keep_prob, num_cls):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=False)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=False)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=False)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=False)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC_1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    net = fc_layer(net, num_cls, 'FC_2', trainable=True, use_relu=False)
    return net
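Because every conv layer above is created with trainable=False, tf.trainable_variables() contains only the FC head, so a plain minimize call fine-tunes just FC_1 and FC_2. A hedged usage sketch; X, y, keep_prob, and num_cls are assumed placeholders/values:

# Hedged sketch: the frozen conv stack is untouched; only FC vars update.
logits = AlexNet_target_task(X, keep_prob, num_cls)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)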
Example #4
 def __call__(self, x, reuse=False):
     '''
     Args:
         x - 4D tensor [batch_size, 28, 28, 1]
         reuse - bool
             whether to reuse variables
     Return:
         d - 2D tensor [batch, 1]
     '''
     with tf.variable_scope(self.name) as scope:
         if reuse:
             scope.reuse_variables()
         d = convolution(x, [4, 4, 1, 32], strides=[1, 2, 2, 1], activation=leaky_relu, scope='conv1')
         d = flatten(d)
         d = fc_layer(d, 128, activation=leaky_relu, scope="fc1")
         d = fc_layer(d, 1, scope="fc2")
     return d
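In a typical TF1 GAN setup, a discriminator like this is called twice, with reuse=True on the second call so both calls share one set of variables. A hedged sketch; D, G, x_real, and z are assumed to exist:

# Hedged sketch of wiring a shared-variable discriminator (TF1).
d_real = D(x_real)             # first call creates the variables
d_fake = D(G(z), reuse=True)   # second call reuses the same variables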
Example #5
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('FCNet'):
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_1',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=32,
                        name='conv_2',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_1')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_3',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=64,
                        name='conv_4',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_2')

            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_5',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = conv_2d(x,
                        filter_size=3,
                        stride=1,
                        num_filters=128,
                        name='conv_6',
                        keep_prob=1)
            x = tf.contrib.slim.batch_norm(x)
            x = max_pool(x, 2, 2, 'pool_3')

            x = flatten_layer(x)
            self.logits = fc_layer(x,
                                   self.conf.num_cls,
                                   name='fc_3',
                                   use_relu=False,
                                   keep_prob=1)
Example #6
 def build_model(self, x):
     logger.info("Buidling model starts...")
     x_r = tf.reshape(x, [-1, 28, 28, 1])
     o = deform_conv2d(x_r, [7, 7, 1, 50], [5, 5, 1, 32],
                       activation=tf.nn.relu,
                       scope="deform_conv1")
     o = tf.nn.max_pool(o,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='VALID')
     o = deform_conv2d(o, [7, 7, 32, 50], [5, 5, 32, 64],
                       activation=tf.nn.relu,
                       scope="deform_conv2")
     o = tf.nn.max_pool(o,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='VALID')
     o = tf.reshape(o, [self.batch_size, -1])
     o = fc_layer(o, 512, activation=tf.nn.relu, scope="fc1")
     o = fc_layer(o, 10, scope="fc2")
     print_vars("trainable_variables")
     logger.info("Buidling model done")
     return o
Example #7
def AlexNet(X, keep_prob, is_train):
    net = conv_2d(X, 7, 2, 96, 'CONV1', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool1')
    net = conv_2d(net, 5, 2, 256, 'CONV2', trainable=True)
    net = lrn(net)
    net = max_pool(net, 3, 2, 'MaxPool2')
    net = conv_2d(net, 3, 1, 384, 'CONV3', trainable=True)
    net = conv_2d(net, 3, 1, 384, 'CONV4', trainable=True)
    net = conv_2d(net, 3, 1, 256, 'CONV5', trainable=True)
    net = max_pool(net, 3, 2, 'MaxPool3')
    layer_flat = flatten_layer(net)
    net = fc_layer(layer_flat, 512, 'FC1', trainable=True, use_relu=True)
    net = dropout(net, keep_prob)
    return net
Example #8
 def __call__(self, z, reuse=False):
     '''
     Args:
         z - 2D tensor [batch, zdim]
             latent vector space
         reuse - bool
             whether to reuse variables
     Return:
         g - 4D tensor [batch_size, 28, 28, 1], values in [0, 1]
     '''
     assert get_shape(z)[0] == self.batch_size, "Batch size %d doesn't match %d" % (get_shape(z)[0], self.batch_size)
     with tf.variable_scope(self.name) as scope:
         if reuse:
             scope.reuse_variables()
         g = fc_layer(z, 7*7*128, activation=tf.nn.relu, batch_norm=False, scope="fc1")
         g = tf.reshape(g, [-1, 7, 7, 128])
         g = deconvolution(g, [4, 4, 64, 128], output_shape=[self.batch_size, 14, 14, 64], strides=[1, 2, 2, 1], activation=tf.nn.relu, scope='deconv1')
         g = deconvolution(g, [4, 4, 1, 64], output_shape=[self.batch_size, 28, 28, 1], strides=[1, 2, 2, 1], activation=tf.nn.sigmoid, scope='deconv2')
         return g
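A hedged usage sketch for the generator above; G, D, batch_size, and z_dim are assumed names:

# Hedged sketch: sample latent vectors, decode, and score with a discriminator.
z = tf.random_uniform([batch_size, z_dim], minval=-1.0, maxval=1.0)
fake_images = G(z)                    # [batch_size, 28, 28, 1], values in (0, 1)
d_fake = D(fake_images, reuse=True)   # pairs with a discriminator as in Example #4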
Example #9
# input data:
# Number of classes.
n_classes = 3

# number of units in the first hidden layer
h1 = 52
epochs = 10  # Total number of training epochs
batch_size = 108  # Training batch size
display_freq = 25  # Frequency of displaying the training results
num_tr_iter = int(len_M_label / batch_size)
# Create graph
# Placeholders for inputs (x), outputs(y)
x = tf.placeholder(tf.float32, shape=[None, 21], name='X')
y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')
learning_r = tf.placeholder(tf.float32, name='Epsilon')
fc1 = fc_layer(x, h1, 'FC1', use_relu=True)
# initialize W_input2hidden and b_hidden to record Wn and Wn-1
W_input2hidden_former = tf.Variable(tf.zeros(shape=[x.get_shape()[1], h1]),
                                    name='W_input2hidden_former')
b_hidden_former = tf.Variable(tf.zeros(shape=[h1]), name='b_hidden_former')
output_logits = fc_layer(fc1[0], n_classes, 'OUT', use_relu=False)
# initialize W_hidden2output and b_output to record Wn and Wn-1
W_hidden2output_former = tf.Variable(
    tf.zeros(shape=[fc1[0].get_shape()[1], n_classes]),
    name='W_hidden2output_former')
b_output_former = tf.Variable(tf.zeros(shape=[n_classes]),
                              name='b_output_former')

# Define the loss function, optimizer, and accuracy
with tf.name_scope('Loss'):
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=y, logits=output_logits[0]), name='loss')
Example #10
# Number of classes, one class for each of 10 digits.
n_classes = 10

# number of units in the first hidden layer
h1 = 200

# Create graph
# Placeholders for inputs (x), outputs(y)
with tf.variable_scope('Input'):
    x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='X')
    tf.summary.image('input_image',
                     tf.reshape(x, (-1, img_w, img_h, 1)),
                     max_outputs=5)
    y = tf.placeholder(tf.float32, shape=[None, n_classes], name='Y')

fc1 = fc_layer(x, h1, 'Hidden_layer', use_relu=True)
output_logits = fc_layer(fc1, n_classes, 'Output_layer', use_relu=False)

# Define the loss function, optimizer, and accuracy
with tf.variable_scope('Train'):
    with tf.variable_scope('Loss'):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=y, logits=output_logits),
                              name='loss')
        tf.summary.scalar('loss', loss)
    with tf.variable_scope('Optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                           name='Adam-op').minimize(loss)
    with tf.variable_scope('Accuracy'):
        correct_prediction = tf.equal(tf.argmax(output_logits, 1),
                                      tf.argmax(y, 1),
                                      name='correct_pred')
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
                                  name='accuracy')
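A graph like this is usually driven by a TF1 session loop. A minimal hedged sketch; num_steps, batch_size, and get_next_batch are assumed:

# Hedged training-loop sketch; get_next_batch is an assumed batching helper.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(num_steps):
        x_batch, y_batch = get_next_batch(batch_size)
        _, loss_val, acc = sess.run([optimizer, loss, accuracy],
                                    feed_dict={x: x_batch, y: y_batch})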
Example #11
def create_network(X, h, keep_prob, numClasses):
    num_channels = X.get_shape().as_list()[-1]
    res1 = new_conv_layer(inputs=X,
                          layer_name='res1',
                          stride=2,
                          num_inChannel=num_channels,
                          filter_size=4,
                          num_filters=32,
                          batch_norm=True,
                          use_relu=True)

    #res1 = max_pool(res1, ksize=2, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 32,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 64,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 64,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=32,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=32,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=64,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 64,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=48,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 128,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 128,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 128,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=48,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=48,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=128,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 128,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=64,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 256,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 256,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 256,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 256,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 256,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 256,
                                 block_name='res5a',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 512,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 512,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        print('Res5c')
        print(res5c.get_shape())

        res5c = avg_pool(res5c, ksize=4, stride=1, name='res5_avg_pool')
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c.get_shape())
        print('---------------------')

    net_flatten, _ = flatten_layer(res5c)
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net = fc_layer(net_flatten,
                   h,
                   'FC1',
                   batch_norm=True,
                   add_reg=True,
                   use_relu=True)
    net = dropout(net, keep_prob)
    net = fc_layer(net,
                   numClasses,
                   'FC2',
                   batch_norm=True,
                   add_reg=True,
                   use_relu=False)

    return net
Example #12
h1 = 100

# level of the noise in noisy data
noise_level = 0.6

# Create graph
# Placeholders for inputs (x), outputs(y)
with tf.variable_scope('Input'):
    x_original = tf.placeholder(tf.float32,
                                shape=[None, img_size_flat],
                                name='X_original')
    x_noisy = tf.placeholder(tf.float32,
                             shape=[None, img_size_flat],
                             name='X_noisy')

fc1, W1 = fc_layer(x_noisy, h1, 'Hidden_layer', use_relu=True)
out, W2 = fc_layer(fc1, img_size_flat, 'Output_layer', use_relu=False)

# calculate the activation
h_active = W1 / tf.sqrt(tf.reduce_sum(tf.square(W1), axis=0))  # [784, 100]

# Define the loss function, optimizer, and accuracy
with tf.variable_scope('Train'):
    with tf.variable_scope('Loss'):
        loss = tf.reduce_mean(tf.losses.mean_squared_error(x_original, out),
                              name='loss')
        tf.summary.scalar('loss', loss)
    with tf.variable_scope('Optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                           name='Adam-op').minimize(loss)
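The graph expects pre-corrupted inputs through x_noisy. A hedged sketch of how noise_level is commonly applied to clean numpy batches before feeding; x_batch and sess are assumed:

# Hedged sketch: corrupt a clean batch with Gaussian noise, then train on it.
import numpy as np
x_batch_noisy = x_batch + noise_level * np.random.normal(size=x_batch.shape)
sess.run(optimizer, feed_dict={x_original: x_batch, x_noisy: x_batch_noisy})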
Example #13
    def build(self, rgb, label_num, kp, last_layer_type="softmax"):

        assert rgb.get_shape().as_list()[1:] == [224, 224, 3]

        self.conv1 = conv_layer(rgb, 7, 3, 64, 2, "scale1")
        self.conv1 = bn(self.conv1,
                        is_training=self.is_training,
                        name="scale1")
        self.conv1 = tf.nn.relu(self.conv1)
        self.conv1 = maxpool(self.conv1, 3, 2, "pool1")

        with tf.variable_scope("scale2"):
            self.block1_1 = res_block_3_layer(self.conv1, [64, 64, 256],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block1_2 = res_block_3_layer(self.block1_1, [64, 64, 256],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block1_3 = res_block_3_layer(self.block1_2, [64, 64, 256],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)

        with tf.variable_scope("scale3"):
            self.block2_1 = res_block_3_layer(self.block1_3, [128, 128, 512],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block2_2 = res_block_3_layer(self.block2_1, [128, 128, 512],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block2_3 = res_block_3_layer(self.block2_2, [128, 128, 512],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block2_4 = res_block_3_layer(self.block2_3, [128, 128, 512],
                                              "block4",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("scale4"):
            self.block3_1 = res_block_3_layer(self.block2_4, [256, 256, 1024],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block3_2 = res_block_3_layer(self.block3_1, [256, 256, 1024],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_3 = res_block_3_layer(self.block3_2, [256, 256, 1024],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_4 = res_block_3_layer(self.block3_3, [256, 256, 1024],
                                              "block4",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_5 = res_block_3_layer(self.block3_4, [256, 256, 1024],
                                              "block5",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_6 = res_block_3_layer(self.block3_5, [256, 256, 1024],
                                              "block6",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("scale5"):
            self.block4_1 = res_block_3_layer(self.block3_6, [512, 512, 2048],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block4_2 = res_block_3_layer(self.block4_1, [512, 512, 2048],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block4_3 = res_block_3_layer(self.block4_2, [512, 512, 2048],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("fc"):
            self.pool2 = maxpool(self.block4_3, 7, 1, "pool2")
            self.fc1 = fc_layer(self.pool2, 2048, 2048, "fc1")
            self.fc1 = tf.nn.relu(tf.nn.dropout(self.fc1, keep_prob=kp))
            self.fc2 = fc_layer(self.fc1, 2048, label_num, "fc2")

        if last_layer_type == "sigmoid":
            self.prob = tf.nn.sigmoid(self.fc2)
        elif last_layer_type == "softmax":
            self.prob = tf.nn.softmax(self.fc2)
        elif last_layer_type == "no":
            self.prob = self.fc2
        return self.prob
Example #14
def create_network(X, numClasses, is_train):
    """
    Building the Residual Network with 50 layer
    :param X: input
    :param h: number of units in the fully connected layer
    :param keep_prob: dropout rate
    :param numClasses: number of classes
    :param is_train: to be used by batch normalization
    :return:
    """
    res1 = conv_2d(X,
                   layer_name='res1',
                   stride=2,
                   filter_size=7,
                   num_filters=64,
                   is_train=is_train,
                   batch_norm=True,
                   use_relu=True)
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    res1 = max_pool(res1, ksize=3, stride=2, name='res1_max_pool')
    print('---------------------')
    print('Res1')
    print(res1.get_shape())
    print('---------------------')
    # Res2
    with tf.variable_scope('Res2'):
        res2a = bottleneck_block(res1,
                                 is_train,
                                 block_name='res2a',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2a_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2a_branch1',
                                 first_block=True)
        print('Res2a')
        print(res2a.get_shape())
        print('---------------------')
        res2b = bottleneck_block(res2a,
                                 is_train,
                                 block_name='res2b',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2b_branch1',
                                 first_block=False)
        print('Res2b')
        print(res2b.get_shape())
        print('---------------------')
        res2c = bottleneck_block(res2b,
                                 is_train,
                                 block_name='res2c',
                                 s1=1,
                                 k1=1,
                                 nf1=64,
                                 name1='res2c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=64,
                                 name2='res2c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=256,
                                 name3='res2c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res2c_branch1',
                                 first_block=False)
        print('Res2c')
        print(res2c.get_shape())
        print('---------------------')

    # Res3
    with tf.variable_scope('Res3'):
        res3a = bottleneck_block(res2c,
                                 is_train,
                                 block_name='res3a',
                                 s1=2,
                                 k1=1,
                                 nf1=128,
                                 name1='res3a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res3a_branch1',
                                 first_block=True)
        print('Res3a')
        print(res3a.get_shape())
        print('---------------------')
        res3b = bottleneck_block(res3a,
                                 is_train,
                                 block_name='res3b',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3b_branch1',
                                 first_block=False)
        print('Res3b')
        print(res3b.get_shape())
        print('---------------------')
        res3c = bottleneck_block(res3b,
                                 is_train,
                                 block_name='res3c',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3c_branch1',
                                 first_block=False)
        print('Res3c')
        print(res3c.get_shape())
        print('---------------------')
        res3d = bottleneck_block(res3c,
                                 is_train,
                                 block_name='res3d',
                                 s1=1,
                                 k1=1,
                                 nf1=128,
                                 name1='res3d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=128,
                                 name2='res3d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=512,
                                 name3='res3d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res3d_branch1',
                                 first_block=False)
        print('Res3d')
        print(res3d.get_shape())
        print('---------------------')

    # Res4
    with tf.variable_scope('Res4'):
        res4a = bottleneck_block(res3d,
                                 is_train,
                                 block_name='res4a',
                                 s1=2,
                                 k1=1,
                                 nf1=256,
                                 name1='res4a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res4a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res4a')
        print(res4a.get_shape())
        print('---------------------')
        res4b = bottleneck_block(res4a,
                                 is_train,
                                 block_name='res4b',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4b_branch1',
                                 first_block=False)
        print('Res4b')
        print(res4b.get_shape())
        print('---------------------')
        res4c = bottleneck_block(res4b,
                                 is_train,
                                 block_name='res4c',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4c_branch1',
                                 first_block=False)
        print('Res4c')
        print(res4c.get_shape())
        print('---------------------')
        res4d = bottleneck_block(res4c,
                                 is_train,
                                 block_name='res4d',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4d_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4d_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4d_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4d_branch1',
                                 first_block=False)
        print('Res4d')
        print(res4d.get_shape())
        print('---------------------')
        res4e = bottleneck_block(res4d,
                                 is_train,
                                 block_name='res4e',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4e_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4e_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4e_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4e_branch1',
                                 first_block=False)
        print('Res4e')
        print(res4e.get_shape())
        print('---------------------')
        res4f = bottleneck_block(res4e,
                                 is_train,
                                 block_name='res4f',
                                 s1=1,
                                 k1=1,
                                 nf1=256,
                                 name1='res4f_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=256,
                                 name2='res4f_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=1024,
                                 name3='res4f_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res4f_branch1',
                                 first_block=False)
        print('Res4f')
        print(res4f.get_shape())
        print('---------------------')

    # Res5
    with tf.variable_scope('Res5'):
        res5a = bottleneck_block(res4f,
                                 is_train,
                                 block_name='res5a',
                                 s1=2,
                                 k1=1,
                                 nf1=512,
                                 name1='res5a_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5a_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5a_branch2c',
                                 s4=2,
                                 k4=1,
                                 name4='res5a_branch1',
                                 first_block=True)
        print('---------------------')
        print('Res5a')
        print(res5a.get_shape())
        print('---------------------')
        res5b = bottleneck_block(res5a,
                                 is_train,
                                 block_name='res5b',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5b_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5b_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5b_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5b_branch1',
                                 first_block=False)
        print('Res5b')
        print(res5b.get_shape())
        print('---------------------')
        res5c = bottleneck_block(res5b,
                                 is_train,
                                 block_name='res5c',
                                 s1=1,
                                 k1=1,
                                 nf1=512,
                                 name1='res5c_branch2a',
                                 s2=1,
                                 k2=3,
                                 nf2=512,
                                 name2='res5c_branch2b',
                                 s3=1,
                                 k3=1,
                                 nf3=2048,
                                 name3='res5c_branch2c',
                                 s4=1,
                                 k4=1,
                                 name4='res5c_branch1',
                                 first_block=False)
        # res5c: [batch_size, 8, 8, 2048]
        print('Res5c')
        print(res5c.get_shape())
        k_size = res5c.get_shape().as_list()[1]
        num_filters = res5c.get_shape().as_list()[-1]

        f_map = tf.reshape(res5c, [-1, k_size * k_size, num_filters],
                           name='reshape_fmaps')
        # [batch_size, 64, 2048]

        res5c_gap = avg_pool(res5c,
                             ksize=k_size,
                             stride=1,
                             name='res5_avg_pool')
        # [batch_size, 1, 1, 2048]
        print('---------------------')
        print('Res5c after AVG_POOL')
        print(res5c_gap.get_shape())
        print('---------------------')

    net_flatten = flatten_layer(res5c_gap)
    # [batch_size, 2048]
    print('---------------------')
    print('Matrix dimension to the first FC layer')
    print(net_flatten.get_shape())
    print('---------------------')
    net, W = fc_layer(net_flatten,
                      numClasses,
                      'FC1',
                      is_train=is_train,
                      batch_norm=True,
                      add_reg=True,
                      use_relu=False)
    # W: [2048, 14]
    W_tiled = tf.tile(tf.expand_dims(W, axis=0), [args.val_batch_size, 1, 1])

    # [2048, 14] -> [1, 2048, 14] -> [batch_size, 2048, 14]

    heat_map_list = tf.unstack(tf.matmul(f_map, W_tiled), axis=0)
    # [batch_size, 64, 14]
    # list of heat-maps of length batch_size, each element: [64, 14]

    cls_act_map_list = [
        tf.nn.softmax(heat_map, dim=0) for heat_map in heat_map_list
    ]
    cls_act_map = tf.stack(cls_act_map_list, axis=0)
    # [batch_size, 64, 14]

    return net, net_flatten, res5c, cls_act_map
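The returned cls_act_map stacks one 64-way softmax per class, and the comments above give k_size = 8, so each class column can be reshaped back to the 8x8 spatial grid for visualization. A hedged sketch; maps is an assumed fetched value of cls_act_map:

# Hedged sketch: recover one class's spatial activation grid from cls_act_map.
import numpy as np
cls_idx = 0                              # class to visualize (assumed)
k = 8                                    # 64 spatial positions = 8 x 8
cam = maps[0, :, cls_idx].reshape(k, k)  # 8x8 heat map for the first image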
Example #15
x = tf.placeholder(tf.float64, shape=[None, n_des], name='X')
y = tf.placeholder(tf.float64, shape=[None, n_des], name='Y')
x1 = tf.placeholder(tf.float64, shape=[None, h], name='X1')
z = tf.placeholder(tf.float64, shape=[None, n_classes], name='Z')
learning_rate = tf.placeholder(tf.float64, shape=None, name='epsilon')
W_hid2out_form = tf.Variable(tf.zeros(shape=[h, n_des]), name='W_hid2out_form')
W_in2hid_form = tf.Variable(tf.zeros(shape=[n_des, h]), name='W_in2hid_form')
b_hid2out_form = tf.Variable(tf.zeros(shape=[n_des]), name='b_hid2out_form')
b_in2hid_form = tf.Variable(tf.zeros(shape=[h]), name='b_in2hid_form')
W_hid2out_form = tf.cast(W_hid2out_form, tf.float64)
W_in2hid_form = tf.cast(W_in2hid_form, tf.float64)
b_hid2out_form = tf.cast(b_hid2out_form, tf.float64)
b_in2hid_form = tf.cast(b_in2hid_form, tf.float64)

fc1 = fc_layer(x, h, 'Hidden_layer', use_relu=True)
output_logits = fc_layer(fc1[0], n_des, 'Output_layer', use_relu=False)
fc11 = fc_layer(x1, hh, 'Hidden_layer1', use_relu=True)
output_logits1 = fc_layer(fc11[0], n_classes, 'Output_layer1', use_relu=False)

# Define the loss as mean squared error
with tf.name_scope('MSE'):
    loss = tf.losses.mean_squared_error(labels=y, predictions=output_logits[0])
    tf.summary.scalar('MSE', loss)
loss1 = tf.losses.mean_squared_error(labels=z, predictions=output_logits1[0])
# Compute the root MSE
with tf.name_scope('RMSE'):
    rmse = tf.sqrt(loss, name='RMSE')
    tf.summary.scalar('RMSE', rmse)
# Define the optimizer
with tf.name_scope('Optimizer'):
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                       name='Adam-op').minimize(loss)
Example #16
    def __call__(self, x, state, scope=None):
        '''
        Args:
            x - input 2D tensor [batch_size x 2*self.chunk_samples]
            state - tuple
                (hidden, cell_state)
            scope - string
                defaults to None
        '''
        with tf.variable_scope(scope or type(self).__name__):
            h, c = state
            with tf.variable_scope("Prior"):
                prior_hidden = fc_layer(h,
                                        self.n_prior_hidden,
                                        activation=tf.nn.relu,
                                        scope="hidden")
                prior_mu = fc_layer(prior_hidden, self.n_z, scope="mu")
                prior_sigma = fc_layer(prior_hidden,
                                       self.n_z,
                                       activation=tf.nn.softplus,
                                       scope="sigma")  # >=0

            x_1 = fc_layer(x, self.n_x_1, activation=tf.nn.relu,
                           scope="phi_x")  # >=0

            with tf.variable_scope("Encoder"):
                enc_hidden = fc_layer(tf.concat(values=(x_1, h), axis=1),
                                      self.n_enc_hidden,
                                      activation=tf.nn.relu,
                                      scope="hidden")
                enc_mu = fc_layer(enc_hidden, self.n_z, scope='mu')
                enc_sigma = fc_layer(enc_hidden,
                                     self.n_z,
                                     activation=tf.nn.softplus,
                                     scope='sigma')

            # Random sampling ~ N(0, 1)
            eps = tf.random_normal((get_shape(x)[0], self.n_z),
                                   0.0,
                                   1.0,
                                   dtype=tf.float32)
            # z = mu + sigma*epsilon, latent variable from reparametrization trick
            z = tf.add(enc_mu, tf.multiply(enc_sigma, eps))
            z_1 = fc_layer(z, self.n_z_1, activation=tf.nn.relu, scope="phi_z")

            with tf.variable_scope("Decoder"):
                dec_hidden = fc_layer(tf.concat(values=(z_1, h), axis=1),
                                      self.n_dec_hidden,
                                      activation=tf.nn.relu,
                                      scope="hidden")
                dec_mu = fc_layer(dec_hidden, self.n_x, scope="mu")
                dec_sigma = fc_layer(dec_hidden,
                                     self.n_x,
                                     activation=tf.nn.softplus,
                                     scope="sigma")

            output, next_state = self.lstm(
                tf.concat(values=(x_1, z_1), axis=1), state)

        return (enc_mu, enc_sigma, dec_mu, dec_sigma, prior_mu,
                prior_sigma), next_state
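The tuple returned above carries exactly the Gaussian parameters needed for the variational loss. The KL term between the encoder and prior diagonal Gaussians has a closed form; below is a hedged sketch of that term, standard math rather than code from the original repository:

# Hedged sketch: per-sample KL(N(enc_mu, enc_sigma^2) || N(prior_mu, prior_sigma^2));
# a small epsilon guards the logs and the division.
eps = 1e-8
kl = tf.reduce_sum(
    tf.log(prior_sigma + eps) - tf.log(enc_sigma + eps)
    + (tf.square(enc_sigma) + tf.square(enc_mu - prior_mu))
    / (2.0 * tf.square(prior_sigma) + eps)
    - 0.5,
    axis=1)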