示例#1
0
文件: network.py 项目: pedrolcn/MNIST
    def _create_network(self):
        """Build the CNN inference graph, pinned to the CPU.

        Wires ``self.x_image`` through two conv → batch-norm → ReLU → 2x2
        max-pool stages, then a 7x7 average pool with dropout, and finally a
        linear readout producing the logits ``self.y_conv``. All intermediate
        tensors are stored as attributes on ``self``.

        Relies on attributes created elsewhere (weights ``W_conv1/W_conv2/W_fc``,
        batch-norm params ``beta_*``/``gamma_*``, placeholders ``x_image``,
        ``phase_train``, ``keep_prob``, bias ``b_fc``).
        """
        with tf.device('/cpu:0'):
            with tf.name_scope('Conv1'):
                # conv -> batch norm (train/inference behavior switched by
                # phase_train) -> ReLU -> 2x2 max pool
                self.conv1_bn = batch_norm(conv2d(self.x_image, self.W_conv1),
                                           self.beta_conv1, self.gamma_conv1,
                                           self.phase_train)
                self.h_conv1 = tf.nn.relu(self.conv1_bn)
                self.h_pool1 = max_pool_2x2(self.h_conv1)

            with tf.name_scope('Conv2'):
                # second identical stage, operating on the pooled feature map
                self.conv2_bn = batch_norm(conv2d(self.h_pool1, self.W_conv2),
                                           self.beta_conv2, self.gamma_conv2,
                                           self.phase_train)

                self.h_conv2 = tf.nn.relu(self.conv2_bn)
                self.h_pool2 = max_pool_2x2(self.h_conv2)

            with tf.name_scope('Avg_pool'):
                # 7x7 VALID average pool collapses the spatial dims
                # (assumes the feature map is 7x7 here — TODO confirm input size)
                self.h_avg_pool = tf.nn.avg_pool(self.h_pool2,
                                                 ksize=[1, 7, 7, 1],
                                                 strides=[1, 1, 1, 1],
                                                 padding='VALID',
                                                 name='Avg_pool')
                self.h_drop = tf.nn.dropout(self.h_avg_pool,
                                            keep_prob=self.keep_prob,
                                            name='Dropout')

            with tf.name_scope('Readout'):
                # flatten to (batch, 64) and apply the final linear layer
                self.h_drop_flat = tf.reshape(self.h_drop, [-1, 64])
                self.y_conv = tf.matmul(self.h_drop_flat,
                                        self.W_fc) + self.b_fc
示例#2
0
def VGG16(x, n_classes, is_pretrain=True):
    """Build the standard VGG16 graph and return the softmax output of fc8.

    Args:
        x: input image batch tensor.
        n_classes: number of output classes for the final layer.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Output tensor of the 'fc8' softmax layer.
    """
    # (stage index, number of convs, output channels) for the five conv stages
    stages = [(1, 2, 64), (2, 2, 128), (3, 3, 256), (4, 3, 512), (5, 3, 512)]
    for stage, depth, channels in stages:
        for i in range(1, depth + 1):
            x = tools.conv('conv%d_%d' % (stage, i), x, channels,
                           kernel_size=[3, 3], stride=[1, 1, 1, 1],
                           is_pretrain=is_pretrain)
        x = tools.pool('pool%d' % stage, x,
                       kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                       is_max_pool=True)

    # classifier head: two 4096-wide FC layers with batch norm, then softmax
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.softmax_layer('fc8', x, out_nodes=n_classes)

    return x
示例#3
0
def VGG16(x, n_classes, is_pretrain=True):
    """Build the VGG16 graph and return the fc8 logits.

    Args:
        x: input image batch tensor.
        n_classes: number of output classes for the final FC layer.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Logits tensor from the 'fc8' fully connected layer.
    """
    x = tools.conv('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    # FIX: this pool and the one after pool5's conv stage were both named
    # 'pool3' (copy-paste); TensorFlow would silently uniquify the duplicate
    # names ('pool3_1', 'pool3_2'), cluttering the graph and any checkpoints.
    x = tools.pool('pool4', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool5', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc8', x, out_nodes=n_classes)

    return x
def VGG16(x, n_classes, keep_prob=0.5, is_pretrain=True):
    """Build a VGG16 variant with dropout-capable FC layers and return logits.

    Args:
        x: input image batch tensor.
        n_classes: number of output classes of the output layer.
        keep_prob: dropout keep probability forwarded to tools.FC_layer.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Output tensor of the 'out' layer (tools.output_Layer).
    """
    x = tools.conv('conv1_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

    x = tools.conv('conv2_1', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

    x = tools.conv('conv3_1', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

    x = tools.conv('conv4_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    # FIX: the 4th and 5th pooling layers were both named 'pool3' (copy-paste),
    # which makes TensorFlow uniquify the op names and confuses the graph view.
    x = tools.pool('pool4', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

    x = tools.conv('conv5_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool5', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

    # narrower classifier head than standard VGG (512 -> 128), with dropout
    x = tools.FC_layer('fc6', x, out_nodes=512, keep_prob=keep_prob)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=128, keep_prob=keep_prob)
    x = tools.batch_norm(x)
    x = tools.output_Layer('out', x, out_nodes=n_classes)

    return x
示例#5
0
def VGG16N(x, n_classes, is_pretrain=True):
    """Build a VGG16 graph under a 'VGG16' name scope.

    Args:
        x: input image batch tensor.
        n_classes: number of output classes for fc8.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Tuple (logits, feature) where logits is the fc8 output and feature is
        the fc7 output (before its batch norm).
    """
    with tf.name_scope('VGG16'):

        # five conv stages: (stage index, conv count, output channels);
        # each stage ends in a 2x2 max pool wrapped in its own name scope
        for stage, depth, channels in [(1, 2, 64), (2, 2, 128), (3, 3, 256),
                                       (4, 3, 512), (5, 3, 512)]:
            for idx in range(1, depth + 1):
                x = tools.conv('conv%d_%d' % (stage, idx), x, channels,
                               kernel_size=[3, 3], stride=[1, 1, 1, 1],
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage, x, kernel=[1, 2, 2, 1],
                               stride=[1, 2, 2, 1], is_max_pool=True)

        # classifier head; fc7's raw output is exposed as the feature tensor
        x = tools.FC_layer('fc6', x, out_nodes=4096)
        with tf.name_scope('batch_norm1'):
            x = tools.batch_norm(x)
        feature = tools.FC_layer('fc7', x, out_nodes=4096)

        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(feature)
        x = tools.FC_layer('fc8', x, out_nodes=n_classes)

        return x, feature



#%%







            
def Encoder_VGG(x, z_dim, n_class, is_pretrain=True, reuse = False):
    """VGG-style encoder producing a latent Gaussian and a class prediction.

    Args:
        x: input image batch tensor.
        z_dim: dimensionality of the latent code.
        n_class: number of classes for the auxiliary classifier head.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).
        reuse: if True, reuse variables in the current variable scope.

    Returns:
        Tuple (z_mu, z_lv, y_predict): latent mean, latent log-variance, and
        class logits computed from z_mu.
    """
    if reuse:
        tf.get_variable_scope().reuse_variables() 
    x = tools.conv('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    # FIX: the 4th and 5th pooling layers were both named 'pool3' (copy-paste),
    # causing TensorFlow to uniquify the duplicate op names in the graph.
    x = tools.pool('pool4', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.conv('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool5', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    # two heads off the shared fc6 feature: latent mean and log-variance
    z_mu = tools.FC_layer('fc8', x, out_nodes=z_dim)
    z_lv = tools.FC_layer('fc9', x, out_nodes=z_dim)
    # auxiliary classifier driven by the latent mean
    y_predict = tools.FC_layer('fc10', z_mu, out_nodes=n_class)

    return z_mu, z_lv, y_predict
示例#7
0
    def fc_layers(self):
        """Append the fc6/fc7/fc8 classifier head to the conv features.

        Consumes self.convs_output; stores the batch-normed fc7 output as
        self.feature and the fc8 logits as self.fcs_output.
        """
        # fc6 + batch norm
        x = FC_layer('fc6', self.convs_output, 4096,
                     regularizer=self.regularizer)
        x = batch_norm(x)

        # fc7 + batch norm; this normalized activation is the exported feature
        x = batch_norm(FC_layer('fc7', x, 4096, regularizer=self.regularizer))
        self.feature = x

        # fc8 produces the class logits
        self.fcs_output = FC_layer('fc8', x, self.classesnumber,
                                   regularizer=self.regularizer)
示例#8
0
def Model_finetune(layer, n_classes, is_pretrain=True):
    """Build a five-conv-block CNN (7x7 kernels) for fine-tuning; return logits.

    The original header was a commented-out pseudo-docstring whose argument
    descriptions were scrambled (e.g. n_classes described as "batch size");
    replaced with a real, corrected docstring.

    Args:
        layer: input image batch tensor.
        n_classes: number of output classes of the final FC layer.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Logits tensor from the 'fc8' fully connected layer.
    """
    with tf.name_scope('Model_finetune'):
        # first conv + pool
        layer = tools.conv('conv1_1', layer, 64, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)   
        with tf.name_scope('pool1'):    
            layer = tools.pool('pool1', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        # second conv + pool
        layer = tools.conv('conv2_1', layer, 128, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)    
        with tf.name_scope('pool2'):    
            layer = tools.pool('pool2', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

        # third conv + pool
        layer = tools.conv('conv3_1', layer, 256, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool3'):
            layer = tools.pool('pool3', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

        # fourth conv + pool
        layer = tools.conv('conv4_1', layer, 512, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool4'):
            layer = tools.pool('pool4', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

        # fifth conv + pool
        layer = tools.conv('conv5_1', layer, 512, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool5'):
            layer = tools.pool('pool5', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)

        # classifier head: two 2048-wide FC layers with batch norm, then logits
        layer = tools.FC_layer('fc6', layer, out_nodes=2048)        
        with tf.name_scope('batch_norm1'):
            layer = tools.batch_norm(layer)           
        layer = tools.FC_layer('fc7', layer, out_nodes=2048)        
        with tf.name_scope('batch_norm2'):
            layer = tools.batch_norm(layer)            
        layer = tools.FC_layer('fc8', layer, out_nodes=n_classes)

        return layer
示例#9
0
def build_renet(inputs,
                num_classes,
                use_bottleneck=False,
                num_residual_units=5,
                relu_leakiness=0.0):
    """Build a ResNet-style classifier and return (logits, predictions).

    Args:
        inputs: input image batch tensor.
        num_classes: number of output classes.
        use_bottleneck: select bottleneck residual units (wider filter plan).
        num_residual_units: residual units per group.
        relu_leakiness: leak factor for the final ReLU.

    Returns:
        Tuple (logits, predictions) where predictions = softmax(logits).
    """
    with tf.variable_scope('init'):
        x = tools.conv('init_conv', inputs, 3, 3, 16, _stride_arr(1))

    if use_bottleneck:
        res_func, filters = tools.bottleneck_residual, [16, 64, 128, 256]
    else:
        res_func, filters = tools.residual, [16, 16, 32, 64]

    group_strides = [1, 2, 2]
    pre_activate = [True, False, False]

    # three residual groups: one transition unit (may change width/stride)
    # followed by num_residual_units - 1 identity units each
    for g in range(3):
        with tf.variable_scope('unit_%d_0' % (g + 1)):
            x = res_func(x, filters[g], filters[g + 1],
                         _stride_arr(group_strides[g]), pre_activate[g])
        for i in six.moves.range(1, num_residual_units):
            with tf.variable_scope('unit_%d_%d' % (g + 1, i)):
                x = res_func(x, filters[g + 1], filters[g + 1],
                             _stride_arr(1), False)

    # final norm + activation, then spatial average pooling
    with tf.variable_scope('unit_last'):
        x = tools.batch_norm('final_bn', x)
        x = tools.relu(x, relu_leakiness)
        x = tools.global_avg_pool(x)

    # linear classifier with softmax probabilities
    with tf.variable_scope('logit'):
        logits = tools.fully_connected(x, num_classes)
        predictions = tf.nn.softmax(logits)

    return logits, predictions
示例#10
0
def VGG16(x, n_class, is_pretrain=True):
    """Build a VGG16 variant and return fc8 logits.

    Note: stages 4 and 5 use strided [1,2,2,1] convolutions in addition to
    the 2x2 pools, unlike standard VGG — preserved as written.

    Args:
        x: input image batch tensor.
        n_class: number of output classes for fc8.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Logits tensor from the 'fc8' fully connected layer.
    """
    # name scopes only group ops for TensorBoard readability
    with tf.name_scope('VGG16'):

        # (stage, conv count, channels, conv stride) for the five conv stages
        plan = [(1, 2, 64, [1, 1, 1, 1]),
                (2, 2, 128, [1, 1, 1, 1]),
                (3, 3, 256, [1, 1, 1, 1]),
                (4, 3, 512, [1, 2, 2, 1]),
                (5, 3, 512, [1, 2, 2, 1])]
        for stage, depth, channels, conv_stride in plan:
            for i in range(1, depth + 1):
                x = tools.conv('conv%d_%d' % (stage, i), x, channels,
                               kernel_size=[3, 3], stride=conv_stride,
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage, x, ksize=[1, 2, 2, 1],
                               stride=[1, 2, 2, 1], is_max_pool=True)

        x = tools.FC_layer('fc6', x, out_nodes=4096)
        with tf.name_scope('batch_norma1'):  # original (typo'd) scope name kept
            # batch norm used in place of dropout for regularization
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=4096)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)

        return x
示例#11
0
    def LeNet5(self):
        """Build a small two-conv-stage classifier ('LeNet5' name scope).

        Consumes self.input; stores every intermediate tensor on self, ending
        with the un-activated logits in self.fc3.
        """
        with tf.name_scope('LeNet5'):

            # stage 1: conv + normalized 3x3/stride-2 max pool
            self.conv1 = tools.conv('conv1', self.input, 16,
                                    kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                    is_pretrain=self.is_pretrain)
            self.pool1 = tools.pool('pool1', self.conv1, kernel=[1, 3, 3, 1],
                                    stride=[1, 2, 2, 1], is_max_pool=True,
                                    is_norm=True)

            # stage 2: identical conv + pool on the stage-1 output
            self.conv2 = tools.conv('conv2', self.pool1, 16,
                                    kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                    is_pretrain=self.is_pretrain)
            self.pool2 = tools.pool('pool2', self.conv2, kernel=[1, 3, 3, 1],
                                    stride=[1, 2, 2, 1], is_max_pool=True,
                                    is_norm=True)

            # two 128-wide FC layers, each followed by batch norm
            self.fc1 = tools.FC_layer('local3', self.pool2, out_nodes=128)
            self.norm1 = tools.batch_norm('batch_norm1', self.fc1)

            self.fc2 = tools.FC_layer('local4', self.norm1, out_nodes=128)
            self.norm2 = tools.batch_norm('batch_norm2', self.fc2)

            # final linear layer (no ReLU) producing the class logits
            self.fc3 = tools.FC_layer('softmax_linear', self.norm2,
                                      out_nodes=self.n_classes,
                                      use_relu=False)
示例#12
0
文件: models.py 项目: lhzhong/TFCode
def AlexNet(x, n_classes, is_pretrain=True):
    """Build a small two-conv-stage classifier under an 'AlexNet' name scope.

    Args:
        x: input image batch tensor.
        n_classes: number of output classes of the final layer.
        is_pretrain: forwarded to tools.conv (pretrained-weight handling).

    Returns:
        Output tensor of the 'softmax_linear' FC layer.
    """
    with tf.name_scope('AlexNet'):

        # stage 1: 3x3 conv, then normalized 3x3/stride-2 max pool
        x = tools.conv('conv1', x, 16, kernel_size=[3, 3],
                       stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.pool('pool1', x, kernel=[1, 3, 3, 1],
                       stride=[1, 2, 2, 1], is_max_pool=True, is_norm=True)

        # stage 2: identical conv + pool
        x = tools.conv('conv2', x, 16, kernel_size=[3, 3],
                       stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.pool('pool2', x, kernel=[1, 3, 3, 1],
                       stride=[1, 2, 2, 1], is_max_pool=True, is_norm=True)

        # two 128-wide FC layers, each followed by batch norm
        x = tools.FC_layer('local3', x, out_nodes=128)
        x = tools.batch_norm('batch_norm1', x)

        x = tools.FC_layer('local4', x, out_nodes=128)
        x = tools.batch_norm('batch_norm2', x)

        # final FC layer producing the class scores
        x = tools.FC_layer('softmax_linear', x, out_nodes=n_classes)

        return x
示例#13
0
    def cnn_rnn(self):
        """Build the combined CNN -> RNN classification graph.

        Two conv/pool stages plus two batch-normed FC layers extract a feature
        vector from self.input_x; that vector is fed as a single-timestep
        sequence into a stacked LSTM/GRU (chosen by self.config.rnn), and a
        final linear layer produces self.logits. Loss, accuracy and an Adam
        optimizer are also attached to the graph (self.loss, self.acc,
        self.optim).
        """
        """cnn模型"""
        # --- CNN feature extractor ---
        with tf.name_scope('cnn'):
            self.conv1 = tools.conv('conv1',
                                    self.input_x,
                                    16,
                                    kernel_size=[3, 3],
                                    stride=[1, 1, 1, 1],
                                    is_pretrain=self.is_pretrain)
            self.pool1 = tools.pool('pool1',
                                    self.conv1,
                                    kernel=[1, 3, 3, 1],
                                    stride=[1, 2, 2, 1],
                                    is_max_pool=True,
                                    is_norm=True)

            self.conv2 = tools.conv('conv2',
                                    self.pool1,
                                    16,
                                    kernel_size=[3, 3],
                                    stride=[1, 1, 1, 1],
                                    is_pretrain=self.is_pretrain)
            self.pool2 = tools.pool('pool2',
                                    self.conv2,
                                    kernel=[1, 3, 3, 1],
                                    stride=[1, 2, 2, 1],
                                    is_max_pool=True,
                                    is_norm=True)

            # two batch-normed FC layers; norm2 is the feature fed to the RNN
            self.fc1 = tools.FC_layer('local3', self.pool2, out_nodes=128)
            self.norm1 = tools.batch_norm('batch_norm1', self.fc1)

            self.fc2 = tools.FC_layer('local4', self.norm1, out_nodes=128)
            self.norm2 = tools.batch_norm('batch_norm2', self.fc2)

            # self.fc3 = tools.FC_layer('softmax_linear', self.norm2, out_nodes=self.n_classes

        def lstm_cell():  # LSTM cell factory
            return tf.contrib.rnn.BasicLSTMCell(self.config.hidden_dim,
                                                state_is_tuple=True)

        def gru_cell():  # GRU cell factory
            return tf.contrib.rnn.GRUCell(self.config.hidden_dim)

        def dropout():  # wrap each RNN cell with output dropout
            if self.config.rnn == 'lstm':
                cell = lstm_cell()
            else:
                cell = gru_cell()
            return tf.contrib.rnn.DropoutWrapper(
                cell, output_keep_prob=self.keep_prob)

        # --- RNN over the CNN feature, treated as a length-1 sequence ---
        with tf.name_scope("rnn"):
            # NOTE(review): reshape uses the static batch dim, so the graph
            # appears to require a fixed batch size — confirm against callers.
            _batch, _fcsize = self.norm2.get_shape().as_list()
            X = tf.reshape(self.norm2, [_batch, 1, _fcsize])
            timestep_size = 1
            #            input_size = _fcsize
            # stacked multi-layer RNN
            cells = [dropout() for _ in range(self.config.num_layers)]
            rnn_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)

            # manual unroll (single step); keep the last cell output
            init_state = rnn_cell.zero_state(self.batch_size, dtype=tf.float32)
            outputs = list()
            state = init_state
            for timestep in range(timestep_size):
                (cell_output, state) = rnn_cell(X[:, timestep, :], state)
                outputs.append(cell_output)
            h_state = outputs[-1]

        with tf.name_scope("score"):
            # The RNN output is a [hidden_dim] vector per example; a final
            # linear layer maps it to class scores.
            W = tf.Variable(tf.truncated_normal(
                [self.config.hidden_dim, self.config.num_classes], stddev=0.1),
                            dtype=tf.float32)
            bias = tf.Variable(tf.constant(0.1,
                                           shape=[self.config.num_classes]),
                               dtype=tf.float32)
            self.logits = tf.nn.bias_add(tf.matmul(h_state, W), bias)

        # --- training ops ---
        with tf.name_scope("loss") as scope:
            # sparse cross-entropy: input_y holds integer class ids
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=self.logits, labels=self.input_y, name='cross-entropy')
            self.loss = tf.reduce_mean(cross_entropy, name='loss')
            tf.summary.scalar(scope + 'loss', self.loss)

        with tf.name_scope("accuracy") as scope:
            # top-1 accuracy as a percentage
            correct = tf.nn.in_top_k(self.logits, self.input_y, 1)
            correct = tf.cast(correct, tf.float32)
            self.acc = tf.reduce_mean(correct) * 100.0
            tf.summary.scalar(scope + 'accuracy', self.acc)

        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.config.learning_rate)
            self.optim = optimizer.minimize(self.loss)
示例#14
0
    def generator(self,
                  z_input,
                  num_z_input=None,
                  num_layers=4,
                  stride=2,
                  z_first_output_deep=512,
                  reuse=True,
                  train=True):
        """
        :param layers_size:
        :return:
        """
        # try:
        #     num_sample = z_input.get_shape()[0] if z_input.get_shape()[0] == tf.Dimension(None) else 64  # 解决bug
        # except AttributeError:
        #     num_sample = z_input.shape[0]
        # logger.info(z_input.get_shape()[0]._value)
        # num_sample = z_input.get_shape()[0]._value if z_input.get_shape()[0]._value else 64  # 解决bug
        # logger.info(num_sample)
        if num_z_input == None:
            num_z_input = self.batch_size

        with tf.variable_scope("GEN", reuse=reuse):
            hidden_output_size = [
                np.ceil(np.array(self.image_size) / (stride**i)).astype(
                    np.int).tolist() for i in range(0, num_layers + 1)
            ]  # 所有层的output size
            hidden_output_size = list(reversed(hidden_output_size))
            # logger.info(
            #         hidden_output_size)  # [array([6, 6]), array([12, 12]), array([24, 24]), array([48, 48]), array([96, 96])]
            # 输出层采用全连接层: 线性全连接层
            h0_size = (hidden_output_size[0][0] * hidden_output_size[0][1]
                       ) * z_first_output_deep  # width * height * chanel
            # weight_0 = tf.Variable(initial_value=tf.random_normal(
            #         shape=[self.z_size, h0_size],
            #         stddev=0.2,
            #         dtype=tf.float32),
            #         name="weight_input_layer"
            # )
            # bias_0 = tf.Variable(initial_value=np.zeros(shape=[h0_size]), dtype=tf.float32, name="bias_input_layer")
            weight_0 = tf.get_variable(
                shape=[self.z_size, h0_size],
                initializer=tf.truncated_normal_initializer(
                    stddev=self.stddev),
                dtype=tf.float32,
                name="h0_weight")
            bias_0 = tf.get_variable(shape=[h0_size],
                                     dtype=tf.float32,
                                     name="h0_bias",
                                     initializer=tf.zeros_initializer())
            h0 = tf.matmul(z_input, weight_0) + bias_0
            h0_reshape = tf.reshape(h0,
                                    shape=[
                                        num_z_input, hidden_output_size[0][0],
                                        hidden_output_size[0][1],
                                        z_first_output_deep
                                    ],
                                    name="h0_reshape")  # reshape project
            # logger.info(h0.get_shape())  # (?, 6, 6, 1024)
            h0_bn = batch_norm(h0_reshape,
                               name="h0_bn",
                               train=train,
                               reuse=reuse)
            h0_lrelu = tf.nn.leaky_relu(h0_bn, alpha=0.2, name="h0_lrelu")
            hm_lrelu = None
            # 定义反卷积层
            for i in range(1, len(hidden_output_size) - 1):
                with tf.variable_scope("deconv_%d" % i, reuse=reuse):
                    # 第一层 不许卷积, 最后一层不需要BN
                    # width, height = hidden_output_size[i]
                    filter_shape = [
                        5,
                        5,  # 认为设定 5 width height
                        int(z_first_output_deep / 2**(i)),  # output chanel
                        int(z_first_output_deep / 2**(i - 1))
                    ]  # input chanel
                    logger.info(filter_shape)
                    # bug:必须指定batch的具体数量
                    # output_shape = [z_input.get_shape()[0]._value, hidden_output_size[i+1][0], hidden_output_size[i+1][0], filter_shape[-1]]
                    output_shape = [
                        num_z_input, hidden_output_size[i][0],
                        hidden_output_size[i][0], filter_shape[-2]
                    ]
                    # logger.info(output_shape)
                    # filter = tf.Variable(
                    #         initial_value=tf.random_normal(
                    #                 dtype=tf.float32,
                    #                 shape=filter_shape
                    #         ),
                    #         name="deconv_filter_%d" % i
                    # )  # 反卷积操作的共享卷积权重矩阵
                    # bias = tf.get_variable(name="deconv_bias_%d" % i,
                    #                        shape=[z_first_output_deep / 2 ** (i)],
                    #                        dtype=tf.float32,
                    #                        initializer=tf.zeros_initializer()
                    #                        )  # filter对应的偏置,shape取决于filter的output chanel 即filter的数量
                    filter = tf.get_variable(
                        dtype=tf.float32,
                        shape=filter_shape,
                        initializer=tf.truncated_normal_initializer(
                            stddev=self.stddev),
                        name="deconv%d_filter_weight" % i)  # 反卷积操作的共享卷积权重矩阵
                    bias = tf.get_variable(
                        name="deconv%d_filter_bias" % i,
                        shape=[z_first_output_deep / 2**(i)],  # output chanel
                        dtype=tf.float32,
                        initializer=tf.zeros_initializer()
                    )  # filter对应的偏置,shape取决于filter的output chanel 即filter的数量
                    if hm_lrelu == None:
                        hm_lrelu = h0_lrelu
                    hm = tf.add(tf.nn.conv2d_transpose(
                        value=hm_lrelu,
                        filter=filter,
                        strides=[1, stride, stride, 1],
                        output_shape=output_shape),
                                bias,
                                name="h%d" % i)
                    hm_bn = batch_norm(hm,
                                       name="h%d_bn" % i,
                                       train=train,
                                       reuse=reuse)
                    hm_lrelu = tf.nn.leaky_relu(hm_bn, name="h%d_lrelu" % i)
                    # logger.info(hm.get_shape())

            # 定义输出层
            # filter = tf.Variable(
            #         initial_value=tf.random_normal(
            #                 shape=[5, 5, self.color_chanel, hm_lrelu.get_shape()[-1]._value],  # 获取Dimension 的值
            #                 dtype=tf.float32
            #         ),
            #         name="weight_output_filter"
            # )
            # logger.info(filter.get_shape())
            # bias = tf.Variable(initial_value=np.zeros(shape=[self.color_chanel]), dtype=tf.float32,
            #                    name="bias_output_filter")
            filter = tf.get_variable(
                shape=[
                    5, 5, self.color_chanel,
                    hm_lrelu.get_shape()[-1]._value
                ],  # 获取Dimension 的值
                dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(
                    stddev=self.stddev),
                name="output_filter_weight")
            # logger.info(filter.get_shape())
            bias = tf.get_variable(shape=[self.color_chanel],
                                   dtype=tf.float32,
                                   name="output_filter_bias",
                                   initializer=tf.truncated_normal_initializer(
                                       stddev=self.stddev))
            output_shape = [
                num_z_input, hidden_output_size[-1][0],
                hidden_output_size[-1][1], self.color_chanel
            ]
            # logger.info(output_shape)  输出层的激活函数为tanh
            out_put = tf.nn.tanh(tf.add(
                tf.nn.conv2d_transpose(value=hm_lrelu,
                                       filter=filter,
                                       strides=[1, stride, stride, 1],
                                       output_shape=output_shape),
                bias,
            ),
                                 name="out_put")  # 没有BN, active func: tanh
            # logger.info(out_put.get_shape())
            return out_put
示例#15
0
文件: a.py 项目: suncht/sun-python
                             64,
                             kernel_size=[3, 3],
                             stride=[1, 1, 1, 1],
                             is_pretrain=is_pretrain)
        #outputs = tools.conv('conv2_2', outputs, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        outputs = tools.pool('pool2',
                             outputs,
                             kernel=[1, 2, 2, 1],
                             stride=[1, 2, 2, 1],
                             is_max_pool=True)

        outputs = tools.FC_layer('fc6',
                                 outputs,
                                 out_nodes=1024,
                                 activaction_function=tf.nn.relu)
        outputs = tools.batch_norm(outputs)
        outputs = tools.FC_layer('fc7',
                                 outputs,
                                 out_nodes=1024,
                                 activaction_function=tf.nn.relu)
        outputs = tools.batch_norm(outputs)
        logits = tools.FC_layer('fc8',
                                outputs,
                                out_nodes=10,
                                activaction_function=tf.nn.softmax)

    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)
    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)
示例#16
0
    def AlexNet(self):
        """Assemble the AlexNet graph on ``self.input``.

        Five conv stages (stages 1, 2 and 5 followed by 3x3/stride-2
        max-pool with local response norm), then two 4096-unit FC layers
        with batch norm and a final linear layer of ``self.n_classes``
        units. Every layer is also stored as an attribute on ``self``
        (``conv1`` .. ``conv5``, ``pool1``/``pool2``/``pool5``,
        ``fc1``/``norm1``/``fc2``/``norm2``/``fc3``).
        """
        # (conv name, output depth, kernel, conv stride, pool name or None)
        trunk_specs = [
            ('conv1', 96, [11, 11], [1, 4, 4, 1], 'pool1'),
            ('conv2', 256, [5, 5], [1, 1, 1, 1], 'pool2'),
            ('conv3', 384, [3, 3], [1, 1, 1, 1], None),
            ('conv4', 384, [3, 3], [1, 1, 1, 1], None),
            ('conv5', 256, [3, 3], [1, 1, 1, 1], 'pool5'),
        ]

        with tf.name_scope('AlexNet'):
            features = self.input
            for conv_name, depth, ksize, strides, pool_name in trunk_specs:
                features = tools.conv(conv_name,
                                      features,
                                      depth,
                                      kernel_size=ksize,
                                      stride=strides,
                                      is_pretrain=self.is_pretrain)
                setattr(self, conv_name, features)
                if pool_name is not None:
                    # Overlapping 3x3/stride-2 pooling with LRN, as in the
                    # original AlexNet.
                    features = tools.pool(pool_name,
                                          features,
                                          kernel=[1, 3, 3, 1],
                                          stride=[1, 2, 2, 1],
                                          is_max_pool=True,
                                          is_norm=True)
                    setattr(self, pool_name, features)

            # Classifier head: two BN'd FC layers, then a linear layer
            # (no relu) feeding the softmax loss.
            self.fc1 = tools.FC_layer('fc6', self.pool5, out_nodes=4096)
            self.norm1 = tools.batch_norm('batch_norm1', self.fc1)

            self.fc2 = tools.FC_layer('fc7', self.norm1, out_nodes=4096)
            self.norm2 = tools.batch_norm('batch_norm2', self.fc2)

            self.fc3 = tools.FC_layer('softmax_linear',
                                      self.norm2,
                                      out_nodes=self.n_classes,
                                      use_relu=False)
示例#17
0
def VGG16PlanInferencet(x, keep_prob, n_classes=12, is_pretrain=True):
    """Build a VGG16 inference graph with dropout before the last FC layer.

    :param x: input image batch tensor (NHWC)
    :param keep_prob: dropout keep probability applied to the fc7 output
    :param n_classes: number of output classes
    :param is_pretrain: whether the conv layers are trainable
    :return: logits tensor of shape (batch, n_classes)
    """
    # Five conv stages: (stage index, channel depth, number of conv layers).
    for stage, depth, n_convs in ((1, 64, 2), (2, 128, 2), (3, 256, 3),
                                  (4, 512, 3), (5, 512, 3)):
        for j in range(1, n_convs + 1):
            x = tools.conv('conv%d_%d' % (stage, j),
                           x,
                           depth,
                           kernel_size=[3, 3],
                           stride=[1, 1, 1, 1],
                           is_pretrain=is_pretrain)
        # Bug fix: pooling stages 4 and 5 previously reused the name
        # 'pool3'; every stage now gets a distinct layer name.
        x = tools.pool('pool%d' % stage,
                       x,
                       kernel=[1, 2, 2, 1],
                       stride=[1, 2, 2, 1],
                       is_max_pool=True)

    # Classifier head: two BN'd FC layers, dropout, then the logits layer.
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x_drop = tf.nn.dropout(x, keep_prob)
    x = tools.FC_layer('fc8', x_drop, out_nodes=n_classes)

    return x
示例#18
0
    def discriminator(self,
                      input,
                      num_input=None,
                      layer_size=4,
                      stride=2,
                      d_first_output_deep=64,
                      reuse=True,
                      train=True):
        """Build the GAN discriminator.

        ``layer_size`` strided conv layers (no pooling) each followed by
        batch norm and relu, then one fully connected unit producing a
        single raw logit per example (no sigmoid applied).

        :param input: image batch tensor, NHWC layout
        :param num_input: batch size; defaults to ``self.batch_size``
        :param layer_size: number of conv layers
        :param stride: spatial stride shared by every conv layer
        :param d_first_output_deep: channel count of the first conv layer;
            doubled at each subsequent layer
        :param reuse: whether to reuse variables in the "DIS" scope
        :param train: forwarded to batch_norm (training vs inference mode)
        :return: logits tensor of shape (num_input, 1)
        """
        if num_input is None:  # idiom fix: was `== None`
            num_input = self.batch_size
        with tf.variable_scope("DIS", reuse=reuse):
            features = input
            for i in range(layer_size):
                with tf.variable_scope("conv_%d" % i, reuse=reuse):
                    # Renamed from `filter` to avoid shadowing the builtin;
                    # variable names on disk are unchanged.
                    kernel = tf.get_variable(
                        name='conv%d_filter_weight' % i,
                        shape=[
                            5,
                            5,  # fixed 5x5 receptive field
                            features.get_shape()[-1]._value,  # input channels
                            d_first_output_deep * (2**i)
                        ],  # output channels
                        initializer=tf.truncated_normal_initializer(
                            stddev=self.stddev),
                        dtype=tf.float32)
                    bias = tf.get_variable(
                        name='conv%d_filter_bias' % i,
                        shape=[d_first_output_deep * (2**i)
                               ],  # one bias per output channel
                        initializer=tf.constant_initializer(0.0),
                        dtype=tf.float32)
                    conv = tf.nn.conv2d(features,
                                        filter=kernel,
                                        strides=[1, stride, stride, 1],
                                        padding='SAME')
                    hm = tf.nn.bias_add(conv, bias, name="h%d" % i)
                    # NOTE(review): batch norm is applied to every layer,
                    # including the first, although the original comment
                    # claimed the first layer skips BN — confirm intent.
                    hm_bn = batch_norm(hm,
                                       name="h%d_bn" % i,
                                       train=train,
                                       reuse=reuse)
                    # NOTE(review): plain relu despite the "_lrelu" op name;
                    # the leaky_relu variant was deliberately disabled, so
                    # behavior is preserved here.
                    features = tf.nn.relu(hm_bn, name="h%d_lrelu" % i)

                    logger.info(features.get_shape())
            # Output layer: flatten, then a single fully connected unit.
            output_layer_input = tf.layers.flatten(features,
                                                   name="output_layer_input")
            # Reshape pins the (static) batch dimension to num_input.
            output_layer_input = tf.reshape(
                output_layer_input,
                shape=[num_input,
                       output_layer_input.get_shape()[-1]._value])
            logger.info(output_layer_input.get_shape())
            weight = tf.get_variable(
                name='output_weitht',  # (sic) kept: checkpoints key on it
                shape=[output_layer_input.get_shape()[-1]._value, 1],
                initializer=tf.truncated_normal_initializer(
                    stddev=self.stddev),
                dtype=tf.float32)
            bias = tf.get_variable(name='output_bias',
                                   shape=[1],
                                   initializer=tf.constant_initializer(0.0),
                                   dtype=tf.float32)
            logits = tf.add(tf.matmul(output_layer_input, weight),
                            bias,
                            name="logits")
            logger.info(logits.get_shape())
            return logits  # raw logits; caller applies sigmoid / loss
示例#19
0
文件: models.py 项目: lhzhong/TFCode
    def VGG16(self):
        """Assemble the VGG16 graph on ``self.input``.

        Five conv stages (2-2-3-3-3 conv layers of depth 64/128/256/512/512,
        each stage ending with a 2x2/stride-2 max-pool), then two 4096-unit
        FC layers with batch norm and a final FC layer of ``self.n_classes``
        units. Every layer is also stored as an attribute on ``self``
        (``conv1_1`` .. ``conv5_3``, ``pool1`` .. ``pool5``, ``fc6`` ..
        ``fc8``, ``batch_norm1``/``batch_norm2``).
        """
        # (stage index, channel depth, number of conv layers per stage)
        stage_specs = [
            (1, 64, 2),
            (2, 128, 2),
            (3, 256, 3),
            (4, 512, 3),
            (5, 512, 3),
        ]

        with tf.name_scope('VGG16'):
            net = self.input
            for stage, depth, n_convs in stage_specs:
                for j in range(1, n_convs + 1):
                    layer_name = 'conv%d_%d' % (stage, j)
                    net = tools.conv(layer_name,
                                     net,
                                     depth,
                                     kernel_size=[3, 3],
                                     stride=[1, 1, 1, 1],
                                     is_pretrain=self.is_pretrain)
                    setattr(self, layer_name, net)
                pool_name = 'pool%d' % stage
                net = tools.pool(pool_name,
                                 net,
                                 kernel=[1, 2, 2, 1],
                                 stride=[1, 2, 2, 1],
                                 is_max_pool=True)
                setattr(self, pool_name, net)

            # Classifier head: two BN'd FC layers, then the logits layer.
            self.fc6 = tools.FC_layer('fc6', self.pool5, out_nodes=4096)
            self.batch_norm1 = tools.batch_norm('batch_norm1', self.fc6)

            self.fc7 = tools.FC_layer('fc7', self.batch_norm1, out_nodes=4096)
            self.batch_norm2 = tools.batch_norm('batch_norm2', self.fc7)

            self.fc8 = tools.FC_layer('fc8',
                                      self.batch_norm2,
                                      out_nodes=self.n_classes)
示例#20
0
文件: VGG.py 项目: lihaifand/projects
def VGG16(x, n_classes, is_pretrain=True):
    """Build a VGG16 classifier graph.

    :param x: input image batch tensor (NHWC)
    :param n_classes: number of output classes
    :param is_pretrain: whether the conv layers are trainable
    :return: logits tensor of shape (batch, n_classes)
    """
    # Five conv stages: (stage index, channel depth, number of conv layers).
    for stage, depth, n_convs in ((1, 64, 2), (2, 128, 2), (3, 256, 3),
                                  (4, 512, 3), (5, 512, 3)):
        for j in range(1, n_convs + 1):
            x = tools.conv('conv%d_%d' % (stage, j),
                           x,
                           depth,
                           kernel_size=[3, 3],
                           stride=[1, 1, 1, 1],
                           is_pretrain=is_pretrain)
        # Bug fix: pooling stages 4 and 5 previously reused the name
        # 'pool3'; every stage now gets a distinct layer name.
        x = tools.pool('pool%d' % stage,
                       x,
                       kernel=[1, 2, 2, 1],
                       stride=[1, 2, 2, 1],
                       is_max_pool=True)

    # Classifier head: two BN'd FC layers, then the logits layer.
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc8', x, out_nodes=n_classes)

    return x
示例#21
0
def VGG16N(x, n_classes, IS_PRETRAIN):
    """Build a VGG16 graph with per-layer trainability flags.

    :param x: input image batch tensor (NHWC)
    :param n_classes: number of output classes
    :param IS_PRETRAIN: per-layer sequence of trainability flags; indices
        0-12 cover the 13 conv layers in order, 13-14 cover fc6 and fc7
    :return: output tensor of the final layer, shape (batch, n_classes)
    """
    import tools

    layer_idx = 0  # walks IS_PRETRAIN across the 13 conv layers
    # Five conv stages: (stage index, channel depth, number of conv layers).
    for stage, depth, n_convs in ((1, 64, 2), (2, 128, 2), (3, 256, 3),
                                  (4, 512, 3), (5, 512, 3)):
        for j in range(1, n_convs + 1):
            x = tools.conv('conv%d_%d' % (stage, j),
                           x,
                           depth,
                           kernel_size=[3, 3],
                           stride=[1, 1, 1, 1],
                           is_pretrain=IS_PRETRAIN[layer_idx])
            layer_idx += 1
        # Bug fix: pooling stages 4 and 5 previously reused the name
        # 'pool3'; every stage now gets a distinct layer name.
        x = tools.pool('pool%d' % stage,
                       x,
                       kernel=[1, 2, 2, 1],
                       stride=[1, 2, 2, 1],
                       is_max_pool=True)

    # Classifier head: two BN'd FC layers, then the final layer.
    x = tools.FC_layer('fc6', x, out_nodes=4096, is_pretrain=IS_PRETRAIN[13])
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096, is_pretrain=IS_PRETRAIN[14])
    x = tools.batch_norm(x)
    x = tools.final_layer('fc8', x, out_nodes=n_classes)
    return x
示例#22
0
def VGG16N(x, n_classes, is_pretrain=True):
    """Build a VGG16 graph (13 conv layers + 3 FC layers) under one name scope.

    Args:
        x: input image batch tensor.
        n_classes: width of the final FC layer ('fc8'), i.e. number of logits.
        is_pretrain: forwarded to every conv layer (see tools.conv); the FC
            layers do not receive it.

    Returns:
        The 'fc8' output tensor (unnormalised logits).
    """
    # Channel widths of the five convolutional stages; every stage is
    # followed by a 2x2 max-pool, exactly as in the classic VGG16 layout.
    stage_widths = ([64, 64],
                    [128, 128],
                    [256, 256, 256],
                    [512, 512, 512],
                    [512, 512, 512])

    with tf.name_scope('VGG16'):

        net = x
        for stage_idx, widths in enumerate(stage_widths, start=1):
            # convN_1 .. convN_k of this stage, all 3x3 / stride 1.
            for layer_idx, width in enumerate(widths, start=1):
                net = tools.conv('conv%d_%d' % (stage_idx, layer_idx),
                                 net,
                                 width,
                                 kernel_size=[3, 3],
                                 stride=[1, 1, 1, 1],
                                 is_pretrain=is_pretrain)
            # 2x2 max-pool closing the stage; wrapped in its own name scope
            # to mirror the original graph layout.
            pool_name = 'pool%d' % stage_idx
            with tf.name_scope(pool_name):
                net = tools.pool(pool_name,
                                 net,
                                 kernel=[1, 2, 2, 1],
                                 stride=[1, 2, 2, 1],
                                 is_max_pool=True)

        # Classifier head: two 4096-wide FC layers, each followed by batch
        # normalisation, then the n_classes-wide readout layer.
        net = tools.FC_layer('fc6', net, out_nodes=4096)
        with tf.name_scope('batch_norm1'):
            net = tools.batch_norm(net)
        net = tools.FC_layer('fc7', net, out_nodes=4096)
        with tf.name_scope('batch_norm2'):
            net = tools.batch_norm(net)
        fc8 = tools.FC_layer('fc8', net, out_nodes=n_classes)

        return fc8
示例#23
0
def Myvgg(x, n_class, is_pretrain=True):
    """Compact VGG-style classifier: three two-conv stages plus a 3-FC head.

    Args:
        x: input image batch tensor.
        n_class: width of the final FC layer ('fc8').
        is_pretrain: forwarded to every conv layer (see tools.conv).

    Returns:
        The 'fc8' output tensor (unnormalised logits).
    """
    with tf.name_scope('Myvgg'):

        # Three conv stages (depths 64 / 128 / 256), each: two 3x3 convs
        # followed by a 2x2 pool inside its own name scope.
        for stage, depth in enumerate((64, 128, 256), start=1):
            for layer in (1, 2):
                x = tools.conv('conv%d_%d' % (stage, layer),
                               x,
                               depth,
                               kernel_size=[3, 3],
                               stride=[1, 1, 1, 1],
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage,
                               x,
                               ksize=[1, 2, 2, 1],
                               stride=[1, 2, 2, 1],
                               is_max_pool=True)

        x = tools.FC_layer('fc6', x, out_nodes=2048)
        # NOTE(review): scope name 'batch_norma1' (sic) is kept verbatim so
        # the graph node names stay identical to the original.
        with tf.name_scope('batch_norma1'):
            # batch norm can avoid overfit, more efficient than dropout
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=2048)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)

    return x
示例#24
0
def MyResNet(x, n_class, is_pretrain=True):
    """Three two-branch conv stages (ReLU branch + linear branch, summed),
    each followed by 2x2 pooling, then a three-FC classifier head.

    NOTE(review): both branches convolve the *same* input and are added --
    there is no identity skip connection, so despite the name this is not a
    standard residual block. Behavior kept as-is.

    Args:
        x: input image batch tensor.
        n_class: width of the final FC layer ('fc8').
        is_pretrain: forwarded to every conv layer (see tools.conv).

    Returns:
        The 'fc8' output tensor (unnormalised logits).
    """
    # (output depth, conv kernel size) for each of the three stages.
    stage_cfg = ((64, [3, 3]),
                 (128, [3, 3]),
                 (256, [1, 1]))

    with tf.name_scope('MyResNet'):

        for stage, (depth, ksz) in enumerate(stage_cfg, start=1):
            # Branch A: conv + ReLU; branch B: conv without activation.
            relu_branch = tools.conv('conv%d_1' % stage,
                                     x,
                                     depth,
                                     kernel_size=ksz,
                                     stride=[1, 1, 1, 1],
                                     is_pretrain=is_pretrain)
            linear_branch = tools.conv_no_relu('conv%d_2' % stage,
                                               x,
                                               depth,
                                               kernel_size=ksz,
                                               stride=[1, 1, 1, 1],
                                               is_pretrain=is_pretrain)
            merged = tf.add(relu_branch, linear_branch)
            x = tf.nn.relu(merged, name='relu')
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage,
                               x,
                               ksize=[1, 2, 2, 1],
                               stride=[1, 2, 2, 1],
                               is_max_pool=True)

        x = tools.FC_layer('fc6', x, out_nodes=512)
        # NOTE(review): scope name 'batch_norma1' (sic) kept verbatim to
        # preserve graph node names.
        with tf.name_scope('batch_norma1'):
            # batch norm can avoid overfit, more efficient than dropout
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=512)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)

    return x
示例#25
0
# Second convolution stage of a small demo CNN built from raw TF ops plus
# helpers from the project-local `tools` module.
# NOTE(review): `x_pool` is the output of an earlier pooling stage defined
# elsewhere in the file — presumably [batch, H, W, 16]; confirm upstream.
with tf.variable_scope('conv2'):
    # 3x3 kernel mapping 16 input channels -> 32 output channels.
    w2 = tools.weight([3, 3, 16, 32], is_uniform=True)
    x_w2 = tf.nn.conv2d(x_pool, w2, strides=[1, 1, 1, 1], padding='SAME')

    b2 = tools.bias([32])
    x_b2 = tf.nn.bias_add(x_w2, b2)

    x_relu2 = tf.nn.relu(x_b2)

    # 2x2 pooling with stride 2; is_max_pool=False presumably selects
    # average pooling — confirm against tools.pool.
    x_pool2 = tools.pool('test2',
                         x_relu2,
                         kernel=[1, 2, 2, 1],
                         stride=[1, 2, 2, 1],
                         is_max_pool=False)

    # Batch-normalise the pooled activations.
    x_BN = tools.batch_norm(x_pool2)


#
def shape(x):
    """Return the statically known (graph-time) shape of *x* as a string."""
    static_shape = x.get_shape()
    return str(static_shape)


# First conv
# Debug printout of the static shapes produced by the first convolution
# stage. NOTE(review): `x`, `w`, `x_w`, `b`, `x_b`, `x_relu` are defined
# earlier in the file (first conv stage) and are not visible here.
print('\n')
print('** First conv: **\n')
print('input size: ', shape(x))
print('w size:', shape(w))
print('x_w size: ', shape(x_w))
print('b size: ', shape(b))
print('x_b size: ', shape(x_b))
    
    # NOTE(review): the line below is indented as if inside a `with` block
    # that is not present at this point in the file — as written it raises
    # an IndentationError. It most likely belongs to the first conv stage's
    # `with tf.variable_scope(...)` block; confirm against the original
    # source before re-indenting.
    x_pool = tools.pool('test1', x_relu, kernel=[1,2,2,1], stride=[1,2,2,1],is_max_pool=True)

# Second conv
# Second convolution stage (duplicate of the block above, kept as-is):
# 3x3 conv from 16 to 32 channels, bias, ReLU, 2x2 pooling, batch norm.
# NOTE(review): `x_pool` comes from the first conv stage defined elsewhere
# in the file; confirm its shape upstream.
with tf.variable_scope('conv2'):
    w2 = tools.weight([3,3,16,32], is_uniform=True)
    x_w2 = tf.nn.conv2d(x_pool, w2, strides=[1, 1, 1, 1], padding='SAME')
    
    b2 = tools.bias([32])
    x_b2 = tf.nn.bias_add(x_w2, b2)
    
    x_relu2 = tf.nn.relu(x_b2)
    
    # is_max_pool=False presumably selects average pooling — confirm
    # against tools.pool.
    x_pool2 = tools.pool('test2',x_relu2, kernel=[1,2,2,1],stride=[1,2,2,1], is_max_pool=False)
    
    x_BN = tools.batch_norm(x_pool2)

#%%
def shape(x):
    """String form of the tensor's statically known shape."""
    return '%s' % (x.get_shape(),)

## First conv
# Debug printout of the static shapes of every tensor in the first conv
# stage. NOTE(review): `x`, `w`, `x_w`, `b`, `x_b`, `x_relu`, `x_pool` are
# defined earlier in the file and are not visible in this chunk.
print('\n')
print('** First conv: **\n')
print('input size: ', shape(x))
print('w size:', shape(w))
print('x_w size: ', shape(x_w))
print('b size: ', shape(b))
print('x_b size: ', shape(x_b))
print('x_relu size: ', shape(x_relu))
print('x_pool size: ', shape(x_pool))