def create_discriminator(discrim_inputs, discrim_targets):
    """PatchGAN-style conditional discriminator.

    Stacks the conditioning image and the candidate image along the channel
    axis, then applies a cascade of strided convolutions down to a
    one-channel sigmoid score map.

    Args:
        discrim_inputs: 4-D tensor [batch, height, width, in_channels].
        discrim_targets: 4-D tensor with the same spatial shape.

    Returns:
        The final sigmoid activation map (per-patch real/fake scores).
    """
    n_layers = 3
    ndf = 64
    layers = []

    # 2x [batch, H, W, C] => [batch, H, W, 2C]
    stacked = tf.concat([discrim_inputs, discrim_targets], axis=3)

    # layer_1: [batch, 256, 256, 2C] => [batch, 128, 128, ndf]
    with tf.variable_scope("layer_1"):
        layers.append(tools.lrelu(tools.conv(stacked, ndf, stride=2), 0.2))

    # layer_2: => [batch, 64, 64, ndf*2]
    # layer_3: => [batch, 32, 32, ndf*4]
    # layer_4: => [batch, 31, 31, ndf*8]  (stride 1 on the last of these)
    for i in range(n_layers):
        with tf.variable_scope("layer_%d" % (len(layers) + 1)):
            out_channels = ndf * min(2 ** (i + 1), 8)
            stride = 1 if i == n_layers - 1 else 2
            convolved = tools.conv(layers[-1], out_channels, stride=stride)
            layers.append(tools.lrelu(tools.batchnorm(convolved), 0.2))

    # final layer: project to a single-channel score map, e.g. 30x30x1.
    with tf.variable_scope("layer_%d" % (len(layers) + 1)):
        layers.append(tf.sigmoid(tools.conv(layers[-1], out_channels=1, stride=1)))

    return layers[-1]
def SVHN(x, n_classes):
    """Small conv-net classifier for SVHN-style images.

    Args:
        x: 4-D image tensor [batch_size, img_width, img_height, img_channel].
        n_classes: number of output classes.

    Returns:
        Logits tensor from the final softmax-linear layer.

    Notes:
        In each conv layer the kernel shape is [kernel_size, kernel_size,
        in_channels, out_channels]; in_channels comes from the previous
        layer (or from the image itself for the first conv layer).
    """
    # BUGFIX(doc): the original placed this text *after* the import, so it
    # was a dead string expression, not the function docstring.
    import tools  # local import kept from the original code

    x = tools.conv('conv1', x, 64)
    x = tools.pool('pool1', x)
    x = tools.conv('conv2', x, 64)
    x = tools.pool('pool2', x)
    x = tools.conv('conv3', x, 128)
    x = tools.pool('pool3', x)
    x = tools.FC_layer('fc4', x, out_nodes=64)
    x = tools.drop_out('drop_out', x, keep_prob=0.5)
    x = tools.final_layer('softmax', x, out_nodes=n_classes)
    return x
def lenet5(self):
    """Build the LeNet-5 graph, storing each layer as an instance attribute.

    Reads self.input, self.is_trainable, self.keep_prob and self.n_classes;
    the final un-activated logits land in self.logits.
    """
    with tf.name_scope('LeNet5'):
        # Two conv + max-pool stages: 5x5 convolutions, 2x2 pooling.
        self.conv1 = tools.conv('conv1', self.input, 32,
                                kernel_size=[5, 5], stride=[1, 1, 1, 1],
                                is_trainable=self.is_trainable)
        self.pool1 = tools.pool('pool1', self.conv1,
                                kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                                is_max_pool=True)
        self.conv2 = tools.conv('conv2', self.pool1, 64,
                                kernel_size=[5, 5], stride=[1, 1, 1, 1],
                                is_trainable=self.is_trainable)
        self.pool2 = tools.pool('pool2', self.conv2,
                                kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                                is_max_pool=True)
        # Classifier head: fc -> dropout -> linear logits (no relu on fc2).
        self.fc1 = tools.fc_layer('fc1', self.pool2, out_nodes=512)
        self.dropout1 = tools.dropout('dropout1', self.fc1, self.keep_prob)
        self.logits = tools.fc_layer('fc2', self.dropout1,
                                     use_relu=False, out_nodes=self.n_classes)
def VGG16(x, isSty, is_pretrain=False):
    """VGG16 feature extractor returning a dict of selected activations.

    Subtracts the ImageNet per-channel means, then records conv1_2 and
    conv2_2 always; when `isSty` is truthy it additionally computes and
    records the deeper conv3_3 and conv4_3 activations (typical
    style-feature layers).

    NOTE(review): the original source was whitespace-mangled; the exact
    extent of the `if isSty:` block is reconstructed here (guarding the
    deeper layers) — confirm against the callers.
    """
    # Per-channel mean removal (ImageNet means).
    x = x - np.array([ 123.68 , 116.779, 103.939])
    every_layer_output = {}
    x = tools.conv('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    every_layer_output['conv1_2'] = x  # conv1_2
    x = tools.pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    every_layer_output['conv2_2'] = x  # conv2_2
    if isSty:
        # Style branch: continue down to conv3_3 and conv4_3.
        x = tools.pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        x = tools.conv('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        x = tools.conv('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        x = tools.conv('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        every_layer_output['conv3_3'] = x  # conv3_3
        x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        x = tools.conv('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        x = tools.conv('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        x = tools.conv('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
        every_layer_output['conv4_3'] = x  # conv4_3
    # x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    #
    # x = tools.conv('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    # x = tools.conv('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    # x = tools.conv('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    # x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    #
    # x = tools.FC_layer('fc6', x, out_nodes=4096)
    # #x = tools.batch_norm(x)
    # x = tools.FC_layer('fc7', x, out_nodes=4096)
    # #x = tools.batch_norm(x)
    # x = tools.FC_layer('fc8', x, out_nodes=n_classes)
    return every_layer_output
def Network(x, n_classes, dropout):
    """LeNet-style MNIST classifier returning logits.

    Args:
        x: flat image batch; reshaped here to [-1, 28, 28, 1].
        n_classes: number of output classes.
        dropout: dropout *rate* for tf.layers.dropout (fraction dropped).

    Relies on the module-level globals `is_training` and `is_pretrain`.
    """
    global is_training
    net = tf.reshape(x, [-1, 28, 28, 1])
    net = tools.conv('conv1', net, 32, kernel_size=[5, 5], is_pretrain=is_pretrain)
    net = tools.pool('pool1', net, is_max_pool=True)
    net = tools.conv('conv2', net, 64, is_pretrain=is_pretrain)
    net = tools.pool('pool2', net, is_max_pool=True)
    # Dense head: 1024-wide FC, dropout only while training, then logits.
    features = tools.FC_layer('FC1', net, 1024)
    features = tf.layers.dropout(features, rate=dropout, training=is_training)
    return tools.FC_layer('FC2', features, n_classes)
def Model_finetune(layer, n_classes, is_pretrain=True):
    """Build a VGG-like CNN (7x7 convs) and return the logits.

    Args:
        layer: batch of decoded images as a 4-D tensor.
        n_classes: number of output classes for the final FC layer.
        is_pretrain: forwarded to tools.conv for every conv layer
            (controls pretrained-weight / trainability handling there).

    Returns:
        Logits tensor of shape [batch, n_classes].

    Note: the original header comment described the arguments in the wrong
    order (it called n_classes a "batch size"); corrected here.
    """
    with tf.name_scope('Model_finetune'):
        # Five conv + max-pool stages, widening 64 -> 512 channels.
        layer = tools.conv('conv1_1', layer, 64, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool1'):
            layer = tools.pool('pool1', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        layer = tools.conv('conv2_1', layer, 128, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool2'):
            layer = tools.pool('pool2', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        layer = tools.conv('conv3_1', layer, 256, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool3'):
            layer = tools.pool('pool3', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        layer = tools.conv('conv4_1', layer, 512, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool4'):
            layer = tools.pool('pool4', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        layer = tools.conv('conv5_1', layer, 512, kernel_size=[7,7], stride=[1,1,1,1], is_pretrain=is_pretrain)
        with tf.name_scope('pool5'):
            layer = tools.pool('pool5', layer, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
        # Three FC layers with batch norm between; fc8 produces the logits.
        layer = tools.FC_layer('fc6', layer, out_nodes=2048)
        with tf.name_scope('batch_norm1'):
            layer = tools.batch_norm(layer)
        layer = tools.FC_layer('fc7', layer, out_nodes=2048)
        with tf.name_scope('batch_norm2'):
            layer = tools.batch_norm(layer)
        layer = tools.FC_layer('fc8', layer, out_nodes=n_classes)
    return layer
def build_renet(inputs, num_classes, use_bottleneck=False, num_residual_units=5, relu_leakiness=0.0):
    """ResNet-style classifier (CIFAR-type layout).

    Args:
        inputs: input image batch.
        num_classes: size of the final fully connected layer.
        use_bottleneck: choose bottleneck residual units over plain ones.
        num_residual_units: units per group (3 groups total).
        relu_leakiness: slope for the final leaky relu.

    Returns:
        (logits, predictions) where predictions = softmax(logits).
    """
    with tf.variable_scope('init'):
        x = inputs
        x = tools.conv('init_conv', x, 3, 3, 16, _stride_arr(1))

    strides = [1, 2, 2]
    activate_before_residual = [True, False, False]
    if use_bottleneck:
        res_func = tools.bottleneck_residual
        filters = [16, 64, 128, 256]
    else:
        res_func = tools.residual
        filters = [16, 16, 32, 64]

    # Three residual groups: the first unit of each group may change width
    # and stride, the remaining units are identity-shaped.
    for group in range(3):
        with tf.variable_scope('unit_%d_0' % (group + 1)):
            x = res_func(x, filters[group], filters[group + 1],
                         _stride_arr(strides[group]),
                         activate_before_residual[group])
        for unit in six.moves.range(1, num_residual_units):
            with tf.variable_scope('unit_%d_%d' % (group + 1, unit)):
                x = res_func(x, filters[group + 1], filters[group + 1],
                             _stride_arr(1), False)

    # Final bn + relu + global average pooling.
    with tf.variable_scope('unit_last'):
        x = tools.batch_norm('final_bn', x)
        x = tools.relu(x, relu_leakiness)
        x = tools.global_avg_pool(x)

    # Classifier head.
    with tf.variable_scope('logit'):
        logits = tools.fully_connected(x, num_classes)
        predictions = tf.nn.softmax(logits)

    return logits, predictions
def Network(x, n_classes, dropout, reuse, is_training):
    """ConvNet classifier over a dict input (key 'images'); returns logits.

    Args:
        x: dict with an 'images' entry holding a flat image batch.
        n_classes: number of output classes.
        dropout: dropout *rate* for tf.layers.dropout.
        reuse: variable-scope reuse flag.
        is_training: enables dropout when True.

    Relies on the module-level global `is_pretrain` for the conv layers.
    """
    with tf.variable_scope('ConvNet', reuse=reuse):
        images = x['images']
        net = tf.reshape(images, [-1, 28, 28, 1])
        net = tools.conv('conv1', net, 32, kernel_size=[5, 5], is_pretrain=is_pretrain)
        net = tools.pool('pool1', net, is_max_pool=True)
        net = tools.conv('conv2', net, 64, is_pretrain=is_pretrain)
        net = tools.pool('pool2', net, is_max_pool=True)
        dense = tools.FC_layer('FC1', net, 1024)
        dense = tf.layers.dropout(dense, rate=dropout, training=is_training)
        out = tools.FC_layer('FC2', dense, n_classes)
    return out
def LeNet5(self):
    """Build a small AlexNet-flavoured graph, one attribute per layer.

    Reads self.input, self.is_pretrain and self.n_classes; the linear
    (no-relu) logits end up in self.fc3.
    """
    with tf.name_scope('LeNet5'):
        # Two conv stages with 3x3 kernels and normalised 3x3/stride-2 pooling.
        self.conv1 = tools.conv('conv1', self.input, 16,
                                kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                is_pretrain=self.is_pretrain)
        self.pool1 = tools.pool('pool1', self.conv1,
                                kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1],
                                is_max_pool=True, is_norm=True)
        self.conv2 = tools.conv('conv2', self.pool1, 16,
                                kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                is_pretrain=self.is_pretrain)
        self.pool2 = tools.pool('pool2', self.conv2,
                                kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1],
                                is_max_pool=True, is_norm=True)
        # FC head: two 128-wide layers, each batch-normalised, then logits.
        self.fc1 = tools.FC_layer('local3', self.pool2, out_nodes=128)
        self.norm1 = tools.batch_norm('batch_norm1', self.fc1)
        self.fc2 = tools.FC_layer('local4', self.norm1, out_nodes=128)
        self.norm2 = tools.batch_norm('batch_norm2', self.fc2)
        self.fc3 = tools.FC_layer('softmax_linear', self.norm2,
                                  out_nodes=self.n_classes, use_relu=False)
def AlexNet(x, n_classes, is_pretrain=True):
    """Compact AlexNet-flavoured classifier; returns logits of width n_classes."""
    with tf.name_scope('AlexNet'):
        # Conv stages: 3x3 kernels, 3x3/stride-2 max pooling with normalisation.
        x = tools.conv('conv1', x, 16, kernel_size=[3, 3],
                       stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.pool('pool1', x, kernel=[1, 3, 3, 1],
                       stride=[1, 2, 2, 1], is_max_pool=True, is_norm=True)
        x = tools.conv('conv2', x, 16, kernel_size=[3, 3],
                       stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.pool('pool2', x, kernel=[1, 3, 3, 1],
                       stride=[1, 2, 2, 1], is_max_pool=True, is_norm=True)
        # FC head: two batch-normalised 128-wide layers, then the output layer.
        x = tools.FC_layer('local3', x, out_nodes=128)
        x = tools.batch_norm('batch_norm1', x)
        x = tools.FC_layer('local4', x, out_nodes=128)
        x = tools.batch_norm('batch_norm2', x)
        x = tools.FC_layer('softmax_linear', x, out_nodes=n_classes)
    return x
def mnist_net(x, prob):
    """Two-conv MNIST network; `prob` is the dropout keep probability.

    Returns 10-way logits.
    """
    with tf.variable_scope('mnist_net'):
        net = tf.reshape(x, shape=[-1, 28, 28, 1])
        # First conv + 2x2 max-pool stage.
        net = tools.conv('conv_layer_1', net, 32, kernel_size=[5, 5], stride=[1, 1, 1, 1])
        with tf.name_scope('max_pool_1'):
            net = tools.pool('max_pool_1', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1])
        # Second conv + 2x2 max-pool stage.
        net = tools.conv('conv_layer_2', net, 64, kernel_size=[5, 5], stride=[1, 1, 1, 1])
        with tf.name_scope('max_pool_2'):
            net = tools.pool('max_pool_2', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1])
        # Dense head: relu'd 1024-wide layer, dropout, then 10-way logits.
        net = tools.FC_layer('fc_layer_1', net, 1024)
        net = tf.nn.relu(net)
        net = tools.drop(net, prob)
        net = tools.FC_layer('fc_layer_2', net, 10)
    return net
def VGG16(x, n_classes, is_pretrain=True):
    """Full VGG16 classifier; returns logits of width n_classes.

    Args:
        x: 4-D input image batch.
        n_classes: number of output classes.
        is_pretrain: forwarded to every tools.conv call.

    BUGFIX: the original reused the name 'pool3' for the 4th and 5th
    pooling layers (copy-paste); they are now 'pool4' and 'pool5'.
    Max-pooling has no variables, so no checkpoint keys change.
    """
    x = tools.conv('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool4', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool5', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    # FC head with batch norm between the two 4096-wide layers.
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc8', x, out_nodes=n_classes)
    return x
def VGG16N(x, n_classes, is_pretrain=True):
    """Narrow VGG16 variant with a dropout-regularised FC head.

    NOTE: the `n_classes` parameter is accepted but unused in the original —
    the final layer fc9 is hard-coded to 24 output nodes; preserved here.
    """
    with tf.name_scope('VGG16'):
        # Conv stages as (name prefix, per-conv channel widths); each stage
        # is followed by a named 2x2/stride-2 max pool.
        stage_plan = [
            ('conv1', [8, 8]),
            ('conv2', [16, 16]),
            ('conv3', [32, 32, 32]),
            ('conv4', [64, 64, 64]),
            ('conv5', [64, 64, 64]),
        ]
        for stage, (prefix, widths) in enumerate(stage_plan, 1):
            for idx, width in enumerate(widths, 1):
                x = tools.conv('%s_%d' % (prefix, idx), x, width,
                               kernel_size=[3, 3], stride=[1, 1, 1, 1],
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage, x,
                               kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                               is_max_pool=True)
        # Three 512-wide FC layers, each followed by 50% dropout, then the
        # 24-way output layer.
        x = tools.FC_layer('fc6', x, out_nodes=512)
        x = tf.nn.dropout(x, 0.5)
        #with tf.name_scope('batch_norm1'):
        #x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=512)
        x = tf.nn.dropout(x, 0.5)
        #with tf.name_scope('batch_norm2'):
        #x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=512)
        x = tf.nn.dropout(x, 0.5)
        x = tools.FC_layer('fc9', x, out_nodes=24)
    return x

#%%
def create_generator(generator_inputs, generator_outputs_channels):
    """U-Net generator: 5 strided-conv encoder layers, 5 deconv decoder
    layers with skip connections (channel-concat) to the mirrored encoder
    layer, ending in a tanh output.

    Example trace (from a 32x32 single-channel run):
        encoder_1 .. encoder_5: 16x16x64 -> 8x8x128 -> 4x4x256 -> 2x2x512 -> 1x1x512
        decoder_5 .. decoder_1: 2x2x512 -> 4x4x512 -> 8x8x256 -> 16x16x64 -> 32x32x1

    Args:
        generator_inputs: 4-D input tensor [batch, H, W, in_channels].
        generator_outputs_channels: channel count of the generated image.

    Returns:
        The tanh-activated output of decoder_1 (same spatial size as input).
    """
    layers = []
    ngf = 64

    # encoder_1: [batch, 32, 256, in_channels] => [batch, 16, 128, ngf]
    # (no lrelu/batchnorm on the very first encoder layer)
    with tf.variable_scope("encoder_1"):
        output = tools.conv(generator_inputs, ngf, stride=2)
        layers.append(output)

    layer_specs = [
        ngf * 2,  # encoder_2: [batch, 16, 128, ngf]     => [batch, 8, 64, ngf * 2]
        ngf * 4,  # encoder_3: [batch, 8, 64, ngf * 2]   => [batch, 4, 32, ngf * 4]
        ngf * 8,  # encoder_4: [batch, 4, 32, ngf * 4]   => [batch, 2, 16, ngf * 8]
        ngf * 8,  # encoder_5: [batch, 2, 16, ngf * 8]   => [batch, 1, 8, ngf * 8]
    ]

    for out_channels in layer_specs:
        with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
            rectified = tools.lrelu(layers[-1], 0.2)
            # [batch, h, w, c] => [batch, h/2, w/2, out_channels]
            convolved = tools.conv(rectified, out_channels, stride=2)
            output = tools.batchnorm(convolved)
            layers.append(output)

    # Decoder spec: (out_channels, dropout rate); dropout only on decoder_5.
    layer_specs = [
        (ngf * 8, 0.5),  # decoder_5: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8]
        (ngf * 8, 0.0),  # decoder_4: [batch, 2, 2, ngf * 8] => [batch, 4, 4, ngf * 8]
        (ngf * 4, 0.0),  # decoder_3: [batch, 4, 4, ngf * 8] => [batch, 8, 8, ngf * 4]
        (ngf, 0.0),      # decoder_2: [batch, 8, 8, ngf * 4] => [batch, 16, 16, ngf]
    ]

    num_encoder_layers = len(layers)
    for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):
        # Mirror index of the encoder layer whose output is concatenated in.
        skip_layer = num_encoder_layers - decoder_layer - 1
        with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
            if decoder_layer == 0:
                # First decoder layer doesn't have skip connections
                # since it is directly connected to the skip_layer.
                input = layers[-1]
            else:
                input = tf.concat([layers[-1], layers[skip_layer]], axis=3)
            rectified = tf.nn.relu(input)
            # [batch, h, w, c] => [batch, h*2, w*2, out_channels]
            output = tools.deconv(rectified, out_channels)
            output = tools.batchnorm(output)
            if dropout > 0.0:
                # TF1 dropout: keep_prob = 1 - drop rate.
                output = tf.nn.dropout(output, keep_prob=1 - dropout)
            layers.append(output)

    # decoder_1: concat with encoder_1's output, deconv to the requested
    # channel count, tanh to [-1, 1].
    with tf.variable_scope("decoder_1"):
        input = tf.concat([layers[-1], layers[0]], axis=3)
        rectified = tf.nn.relu(input)
        output = tools.deconv(rectified, generator_outputs_channels)
        output = tf.tanh(output)
        layers.append(output)

    return layers[-1]
def DehazeNet(x):
    """Residual dehazing network.

    Downsamples the hazy input with two stride-2 convs, applies four
    residual blocks (each: convs -> non-activated conv -> add skip ->
    activation), upsamples back with two deconvs, and finally adds the
    original input as a global residual before the last activation.

    Args:
        x: hazy image batch.

    Returns:
        The dehazed image tensor (same spatial size as the input).
    """
    with tf.variable_scope('DehazeNet'):
        x_s = x  # keep the raw input for the final global skip connection
        # x = tools.conv('DN_conv1_1', x_s, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tools.conv('DN_conv1_2', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # with tf.name_scope('pool1'):
        #     x = tools.pool('pool1', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        # with tf.name_scope('pool2'):
        #     x = tools.pool('pool2', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)

        # Two stride-2 convs: 4x spatial downsampling to 128 channels.
        x = tools.conv('upsampling_1', x_s, 128, kernel_size=[3, 3], stride=[1, 2, 2, 1])
        x = tools.conv('upsampling_2', x, 128, kernel_size=[3, 3], stride=[1, 2, 2, 1])

        # Residual block 1 (2 convs + non-activated conv, skip from x1).
        x1 = tools.conv('DN_conv2_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv2_2', x1, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv_nonacti('DN_conv2_3', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tf.add(x, x1)
        # x = tools.batch_norm(x)
        x = tools.acti_layer(x)

        # Residual block 2.
        x2 = tools.conv('DN_conv3_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv3_2', x2, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv_nonacti('DN_conv3_3', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tf.add(x, x2)
        # x = tools.batch_norm(x)
        x = tools.acti_layer(x)

        # Residual block 3 (deeper: four convs before the non-activated one).
        x3 = tools.conv('DN_conv4_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv4_2', x3, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv4_3', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv4_4', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv_nonacti('DN_conv4_5', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tf.add(x, x3)
        # x = tools.batch_norm(x)
        x = tools.acti_layer(x)

        # Residual block 4 (same shape as block 3).
        x4 = tools.conv('DN_conv5_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv5_2', x4, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv5_3', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv('DN_conv5_4', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tools.conv_nonacti('DN_conv5_5', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x = tf.add(x, x4)
        # x = tools.batch_norm(x)
        x = tools.acti_layer(x)

        # x5 = tools.conv('DN_conv5_6', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tools.conv('DN_conv5_7', x5, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tools.conv_nonacti('DN_conv5_8', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tf.add(x, x5)
        # x = tools.acti_layer(x)

        # Upsample back to the input resolution.
        # NOTE(review): output_shape hard-codes batch size 35 and 224x224
        # inputs — this breaks for other batch sizes; confirm with callers.
        x = tools.deconv('DN_deconv1', x, 64, output_shape=[35, 112, 112, 64], kernel_size=[3, 3], stride=[1, 2, 2, 1])
        x = tools.deconv('DN_deconv2', x, 64, output_shape=[35, 224, 224, 64], kernel_size=[3, 3], stride=[1, 2, 2, 1])

        # x = tools.conv('DN_conv6_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tools.conv('DN_conv6_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        # x = tf.add(x, x_s)
        # x = tools.batch_norm(x)
        # x = tools.acti_layer(x)

        # Project to 3 channels and add the hazy input (global residual).
        x_r = tools.conv_nonacti('DN_conv7_1', x, 3, kernel_size=[3, 3], stride=[1, 1, 1, 1])
        x_r = tf.add(x_r, x_s)
        x_r = tools.acti_layer(x_r)
    return x_r
def VGG16_DP(x, n_classes, is_pretrain=True, flag=0):
    """VGG16 with a dropout-regularised FC head.

    Args:
        x: 4-D input image batch.
        n_classes: number of output classes.
        is_pretrain: forwarded to every tools.conv call.
        flag: 0 enables dropout (keep_prob 0.5); any other value makes the
            dropout layers no-ops (keep_prob 1).

    Returns:
        Logits from tools.last_FC of width n_classes.
    """
    with tf.name_scope('VGG16'):
        # Conv stages as (name prefix, per-conv channel widths); each stage
        # is followed by a named 2x2/stride-2 max pool.
        stage_plan = [
            ('conv1', [64, 64]),
            ('conv2', [128, 128]),
            ('conv3', [256, 256, 256]),
            ('conv4', [512, 512, 512]),
            ('conv5', [512, 512, 512]),
        ]
        for stage, (prefix, widths) in enumerate(stage_plan, 1):
            for idx, width in enumerate(widths, 1):
                x = tools.conv('%s_%d' % (prefix, idx), x, width,
                               kernel_size=[3, 3], stride=[1, 1, 1, 1],
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                x = tools.pool('pool%d' % stage, x,
                               kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                               is_max_pool=True)

        # FC head: two 4096-wide layers, each dropout'd then relu'd.
        keep_prob = 0.5 if flag == 0 else 1
        x = tools.FC_layer('fc6', x, out_nodes=4096)
        with tf.name_scope('dropout1'):
            dropout1 = tf.nn.dropout(x, keep_prob)
        x = tf.nn.relu(dropout1)
        x = tools.FC_layer('fc7', x, out_nodes=4096)
        with tf.name_scope('dropout2'):
            dropout2 = tf.nn.dropout(x, keep_prob)
        x = tf.nn.relu(dropout2)
        x = tools.last_FC('fc8', x, out_nodes=n_classes)
    return x
# Training-script configuration and graph construction (MNIST-shaped input).
is_pretrain = False
learning_rate = 0.5
MAX_STEP = 5000
train_log_dir = './/logs//train//'
val_log_dir = './/logs//val//'

with tf.Graph().as_default():
    with tf.name_scope('inputs'):
        # Flat 784-pixel images and 10-way one-hot labels.
        x = tf.placeholder(tf.float32, shape=[None, 784])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        x_images = tf.reshape(x, [-1, 28, 28, 1])
    with tf.name_scope('conv_net'):
        outputs = tools.conv('conv1_1', x_images, 32, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        #outputs = tools.conv('conv1_2', outputs, 32, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        outputs = tools.pool('pool1', outputs, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        outputs = tools.conv('conv2_1', outputs, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
def Myvgg(x, n_class, is_pretrain=True):
    """Reduced VGG-style classifier; returns logits of width n_class."""
    with tf.name_scope('Myvgg'):
        # Three conv stages (two 3x3 convs each), each ending in a 2x2 pool.
        for block_idx, widths in enumerate(([64, 64], [128, 128], [256, 256]), 1):
            for conv_idx, width in enumerate(widths, 1):
                x = tools.conv('conv%d_%d' % (block_idx, conv_idx), x, width,
                               kernel_size=[3, 3], stride=[1, 1, 1, 1],
                               is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % block_idx):
                x = tools.pool('pool%d' % block_idx, x,
                               ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1],
                               is_max_pool=True)
        # FC head: two 2048-wide layers with batch norm (used here instead
        # of dropout to curb overfitting), then the output layer.
        x = tools.FC_layer('fc6', x, out_nodes=2048)
        with tf.name_scope('batch_norma1'):  # scope name kept verbatim (sic)
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=2048)
        #x = tools.dropout(x,0.5)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)
    return x
def VGG16(x, n_classes, is_pretrain=True):
    """Full VGG16 classifier; returns logits of width n_classes.

    Args:
        x: 4-D input image batch.
        n_classes: number of output classes.
        is_pretrain: forwarded to every tools.conv call.

    BUGFIX: the original reused the name 'pool3' for the 4th and 5th
    pooling layers (copy-paste); they are now 'pool4' and 'pool5'.
    Max-pooling has no variables, so no checkpoint keys change.
    """
    x = tools.conv('conv1_1', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv2_1', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv3_1', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv4_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool4', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    x = tools.conv('conv5_1', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3,3], stride=[1,1,1,1], is_pretrain=is_pretrain)
    x = tools.pool('pool5', x, kernel=[1,2,2,1], stride=[1,2,2,1], is_max_pool=True)
    # FC head (batch norm intentionally disabled in this variant).
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    #x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    #x = tools.batch_norm(x)
    x = tools.FC_layer('fc8', x, out_nodes=n_classes)
    return x
def MyResNet(x, n_class, is_pretrain=True):
    """Small residual classifier.

    Three two-branch residual stages (each followed by 2x2 max-pooling),
    then a batch-normalised FC head producing n_class logits.
    """
    def _res_pair(inp, width, ksize, relu_name, plain_name):
        # Two parallel convs from the same input: one relu-activated, one
        # not; their sum is relu'd to form the stage output.
        a = tools.conv(relu_name, inp, width, kernel_size=ksize,
                       stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        b = tools.conv_no_relu(plain_name, inp, width, kernel_size=ksize,
                               stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        return tf.nn.relu(tf.add(a, b), name='relu')

    with tf.name_scope('MyResNet'):
        x = _res_pair(x, 64, [3, 3], 'conv1_1', 'conv1_2')
        with tf.name_scope('pool1'):
            x = tools.pool('pool1', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        x = _res_pair(x, 128, [3, 3], 'conv2_1', 'conv2_2')
        with tf.name_scope('pool2'):
            x = tools.pool('pool2', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        # Third stage uses 1x1 kernels.
        x = _res_pair(x, 256, [1, 1], 'conv3_1', 'conv3_2')
        # x = tools.conv('conv3_3', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool3'):
            x = tools.pool('pool3', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        # FC head: batch norm instead of dropout between the 512-wide layers.
        x = tools.FC_layer('fc6', x, out_nodes=512)
        with tf.name_scope('batch_norma1'):  # scope name kept verbatim (sic)
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc7', x, out_nodes=512)
        #x = tools.dropout(x,0.5)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)
    return x
def vgg16(x, CLASS_NUM, _dropout, is_training):
    """VGG-16 backbone with multi-scale fusion, dilated convs and a CAM head.

    The conv3_3 / conv4_3 / conv5_3 feature maps are each projected to 256
    channels with a 1x1 conv, bilinearly resized to 56x56 and concatenated;
    four parallel dilated convs (rates 1/2/4/8) refine the fused map, which
    then feeds a global-average-pooled class-activation (CAM) classifier.

    Args:
        x: 4-D input tensor -- presumably [batch, 224, 224, C] given the
            final resize back to 224x224; confirm with the caller.
        CLASS_NUM: number of output classes.
        _dropout: unused here; kept for interface compatibility.
        is_training: training-mode switch forwarded to the conv layers.

    Returns:
        (annotation_pred, output, fmp):
            annotation_pred -- per-example argmax class index,
            output -- [batch, CLASS_NUM] logits,
            fmp -- fused 512-channel feature map resized to 224x224.

    Fix: the original passed the undefined name `_training` to the 1x1 and
    dilated convs (NameError at graph-build time); it now uses `is_training`.
    """
    with tf.variable_scope('layer1_1'):
        conv1_1 = tools.conv2d(x, [3, 3], 64, 1, is_training, True, True, True)
    with tf.variable_scope('layer1_2'):
        conv1_2 = tools.conv2d(conv1_1, [3, 3], 64, 1, is_training, True, True, True)
    with tf.variable_scope('pool1'):
        pool1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    with tf.variable_scope('layer2_1'):
        conv2_1 = tools.conv2d(pool1, [3, 3], 128, 1, is_training, True, True, True)
    with tf.variable_scope('layer2_2'):
        conv2_2 = tools.conv2d(conv2_1, [3, 3], 128, 1, is_training, True, True, True)
    with tf.variable_scope('pool2'):
        pool2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    with tf.variable_scope('layer3_1'):
        conv3_1 = tools.conv2d(pool2, [3, 3], 256, 1, is_training, True, True, True)
    with tf.variable_scope('layer3_2'):
        conv3_2 = tools.conv2d(conv3_1, [3, 3], 256, 1, is_training, True, True, True)
    with tf.variable_scope('layer3_3'):
        conv3_3 = tools.conv2d(conv3_2, [3, 3], 256, 1, is_training, True, True, True)
    with tf.variable_scope('pool3'):
        pool3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    with tf.variable_scope('layer4_1'):
        conv4_1 = tools.conv2d(pool3, [3, 3], 512, 1, is_training, True, True, True)
    with tf.variable_scope('layer4_2'):
        conv4_2 = tools.conv2d(conv4_1, [3, 3], 512, 1, is_training, True, True, True)
    with tf.variable_scope('layer4_3'):
        conv4_3 = tools.conv2d(conv4_2, [3, 3], 512, 1, is_training, True, True, True)
    with tf.variable_scope('pool4'):
        pool4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    # Stage 5 convs disable the 3rd flag (no batch norm, presumably -- verify
    # against tools.conv2d's signature).
    with tf.variable_scope('layer5_1'):
        conv5_1 = tools.conv2d(pool4, [3, 3], 512, 1, is_training, True, False, True)
    with tf.variable_scope('layer5_2'):
        conv5_2 = tools.conv2d(conv5_1, [3, 3], 512, 1, is_training, True, False, True)
    with tf.variable_scope('layer5_3'):
        conv5_3 = tools.conv2d(conv5_2, [3, 3], 512, 1, is_training, True, False, True)
    # NOTE(review): `conv` and `dil_conv` below are bare names, not tools.conv
    # -- presumably imported elsewhere in this file; verify.
    fmp_3 = conv(conv3_3, kernel_size=[1, 1], out_channels=256, stride=[1, 1, 1, 1],
                 is_pretrain=is_training, bias=False, bn=False, layer_name='conv_3')
    fmp_3 = tf.image.resize_bilinear(fmp_3, [56, 56])
    fmp_4 = conv(conv4_3, kernel_size=[1, 1], out_channels=256, stride=[1, 1, 1, 1],
                 is_pretrain=is_training, bias=False, bn=False, layer_name='conv_4')
    fmp_4 = tf.image.resize_bilinear(fmp_4, [56, 56])
    fmp_5 = conv(conv5_3, kernel_size=[1, 1], out_channels=256, stride=[1, 1, 1, 1],
                 is_pretrain=is_training, bias=False, bn=False, layer_name='conv_5')
    fmp_5 = tf.image.resize_bilinear(fmp_5, [56, 56])
    fmp = tf.concat([fmp_3, fmp_4, fmp_5], -1)
    with tf.variable_scope('dilation'):
        # Parallel dilated convs widen the receptive field without resolution loss.
        fmp_dil_1 = dil_conv(fmp, kernel_size=[3, 3], out_channels=256, rate=1,
                             is_pretrain=is_training, bias=False, bn=False, layer_name='dilation1')
        fmp_dil_2 = dil_conv(fmp, kernel_size=[3, 3], out_channels=256, rate=2,
                             is_pretrain=is_training, bias=False, bn=False, layer_name='dilation2')
        fmp_dil_3 = dil_conv(fmp, kernel_size=[3, 3], out_channels=256, rate=4,
                             is_pretrain=is_training, bias=False, bn=False, layer_name='dilation3')
        fmp_dil_4 = dil_conv(fmp, kernel_size=[3, 3], out_channels=256, rate=8,
                             is_pretrain=is_training, bias=False, bn=False, layer_name='dilation4')
        fmp_dilation = tf.concat([fmp_dil_1, fmp_dil_2, fmp_dil_3, fmp_dil_4], -1)
        fmp = tools.conv(fmp_dilation, kernel_size=[1, 1], out_channels=512, stride=[1, 1, 1, 1],
                         is_pretrain=is_training, bias=False, bn=False, layer_name='conv_dilation')
    gap = tf.reduce_mean(fmp, [1, 2])  # global average pooling -> [batch, 512]
    with tf.variable_scope('CAM_fc'):
        cam_w = tf.get_variable(
            'CAM_W', shape=[512, CLASS_NUM],
            initializer=tf.contrib.layers.xavier_initializer(0.0))
        output = tf.matmul(gap, cam_w)
    annotation_pred = tf.argmax(output, axis=-1)
    fmp = tf.image.resize_bilinear(fmp, [224, 224])
    return annotation_pred, output, fmp
def VGG19(x, n_classes, is_pretrain=True):
    """VGG-19 classifier graph.

    Five conv stages with 2/2/4/4/4 conv layers of width 64/128/256/512/512,
    each stage ending in a 2x2 pool, followed by fc6/fc7 (4096 units, dropout
    keep_prob=0.5) and the fc8 classification layer.

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final FC layer.
        is_pretrain: forwarded to tools.conv.

    Returns:
        The fc8 output tensor, shape [batch, n_classes].
    """
    stages = ((1, 2, 64), (2, 2, 128), (3, 4, 256), (4, 4, 512), (5, 4, 512))
    for stage, depth, width in stages:
        for idx in range(1, depth + 1):
            x = tools.conv('conv%d_%d' % (stage, idx), x, width,
                           kernel_size=[3, 3], stride=[1, 1, 1, 1],
                           is_pretrain=is_pretrain)
        x = tools.pool('pool%d' % stage, x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1])
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tf.nn.dropout(x, keep_prob=0.5)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tf.nn.dropout(x, keep_prob=0.5)
    x = tools.FC_layer('fc8', x, out_nodes=n_classes)
    return x
def VGG16PlanInferencet(x, keep_prob, n_classes=12, is_pretrain=True):
    """VGG-16 variant with batch-normalised FC layers and dropout before fc8.

    (Function name typo -- "PlanInferencet" -- kept for caller compatibility.)

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        keep_prob: dropout keep probability applied after fc7.
        n_classes: width of the final FC layer.
        is_pretrain: forwarded to tools.conv.

    Returns:
        The fc8 output tensor, shape [batch, n_classes].

    Fix: the 4th and 5th pooling layers were both named 'pool3' in the
    original (copy-paste); renamed 'pool4' and 'pool5'.
    """
    x = tools.conv('conv1_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv1_2', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool1', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv2_1', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv2_2', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool2', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv3_1', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_2', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv3_3', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.pool('pool3', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv4_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv4_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    # BUG FIX: was 'pool3' (duplicate name).
    x = tools.pool('pool4', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv5_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = tools.conv('conv5_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
    # BUG FIX: was 'pool3' (duplicate name).
    x = tools.pool('pool5', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.FC_layer('fc6', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096)
    x = tools.batch_norm(x)
    x_drop = tf.nn.dropout(x, keep_prob)
    x = tools.FC_layer('fc8', x_drop, out_nodes=n_classes)
    return x
def VGG16N(x, n_classes, IS_PRETRAIN):
    """VGG-16 with a per-layer pretrain flag.

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final layer.
        IS_PRETRAIN: indexable sequence of at least 15 flags, one per weighted
            layer (13 convs + fc6 + fc7), forwarded as is_pretrain.

    Returns:
        Output of tools.final_layer('fc8'), shape [batch, n_classes].

    Fix: the 4th and 5th pooling layers were both named 'pool3' in the
    original (copy-paste); renamed 'pool4' and 'pool5'.
    """
    import tools
    x = tools.conv('conv1_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[0])
    x = tools.conv('conv1_2', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[1])
    x = tools.pool('pool1', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv2_1', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[2])
    x = tools.conv('conv2_2', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[3])
    x = tools.pool('pool2', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv3_1', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[4])
    x = tools.conv('conv3_2', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[5])
    x = tools.conv('conv3_3', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[6])
    x = tools.pool('pool3', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv4_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[7])
    x = tools.conv('conv4_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[8])
    x = tools.conv('conv4_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[9])
    # BUG FIX: was 'pool3' (duplicate name).
    x = tools.pool('pool4', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.conv('conv5_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[10])
    x = tools.conv('conv5_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[11])
    x = tools.conv('conv5_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=IS_PRETRAIN[12])
    # BUG FIX: was 'pool3' (duplicate name).
    x = tools.pool('pool5', x, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
    x = tools.FC_layer('fc6', x, out_nodes=4096, is_pretrain=IS_PRETRAIN[13])
    x = tools.batch_norm(x)
    x = tools.FC_layer('fc7', x, out_nodes=4096, is_pretrain=IS_PRETRAIN[14])
    x = tools.batch_norm(x)
    x = tools.final_layer('fc8', x, out_nodes=n_classes)
    return x
def VGG16N(x, n_classes, is_pretrain=True):
    """VGG-16 with batch-normalised FC layers.

    Standard 2-2-3-3-3 conv stack with 2x2 pooling after each stage, then
    fc6 -> batch_norm -> fc7 -> batch_norm -> fc8.

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final FC layer.
        is_pretrain: forwarded to tools.conv.

    Returns:
        The fc8 output tensor, shape [batch, n_classes].
    """
    with tf.name_scope('VGG16'):
        net = tools.conv('conv1_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv1_2', net, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool1'):
            net = tools.pool('pool1', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.conv('conv2_1', net, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv2_2', net, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool2'):
            net = tools.pool('pool2', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.conv('conv3_1', net, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv3_2', net, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv3_3', net, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool3'):
            net = tools.pool('pool3', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.conv('conv4_1', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv4_2', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv4_3', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool4'):
            net = tools.pool('pool4', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.conv('conv5_1', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv5_2', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        net = tools.conv('conv5_3', net, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool5'):
            net = tools.pool('pool5', net, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.FC_layer('fc6', net, out_nodes=4096)
        with tf.name_scope('batch_norm1'):
            net = tools.batch_norm(net)
        net = tools.FC_layer('fc7', net, out_nodes=4096)
        with tf.name_scope('batch_norm2'):
            net = tools.batch_norm(net)
        return tools.FC_layer('fc8', net, out_nodes=n_classes)
def VGG16N(x, n_classes, is_pretrain=True):
    """VGG-16 graph built under a single name scope.

    (Original note, translated: fold the nodes we don't need to see into one
    big node to tidy the TensorBoard graph.)

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final FC layer.
        is_pretrain: forwarded to tools.conv.

    Returns:
        The fc8 output tensor, shape [batch, n_classes].
    """
    with tf.name_scope('VGG16'):
        net = x
        # (stage index, number of convs, channel width) for each VGG-16 stage.
        for stage, depth, width in ((1, 2, 64), (2, 2, 128), (3, 3, 256),
                                    (4, 3, 512), (5, 3, 512)):
            for idx in range(1, depth + 1):
                net = tools.conv('conv%d_%d' % (stage, idx), net, width,
                                 kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                 is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                net = tools.pool('pool%d' % stage, net, kernel=[1, 2, 2, 1],
                                 stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.FC_layer('fc6', net, out_nodes=4096)
        net = tools.FC_layer('fc7', net, out_nodes=4096)
        net = tools.FC_layer('fc8', net, out_nodes=n_classes)
        return net
def VGG16(x, n_class, is_pretrain=True):
    """VGG-16 with batch-normalised FC layers, built under one name scope
    (the name scope just tidies the TensorBoard graph).

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_class: width of the final FC layer.
        is_pretrain: forwarded to tools.conv.

    Returns:
        The fc8 output tensor, shape [batch, n_class].

    Fix: conv4_* and conv5_* used stride [1, 2, 2, 1], which (combined with
    the pools) downsamples twice per stage -- inconsistent with standard
    VGG-16 and with every other VGG variant in this file, where all 3x3 convs
    use stride 1 and only the pools downsample. Changed to [1, 1, 1, 1].
    """
    with tf.name_scope('VGG16'):
        x = tools.conv('conv1_1', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv1_2', x, 64, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool1'):
            x = tools.pool('pool1', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        x = tools.conv('conv2_1', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv2_2', x, 128, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool2'):
            x = tools.pool('pool2', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        x = tools.conv('conv3_1', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv3_2', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv3_3', x, 256, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool3'):
            x = tools.pool('pool3', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        # BUG FIX: strides below were [1, 2, 2, 1].
        x = tools.conv('conv4_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv4_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv4_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool4'):
            x = tools.pool('pool4', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        x = tools.conv('conv5_1', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv5_2', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        x = tools.conv('conv5_3', x, 512, kernel_size=[3, 3], stride=[1, 1, 1, 1], is_pretrain=is_pretrain)
        with tf.name_scope('pool5'):
            x = tools.pool('pool5', x, ksize=[1, 2, 2, 1], stride=[1, 2, 2, 1], is_max_pool=True)
        x = tools.FC_layer('fc6', x, out_nodes=4096)
        with tf.name_scope('batch_norma1'):  # (sic) scope name kept as-is for graph/checkpoint compatibility
            x = tools.batch_norm(x)  # batch norm can avoid overfit, more efficient than dropout
        x = tools.FC_layer('fc7', x, out_nodes=4096)
        with tf.name_scope('batch_norm2'):
            x = tools.batch_norm(x)
        x = tools.FC_layer('fc8', x, out_nodes=n_class)
        return x
def VGG16N(x, n_classes, keep_prob, is_pretrain=True):
    """VGG-16 with dropout after fc6 and fc7 and a softmax output layer.

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final (softmax) layer.
        keep_prob: dropout keep probability for fc6 and fc7.
        is_pretrain: forwarded to tools.conv.

    Returns:
        (logits, features): the 'fc8' softmax-layer output and the fc7
        activations after dropout (the features fed into fc8).
    """
    with tf.name_scope('VGG16'):
        net = x
        # (stage index, number of convs, channel width) for each VGG-16 stage.
        for stage, depth, width in ((1, 2, 64), (2, 2, 128), (3, 3, 256),
                                    (4, 3, 512), (5, 3, 512)):
            for idx in range(1, depth + 1):
                net = tools.conv('conv%d_%d' % (stage, idx), net, width,
                                 kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                 is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                net = tools.pool('pool%d' % stage, net, kernel=[1, 2, 2, 1],
                                 stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.FC_layer('fc6', net, out_nodes=4096)
        net = tf.nn.dropout(net, keep_prob)
        net = tools.FC_layer('fc7', net, out_nodes=4096)
        net = tf.nn.dropout(net, keep_prob)
        features = net
        logits = tools.softmax_layer('fc8', net, out_nodes=n_classes)
        return logits, features
#%%
def VGG16_BN(x, n_classes, is_pretrain=True, is_training=True):
    """VGG-16 whose FC layers use a mode-aware batch-norm wrapper.

    Args:
        x: 4-D input tensor [batch, height, width, channels].
        n_classes: width of the final layer.
        is_pretrain: forwarded to tools.conv.
        is_training: True while training, False at test time (controls the
            batch-norm wrapper's statistics).

    Returns:
        Output of tools.last_FC('fc8'), shape [batch, n_classes].
    """
    with tf.name_scope('VGG16'):
        net = x
        # (stage index, number of convs, channel width) for each VGG-16 stage.
        for stage, depth, width in ((1, 2, 64), (2, 2, 128), (3, 3, 256),
                                    (4, 3, 512), (5, 3, 512)):
            for idx in range(1, depth + 1):
                net = tools.conv('conv%d_%d' % (stage, idx), net, width,
                                 kernel_size=[3, 3], stride=[1, 1, 1, 1],
                                 is_pretrain=is_pretrain)
            with tf.name_scope('pool%d' % stage):
                net = tools.pool('pool%d' % stage, net, kernel=[1, 2, 2, 1],
                                 stride=[1, 2, 2, 1], is_max_pool=True)
        net = tools.FC_layer('fc6', net, out_nodes=4096)
        with tf.name_scope('batch_norm1'):
            net = tools.batch_norm_wrapper(net, is_training=is_training)
        net = tools.FC_layer('fc7', net, out_nodes=4096)
        with tf.name_scope('batch_norm2'):
            net = tools.batch_norm_wrapper(net, is_training=is_training)
        return tools.last_FC('fc8', net, out_nodes=n_classes)
def VGG16(self):
    """Build the VGG-16 graph on self.input.

    Every layer output is stored as an attribute (self.conv1_1 ... self.fc8)
    so callers can inspect intermediate activations. Uses self.is_pretrain
    and self.n_classes.
    """
    with tf.name_scope('VGG16'):
        net = self.input
        # (stage index, conv widths) for each VGG-16 stage.
        for stage, widths in ((1, (64, 64)), (2, (128, 128)), (3, (256,) * 3),
                              (4, (512,) * 3), (5, (512,) * 3)):
            for idx, width in enumerate(widths, start=1):
                name = 'conv%d_%d' % (stage, idx)
                net = tools.conv(name, net, width, kernel_size=[3, 3],
                                 stride=[1, 1, 1, 1], is_pretrain=self.is_pretrain)
                setattr(self, name, net)
            pool_name = 'pool%d' % stage
            net = tools.pool(pool_name, net, kernel=[1, 2, 2, 1],
                             stride=[1, 2, 2, 1], is_max_pool=True)
            setattr(self, pool_name, net)
        self.fc6 = tools.FC_layer('fc6', net, out_nodes=4096)
        self.batch_norm1 = tools.batch_norm('batch_norm1', self.fc6)
        self.fc7 = tools.FC_layer('fc7', self.batch_norm1, out_nodes=4096)
        self.batch_norm2 = tools.batch_norm('batch_norm2', self.fc7)
        self.fc8 = tools.FC_layer('fc8', self.batch_norm2, out_nodes=self.n_classes)
def AlexNet(self):
    """Build the AlexNet graph on self.input.

    Each layer output is also stored as an attribute (self.conv1 ...
    self.fc3). Pools use is_norm=True (local response normalisation inside
    tools.pool -- confirm against tools.pool). Uses self.is_pretrain and
    self.n_classes.
    """
    with tf.name_scope('AlexNet'):
        net = tools.conv('conv1', self.input, 96, kernel_size=[11, 11],
                         stride=[1, 4, 4, 1], is_pretrain=self.is_pretrain)
        self.conv1 = net
        net = tools.pool('pool1', net, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1],
                         is_max_pool=True, is_norm=True)
        self.pool1 = net
        net = tools.conv('conv2', net, 256, kernel_size=[5, 5],
                         stride=[1, 1, 1, 1], is_pretrain=self.is_pretrain)
        self.conv2 = net
        net = tools.pool('pool2', net, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1],
                         is_max_pool=True, is_norm=True)
        self.pool2 = net
        net = tools.conv('conv3', net, 384, kernel_size=[3, 3],
                         stride=[1, 1, 1, 1], is_pretrain=self.is_pretrain)
        self.conv3 = net
        net = tools.conv('conv4', net, 384, kernel_size=[3, 3],
                         stride=[1, 1, 1, 1], is_pretrain=self.is_pretrain)
        self.conv4 = net
        net = tools.conv('conv5', net, 256, kernel_size=[3, 3],
                         stride=[1, 1, 1, 1], is_pretrain=self.is_pretrain)
        self.conv5 = net
        net = tools.pool('pool5', net, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1],
                         is_max_pool=True, is_norm=True)
        self.pool5 = net
        self.fc1 = tools.FC_layer('fc6', net, out_nodes=4096)
        self.norm1 = tools.batch_norm('batch_norm1', self.fc1)
        self.fc2 = tools.FC_layer('fc7', self.norm1, out_nodes=4096)
        self.norm2 = tools.batch_norm('batch_norm2', self.fc2)
        self.fc3 = tools.FC_layer('softmax_linear', self.norm2,
                                  out_nodes=self.n_classes, use_relu=False)