# Shared TensorFlow 1.x imports/aliases assumed by the snippets below
# (the originals' import blocks are not shown in this section).
import tensorflow as tf
from tensorflow import layers

conv2d = tf.layers.conv2d
dense = tf.layers.dense
dropout = tf.layers.dropout
flatten = tf.layers.flatten
relu = tf.nn.relu
batch_normalization = tf.layers.batch_normalization
average_pooling2d = tf.layers.average_pooling2d
max_pooling2d = tf.layers.max_pooling2d


def _transition_block(self, ip, nb_filter):
    """Apply BatchNorm, ReLU, 1x1 Conv2D with compression, and AveragePooling2D.

    Args:
        ip: input tensor
        nb_filter: number of filters; scaled by ``self.compression``
            (calculated as 1 - reduction) to reduce the number of
            feature maps in the transition block.

    Returns:
        tensor, after applying batch_norm, relu, 1x1 conv and average pooling
    """
    x = batch_normalization(ip, **self.bn_kwargs)
    x = tf.nn.relu(x)
    x = conv2d(x, int(nb_filter * self.compression), (1, 1),
               kernel_initializer='he_normal', padding='same',
               use_bias=False, **self.conv_kwargs)
    x = average_pooling2d(x, (2, 2), strides=(2, 2),
                          data_format=self.data_format)
    return x
def mixed_5b(self, x):
    with tf.variable_scope("mixed_5b"):
        with tf.variable_scope("branch0"):
            x0 = self.basic_conv2d(x, 96, kernel_size=1, stride=1,
                                   padding="same", namescope="conv1",
                                   use_bias=False)
        with tf.variable_scope("branch1"):
            x1 = self.basic_conv2d(x, 48, kernel_size=1, stride=1,
                                   padding="same", namescope="conv1",
                                   use_bias=False)
            x1 = self.basic_conv2d(x1, 64, kernel_size=5, stride=1,
                                   padding="same", namescope="conv2",
                                   use_bias=False)
        with tf.variable_scope("branch2"):
            x2 = self.basic_conv2d(x, 64, kernel_size=1, stride=1,
                                   padding="same", namescope="conv1",
                                   use_bias=False)
            x2 = self.basic_conv2d(x2, 96, kernel_size=3, stride=1,
                                   padding="same", namescope="conv2",
                                   use_bias=False)
            x2 = self.basic_conv2d(x2, 96, kernel_size=3, stride=1,
                                   padding="same", namescope="conv3",
                                   use_bias=False)
        with tf.variable_scope("branch3"):
            x3 = layers.average_pooling2d(x, 3, strides=1, padding="same")
            x3 = self.basic_conv2d(x3, 64, kernel_size=1, stride=1,
                                   padding="same", namescope="conv1",
                                   use_bias=False)
        # Concatenate the four branches along the channel axis (NHWC).
        x = tf.concat([x0, x1, x2, x3], axis=-1)
        # Spatial dropout: noise_shape broadcasts one mask per feature map.
        # Note that layers.dropout is a no-op unless training=True is passed.
        x = layers.dropout(x, noise_shape=[None, 1, 1, None])
    return x
def build_network(self, img_placeholder):
    x = img_placeholder
    x = self.basic_conv2d(x, 32, kernel_size=3, stride=2, padding="same",
                          namescope="conv2d_1a", use_bias=False)
    x = self.basic_conv2d(x, 32, kernel_size=3, stride=1, padding="same",
                          namescope="conv2d_2a", use_bias=False)
    x = self.basic_conv2d(x, 64, kernel_size=3, stride=1, padding="same",
                          namescope="conv2d_2b", use_bias=False)
    x = self.basic_conv2d(x, 80, kernel_size=1, stride=1, padding="same",
                          namescope="conv2d_3b", use_bias=False)
    x = self.basic_conv2d(x, 192, kernel_size=3, stride=1, padding="same",
                          namescope="conv2d_4a", use_bias=False)
    x = self.mixed_5b(x)
    x = self.mixed_6a(x)
    x = self.mixed_7a(x)
    x = self.block8(x)
    x = self.basic_conv2d(x, 1536, kernel_size=1, stride=1, padding="same",
                          namescope="conv2d_7b", use_bias=False)
    x = layers.average_pooling2d(x, 8, strides=8, padding="valid")
    x = layers.flatten(x)
    logits = layers.dense(x, self.n_classes, name="last_linear")
    probs = tf.nn.softmax(logits)
    return logits, probs
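# `basic_conv2d` is called throughout mixed_5b and build_network above but is
# not defined in this section. A minimal sketch, assuming the usual
# conv -> batch-norm -> ReLU unit of Inception-ResNet-style networks; the
# signature mirrors the call sites above, and the `self.is_training` flag and
# batch-norm defaults are assumptions, not the original implementation.
def basic_conv2d(self, x, filters, kernel_size, stride, padding,
                 namescope, use_bias=False):
    with tf.variable_scope(namescope):
        x = tf.layers.conv2d(x, filters, kernel_size, strides=stride,
                             padding=padding, use_bias=use_bias)
        x = tf.layers.batch_normalization(x, training=self.is_training)
        return tf.nn.relu(x)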
def model(inputs, is_training, data_format, num_classes):
    net = conv2d(inputs=inputs, filters=96, kernel_size=[7, 7], strides=2,
                 padding='valid', data_format=data_format,
                 activation=tf.nn.relu, use_bias=True,
                 kernel_initializer=tf.variance_scaling_initializer(),
                 bias_initializer=tf.zeros_initializer())
    net = max_pooling2d(inputs=net, pool_size=[3, 3], strides=2,
                        data_format=data_format)
    net = fire_module(net, 16, 64, data_format)
    net = fire_module(net, 16, 64, data_format)
    net = fire_module(net, 32, 128, data_format)
    net = max_pooling2d(inputs=net, pool_size=[3, 3], strides=2,
                        data_format=data_format)
    net = fire_module(net, 32, 128, data_format)
    net = fire_module(net, 48, 192, data_format)
    net = fire_module(net, 48, 192, data_format)
    net = fire_module(net, 64, 256, data_format)
    net = max_pooling2d(inputs=net, pool_size=[3, 3], strides=2,
                        data_format=data_format)
    net = fire_module(net, 64, 256, data_format)
    net = dropout(inputs=net, rate=0.5, training=is_training)
    net = conv2d(inputs=net, filters=num_classes, kernel_size=[1, 1],
                 strides=1,
                 padding='valid',  # a 1x1 conv preserves spatial dims, so 'valid' and 'same' are equivalent here
                 data_format=data_format, activation=tf.nn.relu,
                 use_bias=True,
                 kernel_initializer=tf.initializers.random_normal(
                     mean=0.0, stddev=0.01),
                 bias_initializer=tf.zeros_initializer())
    net = average_pooling2d(inputs=net, pool_size=[13, 13], strides=1,
                            data_format=data_format)
    # TODO: squeezing axes [2, 3] assumes channels_first; use [1, 2] for
    # channels_last.
    logits = tf.squeeze(net, [2, 3])
    return logits
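# `fire_module` is called above but not defined in this section. A minimal
# sketch of the SqueezeNet fire module (a squeeze 1x1 conv followed by
# parallel 1x1 and 3x3 expand convs concatenated on the channel axis),
# matching the (inputs, squeeze_filters, expand_filters, data_format) call
# sites above; the initializer defaults here are assumptions.
def fire_module(inputs, squeeze_filters, expand_filters, data_format):
    axis = 1 if data_format == 'channels_first' else -1
    squeeze = conv2d(inputs=inputs, filters=squeeze_filters,
                     kernel_size=[1, 1], padding='valid',
                     data_format=data_format, activation=tf.nn.relu)
    expand_1x1 = conv2d(inputs=squeeze, filters=expand_filters,
                        kernel_size=[1, 1], padding='valid',
                        data_format=data_format, activation=tf.nn.relu)
    expand_3x3 = conv2d(inputs=squeeze, filters=expand_filters,
                        kernel_size=[3, 3], padding='same',
                        data_format=data_format, activation=tf.nn.relu)
    return tf.concat([expand_1x1, expand_3x3], axis=axis)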
def Model_Struct(self):
    conv1 = conv2d(self.input, filters=32, kernel_size=(3, 3), strides=2,
                   padding='SAME')
    b1 = batch_normalization(conv1)
    r1 = relu(b1)
    pool1 = max_pooling2d(r1, (3, 3), (2, 2), padding='same')  # 56x56, 32

    block1 = self.depthBlock(pool1, 1, 32, 64, 1)        # 56x56, 64
    block2 = self.depthBlock(block1, 2, 64, 128, 2)      # 28x28, 128
    block3 = self.depthBlock(block2, 3, 128, 128, 1)     # 28x28, 128
    block4 = self.depthBlock(block3, 4, 128, 256, 2)     # 14x14, 256
    block5 = self.depthBlock(block4, 5, 256, 256, 1)     # 14x14, 256
    block6 = self.depthBlock(block5, 6, 256, 512, 2)     # 7x7, 512
    block7_1 = self.depthBlock(block6, 71, 512, 512, 1)
    block7_2 = self.depthBlock(block7_1, 72, 512, 512, 1)
    block7_3 = self.depthBlock(block7_2, 73, 512, 512, 1)

    # Global average pooling.
    aver_pool = average_pooling2d(block7_3, (7, 7), (1, 1))
    # Flatten the feature maps to feed the fully connected layers.
    flatten = tf.layers.flatten(aver_pool)
    fc1 = dense(flatten, 700, tf.nn.relu)  # 700 units
    # tf.layers.dropout takes a drop rate, not a keep probability.
    fc1_dropout = dropout(fc1, rate=1.0 - self.keep_prob)
    fc2 = tf.layers.dense(fc1_dropout, 256, tf.nn.relu)
    fc2_dropout = dropout(fc2, rate=1.0 - self.keep_prob)
    fc3 = dense(fc2_dropout, 2, None)  # two-class logits
    return fc3
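# `depthBlock` is called above but not defined in this section. A minimal
# sketch, assuming the standard MobileNet-v1 depthwise-separable unit
# (3x3 depthwise conv -> BN -> ReLU -> 1x1 pointwise conv -> BN -> ReLU).
# The (inputs, block_id, in_channels, out_channels, stride) signature mirrors
# the call sites above; `in_channels` is implied by the input tensor and is
# kept only for signature compatibility. tf.keras.layers.DepthwiseConv2D
# stands in for the depthwise conv, since tf.layers provides none.
def depthBlock(self, inputs, block_id, in_channels, out_channels, stride):
    with tf.variable_scope('depth_block_%d' % block_id):
        # 3x3 depthwise conv: one filter per input channel, no channel mixing.
        x = tf.keras.layers.DepthwiseConv2D((3, 3), strides=stride,
                                            padding='same',
                                            use_bias=False)(inputs)
        x = batch_normalization(x)
        x = relu(x)
        # 1x1 pointwise conv projects to out_channels.
        x = conv2d(x, filters=out_channels, kernel_size=(1, 1), strides=1,
                   padding='same', use_bias=False)
        x = batch_normalization(x)
        return relu(x)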
def res_block(inputlayer):
    # Shortcut branch: 'valid' 2x2 average pooling with stride 1 shrinks
    # H and W by one each, so pad them back to match the main branch.
    pool_1 = layers.average_pooling2d(inputlayer, pool_size=(2, 2),
                                      strides=(1, 1))
    conv_2 = layers.conv2d(inputs=pool_1, filters=3, kernel_size=(3, 3),
                           padding='same')
    paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]])
    padded_conv_2 = tf.pad(conv_2, paddings, "CONSTANT")

    # Main branch: pre-activation BN -> ReLU -> conv, applied twice.
    batch_norm_1 = layers.batch_normalization(inputs=inputlayer)
    relu_1 = tf.nn.relu(batch_norm_1)
    conv_3 = layers.conv2d(inputs=relu_1, filters=3, kernel_size=(3, 3),
                           padding='same')
    batch_norm_2 = layers.batch_normalization(inputs=conv_3)
    relu_2 = tf.nn.relu(batch_norm_2)
    conv_4 = layers.conv2d(inputs=relu_2, filters=3, kernel_size=(3, 3),
                           padding='same')

    # Residual addition: both branches now have matching shapes.
    res_out_1 = padded_conv_2 + conv_4
    return res_out_1
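# A quick shape check for res_block: 'valid' 2x2 average pooling at stride 1
# shrinks a 32x32 input to 31x31, and tf.pad restores it to 32x32, so the
# residual add lines up. The input shape here is an arbitrary assumption.
inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
out = res_block(inputs)
print(out.shape)  # expected: (?, 32, 32, 3)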
def googlenet(X, config):
    """GoogLeNet implementation."""
    with tf.name_scope('googlenet'):
        block1 = conv2d(X, 64, 7, strides=2, padding='SAME')
        block1 = tf.nn.relu(block1)
        block1 = max_pooling2d(block1, 3, 2, padding='SAME')
        block1 = tf.nn.local_response_normalization(block1)

        block2 = conv2d(block1, 64, 1, padding='SAME')
        block2 = tf.nn.relu(block2)
        block2 = conv2d(block2, 192, 3, padding='SAME')
        block2 = tf.nn.relu(block2)
        block2 = tf.nn.local_response_normalization(block2)
        block2 = max_pooling2d(block2, 3, 2, padding='SAME')
        block2 = tf.nn.relu(block2)

        # inception x2
        block3 = inceptionBlock(block2, c1=64, c3_r=96, c3=128, c5_r=16,
                                c5=32, p3_r=32)
        block3 = inceptionBlock(block3, c1=128, c3_r=128, c3=192, c5_r=32,
                                c5=96, p3_r=64)
        block3 = max_pooling2d(block3, 3, 2, padding='SAME')

        # inception x5
        block4 = inceptionBlock(block3, c1=192, c3_r=96, c3=208, c5_r=16,
                                c5=48, p3_r=64)
        block4 = inceptionBlock(block4, c1=160, c3_r=112, c3=224, c5_r=24,
                                c5=64, p3_r=64)
        block4 = inceptionBlock(block4, c1=128, c3_r=128, c3=256, c5_r=24,
                                c5=64, p3_r=64)
        block4 = inceptionBlock(block4, c1=112, c3_r=144, c3=288, c5_r=32,
                                c5=64, p3_r=64)
        block4 = inceptionBlock(block4, c1=256, c3_r=160, c3=320, c5_r=32,
                                c5=128, p3_r=128)
        block4 = max_pooling2d(block4, 3, 2, padding='SAME')

        # inception x2 with average pooling
        block5 = inceptionBlock(block4, c1=256, c3_r=160, c3=320, c5_r=32,
                                c5=128, p3_r=128)
        block5 = inceptionBlock(block5, c1=384, c3_r=192, c3=384, c5_r=48,
                                c5=128, p3_r=128)
        block5 = average_pooling2d(block5, 7, 1, padding='SAME')
        block5 = dropout(block5, rate=0.4, training=config.training)

        logits = flatten(block5)
        logits = dense(logits, config.NUM_CLASSES)
    return logits, None
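# `inceptionBlock` is called above but not defined in this section. A minimal
# sketch of the GoogLeNet inception module implied by the keyword arguments:
# a 1x1 branch (c1), a 1x1 reduce -> 3x3 branch (c3_r, c3), a 1x1 reduce ->
# 5x5 branch (c5_r, c5), and a 3x3 max-pool -> 1x1 projection branch (p3_r),
# concatenated on the channel axis. ReLU placement here is an assumption.
def inceptionBlock(x, c1, c3_r, c3, c5_r, c5, p3_r):
    branch1 = tf.nn.relu(conv2d(x, c1, 1, padding='SAME'))
    branch2 = tf.nn.relu(conv2d(x, c3_r, 1, padding='SAME'))
    branch2 = tf.nn.relu(conv2d(branch2, c3, 3, padding='SAME'))
    branch3 = tf.nn.relu(conv2d(x, c5_r, 1, padding='SAME'))
    branch3 = tf.nn.relu(conv2d(branch3, c5, 5, padding='SAME'))
    branch4 = max_pooling2d(x, 3, 1, padding='SAME')
    branch4 = tf.nn.relu(conv2d(branch4, p3_r, 1, padding='SAME'))
    return tf.concat([branch1, branch2, branch3, branch4], axis=-1)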
def mean_pool(inputs, scope="scope", pool=(2, 2), stride=2, reuse=False):
    # `scope` and `reuse` are accepted for interface compatibility but are
    # not used by average_pooling2d.
    return average_pooling2d(inputs, pool, stride, padding='VALID')