def discriminator_base(inputs):
    """DCGAN-style discriminator trunk: four stride-2 convolutions with
    leaky-ReLU activations, followed by a 2x2 max pool. Batch norm calls
    are commented out pending a training flag."""
    with tf.name_scope('discriminator_base'):
        # net = layers.batch_norm(inputs, training, name='bn1')
        net = layers.conv2d_layer(1, inputs, [5, 5, 16],
                                  lambda x: layers.lrelu(x, 0.2), stride=2)
        # net = layers.batch_norm(net, training, name='bn2')
        net = layers.conv2d_layer(2, net, [5, 5, 32],
                                  lambda x: layers.lrelu(x, 0.2), stride=2)
        # net = layers.batch_norm(net, training, name='bn3')
        net = layers.conv2d_layer(3, net, [5, 5, 64],
                                  lambda x: layers.lrelu(x, 0.2), stride=2)
        # net = layers.batch_norm(net, training, name='bn4')
        net = layers.conv2d_layer(4, net, [5, 5, 128],
                                  lambda x: layers.lrelu(x, 0.2), stride=2)
        net = layers.max_pool2d(net, [2, 2])
        # net = layers.batch_norm(net, training, name='bn5')
        return net
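
# A logit head is not part of the snippet above; the sketch below shows one
# plausible way to attach it, assuming TF 1.x APIs (tf.layers.flatten and
# tf.layers.dense). The head itself (flatten plus a single dense unit) is
# an assumption for illustration, not the original author's code.
def discriminator(inputs):
    net = discriminator_base(inputs)                  # shared conv trunk
    net = tf.layers.flatten(net)                      # (N, H, W, C) -> (N, H*W*C)
    logits = tf.layers.dense(net, 1, name='logits')   # single real/fake score
    return logits
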
def __init__(self):
    conv_stride = [4, 4]
    # AlexNet-style feature extractor: five convolutions (LRN on the first
    # two), three 3x3/stride-2 max pools, then three fully connected layers.
    self.conv1 = layers.conv2d(filters=96, kernel=[11, 11], padding='SAME',
                               name='conv1', activation='relu',
                               normalization='local_response_normalization',
                               stride=conv_stride)
    self.conv2 = layers.conv2d(filters=256, kernel=[5, 5], padding='SAME',
                               name='conv2', activation='relu',
                               normalization='local_response_normalization',
                               stride=[1, 1])
    self.conv3 = layers.conv2d(filters=384, kernel=[3, 3], padding='SAME',
                               name='conv3', activation='relu', stride=[1, 1])
    self.conv4 = layers.conv2d(filters=384, kernel=[3, 3], padding='SAME',
                               name='conv4', activation='relu', stride=[1, 1])
    self.conv5 = layers.conv2d(filters=256, kernel=[3, 3], padding='SAME',
                               name='conv5', activation='relu', stride=[1, 1])
    self.fc6 = layers.dense(4096, activation='relu', dropout=True, name='fc6')
    self.fc7 = layers.dense(4096, activation='relu', dropout=True, name='fc7')
    self.fc8 = layers.dense(1000, activation='relu', name='fc8')
    self.max_pool1 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
    self.max_pool2 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
    self.max_pool5 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
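
# Rough shape sanity check for the strides above, assuming AlexNet's
# customary 227x227x3 input and SAME padding throughout (output spatial
# size = ceil(input / stride)); the input size is an assumption, not
# stated in the snippet:
#   conv1 (11x11, /4) -> 57x57x96;   pool1 (3x3, /2) -> 29x29x96
#   conv2 (5x5,  /1)  -> 29x29x256;  pool2 (3x3, /2) -> 15x15x256
#   conv3/conv4 -> 15x15x384;  conv5 -> 15x15x256;  pool5 -> 8x8x256
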
def __call__(self, x, is_training=True, reuse=False, *args, **kwargs):
    with tf.variable_scope(self.__class__.__name__) as vs:
        if reuse:
            vs.reuse_variables()
        conv_params = {'is_training': is_training,
                       'activation_': 'relu',
                       'normalization': 'batch'}
        # VGG-style trunk; l1-l3 are tapped as local feature maps for the
        # attention module further down.
        x = conv_block(x, 64, **conv_params, dropout_rate=0.3)
        x = conv_block(x, 64, **conv_params, dropout_rate=0.3)
        x = conv_block(x, 128, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 128, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 256, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 256, **conv_params)
        l1 = x
        x = max_pool2d(x)
        x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 512, **conv_params)
        l2 = x
        x = max_pool2d(x)
        x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 512, **conv_params, dropout_rate=0.4)
        x = conv_block(x, 512, **conv_params)
        l3 = x
        x = max_pool2d(x)
        x = conv_block(x, 512, **conv_params, sampling='pool')
        x = conv_block(x, 512, **conv_params, sampling='pool')
        x = flatten(x)
        # Global feature g attends over the local maps l1-l3.
        g = dense(x, 512, activation_='relu')
        x, attentions = attention_module([l1, l2, l3], g)
        x = dense(x, self.nb_classes)
        return x, attentions
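
# Hedged usage sketch: because the variable scope is keyed on the class name
# and a reuse flag is exposed, a second call can share weights for
# evaluation. The class name and constructor signature below are
# placeholders; only __call__ appears in the snippet above.
model = AttentionVGG(nb_classes=10)                   # hypothetical constructor
logits, att = model(train_images, is_training=True)
val_logits, _ = model(val_images, is_training=False, reuse=True)
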
def _siamese(self, img, scope, reuse=False):
    """One tower of the siamese network: five batch-normalized convolutions
    with dropout between blocks, then a sigmoid-activated embedding layer.
    Intermediate max pools are disabled in favor of strided convolutions."""
    with tf.variable_scope(scope, reuse=reuse):
        x = img
        h1 = conv2d(x, 64, kernel_size=10, strides=1, name='conv1',
                    l2reg=1e-4, training=self.is_training, use_bn=True)
        h1 = max_pool2d(h1, kernel_size=2, strides=2, name='pool1')
        h1 = tf.layers.dropout(h1, rate=.4, training=self.is_training)
        h2 = conv2d(h1, 128, kernel_size=7, strides=2, name='conv2',
                    l2reg=1e-5, training=self.is_training, use_bn=True)
        # h2 = max_pool2d(h2, kernel_size=2, strides=2, name='pool2')
        h2 = tf.layers.dropout(h2, rate=.4, training=self.is_training)
        h3 = conv2d(h2, 256, kernel_size=4, strides=2, name='conv3',
                    l2reg=1e-5, training=self.is_training, use_bn=True)
        # h3 = max_pool2d(h3, kernel_size=2, strides=2, name='pool3')
        h3 = tf.layers.dropout(h3, rate=.4, training=self.is_training)
        h4 = conv2d(h3, 512, kernel_size=4, strides=2, name='conv4',
                    l2reg=1e-5, training=self.is_training, use_bn=True)
        # h4 = max_pool2d(h4, kernel_size=2, strides=2, name='pool4')
        h4 = tf.layers.dropout(h4, rate=.4, training=self.is_training)
        h5 = conv2d(h4, 1024, kernel_size=3, strides=2, name='conv5',
                    l2reg=1e-5, training=self.is_training, use_bn=True)
        h5_flat = tf.layers.flatten(h5, name='flatten1')
        out = fc(h5_flat, 2056, name='out', activation_fn=tf.nn.sigmoid,
                 l2reg=1e-6)
        return out
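
# Hedged sketch of how the two towers might be wired together: passing the
# same scope with reuse=True ties the weights, and an L1 distance between
# embeddings feeds a single match logit (the Koch-style siamese head).
# Everything past the two _siamese calls is an assumption for illustration,
# not taken from the snippet above.
emb_a = self._siamese(img_a, scope='siamese', reuse=False)
emb_b = self._siamese(img_b, scope='siamese', reuse=True)
dist = tf.abs(emb_a - emb_b)                          # component-wise L1 distance
match_logit = tf.layers.dense(dist, 1, name='match_logit')
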
conv4 = layers.conv2d(filters=384, kernel=[3, 3], padding='SAME',
                      name='conv4', activation='relu', stride=[1, 1])
conv5 = layers.conv2d(filters=256, kernel=[3, 3], padding='SAME',
                      name='conv5', activation='relu', stride=[1, 1])
fc6 = layers.dense(4096, activation='relu', dropout=True, name='fc6')
fc7 = layers.dense(4096, activation='relu', dropout=True, name='fc7')
fc8 = layers.dense(1000, activation='relu', name='fc8')
max_pool1 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
max_pool2 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])
max_pool5 = layers.max_pool2d(ksize=[3, 3], stride=[2, 2])


def Forward(x):
    out = conv1.forward(x)
    # print('layer 1:', out.shape)
    out = max_pool1.forward(out)
    # print('layer max 1:', out.shape)
    out = conv2.forward(out)
    # print('layer conv2:', out.shape)
    out = max_pool2.forward(out)
    out = conv3.forward(out)
    out = conv4.forward(out)
    out = conv5.forward(out)
    out = max_pool5.forward(out)
    # Flatten before the dense layers; the exact flatten helper depends on
    # the surrounding framework, so a numpy-style reshape is assumed here.
    out = out.reshape(out.shape[0], -1)
    out = fc6.forward(out)
    out = fc7.forward(out)
    out = fc8.forward(out)
    return out
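
# Minimal usage sketch, assuming the custom `layers` objects operate on
# NHWC float arrays (the framework itself is not shown here) and that the
# input is AlexNet's customary 227x227 size; both are assumptions.
import numpy as np

x = np.random.rand(1, 227, 227, 3).astype(np.float32)
logits = Forward(x)   # expected shape: (1, 1000)
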