def build_network(self, x):
    """Build a 3-D AlexNet-style CNN and its classification head.

    Args:
        x: input tensor (assumed a 5-D volumetric batch -- TODO confirm shape
           against the caller).

    Side effects (attributes set on self):
        features: penultimate fully-connected activations.
        logits:   unnormalized class scores, shape [?, num_cls].
        probs:    softmax class probabilities, shape [?, num_cls].
        y_pred:   int32 predicted class ids, shape [?].
    """
    # Building network...
    with tf.variable_scope('CapsNet'):
        # Conv -> ReLU -> BatchNorm stem, interleaved with max-pooling.
        net = batch_normalization(
            relu(conv_layer_3d(x, kernel_size=7, stride=2, num_filters=96,
                               add_reg=self.conf.L2_reg, layer_name='CONV1')),
            training=self.is_training, scope='BN1')
        net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool1')
        net = batch_normalization(
            relu(conv_layer_3d(net, kernel_size=5, stride=2, num_filters=256,
                               add_reg=self.conf.L2_reg, layer_name='CONV2')),
            training=self.is_training, scope='BN2')
        net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool2')
        net = batch_normalization(
            relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=384,
                               add_reg=self.conf.L2_reg, layer_name='CONV3')),
            training=self.is_training, scope='BN3')
        net = batch_normalization(
            relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=384,
                               add_reg=self.conf.L2_reg, layer_name='CONV4')),
            training=self.is_training, scope='BN4')
        net = batch_normalization(
            relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=256,
                               add_reg=self.conf.L2_reg, layer_name='CONV5')),
            training=self.is_training, scope='BN5')
        net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool3')

        # Fully-connected classification head with dropout regularization.
        layer_flat = flatten(net)
        net = relu(fc_layer(layer_flat, num_units=200,
                            add_reg=self.conf.L2_reg, layer_name='FC1'))
        net = dropout(net, self.conf.dropout_rate, training=self.is_training)
        net = relu(fc_layer(net, num_units=75,
                            add_reg=self.conf.L2_reg, layer_name='FC2'))
        net = dropout(net, self.conf.dropout_rate, training=self.is_training)
        self.features = net
        self.logits = fc_layer(net, num_units=self.conf.num_cls,
                               add_reg=self.conf.L2_reg, layer_name='FC3')  # [?, num_cls]
        self.probs = tf.nn.softmax(self.logits)  # [?, num_cls]
        # tf.to_int32 is deprecated; tf.cast is the supported equivalent.
        self.y_pred = tf.cast(tf.argmax(self.probs, 1), tf.int32)
def build_network(self, x):
    """Build a 3-D ResNet: stem conv, bottleneck groups, pooling, FC head.

    Args:
        x: input tensor (assumed a 5-D volumetric batch -- TODO confirm shape
           against the caller).

    Side effects (attributes set on self):
        logits: unnormalized class scores, shape [?, num_cls].
        probs:  softmax class probabilities, shape [?, num_cls].
        y_pred: int32 predicted class ids, shape [?].
    """
    # Building network...
    with tf.variable_scope('ResNet'):
        # Stem: conv -> BN -> ReLU.
        net = conv_layer_3d(x, num_filters=64, kernel_size=4, stride=1,
                            add_reg=self.conf.L2_reg, layer_name='CONV0')
        net = relu(batch_normalization(net, training=self.is_training, scope='BN1'))
        # net = max_pool_3d(net, pool_size=3, stride=2, name='MaxPool0')
        # Create the bottleneck groups, each of which contains `num_blocks`
        # bottleneck blocks; only the first block in each group is flagged.
        for group_i, group in enumerate(self.groups):
            for block_i in range(group.num_blocks):
                block_name = 'group_%d/block_%d' % (group_i, block_i)
                net = self.bottleneck_block(net, group, block_name,
                                            is_first_block=(block_i == 0))
        # Global pooling + fully-connected classification head.
        net = average_pool_3d(net, pool_size=2, stride=1, name='avg_pool')
        net = flatten(net)
        net = fc_layer(net, num_units=75, add_reg=self.conf.L2_reg, layer_name='Fc1')
        net = dropout(net, self.conf.dropout_rate, training=self.is_training)
        self.logits = fc_layer(net, num_units=self.conf.num_cls,
                               add_reg=self.conf.L2_reg, layer_name='Fc2')  # [?, num_cls]
        self.probs = tf.nn.softmax(self.logits)  # [?, num_cls]
        # tf.to_int32 is deprecated; tf.cast is the supported equivalent.
        self.y_pred = tf.cast(tf.argmax(self.probs, 1), tf.int32)
def bottleneck_block(self, x, scope):
    """DenseNet-style bottleneck: two BN -> ReLU -> Conv -> Dropout stages.

    Stage 1 is a 1x1 convolution producing 4*k feature maps; stage 2 is a
    3x3 convolution producing k feature maps (k is the growth rate).
    NOTE(review): a sibling `build_network` calls `bottleneck_block` with a
    different signature -- presumably that belongs to a different class;
    confirm before unifying.

    Args:
        x:     input tensor.
        scope: variable-scope name for this block.

    Returns:
        Output tensor of the second stage.
    """
    with tf.variable_scope(scope):
        # (BN scope, number of filters, kernel size, conv layer name)
        stages = (
            ('BN1', 4 * self.k, 1, 'CONV1'),
            ('BN2', self.k, 3, 'CONV2'),
        )
        out = x
        for bn_scope, n_filters, ksize, conv_name in stages:
            out = batch_normalization(out, training=self.is_training, scope=bn_scope)
            out = relu(out)
            out = conv_layer(out, num_filters=n_filters, kernel_size=ksize,
                             layer_name=conv_name)
            out = dropout(out, rate=self.conf.dropout_rate, training=self.is_training)
        return out
def transition_layer(self, x, scope):
    """DenseNet-style transition: BN -> ReLU -> 1x1 Conv -> Dropout -> AvgPool.

    The 1x1 convolution compresses the channel count by the factor
    `self.conf.theta`; average pooling then halves the spatial resolution.

    Args:
        x:     input tensor.
        scope: variable-scope name for this layer.

    Returns:
        The downsampled, channel-compressed tensor.
    """
    with tf.variable_scope(scope):
        out = batch_normalization(x, training=self.is_training, scope='BN')
        out = relu(out)
        # Compress channels: keep theta * (current channel count) filters.
        compressed = int(out.get_shape().as_list()[-1] * self.conf.theta)
        out = conv_layer(out, num_filters=compressed, kernel_size=1,
                         layer_name='CONV')
        out = dropout(out, rate=self.conf.dropout_rate, training=self.is_training)
        return average_pool(out, pool_size=2, stride=2, name='AVG_POOL')
def build_network(self, x):
    """Build a 2-D AlexNet-like CNN (conv + LRN stem, FC head).

    Args:
        x: input tensor (assumed a 4-D image batch -- TODO confirm shape
           against the caller).

    Side effects (attributes set on self):
        net_grad: CONV5 activations, kept for gradient-based visualization
                  (presumably -- verify against its consumers).
        features: penultimate fully-connected activations.
        logits:   unnormalized class scores, shape [?, num_cls].
        prob:     softmax class probabilities, shape [?, num_cls].
        y_pred:   int32 predicted class ids, shape [?].
    """
    # Building network...
    with tf.variable_scope('CapsNet'):
        # First two conv stages use local response normalization + max pooling.
        net = lrn(relu(conv_layer(x, kernel_size=7, stride=2, num_filters=96,
                                  trainable=self.conf.trainable,
                                  add_reg=self.conf.L2_reg, layer_name='CONV1')))
        net = max_pool(net, pool_size=3, stride=2, padding='SAME', name='MaxPool1')
        net = lrn(relu(conv_layer(net, kernel_size=5, stride=2, num_filters=256,
                                  trainable=self.conf.trainable,
                                  add_reg=self.conf.L2_reg, layer_name='CONV2')))
        net = max_pool(net, pool_size=3, stride=2, padding='SAME', name='MaxPool2')
        net = relu(conv_layer(net, kernel_size=3, stride=1, num_filters=384,
                              trainable=self.conf.trainable,
                              add_reg=self.conf.L2_reg, layer_name='CONV3'))
        net = relu(conv_layer(net, kernel_size=3, stride=1, num_filters=384,
                              trainable=self.conf.trainable,
                              add_reg=self.conf.L2_reg, layer_name='CONV4'))
        # CONV5 activations are exposed on self for later use.
        self.net_grad = relu(conv_layer(net, kernel_size=3, stride=1, num_filters=256,
                                        trainable=self.conf.trainable,
                                        add_reg=self.conf.L2_reg, layer_name='CONV5'))
        net = max_pool(self.net_grad, pool_size=3, stride=2, padding='SAME',
                       name='MaxPool3')

        # Fully-connected classification head with dropout regularization.
        layer_flat = flatten(net)
        net = relu(fc_layer(layer_flat, num_units=512, add_reg=self.conf.L2_reg,
                            trainable=self.conf.trainable, layer_name='FC1'))
        net = dropout(net, self.conf.dropout_rate, training=self.is_training)
        net = relu(fc_layer(net, num_units=512, add_reg=self.conf.L2_reg,
                            trainable=self.conf.trainable, layer_name='FC2'))
        net = dropout(net, self.conf.dropout_rate, training=self.is_training)
        self.features = net
        self.logits = fc_layer(net, num_units=self.conf.num_cls,
                               add_reg=self.conf.L2_reg,
                               trainable=self.conf.trainable,
                               layer_name='FC3')  # [?, num_cls]
        self.prob = tf.nn.softmax(self.logits)  # [?, num_cls]
        # tf.to_int32 is deprecated; tf.cast is the supported equivalent.
        self.y_pred = tf.cast(tf.argmax(self.prob, 1), tf.int32)