Example #1
    def bottleneck_block(self, input_x, group, name, is_first_block=False):
        with tf.variable_scope(name):
            # 1x1 convolution responsible for reducing the depth
            with tf.variable_scope('conv_in'):
                stride = 2 if is_first_block else 1
                conv = conv_layer_3d(input_x, num_filters=group.bottleneck_size, add_reg=self.conf.L2_reg,
                                     kernel_size=1, stride=stride, layer_name='CONV')
                conv = relu(batch_normalization(conv, training=self.is_training, scope='BN'))

            with tf.variable_scope('conv_bottleneck'):
                conv = conv_layer_3d(conv, num_filters=group.bottleneck_size, kernel_size=3,
                                     add_reg=self.conf.L2_reg, layer_name='CONV')
                conv = relu(batch_normalization(conv, training=self.is_training, scope='BN'))

            # 1x1 convolution responsible for increasing the depth
            with tf.variable_scope('conv_out'):
                conv = conv_layer_3d(conv, num_filters=group.out_filters, kernel_size=1,
                                     add_reg=self.conf.L2_reg, layer_name='CONV')
                conv = batch_normalization(conv, training=self.is_training, scope='BN')

            # Shortcut connection. The first block of a group changes the spatial
            # and channel dimensions, so it needs a 1x1 projection; every other
            # block can use an identity shortcut.
            with tf.variable_scope('shortcut'):
                if is_first_block:
                    shortcut = conv_layer_3d(input_x, num_filters=group.out_filters, stride=2, kernel_size=1,
                                             add_reg=self.conf.L2_reg, layer_name='CONV_shortcut')
                    shortcut = batch_normalization(shortcut, training=self.is_training, scope='BN_shortcut')
                    assert (shortcut.get_shape().as_list() == conv.get_shape().as_list()), \
                        "Tensor sizes of the two branches do not match!"
                    res = shortcut + conv
                else:
                    # Check shapes before adding, so a mismatch fails with a
                    # readable message instead of a broadcasting error.
                    assert (input_x.get_shape().as_list() == conv.get_shape().as_list()), \
                        "Tensor sizes of the two branches do not match!"
                    res = conv + input_x
            return relu(res)
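
This block reads two attributes off the `group` argument, `bottleneck_size` and `out_filters`, which the snippet itself never defines. A minimal sketch of what such a group object might look like (the namedtuple name and field names are assumptions, not taken from the original project):

from collections import namedtuple

# Hypothetical container; only the attributes read by bottleneck_block matter.
BottleneckGroup = namedtuple('BottleneckGroup',
                             ['num_blocks', 'bottleneck_size', 'out_filters'])

# A ResNet-50-style stage: 3 blocks, 64 bottleneck channels, 256 output channels.
stage = BottleneckGroup(num_blocks=3, bottleneck_size=64, out_filters=256)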
Example #2
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('ResNet'):
            net = conv_layer_3d(x, num_filters=64, kernel_size=4, stride=1, add_reg=self.conf.L2_reg,
                                layer_name='CONV0')
            net = relu(batch_normalization(net, training=self.is_training, scope='BN1'))
            # net = max_pool_3d(net, pool_size=3, stride=2, name='MaxPool0')

            # Create the bottleneck groups, each of which contains `num_blocks` bottleneck blocks.
            for group_i, group in enumerate(self.groups):
                first_block = True
                for block_i in range(group.num_blocks):
                    block_name = 'group_%d/block_%d' % (group_i, block_i)
                    net = self.bottleneck_block(net, group, block_name, is_first_block=first_block)
                    first_block = False

            net = average_pool_3d(net, pool_size=2, stride=1, name='avg_pool')
            net = flatten(net)
            net = fc_layer(net, num_units=75, add_reg=self.conf.L2_reg, layer_name='Fc1')
            net = dropout(net, self.conf.dropout_rate, training=self.is_training)
            self.logits = fc_layer(net, num_units=self.conf.num_cls, add_reg=self.conf.L2_reg, layer_name='Fc2')
            # [?, num_cls]
            self.probs = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.probs, 1))
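
`build_network` iterates over `self.groups`, so the stage widths live entirely in that list. Assuming the `BottleneckGroup` sketch above, a ResNet-50-style configuration could look like this (the exact sizes are an assumption; the snippet does not show them):

# Hypothetical stage list, e.g. in __init__; the blocks-per-stage counts
# follow the standard ResNet-50 layout (3, 4, 6, 3).
self.groups = [
    BottleneckGroup(num_blocks=3, bottleneck_size=64, out_filters=256),
    BottleneckGroup(num_blocks=4, bottleneck_size=128, out_filters=512),
    BottleneckGroup(num_blocks=6, bottleneck_size=256, out_filters=1024),
    BottleneckGroup(num_blocks=3, bottleneck_size=512, out_filters=2048),
]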
Example #3
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('DenseNet'):
            net = conv_layer(x,
                             num_filters=2 * self.k,
                             kernel_size=7,
                             stride=2,
                             layer_name='Conv0')
            # net = max_pool(net, pool_size=3, stride=2, name='MaxPool0')

            for l in range(self.conf.num_levels):
                net = self.dense_block(net,
                                       num_BBs=self.conf.num_BBs[l],
                                       block_name='DB_' + str(l))
                print('DB_{} shape: {}'.format(l, net.get_shape()))
                net = self.transition_layer(net, scope='TB_' + str(l))
                print('TB_{} shape: {}'.format(l, net.get_shape()))

            # net = self.dense_block(net, num_BBs=32, block_name='Dense_final')
            # print('DB_{} shape: {}'.format(str(l + 2), net.get_shape()))
            net = batch_normalization(net,
                                      training=self.is_training,
                                      scope='BN_out')
            net = relu(net)
            net = global_average_pool(net)
            net = flatten(net)
            self.features = net
            self.logits = fc_layer(net,
                                   num_units=self.conf.num_cls,
                                   add_reg=self.conf.L2_reg,
                                   layer_name='Fc1')
            # [?, num_cls]
            self.prob = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))
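
The snippet calls `self.dense_block` but does not show it. In a DenseNet, a dense block chains bottleneck blocks (see Example #5 below) and concatenates each block's k new feature maps onto its input along the channel axis; a minimal sketch under that assumption:

    def dense_block(self, x, num_BBs, block_name):
        with tf.variable_scope(block_name):
            for i in range(num_BBs):
                # Each bottleneck emits k feature maps; concatenating along the
                # channel axis gives every block access to all earlier features.
                y = self.bottleneck_block(x, scope='BB_' + str(i))
                x = tf.concat([x, y], axis=-1)
            return x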
Example #4
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('CapsNet'):
            # Note the order here: BN is applied after ReLU (conv -> relu -> bn),
            # the reverse of the conv -> bn -> relu order used in Examples #1-#2.
            net = batch_normalization(relu(conv_layer_3d(x, kernel_size=7, stride=2, num_filters=96,
                                                         add_reg=self.conf.L2_reg, layer_name='CONV1')),
                                      training=self.is_training, scope='BN1')
            net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool1')
            net = batch_normalization(relu(conv_layer_3d(net, kernel_size=5, stride=2, num_filters=256,
                                                         add_reg=self.conf.L2_reg, layer_name='CONV2')),
                                      training=self.is_training, scope='BN2')
            net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool2')
            net = batch_normalization(relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=384,
                                                         add_reg=self.conf.L2_reg, layer_name='CONV3')),
                                      training=self.is_training, scope='BN3')
            net = batch_normalization(relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=384,
                                                         add_reg=self.conf.L2_reg, layer_name='CONV4')),
                                      training=self.is_training, scope='BN4')
            net = batch_normalization(relu(conv_layer_3d(net, kernel_size=3, stride=1, num_filters=256,
                                                         add_reg=self.conf.L2_reg, layer_name='CONV5')),
                                      training=self.is_training, scope='BN5')
            net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME', name='MaxPool3')
            layer_flat = flatten(net)
            net = relu(fc_layer(layer_flat, num_units=200, add_reg=self.conf.L2_reg, layer_name='FC1'))
            net = dropout(net, self.conf.dropout_rate, training=self.is_training)
            net = relu(fc_layer(net, num_units=75, add_reg=self.conf.L2_reg, layer_name='FC2'))
            net = dropout(net, self.conf.dropout_rate, training=self.is_training)
            self.features = net
            self.logits = fc_layer(net, num_units=self.conf.num_cls, add_reg=self.conf.L2_reg, layer_name='FC3')
            # [?, num_cls]
            self.probs = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.probs, 1))
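
Every example reads hyperparameters off `self.conf`. A hypothetical stub covering the fields these snippets touch (the names match the code; the values are placeholders, not taken from the project):

class Config(object):
    L2_reg = True          # add an L2 weight-decay term in conv/fc layers
    dropout_rate = 0.5     # rate passed to the dropout layers
    num_cls = 2            # number of output classes
    trainable = True       # whether conv/fc weights are trainable (Example #7)
    # DenseNet-only fields (Examples #3, #5, #6):
    num_levels = 3         # number of dense blocks
    num_BBs = [6, 12, 24]  # bottleneck blocks per dense block
    theta = 0.5            # transition-layer compression factor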
Example #5
    def bottleneck_block(self, x, scope):
        with tf.variable_scope(scope):
            x = batch_normalization(x, training=self.is_training, scope='BN1')
            x = relu(x)
            x = conv_layer(x,
                           num_filters=4 * self.k,
                           kernel_size=1,
                           layer_name='CONV1')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)

            x = batch_normalization(x, training=self.is_training, scope='BN2')
            x = relu(x)
            x = conv_layer(x,
                           num_filters=self.k,
                           kernel_size=3,
                           layer_name='CONV2')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)
            return x
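
This is the DenseNet-B bottleneck: the 1x1 convolution widens to 4k channels so that the following 3x3 convolution, which produces the k maps actually kept, stays cheap. The channel arithmetic for a dense block built from it (a worked example; k and the sizes are chosen arbitrarily):

# With growth rate k, each bottleneck appends k maps, so a dense block of
# num_BBs bottlenecks maps C input channels to C + num_BBs * k.
k, in_channels, num_BBs = 12, 24, 6
out_channels = in_channels + num_BBs * k  # 24 + 6 * 12 = 96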
Example #6
    def transition_layer(self, x, scope):
        with tf.variable_scope(scope):
            x = batch_normalization(x, training=self.is_training, scope='BN')
            x = relu(x)
            x = conv_layer(x,
                           num_filters=int(x.get_shape().as_list()[-1] *
                                           self.conf.theta),
                           kernel_size=1,
                           layer_name='CONV')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)
            x = average_pool(x, pool_size=2, stride=2, name='AVG_POOL')
            return x
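
The transition layer compresses the channel count by the factor theta (the DenseNet-C compression) and halves each spatial dimension with the 2x2/stride-2 average pool. A quick worked example (the values are illustrative):

theta, in_channels = 0.5, 96
out_channels = int(in_channels * theta)  # 96 -> 48 channels
# spatial dims: H x W -> H/2 x W/2 from the stride-2 average pool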
Example #7
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('CapsNet'):
            net = lrn(
                relu(
                    conv_layer(x,
                               kernel_size=7,
                               stride=2,
                               num_filters=96,
                               trainable=self.conf.trainable,
                               add_reg=self.conf.L2_reg,
                               layer_name='CONV1')))
            net = max_pool(net,
                           pool_size=3,
                           stride=2,
                           padding='SAME',
                           name='MaxPool1')
            net = lrn(
                relu(
                    conv_layer(net,
                               kernel_size=5,
                               stride=2,
                               num_filters=256,
                               trainable=self.conf.trainable,
                               add_reg=self.conf.L2_reg,
                               layer_name='CONV2')))
            net = max_pool(net,
                           pool_size=3,
                           stride=2,
                           padding='SAME',
                           name='MaxPool2')
            net = relu(
                conv_layer(net,
                           kernel_size=3,
                           stride=1,
                           num_filters=384,
                           trainable=self.conf.trainable,
                           add_reg=self.conf.L2_reg,
                           layer_name='CONV3'))
            net = relu(
                conv_layer(net,
                           kernel_size=3,
                           stride=1,
                           num_filters=384,
                           trainable=self.conf.trainable,
                           add_reg=self.conf.L2_reg,
                           layer_name='CONV4'))
            # Kept as an attribute, presumably so gradients w.r.t. this
            # activation can be fetched later (e.g. Grad-CAM-style maps).
            self.net_grad = relu(
                conv_layer(net,
                           kernel_size=3,
                           stride=1,
                           num_filters=256,
                           trainable=self.conf.trainable,
                           add_reg=self.conf.L2_reg,
                           layer_name='CONV5'))
            net = max_pool(self.net_grad,
                           pool_size=3,
                           stride=2,
                           padding='SAME',
                           name='MaxPool3')
            layer_flat = flatten(net)
            net = relu(
                fc_layer(layer_flat,
                         num_units=512,
                         add_reg=self.conf.L2_reg,
                         trainable=self.conf.trainable,
                         layer_name='FC1'))
            net = dropout(net,
                          self.conf.dropout_rate,
                          training=self.is_training)
            net = relu(
                fc_layer(net,
                         num_units=512,
                         add_reg=self.conf.L2_reg,
                         trainable=self.conf.trainable,
                         layer_name='FC2'))
            net = dropout(net,
                          self.conf.dropout_rate,
                          training=self.is_training)
            self.features = net
            self.logits = fc_layer(net,
                                   num_units=self.conf.num_cls,
                                   add_reg=self.conf.L2_reg,
                                   trainable=self.conf.trainable,
                                   layer_name='FC3')
            # [?, num_cls]
            self.prob = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))
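
One portability note that applies to every example here: `tf.to_int32` was deprecated in later TensorFlow 1.x releases. The equivalent spelling, which also works under `tf.compat.v1` in TensorFlow 2.x, is:

# Drop-in replacement for the deprecated tf.to_int32 wrapper.
self.y_pred = tf.cast(tf.argmax(self.prob, axis=1), tf.int32)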