# Example 1
    def build_network(self, x):
        """Build the 3-D ResNet graph: stem conv, bottleneck groups, and a
        pool/FC classification head; stores logits, probs and y_pred on self."""
        with tf.variable_scope('ResNet'):
            # Stem: one 3-D convolution followed by BN + ReLU.
            net = conv_layer_3d(x, num_filters=64, kernel_size=4, stride=1, add_reg=self.conf.L2_reg,
                                layer_name='CONV0')
            net = relu(batch_normalization(net, training=self.is_training, scope='BN1'))
            # net = max_pool_3d(net, pool_size=3, stride=2, name='MaxPool0')

            # Stack the bottleneck groups; only the first block of each group
            # is flagged so it can adapt the incoming dimensions.
            for group_i, group in enumerate(self.groups):
                for block_i in range(group.num_blocks):
                    net = self.bottleneck_block(
                        net, group,
                        'group_%d/block_%d' % (group_i, block_i),
                        is_first_block=(block_i == 0))

            # Head: average pool -> flatten -> FC -> dropout -> FC.
            net = average_pool_3d(net, pool_size=2, stride=1, name='avg_pool')
            net = flatten(net)
            net = fc_layer(net, num_units=75, add_reg=self.conf.L2_reg, layer_name='Fc1')
            net = dropout(net, self.conf.dropout_rate, training=self.is_training)
            self.logits = fc_layer(net, num_units=self.conf.num_cls, add_reg=self.conf.L2_reg, layer_name='Fc2')
            # [?, num_cls]
            self.probs = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.probs, 1))
# Example 2
 def build_network(self, x):
     """Build the 3-D AlexNet-style graph: five conv/BN stages (some followed
     by max-pooling), then two FC+dropout stages and a final logits layer;
     stores features, logits, probs and y_pred on self."""
     with tf.variable_scope('CapsNet'):
         # One tuple per conv stage: (kernel, stride, filters, pool_after).
         conv_specs = [(7, 2, 96, True),
                       (5, 2, 256, True),
                       (3, 1, 384, False),
                       (3, 1, 384, False),
                       (3, 1, 256, True)]
         net = x
         pool_i = 0
         for conv_i, (k, s, f, pool_after) in enumerate(conv_specs, start=1):
             # Conv -> ReLU -> BN, matching the original CONVi/BNi naming.
             net = relu(conv_layer_3d(net, kernel_size=k, stride=s, num_filters=f,
                                      add_reg=self.conf.L2_reg,
                                      layer_name='CONV%d' % conv_i))
             net = batch_normalization(net, training=self.is_training,
                                       scope='BN%d' % conv_i)
             if pool_after:
                 pool_i += 1
                 net = max_pool_3d(net, pool_size=3, stride=2, padding='SAME',
                                   name='MaxPool%d' % pool_i)
         net = flatten(net)
         # Two FC+dropout stages (FC1: 200 units, FC2: 75 units).
         for fc_i, units in enumerate((200, 75), start=1):
             net = relu(fc_layer(net, num_units=units, add_reg=self.conf.L2_reg,
                                 layer_name='FC%d' % fc_i))
             net = dropout(net, self.conf.dropout_rate, training=self.is_training)
         self.features = net
         self.logits = fc_layer(net, num_units=self.conf.num_cls,
                                add_reg=self.conf.L2_reg, layer_name='FC3')
         # [?, num_cls]
         self.probs = tf.nn.softmax(self.logits)
         # [?, num_cls]
         self.y_pred = tf.to_int32(tf.argmax(self.probs, 1))
# Example 3
    def build_network(self, x):
        """Build the DenseNet graph: stem conv, alternating dense blocks and
        transition layers, then a BN-ReLU-global-pool head; stores features,
        logits, prob and y_pred on self."""
        with tf.variable_scope('DenseNet'):
            # Stem convolution; 2*k filters follows the DenseNet convention
            # (k is the growth rate).
            net = conv_layer(x,
                             num_filters=2 * self.k,
                             kernel_size=7,
                             stride=2,
                             layer_name='Conv0')
            # net = max_pool(net, pool_size=3, stride=2, name='MaxPool0')

            # Alternate dense blocks and transition layers, one pair per level.
            for l in range(self.conf.num_levels):
                net = self.dense_block(net,
                                       num_BBs=self.conf.num_BBs[l],
                                       block_name='DB_' + str(l))
                # BUGFIX: the debug prints previously reported 'DB_{l+1}' and
                # 'TD_{l+1}' although the actual scopes are 'DB_l' / 'TB_l';
                # log the real scope names so output matches the graph.
                print('DB_{} shape: {}'.format(str(l), net.get_shape()))
                net = self.transition_layer(net, scope='TB_' + str(l))
                print('TB_{} shape: {}'.format(str(l), net.get_shape()))

            # net = self.dense_block(net, num_BBs=32, block_name='Dense_final')
            # print('DB_{} shape: {}'.format(str(l + 2), net.get_shape()))
            # Head: BN -> ReLU -> global average pool -> flatten -> FC.
            net = batch_normalization(net,
                                      training=self.is_training,
                                      scope='BN_out')
            net = relu(net)
            net = global_average_pool(net)
            net = flatten(net)
            self.features = net
            self.logits = fc_layer(net,
                                   num_units=self.conf.num_cls,
                                   add_reg=self.conf.L2_reg,
                                   layer_name='Fc1')
            # [?, num_cls]
            self.prob = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))
# Example 4
 def build_network(self, x):
     """Build the AlexNet-style 2-D graph: conv/ReLU stages (LRN after the
     first two), max-pooling, two FC+dropout stages and a logits layer;
     stores net_grad, features, logits, prob and y_pred on self."""
     with tf.variable_scope('CapsNet'):
         def conv_relu(inputs, k, s, f, name):
             # Shared conv + ReLU stage; trainability and L2 regularization
             # come from the run configuration.
             return relu(conv_layer(inputs,
                                    kernel_size=k,
                                    stride=s,
                                    num_filters=f,
                                    trainable=self.conf.trainable,
                                    add_reg=self.conf.L2_reg,
                                    layer_name=name))

         # First two stages use local response normalization + pooling.
         net = lrn(conv_relu(x, 7, 2, 96, 'CONV1'))
         net = max_pool(net, pool_size=3, stride=2, padding='SAME',
                        name='MaxPool1')
         net = lrn(conv_relu(net, 5, 2, 256, 'CONV2'))
         net = max_pool(net, pool_size=3, stride=2, padding='SAME',
                        name='MaxPool2')
         net = conv_relu(net, 3, 1, 384, 'CONV3')
         net = conv_relu(net, 3, 1, 384, 'CONV4')
         # Kept as an attribute — presumably so gradients w.r.t. this
         # activation can be taken elsewhere; TODO confirm with callers.
         self.net_grad = conv_relu(net, 3, 1, 256, 'CONV5')
         net = max_pool(self.net_grad, pool_size=3, stride=2, padding='SAME',
                        name='MaxPool3')
         net = flatten(net)
         # Two identical 512-unit FC+dropout stages.
         for fc_name in ('FC1', 'FC2'):
             net = relu(fc_layer(net,
                                 num_units=512,
                                 add_reg=self.conf.L2_reg,
                                 trainable=self.conf.trainable,
                                 layer_name=fc_name))
             net = dropout(net,
                           self.conf.dropout_rate,
                           training=self.is_training)
         self.features = net
         self.logits = fc_layer(net,
                                num_units=self.conf.num_cls,
                                add_reg=self.conf.L2_reg,
                                trainable=self.conf.trainable,
                                layer_name='FC3')
         # [?, num_cls]
         self.prob = tf.nn.softmax(self.logits)
         # [?, num_cls]
         self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))