Example #1
    def bottleneck_block(self, input_x, group, name, is_first_block=False):
        with tf.variable_scope(name):
            # 1x1 convolution responsible for reducing the depth; the first
            # block of each group also downsamples with stride 2
            with tf.variable_scope('conv_in'):
                stride = 2 if is_first_block else 1
                conv = conv_layer(input_x,
                                  num_filters=group.bottleneck_size,
                                  add_reg=self.conf.L2_reg,
                                  kernel_size=1,
                                  stride=stride,
                                  layer_name='CONV')
                conv = relu(
                    batch_normalization(conv, training=self.is_training,
                                        scope='BN'))

            with tf.variable_scope('conv_bottleneck'):
                conv = conv_layer(conv,
                                  num_filters=group.bottleneck_size,
                                  kernel_size=3,
                                  add_reg=self.conf.L2_reg,
                                  layer_name='CONV')
                conv = relu(
                    batch_normalization(conv, training=self.is_training,
                                        scope='BN'))

            # 1x1 convolution responsible for increasing the depth
            with tf.variable_scope('conv_out'):
                conv = conv_layer(conv,
                                  num_filters=group.out_filters,
                                  kernel_size=1,
                                  add_reg=self.conf.L2_reg,
                                  layer_name='CONV')
                conv = batch_normalization(conv, training=self.is_training, scope='BN')

            # shortcut connection: a 1x1 projection when the block changes the
            # spatial size or depth, otherwise an identity mapping
            with tf.variable_scope('shortcut'):
                if is_first_block:
                    shortcut = conv_layer(input_x,
                                          num_filters=group.out_filters,
                                          stride=2,
                                          kernel_size=1,
                                          add_reg=self.conf.L2_reg,
                                          layer_name='CONV_shortcut')
                    shortcut = batch_normalization(shortcut,
                                                   training=self.is_training,
                                                   scope='BN_shortcut')
                    assert (shortcut.get_shape().as_list() == conv.get_shape().as_list()), \
                        "Tensor sizes of the two branches are not matched!"
                    res = shortcut + conv
                else:
                    assert (input_x.get_shape().as_list() == conv.get_shape().as_list()), \
                        "Tensor sizes of the two branches are not matched!"
                    res = conv + input_x
            return relu(res)
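
These snippets lean on thin helper wrappers that are not shown. A minimal sketch of what `relu`, `batch_normalization`, and `conv_layer` might look like over the TF 1.x layer API (the regularization weight and exact signatures are assumptions, not the source's definitions):

import tensorflow as tf

def relu(x):
    return tf.nn.relu(x)

def batch_normalization(x, training, scope='BN'):
    # `training` switches between batch statistics (train)
    # and moving averages (inference)
    return tf.layers.batch_normalization(x, training=training, name=scope)

def conv_layer(x, num_filters, kernel_size, stride=1, add_reg=False,
               layer_name='CONV'):
    # SAME padding keeps the spatial size at stride 1 and halves it at
    # stride 2; `add_reg` attaches an L2 penalty to the kernel (the 1e-4
    # weight below is a placeholder assumption)
    reg = tf.contrib.layers.l2_regularizer(1e-4) if add_reg else None
    return tf.layers.conv2d(x, filters=num_filters, kernel_size=kernel_size,
                            strides=stride, padding='same',
                            kernel_regularizer=reg, name=layer_name)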
Example #2
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('DenseNet'):
            net = conv_layer(x,
                             num_filters=2 * self.k,
                             kernel_size=7,
                             stride=2,
                             layer_name='Conv0')
            # net = max_pool(net, pool_size=3, stride=2, name='MaxPool0')

            for l in range(self.conf.num_levels):
                net = self.dense_block(net,
                                       num_BBs=self.conf.num_BBs[l],
                                       block_name='DB_' + str(l))
                print('DB_{} shape: {}'.format(l, net.get_shape()))
                net = self.transition_layer(net, scope='TB_' + str(l))
                print('TB_{} shape: {}'.format(l, net.get_shape()))

            # net = self.dense_block(net, num_BBs=32, block_name='Dense_final')
            # print('DB_{} shape: {}'.format(str(l + 2), net.get_shape()))
            net = batch_normalization(net,
                                      training=self.is_training,
                                      scope='BN_out')
            net = relu(net)
            net = global_average_pool(net)
            net = flatten(net)
            self.features = net
            self.logits = fc_layer(net,
                                   num_units=self.conf.num_cls,
                                   add_reg=self.conf.L2_reg,
                                   layer_name='Fc1')
            # [?, num_cls]
            self.prob = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))
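
To train on these outputs under TF 1.x one would typically add a cross-entropy loss, fold in the L2 penalties registered by the `add_reg` layers, and run the batch-norm update ops alongside the optimizer. A hedged sketch of a companion method (the name `build_loss` and the Adam learning rate are illustrative, not from the source):

    def build_loss(self, labels):
        # cross-entropy over the logits; `labels` holds integer class ids
        cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels, logits=self.logits))
        # fold in the L2 penalties registered by layers built with add_reg=True
        l2_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        self.loss = cross_entropy + (tf.add_n(l2_losses) if l2_losses else 0.0)
        # batch-norm moving averages live in UPDATE_OPS and must run
        # together with the train step
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.train_op = tf.train.AdamOptimizer(1e-3).minimize(self.loss)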
Example #3
    def bottleneck_block(self, x, scope):
        with tf.variable_scope(scope):
            # BN-ReLU-Conv(1x1): the DenseNet-B bottleneck, reducing the input
            # to 4k feature maps before the costly 3x3 convolution
            x = batch_normalization(x, training=self.is_training, scope='BN1')
            x = relu(x)
            x = conv_layer(x,
                           num_filters=4 * self.k,
                           kernel_size=1,
                           layer_name='CONV1')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)

            # BN-ReLU-Conv(3x3): produces the k new feature maps (growth rate)
            x = batch_normalization(x, training=self.is_training, scope='BN2')
            x = relu(x)
            x = conv_layer(x,
                           num_filters=self.k,
                           kernel_size=3,
                           layer_name='CONV2')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)
            return x
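
Example #2 calls `self.dense_block`, which these snippets don't define. A plausible sketch that composes the bottleneck block above with the defining DenseNet connectivity, concatenating each block's k new feature maps onto the running tensor along the channel axis (the `BB_` scope name is an assumption):

    def dense_block(self, x, num_BBs, block_name):
        with tf.variable_scope(block_name):
            for i in range(num_BBs):
                # each bottleneck emits k feature maps; concatenating them
                # with its input gives every layer access to all earlier ones
                out = self.bottleneck_block(x, scope='BB_' + str(i))
                x = tf.concat([x, out], axis=-1)
            return x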
Example #4
    def transition_layer(self, x, scope):
        with tf.variable_scope(scope):
            x = batch_normalization(x, training=self.is_training, scope='BN')
            x = relu(x)
            # 1x1 convolution compresses the depth by the factor theta
            x = conv_layer(x,
                           num_filters=int(x.get_shape().as_list()[-1] *
                                           self.conf.theta),
                           kernel_size=1,
                           layer_name='CONV')
            x = dropout(x,
                        rate=self.conf.dropout_rate,
                        training=self.is_training)
            x = average_pool(x, pool_size=2, stride=2, name='AVG_POOL')
            return x
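
With theta = 0.5, a 32x32x256 input leaves this layer as 16x16x128: the 1x1 convolution halves the depth and the 2x2 average pool halves each spatial dimension. The pooling and dropout helpers could be as thin as the following sketch (assumed NHWC tensors; not the source's definitions):

def average_pool(x, pool_size, stride, name):
    return tf.layers.average_pooling2d(x, pool_size=pool_size,
                                       strides=stride, name=name)

def global_average_pool(x):
    # average over the spatial dimensions of an NHWC tensor;
    # keepdims makes the following flatten() yield [batch, channels]
    return tf.reduce_mean(x, axis=[1, 2], keepdims=True)

def dropout(x, rate, training):
    # no-op at inference; zeroes activations with probability `rate` in training
    return tf.layers.dropout(x, rate=rate, training=training)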
Example #5
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('ResNet'):
            net = conv_layer(x,
                             num_filters=64,
                             kernel_size=7,
                             stride=1,
                             add_reg=self.conf.L2_reg,
                             layer_name='CONV0')
            net = relu(
                batch_normalization(net,
                                    training=self.is_training,
                                    scope='BN1'))
            # net = max_pool(net, pool_size=3, stride=2, name='MaxPool0')

            # Create the bottleneck groups, each of which contains `num_blocks` bottleneck blocks.
            for group_i, group in enumerate(self.groups):
                for block_i in range(group.num_blocks):
                    block_name = 'group_%d/block_%d' % (group_i, block_i)
                    # only the first block of a group downsamples and projects
                    net = self.bottleneck_block(net,
                                                group,
                                                block_name,
                                                is_first_block=(block_i == 0))

            net = global_average_pool(net)
            net = flatten(net)
            self.features = net
            self.logits = fc_layer(net,
                                   num_units=self.conf.num_cls,
                                   add_reg=self.conf.L2_reg,
                                   layer_name='Fc1')
            # [?, num_cls]
            self.prob = tf.nn.softmax(self.logits)
            # [?, num_cls]
            self.y_pred = tf.to_int32(tf.argmax(self.prob, 1))
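
`self.groups` is not defined in these snippets; `bottleneck_block` reads `bottleneck_size` and `out_filters` from each entry, and the loop above reads `num_blocks`, so a namedtuple is one plausible shape for it. The values below are the standard ResNet-50 layout, an assumption rather than the source's config:

from collections import namedtuple

BottleneckGroup = namedtuple('BottleneckGroup',
                             ['num_blocks', 'bottleneck_size', 'out_filters'])

# ResNet-50: 3/4/6/3 blocks per group, 1x1 bottleneck widths 64..512,
# each block expanding back to 4x the bottleneck width
groups = [BottleneckGroup(3, 64, 256),
          BottleneckGroup(4, 128, 512),
          BottleneckGroup(6, 256, 1024),
          BottleneckGroup(3, 512, 2048)]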