Example #1
File: DenseNet.py, Project: ypy516478793/OS
    def transition_up(self, x, scope, num_filters=None):
        with tf.variable_scope(scope):
            x = batch_norm(x, is_training=self.is_training_pl, scope='BN_1')
            x = Relu(x)
            x = conv_2d(x,
                        filter_size=1,
                        num_filters=int(x.get_shape().as_list()[-1] *
                                        self.theta_up),
                        layer_name='conv',
                        add_reg=self.conf.use_reg,
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl)
            x = tf.nn.dropout(x, keep_prob=self.keep_prob_pl)
            x = batch_norm(x, is_training=self.is_training_pl, scope='BN_2')
            x = Relu(x)
            if not num_filters:
                num_filters = self.trans_out
            x = deconv_2d(inputs=x,
                          filter_size=3,
                          num_filters=num_filters,
                          layer_name='deconv',
                          stride=2,
                          add_reg=self.conf.use_reg,
                          add_batch_norm=False,
                          is_train=self.is_training_pl)
            x = tf.nn.dropout(x, keep_prob=self.keep_prob_pl)
        return x
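The conv_2d and deconv_2d helpers called throughout these snippets are defined elsewhere in the project. A minimal TF1-style sketch consistent with the call sites above; the regularizer strength, padding, and dropout placement are assumptions, not values from the source:

import tensorflow as tf

def conv_2d(inputs, filter_size, num_filters, layer_name, stride=1,
            add_batch_norm=False, is_train=True, add_reg=False,
            activation=None, keep_prob=1.0, dropconnect=False):
    # Hypothetical reconstruction of the project's convolution wrapper.
    with tf.variable_scope(layer_name):
        reg = tf.contrib.layers.l2_regularizer(1e-4) if add_reg else None
        x = tf.layers.conv2d(inputs, num_filters, filter_size,
                             strides=stride, padding='same',
                             kernel_regularizer=reg)
        if add_batch_norm:
            x = tf.layers.batch_normalization(x, training=is_train)
        if activation is not None:
            x = activation(x)
        # dropconnect (weight-level dropout) is omitted in this sketch;
        # with the default keep_prob=1.0 the dropout below is a no-op.
        return tf.nn.dropout(x, keep_prob=keep_prob)

def deconv_2d(inputs, filter_size, num_filters, layer_name, stride=2,
              add_batch_norm=False, is_train=True, add_reg=False):
    # Hypothetical reconstruction of the transposed-convolution wrapper.
    with tf.variable_scope(layer_name):
        reg = tf.contrib.layers.l2_regularizer(1e-4) if add_reg else None
        x = tf.layers.conv2d_transpose(inputs, num_filters, filter_size,
                                       strides=stride, padding='same',
                                       kernel_regularizer=reg)
        if add_batch_norm:
            x = tf.layers.batch_normalization(x, training=is_train)
        return x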
Example #2
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('VNet'):
            feature_list = list()

            with tf.variable_scope('Encoder'):
                for l in range(self.num_levels):
                    with tf.variable_scope('level_' + str(l + 1)):
                        x = self.conv_block_down(x, self.num_convs[l])
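                        # Keep the pre-downsampling features for the
                        # decoder's skip connection at this level.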
                        feature_list.append(x)
                        x = self.down_conv(x)

            with tf.variable_scope('Bottom_level'):
                x = self.conv_block_down(x, self.bottom_convs)

            with tf.variable_scope('Decoder'):
                for l in reversed(range(self.num_levels)):
                    with tf.variable_scope('level_' + str(l + 1)):
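                        # Fetch the matching encoder features for this level.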
                        f = feature_list[l]
                        x = self.up_conv(x)
                        x = self.conv_block_up(x, f, self.num_convs[l])

            self.logits = conv_2d(x,
                                  1,
                                  self.conf.num_cls,
                                  'Output_layer',
                                  self.conf.use_BN,
                                  self.is_training_pl,
                                  keep_prob=1)
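up_conv is not part of this listing; by symmetry with down_conv in Example #4 it presumably doubles spatial resolution while halving the channel count. A hedged sketch under that assumption, reusing the deconv_2d wrapper sketched after Example #1 and the get_num_channels helper shown after Example #4:

    def up_conv(self, x):
        # Assumed mirror of down_conv: stride-2 transposed convolution
        # that halves the channel count while upsampling 2x.
        num_out_channels = get_num_channels(x) // 2
        x = deconv_2d(inputs=x,
                      filter_size=2,
                      num_filters=num_out_channels,
                      layer_name='conv_up',
                      stride=2,
                      add_batch_norm=self.conf.use_BN,
                      is_train=self.is_training_pl)
        return x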
Example #3
    def build_network(self, x):
        # Building network...
        with tf.variable_scope('Tiramisu'):
            feature_list = list()
            shape_list = list()

            with tf.variable_scope('input'):
                x = conv_2d(x,
                            self.k_size,
                            48,
                            'input_layer',
                            add_batch_norm=self.conf.use_BN,
                            is_train=self.is_training_pl,
                            add_reg=self.conf.use_reg,
                            activation=tf.nn.relu)
                # x = tf.nn.dropout(x, self.keep_prob)
                print('{}: {}'.format('input_layer', x.get_shape()))

            with tf.variable_scope('Encoder'):
                for l in range(self.num_levels):
                    with tf.variable_scope('level_' + str(l + 1)):
                        level = self.dense_block(x, self.num_convs[l])
                        shape_list.append(tf.shape(level))
                        x = tf.concat((x, level), axis=-1)
                        print('{}: {}'.format('Encoder_level' + str(l + 1),
                                              x.get_shape()))
                        feature_list.append(x)
                        x = self.down_conv(x)

            with tf.variable_scope('Bottom_level'):
                x = self.dense_block(x, self.bottom_convs)
                print('{}: {}'.format('bottom_level', x.get_shape()))

            with tf.variable_scope('Decoder'):
                for l in reversed(range(self.num_levels)):
                    with tf.variable_scope('level_' + str(l + 1)):
                        x = self.up_conv(x)
                        stack = tf.concat((x, feature_list[l]), axis=-1)
                        print('{}: {}'.format('Decoder_level' + str(l + 1),
                                              x.get_shape()))
                        x = self.dense_block(stack, self.num_convs[l])
                        print('{}: {}'.format('Dense_block_level' + str(l + 1),
                                              x.get_shape()))
                        stack = tf.concat((stack, x), axis=-1)
                        print('{}: {}'.format('stack_depth' + str(l + 1),
                                              stack.get_shape()))

            with tf.variable_scope('output'):

                print('{}: {}'.format('out_block_input', stack.get_shape()))
                self.logits = BN_Relu_conv_2d(stack,
                                              1,
                                              self.conf.num_cls,
                                              'Output_layer',
                                              add_batch_norm=self.conf.use_BN,
                                              is_train=self.is_training_pl)
                print('{}: {}'.format('output', self.logits.get_shape()))
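dense_block is also defined outside these snippets. In the FC-DenseNet (Tiramisu) design, each of num_convs layers sees the running concatenation and emits a fixed number of growth-rate feature maps, and only the newly produced maps are returned, which is why both the encoder and decoder above concatenate the block's output with its input themselves. A sketch under those assumptions; self.k as the growth-rate attribute is an assumption:

    def dense_block(self, x, num_convs):
        # Assumed FC-DenseNet dense block: each layer contributes self.k
        # new feature maps and feeds the growing concatenation forward.
        new_features = []
        for i in range(num_convs):
            y = BN_Relu_conv_2d(x,
                                self.k_size,
                                self.k,  # growth rate; assumed attribute
                                'conv_' + str(i + 1),
                                add_batch_norm=self.conf.use_BN,
                                is_train=self.is_training_pl)
            new_features.append(y)
            x = tf.concat((x, y), axis=-1)
        # Return only the new maps; callers concatenate them as needed.
        return tf.concat(new_features, axis=-1)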
Example #4
    def down_conv(self, x):
        num_out_channels = get_num_channels(x) * 2
        x = conv_2d(inputs=x,
                    filter_size=2,
                    num_filters=num_out_channels,
                    layer_name='conv_down',
                    stride=2,
                    add_batch_norm=self.conf.use_BN,
                    is_train=self.is_training_pl,
                    keep_prob=self.keep_prob_pl,
                    activation=self.act_fcn)
        return x
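get_num_channels is another external helper; given the channels-last tensors used throughout these examples, it is presumably just:

def get_num_channels(x):
    # NHWC layout assumed: the channel axis is the last dimension.
    return x.get_shape().as_list()[-1]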
Example #5
File: DenseNet.py, Project: ypy516478793/OS
    def transition_down(self, x, scope):
        with tf.variable_scope(scope):
            x = batch_norm(x, is_training=self.is_training_pl, scope='BN')
            x = Relu(x)
            x = conv_2d(x,
                        filter_size=1,
                        num_filters=int(x.get_shape().as_list()[-1] *
                                        self.theta_down),
                        layer_name='conv',
                        add_reg=self.conf.use_reg,
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl)
            x = tf.nn.dropout(x, keep_prob=self.keep_prob_pl)
            x = avg_pool(x, ksize=2, stride=2, scope='avg_pool')
            return x
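The Relu, batch_norm, and avg_pool helpers used by the DenseNet snippets are likewise thin wrappers. Plausible reconstructions, hedged to match only the call sites shown above:

import tensorflow as tf

def Relu(x):
    return tf.nn.relu(x)

def batch_norm(x, is_training, scope):
    with tf.variable_scope(scope):
        return tf.layers.batch_normalization(x, training=is_training)

def avg_pool(x, ksize, stride, scope):
    with tf.variable_scope(scope):
        return tf.nn.avg_pool(x,
                              ksize=[1, ksize, ksize, 1],
                              strides=[1, stride, stride, 1],
                              padding='SAME')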
Example #6
File: DenseNet.py, Project: ypy516478793/OS
    def bottleneck_block(self, x, scope):
        with tf.variable_scope(scope):
            x = batch_norm(x, is_training=self.is_training_pl, scope='BN1')
            x = Relu(x)
            x = conv_2d(x,
                        filter_size=1,
                        num_filters=4 * self.k,
                        add_batch_norm=self.conf.use_BN,
                        layer_name='conv1',
                        add_reg=self.conf.use_reg,
                        is_train=self.is_training_pl)
            x = tf.nn.dropout(x, keep_prob=self.keep_prob_pl)

            x = batch_norm(x, is_training=self.is_training_pl, scope='BN2')
            x = Relu(x)
            x = conv_2d(x,
                        filter_size=3,
                        num_filters=self.k,
                        add_batch_norm=self.conf.use_BN,
                        layer_name='conv2',
                        add_reg=self.conf.use_reg,
                        is_train=self.is_training_pl)
            x = tf.nn.dropout(x, keep_prob=self.keep_prob_pl)
            return x
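The dense_block that Example #9 calls with a num_blocks count and a scope is not shown either; it presumably chains bottleneck_block and concatenates each block's k new maps onto the running input, returning the full stack (unlike the Tiramisu variant sketched after Example #3, which returns only the new maps). A hedged sketch:

    def dense_block(self, x, num_blocks, scope):
        # Assumed DenseNet-B composition: every bottleneck sees all
        # earlier features and appends self.k new channels.
        with tf.variable_scope(scope):
            for i in range(num_blocks):
                y = self.bottleneck_block(x, scope='BB_' + str(i + 1))
                x = tf.concat((x, y), axis=-1)
            return x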
Example #7
    def conv_block_up(self, layer_input, fine_grained_features,
                      num_convolutions):
        x = tf.concat((layer_input, fine_grained_features), axis=-1)
        n_channels = get_num_channels(layer_input)
        for i in range(num_convolutions):
            x = conv_2d(inputs=x,
                        filter_size=self.k_size,
                        num_filters=n_channels,
                        layer_name='conv_' + str(i + 1),
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl,
                        keep_prob=self.keep_prob_pl,
                        dropconnect=True)
            if i == num_convolutions - 1:
                x = x + layer_input
            x = self.act_fcn(x, name='prelu_' + str(i + 1))
        return x
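act_fcn is applied with a name argument ('prelu_1', 'prelu_2', ...), which suggests a parametric ReLU with learnable slopes rather than a plain ReLU. A hedged TF1 sketch of such an activation:

def prelu(x, name):
    # Hypothetical PReLU matching the 'prelu_*' names above:
    # one learnable negative slope per channel.
    with tf.variable_scope(name):
        alpha = tf.get_variable('alpha',
                                shape=x.get_shape().as_list()[-1:],
                                initializer=tf.constant_initializer(0.1))
        pos = tf.nn.relu(x)
        neg = alpha * (x - tf.abs(x)) * 0.5  # equals alpha * min(x, 0)
        return pos + neg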
Example #8
    def conv_block_down(self, layer_input, num_convolutions):
        x = layer_input
        n_channels = get_num_channels(x)
        if n_channels == 1:
            n_channels = self.conf.start_channel_num
        for i in range(num_convolutions):
            x = conv_2d(inputs=x,
                        filter_size=self.k_size,
                        num_filters=n_channels,
                        layer_name='conv_' + str(i + 1),
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl,
                        keep_prob=self.keep_prob_pl,
                        dropconnect=True)
            if i == num_convolutions - 1:
                x = x + layer_input
            x = self.act_fcn(x, name='prelu_' + str(i + 1))
        return x
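When the network input has a single channel, the final residual add x + layer_input still works because TensorFlow broadcasts the size-1 channel dimension across the widened feature map, effectively replicating the input over start_channel_num channels. A minimal check of that broadcast:

import tensorflow as tf

a = tf.zeros([2, 8, 8, 16])  # feature map widened to start_channel_num
b = tf.ones([2, 8, 8, 1])    # original single-channel input
print((a + b).get_shape())   # (2, 8, 8, 16): broadcast over channels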
Example #9
File: DenseNet.py, Project: ypy516478793/OS
    def build_network(self, x_input):
        # Building network...
        with tf.variable_scope('DenseNet'):
            feature_list = list()
            shape_list = list()
            x = conv_2d(x_input,
                        filter_size=3,
                        num_filters=self.trans_out,
                        layer_name='conv1',
                        activation=tf.nn.relu,
                        add_batch_norm=self.conf.use_BN,
                        is_train=self.is_training_pl,
                        add_reg=self.conf.use_reg)
            print('conv1 shape: {}'.format(x.get_shape()))
            shape_list.append(tf.shape(x))

            with tf.variable_scope('Encoder'):
                for l in range(self.num_levels):
                    with tf.variable_scope('level_' + str(l + 1)):
                        x = self.dense_block(x,
                                             self.num_blocks[l],
                                             scope='DB_' + str(l + 1))
                        feature_list.append(x)
                        print('DB_{} shape: {}'.format(str(l + 1),
                                                       x.get_shape()))
                        x = self.transition_down(x, scope='TD_' + str(l + 1))
                        print('TD_{} shape: {}'.format(str(l + 1),
                                                       x.get_shape()))
                        if l != self.num_levels - 1:
                            shape_list.append(tf.shape(x))

            with tf.variable_scope('Bottom_level'):
                x = self.dense_block(x, self.bottom_convs, scope='BottomBlock')
                print('bottom_level shape: {}'.format(x.get_shape()))

            with tf.variable_scope('Decoder'):
                for l in reversed(range(self.num_levels)):
                    with tf.variable_scope('level_' + str(l + 1)):
                        shape = x.get_shape().as_list()
                        # out_shape = [self.conf.batch_size] + list(map(lambda x: x*2, shape[1:-1])) \
                        #             + [int(shape[-1]*self.theta_up)]
                        # out_shape = tf.shape(tf.zeros(out_shape))
                        x = self.transition_up(x,
                                               scope='TU_' + str(l + 1),
                                               num_filters=int(shape[-1] *
                                                               self.theta_up))
                        print('TU_{} shape: {}'.format(str(l + 1),
                                                       x.get_shape()))
                        stack = tf.concat((x, feature_list[l]), axis=-1)
                        print('After concat shape: {}'.format(
                            stack.get_shape()))
                        x = self.dense_block(stack,
                                             self.num_blocks[l],
                                             scope='DB_' + str(l + 1))
                        print('DB_{} shape: {}'.format(str(l + 1),
                                                       x.get_shape()))

            with tf.variable_scope('output'):
                x = BN_Relu_conv_2d(x,
                                    3,
                                    256,
                                    'pre_output_layer',
                                    add_reg=self.conf.use_reg,
                                    is_train=self.is_training_pl)
                print('pre_out shape: {}'.format(x.get_shape()))
                self.logits = BN_Relu_conv_2d(x,
                                              1,
                                              self.conf.num_cls,
                                              'Output_layer',
                                              add_reg=self.conf.use_reg,
                                              is_train=self.is_training_pl)
                print('{}: {}'.format('output', self.logits.get_shape()))
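For intuition on the theta compression in the transition layers (Examples #1 and #5), here is the channel arithmetic through one encoder level under assumed values; the growth rate k=16, 4 bottlenecks per dense block, theta_down=0.5, and a 48-channel stem are illustrative numbers, not values from the source:

c = 48              # channels entering the dense block (assumed stem width)
c = c + 4 * 16      # dense block appends k maps per bottleneck -> 112
c = int(c * 0.5)    # transition_down's 1x1 conv compresses by theta_down -> 56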