Code example #1
0
    def bottleneck_block(self, x, scope):
        """DenseNet bottleneck: two BN -> ReLU -> conv -> dropout stages under `scope`.

        Stage 1 is a 1x1x1 conv producing 4*k channels (the bottleneck);
        stage 2 is a 3x3x3 conv producing k channels (the growth rate).
        """
        with tf.variable_scope(scope):
            # (filter_size, num_filters, sub-scope suffix) for the two stages.
            stages = ((1, 4 * self.k, '1'), (3, self.k, '2'))
            for f_size, n_filters, suffix in stages:
                x = batch_norm(x, is_training=self.is_training, scope='BN' + suffix)
                x = Relu(x)
                x = conv_3d(x, filter_size=f_size, num_filters=n_filters,
                            layer_name='conv' + suffix, add_reg=self.conf.use_reg)
                x = drop_out(x, keep_prob=self.keep_prob)
            return x
Code example #2
0
 def build_network(self):
     """Build the 7-layer fully convolutional network and its logits head.

     Hidden conv widths follow 16-32-64-128-64-32-16 (scopes CONV1..CONV7);
     a final 1x1x1 conv (CONV8, no activation) produces per-class logits.
     """
     with tf.variable_scope('FCN'):
         net = self.x
         # One conv per width; layer names are CONV1..CONV7.
         for idx, width in enumerate((16, 32, 64, 128, 64, 32, 16), start=1):
             net = conv_3d(net,
                           self.k_size,
                           width,
                           'CONV' + str(idx),
                           batch_norm=self.conf.use_BN,
                           is_train=self.is_training,
                           activation=self.act_fcn)
         # 1x1x1 conv to num_cls channels; no activation on the logits.
         self.logits = conv_3d(net,
                               1,
                               self.conf.num_cls,
                               'CONV8',
                               batch_norm=self.conf.use_BN,
                               is_train=self.is_training)
Code example #3
0
 def transition_down(self, x, scope):
     """DenseNet transition-down: BN -> ReLU -> 1x1x1 conv -> dropout -> 2x avg-pool."""
     with tf.variable_scope(scope):
         normed = batch_norm(x, is_training=self.is_training, scope='BN')
         activated = Relu(normed)
         # 1x1x1 conv compresses the channel count to self.trans_out.
         compressed = conv_3d(activated, filter_size=1, num_filters=self.trans_out,
                              layer_name='conv', add_reg=self.conf.use_reg)
         regularized = drop_out(compressed, keep_prob=self.keep_prob)
         # 2x2x2 average pooling halves each spatial dimension.
         return avg_pool(regularized, ksize=2, stride=2, scope='avg_pool')
Code example #4
0
 def down_conv(self, x):
     """Downsampling conv: kernel 2, stride 2 halves each spatial dimension
     while doubling the channel count of `x`.
     """
     doubled_channels = 2 * get_num_channels(x)
     return conv_3d(inputs=x,
                    filter_size=2,
                    num_filters=doubled_channels,
                    layer_name='conv_down',
                    stride=2,
                    batch_norm=self.conf.use_BN,
                    is_train=self.is_training,
                    activation=self.act_fcn)
Code example #5
0
 def conv_block_up(self, layer_input, fine_grained_features, num_convolutions):
     """V-Net decoder block: concat the encoder skip features, run convs,
     and add `layer_input` as a residual before the final activation.

     PReLU activation and dropout follow every conv.
     """
     x = tf.concat((layer_input, fine_grained_features), axis=-1)
     n_channels = get_num_channels(layer_input)
     last = num_convolutions - 1
     for idx in range(num_convolutions):
         tag = str(idx + 1)
         x = conv_3d(inputs=x,
                     filter_size=self.k_size,
                     num_filters=n_channels,
                     layer_name='conv_' + tag,
                     batch_norm=self.conf.use_BN,
                     is_train=self.is_training)
         if idx == last:
             # Residual connection from the block input (pre-concat tensor).
             x = x + layer_input
         x = self.act_fcn(x, name='prelu_' + tag)
         x = tf.nn.dropout(x, self.keep_prob)
     return x
Code example #6
0
    def build_network(self, x_input):
        """Build the fully-convolutional DenseNet.

        Stem conv -> dense-block encoder with transition-downs -> bottom dense
        block -> transition-up decoder with skip concatenation -> BN-ReLU-conv
        output head. Final per-class logits are stored in ``self.logits``.
        """
        # Building network...
        with tf.variable_scope('DenseNet'):
            feature_list = list()  # encoder outputs, reused as decoder skip inputs
            shape_list = list()    # dynamic shapes the decoder upsamples back to
            # Stem: strided 3x3x3 conv (stride 2) before the encoder; no BN here.
            x = conv_3d(x_input, filter_size=3, num_filters=self.trans_out, stride=2, layer_name='conv1',
                        add_batch_norm=False, is_train=self.is_training, add_reg=self.conf.use_reg)
            print('conv1 shape: {}'.format(x.get_shape()))
            shape_list.append(tf.shape(x))

            with tf.variable_scope('Encoder'):
                for l in range(self.num_levels):
                    with tf.variable_scope('level_' + str(l + 1)):
                        x = self.dense_block(x, self.num_blocks[l], scope='DB_' + str(l + 1))
                        feature_list.append(x)
                        print('DB_{} shape: {}'.format(str(l + 1), x.get_shape()))
                        x = self.transition_down(x, scope='TD_' + str(l + 1))
                        print('TD_{} shape: {}'.format(str(l + 1), x.get_shape()))
                        # The deepest level's pooled shape is never an upsampling
                        # target, so it is not recorded.
                        if l != self.num_levels - 1:
                            shape_list.append(tf.shape(x))

            with tf.variable_scope('Bottom_level'):
                x = self.dense_block(x, self.bottom_convs, scope='BottomBlock')
                print('bottom_level shape: {}'.format(x.get_shape()))

            with tf.variable_scope('Decoder'):
                # Walk the levels in reverse: upsample, concat the matching
                # encoder feature, then run a dense block.
                for l in reversed(range(self.num_levels)):
                    with tf.variable_scope('level_' + str(l + 1)):
                        f = feature_list[l]
                        out_shape = shape_list[l]
                        x = self.transition_up(x, out_shape=out_shape, scope='TU_' + str(l + 1))
                        print('TU_{} shape: {}'.format(str(l + 1), x.get_shape()))
                        stack = tf.concat((x, f), axis=-1)
                        print('After concat shape: {}'.format(stack.get_shape()))
                        x = self.dense_block(stack, self.num_blocks[l], scope='DB_' + str(l + 1))
                        print('DB_{} shape: {}'.format(str(l + 1), x.get_shape()))

            with tf.variable_scope('output'):
                out_filters = x.get_shape().as_list()[-1]
                # tf.tile is used only to materialize a dynamic target shape with
                # out_filters channels at the input's spatial size; the tiled
                # tensor itself is discarded.
                out_shape = tf.shape(tf.tile(x_input, [1, 1, 1, 1, out_filters]))
                # NOTE(review): scope name 'TD_out' vs the printed 'TU_out' looks
                # inconsistent — confirm the intended scope name.
                x = self.transition_up(x, out_shape, 'TD_out', num_filters=out_filters)
                print('TU_out shape: {}'.format(x.get_shape()))
                x = BN_Relu_conv_3d(x, 3, 256, 'pre_output_layer', add_reg=self.conf.use_reg,
                                    is_train=self.is_training)
                print('pre_out shape: {}'.format(x.get_shape()))
                # 1x1x1 conv to per-class logits.
                self.logits = BN_Relu_conv_3d(x, 1, self.conf.num_cls, 'Output_layer',
                                              add_reg=self.conf.use_reg, is_train=self.is_training)
                print('{}: {}'.format('output', self.logits.get_shape()))
Code example #7
0
 def conv_block_down(self, layer_input, num_convolutions):
     """V-Net encoder block: repeated convs with a residual add of the block
     input before the final activation; PReLU and dropout follow every conv.
     """
     x = layer_input
     n_channels = get_num_channels(x)
     if n_channels == 1:
         # Single-channel input (raw volume): widen to the configured start width.
         n_channels = self.conf.start_channel_num
     last = num_convolutions - 1
     for idx in range(num_convolutions):
         tag = str(idx + 1)
         x = conv_3d(inputs=x,
                     filter_size=self.k_size,
                     num_filters=n_channels,
                     layer_name='conv_' + tag,
                     batch_norm=self.conf.use_BN,
                     is_train=self.is_training)
         if idx == last:
             # Residual connection from the block input.
             x = x + layer_input
         x = self.act_fcn(x, name='prelu_' + tag)
         x = tf.nn.dropout(x, self.keep_prob)
     return x
Code example #8
0
    def build_network(self, x):
        """Assemble the V-Net: encoder levels with skip connections, bottom
        block, mirrored decoder levels, and a 1x1x1 logits conv.
        """
        with tf.variable_scope('V-Net'):
            skip_connections = []

            with tf.variable_scope('Encoder'):
                for level in range(self.num_levels):
                    with tf.variable_scope('level_' + str(level + 1)):
                        x = self.conv_block_down(x, self.num_convs[level])
                        # Saved pre-downsampling output for the matching decoder level.
                        skip_connections.append(x)
                        x = self.down_conv(x)

            with tf.variable_scope('Bottom_level'):
                x = self.conv_block_down(x, self.bottom_convs)

            with tf.variable_scope('Decoder'):
                # Walk the levels deepest-first.
                for level in range(self.num_levels - 1, -1, -1):
                    with tf.variable_scope('level_' + str(level + 1)):
                        skip = skip_connections[level]
                        x = self.up_conv(x, tf.shape(skip))
                        x = self.conv_block_up(x, skip, self.num_convs[level])

            # 1x1x1 conv producing per-class logits (BN enabled, no activation).
            self.logits = conv_3d(x, 1, self.conf.num_cls, 'Output_layer', batch_norm=True,
                                  is_train=self.is_training)
Code example #9
0
File: SegNet.py  Project: ypy516478793/OS
    def build_network(self, x):
        """Build the 3-D SegNet encoder-decoder and store class logits in self.logits.

        When ``self.bayes`` is set, MC-dropout layers are inserted before the two
        deepest encoder boxes and before each of the first four decoder
        upsampling stages. The original implementation duplicated every
        conv/deconv call inside ``if self.bayes / else`` branches that differed
        only in the preceding dropout; the branches are factored into the
        ``_mc_dropout`` helper below (identical graph, identical op names).
        """
        with tf.variable_scope('SegNet'):

            def _conv(tensor, num_filters, name):
                # Shared-kernel 3-D conv with optional BN and ReLU activation.
                return conv_3d(tensor, self.k_size, num_filters, name,
                               self.conf.use_BN, self.is_training_pl,
                               activation=tf.nn.relu)

            def _deconv(tensor, num_filters, name):
                # 2x upsampling transposed conv (kernel 2, stride 2).
                return deconv_3d(tensor, 2, num_filters, name, 2,
                                 add_batch_norm=self.conf.use_BN,
                                 is_train=self.is_training_pl)

            def _mc_dropout(tensor, name):
                # Monte-Carlo dropout in the Bayesian variant; identity otherwise.
                if self.bayes:
                    tensor = tf.layers.dropout(tensor,
                                               rate=(1 - self.keep_prob_pl),
                                               training=self.with_dropout_pl,
                                               name=name)
                return tensor

            with tf.variable_scope('Encoder'):
                # Box 1: two 64-channel convs, then 2x max-pool.
                x = _conv(x, 64, 'conv1_1')
                x = _conv(x, 64, 'conv1_2')
                x = max_pool(x, ksize=2, stride=2, name='pool_1')

                # Box 2: two 128-channel convs.
                x = _conv(x, 128, 'conv2_1')
                x = _conv(x, 128, 'conv2_2')
                x = max_pool(x, ksize=2, stride=2, name='pool_2')

                # Box 3: three 256-channel convs.
                x = _conv(x, 256, 'conv3_1')
                x = _conv(x, 256, 'conv3_2')
                x = _conv(x, 256, 'conv3_3')
                x = max_pool(x, ksize=2, stride=2, name='pool_3')

                # Box 4: dropout (Bayesian only), then three 512-channel convs.
                x = _mc_dropout(x, 'dropout1')
                x = _conv(x, 512, 'conv4_1')
                x = _conv(x, 512, 'conv4_2')
                x = _conv(x, 512, 'conv4_3')
                x = max_pool(x, ksize=2, stride=2, name='pool_4')

                # Box 5: dropout (Bayesian only), then three 512-channel convs.
                x = _mc_dropout(x, 'dropout2')
                x = _conv(x, 512, 'conv5_1')
                x = _conv(x, 512, 'conv5_2')
                x = _conv(x, 512, 'conv5_3')
                x = max_pool(x, ksize=2, stride=2, name='pool_5')

            with tf.variable_scope('Decoder'):
                # Box 5 mirror: upsample, then three 512-channel convs.
                x = _mc_dropout(x, 'dropout3')
                x = _deconv(x, 512, 'deconv_5')
                x = _conv(x, 512, 'deconv5_2')
                x = _conv(x, 512, 'deconv5_3')
                x = _conv(x, 512, 'deconv5_4')

                # Box 4 mirror: channel count drops to 256 on the last conv.
                x = _mc_dropout(x, 'dropout4')
                x = _deconv(x, 512, 'deconv_4')
                x = _conv(x, 512, 'deconv4_2')
                x = _conv(x, 512, 'deconv4_3')
                x = _conv(x, 256, 'deconv4_4')

                # Box 3 mirror: channel count drops to 128 on the last conv.
                x = _mc_dropout(x, 'dropout5')
                x = _deconv(x, 256, 'deconv_3')
                x = _conv(x, 256, 'deconv3_2')
                x = _conv(x, 256, 'deconv3_3')
                x = _conv(x, 128, 'deconv3_4')

                # Box 2 mirror: channel count drops to 64 on the last conv.
                x = _mc_dropout(x, 'dropout6')
                x = _deconv(x, 128, 'deconv_2')
                x = _conv(x, 128, 'deconv2_2')
                x = _conv(x, 64, 'deconv2_3')

                # Box 1 mirror: no dropout before the final upsampling stage.
                x = _deconv(x, 64, 'deconv_1')
                x = _conv(x, 64, 'deconv1_2')
                x = _conv(x, 64, 'deconv1_3')

            with tf.variable_scope('Classifier'):
                # 1x1x1 conv to per-class logits (no ReLU on the output).
                self.logits = conv_3d(x, 1, self.conf.num_cls, 'output',
                                      self.conf.use_BN, self.is_training_pl)
Code example #10
0
    def build_network(self, x):
        """Build the Tiramisu (FC-DenseNet) segmentation network.

        Input conv -> dense-block encoder with growing concatenated features ->
        bottom dense block -> decoder with transition-ups and skip concats ->
        BN-ReLU 1x1x1 output conv. Final logits are stored in ``self.logits``.
        """
        # Building network...
        with tf.variable_scope('Tiramisu'):
            feature_list = list()  # encoder outputs, reused as decoder skip inputs
            shape_list = list()    # dense-block output shapes (recorded but unused below)

            with tf.variable_scope('input'):
                # Stem conv to 48 channels; no BN on the input layer.
                x = conv_3d(x,
                            self.k_size,
                            48,
                            'input_layer',
                            add_batch_norm=False,
                            add_reg=self.conf.use_reg,
                            is_train=self.is_training)
                # x = tf.nn.dropout(x, self.keep_prob)
                print('{}: {}'.format('input_layer', x.get_shape()))

            with tf.variable_scope('Encoder'):
                for l in range(self.num_levels):
                    with tf.variable_scope('level_' + str(l + 1)):
                        level = self.dense_block(x, self.num_convs[l])
                        shape_list.append(tf.shape(level))
                        # DenseNet-style growth: block output is concatenated
                        # onto its input before downsampling.
                        x = tf.concat((x, level), axis=-1)
                        print('{}: {}'.format('Encoder_level' + str(l + 1),
                                              x.get_shape()))
                        feature_list.append(x)
                        x = self.down_conv(x)

            with tf.variable_scope('Bottom_level'):
                x = self.dense_block(x, self.bottom_convs)
                print('{}: {}'.format('bottom_level', x.get_shape()))

            with tf.variable_scope('Decoder'):
                for l in reversed(range(self.num_levels)):
                    with tf.variable_scope('level_' + str(l + 1)):
                        f = feature_list[l]
                        # out_shape = shape_list[l]
                        # Target shape: double each spatial dim of the current
                        # static shape, keep batch size and channels.
                        # NOTE(review): relies on fully-known static shapes and
                        # on the lambda parameter shadowing the outer `x` —
                        # confirm inputs always have static spatial dims.
                        shape = x.get_shape().as_list()
                        out_shape = [self.conf.batch_size] + list(
                            map(lambda x: x * 2, shape[1:-1])) + [shape[-1]]
                        # tf.zeros is built only to obtain a dynamic shape tensor.
                        out_shape = tf.shape(tf.zeros((out_shape)))
                        x = self.up_conv(x, out_shape=out_shape)
                        stack = tf.concat((x, f), axis=-1)
                        print('{}: {}'.format('Decoder_level' + str(l + 1),
                                              x.get_shape()))
                        x = self.dense_block(stack, self.num_convs[l])
                        print('{}: {}'.format('Dense_block_level' + str(l + 1),
                                              x.get_shape()))
                        # `stack` from the final (l == 0) iteration feeds the
                        # output head below.
                        stack = tf.concat((stack, x), axis=-1)

            with tf.variable_scope('output'):
                # x = BN_Relu_conv_3d(x, self.k_size, self.conf.num_cls, 'Output_layer', batch_norm=True,
                #                     add_reg=self.conf.use_reg, is_train=self.is_training)
                # x = tf.nn.dropout(x,self.keep_prob)
                print('{}: {}'.format('out_block_input', stack.get_shape()))
                # BN-ReLU 1x1x1 conv from the last decoder stack to per-class logits.
                self.logits = BN_Relu_conv_3d(stack,
                                              1,
                                              self.conf.num_cls,
                                              'Output_layer',
                                              add_batch_norm=True,
                                              add_reg=self.conf.use_reg,
                                              is_train=self.is_training)
                print('{}: {}'.format('output', self.logits.get_shape()))