Example #1
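All snippets below are TensorFlow 1.x class methods excerpted from a segmentation model; they assume `import tensorflow as tf`, `import numpy as np`, a project config module `cfg`, and project-local layer helpers (`_conv2d`, `conv_bn_prelu`, `atrous_bn_prelu`, `deconv_bn_prelu`, `deconv2d`, `BilinearUpsample2d`). Example #1 is a `cascade_feature` pyramid: three chained 3x3 atrous convolutions with dilation rates 2, 4, 8, whose outputs are concatenated along the channel axis and fused back to the input channel count by a 1x1 convolution.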
    def cascade_feature(self, _input, block_num):
        atrous_layer = []
        out_feature = int(_input.get_shape()[-1])
        output = _input
        #output_1x1 = _conv3d(_input, kernel_size=1, stride=1, output_feature=out_feature, use_bias=True, name='pyramid_conv_1')

        for i in range(1, self.atrou_num + 1):
            #dilate_rate = int(np.power(2,4-block_num)*i)
            #dilate_rate = int(np.power(2,4-block_num)) # 8 8 8 // 4 4 4 // 2 2 2
            dilate_rate = int(np.power(2, i))  # 2 4 8 // 2 4 8 // 2 4 8
            output = atrous_bn_prelu(output,
                                     kernel_size=3,
                                     stride=1,
                                     output_channels=out_feature,
                                     dilation_rate=dilate_rate,
                                     is_training=self.is_training,
                                     name='atrous_conv%d' % i)

            atrous_layer.append(output)

        # fuse all dilated branches (the original indexed exactly three,
        # which assumed self.atrou_num == 3)
        output = tf.concat(atrous_layer, axis=-1)
        # output = self.Squeeze_excitation_layer(output, int(output.get_shape()[-1]), self.reduction_ratio,
        #                                        layer_name='SE%d' % block_num)
        output = _conv2d(output,
                         kernel_size=1,
                         stride=1,
                         output_feature=out_feature,
                         use_bias=True,
                         name='pyramid_conv_1x1')

        return output
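None of the snippets define `atrous_bn_prelu`. A minimal sketch of what it plausibly does, assuming the conventional convolution → batch norm → PReLU ordering its name suggests (the project's real helper may differ in padding, bias, or initialization):

def atrous_bn_prelu(inputs, kernel_size, stride, output_channels,
                    dilation_rate, is_training, name):
    # Hypothetical reconstruction: dilated conv -> batch norm -> PReLU.
    with tf.variable_scope(name):
        output = tf.layers.conv2d(inputs,
                                  filters=output_channels,
                                  kernel_size=kernel_size,
                                  strides=stride,
                                  dilation_rate=dilation_rate,
                                  padding='same',
                                  use_bias=False)
        output = tf.layers.batch_normalization(output, training=is_training)
        # TF1 has no built-in PReLU; use one learned slope per channel.
        alpha = tf.get_variable('alpha',
                                shape=[int(output.get_shape()[-1])],
                                initializer=tf.constant_initializer(0.25))
        return tf.maximum(0.0, output) + alpha * tf.minimum(0.0, output)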
Example #2
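Example #2 is a decoder step (`_expanding_block`): a 1x1 convolution adjusts channels, `deconv_bn_prelu` upsamples, the skip connection `layer` is merged by element-wise sum or concatenation depending on `option`, and two 3x3 conv-BN-PReLU layers refine the result without changing its channel count.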
    def _expanding_block(self, block_num, _input, layer, option='concat'):
        output = _conv2d(
            _input,
            kernel_size=1,
            stride=1,
            output_feature=self.output_channels[str(block_num)][0],
            use_bias=True,
            name='conv_1')
        #print('ex1',output)
        output = deconv_bn_prelu(
            output,
            output_channels=self.output_channels[str(block_num)][1],
            is_training=self.is_training,
            name='deconv')
        #print('de1',output)
        if option == 'sum':
            output = tf.add(output, layer, name='elemwise_sum')
        else:
            output = tf.concat(values=(output, layer), axis=-1, name='concat')
        # 26*50*40*64 / 52*100*80*32 / 104*200*160*16
        output = conv_bn_prelu(output,
                               kernel_size=3,
                               stride=1,
                               output_channels=int(output.get_shape()[-1]),
                               name='conv_2',
                               is_training=self.is_training)
        #print('out1',output)
        output = conv_bn_prelu(output,
                               kernel_size=3,
                               stride=1,
                               output_channels=int(output.get_shape()[-1]),
                               name='conv_3',
                               is_training=self.is_training)
        #print('out2',output)
        return output
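`deconv_bn_prelu` is likewise project-local. A sketch under the same assumptions, taking a stride-2 transposed convolution that doubles the spatial size (the kernel size and stride here are guesses):

def deconv_bn_prelu(inputs, output_channels, is_training, name):
    # Hypothetical reconstruction: 2x transposed conv -> batch norm -> PReLU.
    with tf.variable_scope(name):
        output = tf.layers.conv2d_transpose(inputs,
                                            filters=output_channels,
                                            kernel_size=3,
                                            strides=2,
                                            padding='same',
                                            use_bias=False)
        output = tf.layers.batch_normalization(output, training=is_training)
        alpha = tf.get_variable('alpha',
                                shape=[int(output.get_shape()[-1])],
                                initializer=tf.constant_initializer(0.25))
        return tf.maximum(0.0, output) + alpha * tf.minimum(0.0, output)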
Example #3
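Example #3 is the same `cascade_feature` pyramid as Example #1, but the dilation rate scales with the block: `2**(4 - block_num) * i` yields 8/16/24 for block 1, 4/8/12 for block 2, and 2/4/6 for block 3.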
    def cascade_feature(self, _input, block_num):
        atrous_layer = []
        out_feature = int(_input.get_shape()[-1])
        output = _input
        #output_1x1 = _conv3d(_input, kernel_size=1, stride=1, output_feature=out_feature, use_bias=True, name='pyramid_conv_1')

        for i in range(1, self.atrou_num + 1):
            dilate_rate = int(np.power(2, 4 - block_num) * i)
            #print(dilate_rate)
            # dilation rates: 8 16 24 (block 1) / 4 8 12 (block 2) / 2 4 6 (block 3)
            output = atrous_bn_prelu(output,
                                     kernel_size=3,
                                     stride=1,
                                     output_channels=out_feature,
                                     dilation_rate=dilate_rate,
                                     is_training=self.is_training,
                                     name='atrous_conv%d' % i)

            atrous_layer.append(output)

        # fuse all dilated branches (the original indexed exactly three,
        # which assumed self.atrou_num == 3)
        output = tf.concat(atrous_layer, axis=-1)
        #print('atrous conv shape:', output)
        output = _conv2d(output,
                         kernel_size=1,
                         stride=1,
                         output_feature=out_feature,
                         use_bias=True,
                         name='pyramid_conv_1x1')

        return output
Example #4
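Example #4 is an `_expanding_block` variant with the skip merge fixed to concatenation; the inline comments trace feature-map shapes through the three expansion levels.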
    def _expanding_block(self, block_num, _input, layer, option='concat'):

        # 13*25*20*32 / 26*50*40*32 / 52*100*80*16
        #output = conv_bn_prelu(_input, kernel_size=1, stride=1, output_channels=self.output_channels[str(block_num)][0],
        # name='conv_1', is_training=self.is_training)
        output = _conv2d(
            _input,
            kernel_size=1,
            stride=1,
            output_feature=self.output_channels[str(block_num)][0],
            use_bias=True,
            name='conv_1')
        output = deconv_bn_prelu(
            output,
            output_channels=self.output_channels[str(block_num)][1],
            is_training=self.is_training,
            name='deconv')
        # 26*50*40*32 / 52*100*80*16 / 104*200*160*8
        output = tf.concat(values=(output, layer), axis=-1, name='concat')
        # 26*50*40*64 / 52*100*80*32 / 104*200*160*16
        output = conv_bn_prelu(output,
                               kernel_size=3,
                               stride=1,
                               output_channels=int(output.get_shape()[-1]),
                               name='conv_2',
                               is_training=self.is_training)
        output = conv_bn_prelu(output,
                               kernel_size=3,
                               stride=1,
                               output_channels=int(output.get_shape()[-1]),
                               name='conv_3',
                               is_training=self.is_training)
        return output
Example #5
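Example #5 is a complete `inference_op`: a contracting path, an expanding path over skip connections, three auxiliary prediction heads for deep supervision, a `last_stage` that fuses the decoder levels top-down with bilinear upsampling, an argmax prediction, and (when `cfg.joint_train` is set) an extra two-class classification branch built from global average pooling and a fully connected layer. The graph is partitioned across devices with `tf.device`.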
    def inference_op(self, _input):

        conv_layer = []
        dconv_layer = []
        with tf.device(device_name_or_function=self.device[0]):
            output = conv_bn_prelu(_input,
                                   output_channels=8,
                                   kernel_size=3,
                                   stride=1,
                                   is_training=self.is_training,
                                   name='conv_1')  # 104x200x160 8
            input_layer = output
            conv_layer.append(output)

            for block_num in range(1, self.cont_block_num + 1):
                with tf.variable_scope('contract_block_%d' % block_num):
                    output = self._contracting_block(block_num, output,
                                                     input_layer)
                    conv_layer.append(output)
            self.class_layer.append(conv_layer[-1])

            for block_num in range(4, self.expand_block_num + 4):
                with tf.variable_scope('expand_block_%d' % block_num):
                    output = self._expanding_block(block_num, output,
                                                   conv_layer[2 - block_num])
                    dconv_layer.append(output)
        with tf.device(device_name_or_function=self.device[1]):
            # auxiliary prediction

            # fourth level
            auxiliary3_prob_4x = _conv2d(inputs=dconv_layer[0],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary3_prob_4x')
            auxiliary3_prob_2x = deconv2d(inputs=auxiliary3_prob_4x,
                                          output_channels=self.output_classes,
                                          name='auxiliary3_prob_2x')
            auxiliary3_prob_1x = deconv2d(inputs=auxiliary3_prob_2x,
                                          output_channels=self.output_classes,
                                          name='auxiliary3_prob_1x')
            # third level
            auxiliary2_prob_2x = _conv2d(inputs=dconv_layer[1],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary2_prob_2x')
            auxiliary2_prob_1x = deconv2d(inputs=auxiliary2_prob_2x,
                                          output_channels=self.output_classes,
                                          name='auxiliary2_prob_1x')
            # second level
            auxiliary1_prob_1x = _conv2d(inputs=dconv_layer[2],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary1_prob_1x')

            with tf.variable_scope('last_stage'):
                # out_feature = int(output.get_shape()[-1]) / 2
                #print(dconv_layer[0],dconv_layer[1],dconv_layer[2])
                output1 = _conv2d(dconv_layer[0],
                                  kernel_size=1,
                                  stride=1,
                                  output_feature=5,
                                  use_bias=True,
                                  name='block1_conv1x1')

                output1 = BilinearUpsample2d(output1, up_factor=2)

                output2 = _conv2d(dconv_layer[1],
                                  kernel_size=1,
                                  stride=1,
                                  output_feature=5,
                                  use_bias=True,
                                  name='block2_conv1x1')

                output2 = tf.add(output1, output2)

                output2 = BilinearUpsample2d(output2, up_factor=2)

                output3 = _conv2d(dconv_layer[2],
                                  kernel_size=1,
                                  stride=1,
                                  output_feature=5,
                                  use_bias=True,
                                  name='block3_conv1x1')

                output3 = tf.add(output2, output3)

                output = _conv2d(output3,
                                 kernel_size=1,
                                 stride=1,
                                 output_feature=self.output_classes,
                                 use_bias=True,
                                 name='fc_layer')

        with tf.device(device_name_or_function=self.device[2]):
            with tf.variable_scope('prediction'):
                softmax_prob = tf.nn.softmax(logits=output,
                                             name='softmax_prob')
                predicted_label = tf.argmax(input=softmax_prob,
                                            axis=-1,
                                            name='predicted_label')

        if cfg.joint_train:
            with tf.variable_scope('class_layer'):
                cls_input = output
                cls_outputs = _conv2d(cls_input,
                                      kernel_size=3,
                                      stride=1,
                                      output_feature=int(
                                          cls_input.get_shape()[-1]),
                                      name='cls_conv1')
                # average pooling; note the kernel size is read from the
                # width axis (get_shape()[-2]) and reused for both spatial dims
                last_pool_kernel = int(cls_outputs.get_shape()[-2])
                k = last_pool_kernel
                cls_outputs = tf.nn.avg_pool(cls_outputs,
                                             ksize=[1, k, k, 1],
                                             strides=[1, k, k, 1],
                                             padding='VALID')
                # print(k,outputs)
                # FC
                features_total = int(cls_outputs.get_shape()[-1])
                cls_outputs = tf.reshape(cls_outputs, [-1, features_total])
                W = tf.get_variable(
                    shape=[features_total, 2],
                    initializer=tf.contrib.layers.xavier_initializer(),
                    name='W')
                bias = tf.get_variable(initializer=tf.constant(0.0, shape=[2]),
                                       name='bias')
                cls_outputs = tf.matmul(cls_outputs, W) + bias

            with tf.variable_scope('class_prediction'):
                cls_softmax = tf.nn.softmax(logits=cls_outputs,
                                            name='softmax_prob')
                cls_predict = tf.argmax(input=cls_softmax,
                                        axis=-1,
                                        name='predicted_label')
            return output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x, cls_outputs, cls_predict
        else:
            return output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x
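`BilinearUpsample2d` is also not shown. A minimal sketch, assuming it simply resizes by an integer factor with TF1's bilinear resize (no learned parameters):

def BilinearUpsample2d(inputs, up_factor):
    # Hypothetical reconstruction: fixed (non-learned) bilinear upsampling.
    new_height = int(inputs.get_shape()[1]) * up_factor
    new_width = int(inputs.get_shape()[2]) * up_factor
    return tf.image.resize_bilinear(inputs, size=[new_height, new_width])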
Example #6
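Example #6 is a simpler, single-device variant of the same `inference_op`: the identical encoder-decoder and deep-supervision structure, `self.output_labels` in place of `self.output_classes`, no classification branch, and only the final softmax pinned to the CPU.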
    def inference_op(self, _input):

        conv_layer = []
        dconv_layer = []

        # padding output
        #output = tf.pad(_input, np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]), name='pad_1')
        output = conv_bn_prelu(_input,
                               output_channels=8,
                               kernel_size=3,
                               stride=1,
                               is_training=self.is_training,
                               name='conv_1')  # 104x200x160 8
        conv_layer.append(output)

        for block_num in range(1, self.cont_block_num + 1):
            with tf.variable_scope('contract_block_%d' % block_num):
                output = self._contracting_block(block_num, output)
                conv_layer.append(output)

        for block_num in range(4, self.expand_block_num + 4):
            with tf.variable_scope('expand_block_%d' % block_num):
                output = self._expanding_block(block_num, output,
                                               conv_layer[2 - block_num])
                dconv_layer.append(output)
        # auxiliary prediction
        # fourth level
        auxiliary3_prob_4x = _conv2d(inputs=dconv_layer[0],
                                     output_feature=self.output_labels,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary3_prob_4x')
        auxiliary3_prob_2x = deconv2d(inputs=auxiliary3_prob_4x,
                                      output_channels=self.output_labels,
                                      name='auxiliary3_prob_2x')
        auxiliary3_prob_1x = deconv2d(inputs=auxiliary3_prob_2x,
                                      output_channels=self.output_labels,
                                      name='auxiliary3_prob_1x')
        # third level
        auxiliary2_prob_2x = _conv2d(inputs=dconv_layer[1],
                                     output_feature=self.output_labels,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary2_prob_2x')
        auxiliary2_prob_1x = deconv2d(inputs=auxiliary2_prob_2x,
                                      output_channels=self.output_labels,
                                      name='auxiliary2_prob_1x')
        # second level
        auxiliary1_prob_1x = _conv2d(inputs=dconv_layer[2],
                                     output_feature=self.output_labels,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary1_prob_1x')

        with tf.variable_scope('last_stage'):
            # out_feature = int(output.get_shape()[-1]) / 2
            #print(dconv_layer[0],dconv_layer[1],dconv_layer[2])
            output1 = _conv2d(dconv_layer[0],
                              kernel_size=1,
                              stride=1,
                              output_feature=5,
                              use_bias=True,
                              name='block1_conv1x1')

            #output1 = deconv3d(output1, output_channels=int(output1.get_shape()[-1]), name='block1_deconv')

            output1 = BilinearUpsample2d(output1, up_factor=2)

            output2 = _conv2d(dconv_layer[1],
                              kernel_size=1,
                              stride=1,
                              output_feature=5,
                              use_bias=True,
                              name='block2_conv1x1')

            output2 = tf.add(output1, output2)

            #output2 = deconv3d(output2, output_channels=int(output2.get_shape()[-1]), name='block2_deconv')
            output2 = BilinearUpsample2d(output2, up_factor=2)

            output3 = _conv2d(dconv_layer[2],
                              kernel_size=1,
                              stride=1,
                              output_feature=5,
                              use_bias=True,
                              name='block3_conv1x1')

            output3 = tf.add(output2, output3)

            output = _conv2d(output3,
                             kernel_size=1,
                             stride=1,
                             output_feature=self.output_labels,
                             use_bias=True,
                             name='fc_layer')
            #output = output[:, :103, :198, :]
        with tf.device(device_name_or_function='/cpu:0'):
            softmax_prob = tf.nn.softmax(logits=output, name='softmax_prob')
            predicted_label = tf.argmax(input=softmax_prob,
                                        axis=-1,
                                        name='predicted_label')
        #print(output,predicted_label,auxiliary3_prob_1x,auxiliary1_prob_1x,auxiliary2_prob_1x)
        return output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x
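The auxiliary heads returned by `inference_op` are the classic deep-supervision pattern. A hedged sketch of how the outputs are typically consumed for training; the `model`, `images`, `labels` tensors and the 0.6/0.3/0.1 weights are illustrative assumptions, not from the source:

# Hypothetical training wiring for the deep-supervision outputs.
logits, pred, aux1, aux2, aux3 = model.inference_op(images)

def ce(logits_, labels_):
    # Mean pixel-wise cross-entropy against integer class labels.
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_,
                                                       logits=logits_))

# Main loss plus down-weighted auxiliary terms (weights are assumptions).
loss = (ce(logits, labels)
        + 0.6 * ce(aux1, labels)
        + 0.3 * ce(aux2, labels)
        + 0.1 * ce(aux3, labels))

# tf.layers batch norm keeps its moving-average updates in UPDATE_OPS.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)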