Example #1
0
    def _contracting_block(self, block_num, _input, option='sum'):
        """Down-sampling residual block.

        A stride-2 conv halves the spatial dims, a stride-1 conv refines,
        and the stride-2 output (the skip path) is merged back in either
        by element-wise sum (default) or channel concatenation.

        Args:
            block_num: 1-based block index; keys ``self.output_channels``.
            _input: 5-D feature tensor (NDHWC assumed -- TODO confirm).
            option: 'sum' adds the skip tensor, anything else concats it
                along axis 4 (channels).

        Returns:
            The activated output tensor of this block.
        """
        layer = []
        # 52*100*80*8 / 26*50*40*32 / 13*25*20*64
        output = _conv3d(_input, kernel_size=3, stride=2, output_feature=self.output_channels[str(block_num)][0], name='conv_1')
        layer.append(output)
        output = _active_layer(output, name='active_layer_1', is_training=self.is_training)
        # 52*100*80*16 / 26*50*40*32 / 13*25*20*64
        output = _conv3d(output, kernel_size=3, stride=1, output_feature=self.output_channels[str(block_num)][1], name='conv_2')

        if block_num == 1:
            # In block 1 the second conv doubles the channel count, so the
            # skip tensor is zero-padded along the channel axis to make the
            # shapes compatible for the element-wise sum below.
            big_zero = tf.zeros_like(layer[-1])
            layer[-1] = tf.concat([layer[-1], big_zero], axis=-1)

        if option == 'sum':
            output = tf.add(output, layer[-1], name='elemwise_sum')
        else:
            # 52*100*80*16 / 26*50*40*32 / 13*25*20*64
            output = tf.concat(values=(output, layer[-1]), axis=4, name='concat')

        output = _active_layer(output, name='active_layer_2', is_training=self.is_training)
        return output
Example #2
0
def convolution_block(layer_input, n_channels, num_convolutions):
    """Residual stack of 5x5x5 convolutions.

    Runs ``num_convolutions`` convs (PReLU between them, none after the
    last), adds the block input back, and applies a final PReLU.
    """
    out = layer_input
    # All but the last convolution get their own scope and activation.
    for idx in range(num_convolutions - 1):
        with tf.variable_scope('conv_%d' % (idx + 1)):
            out = _conv3d(out, output_feature=n_channels, kernel_size=5, stride=1)
            out = prelu(out)
    # Final convolution feeds straight into the residual connection.
    out = _conv3d(out, output_feature=n_channels, kernel_size=5, stride=1)
    return prelu(out + layer_input)
Example #3
0
    def cascade_feature(self, _input, block_num):
        """Cascaded atrous-convolution feature block.

        Applies ``self.atrou_num`` dilated 3x3x3 convolutions in series
        (dilation rate ``2**(4 - block_num) * i``), concatenates every
        intermediate output along the channel axis, and projects back to
        the input channel count with a 1x1x1 convolution.

        Args:
            _input: 5-D feature tensor; its channel count is preserved.
            block_num: block index controlling the base dilation rate.

        Returns:
            Tensor with the same channel count as ``_input``.
        """
        atrous_layer = []
        out_feature = int(_input.get_shape()[-1])
        output = _input

        for i in range(1, self.atrou_num + 1):
            # dilate rate : 24 16 8 / 12 8 4 / 6 4 2
            dilate_rate = int(np.power(2, 4 - block_num) * i)
            output = atrous_bn_prelu(output,
                                     kernel_size=3,
                                     stride=1,
                                     output_channels=out_feature,
                                     dilation_rate=dilate_rate,
                                     is_training=self.is_training,
                                     name='atrous_conv%d' % i)
            atrous_layer.append(output)

        # Concatenate all cascade outputs.  Works for any atrou_num, not
        # just 3 as the original hard-coded indices [0], [1], [2] assumed.
        output = tf.concat(atrous_layer, axis=-1)
        output = _conv3d(output,
                         kernel_size=1,
                         stride=1,
                         output_feature=out_feature,
                         use_bias=True,
                         name='pyramid_conv_1x1')

        return output
Example #4
0
    def _expanding_block(self, block_num, _input, layer, option='concat'):
        """Up-sampling block: 1x1x1 bottleneck conv, deconv-BN-PReLU,
        concat with the encoder skip tensor, then two 3x3x3 conv-BN-PReLU
        refinements.

        Note: ``option`` is accepted for interface symmetry with the other
        blocks, but this implementation always concatenates the skip.
        """
        channels = self.output_channels[str(block_num)]

        # 13*25*20*32 / 26*50*40*32 / 52*100*80*16
        merged = _conv3d(
            _input,
            kernel_size=1,
            stride=1,
            output_feature=channels[0],
            use_bias=True,
            name='conv_1')
        # 26*50*40*32 / 52*100*80*16 / 104*200*160*8
        merged = deconv_bn_prelu(
            merged,
            output_channels=channels[1],
            is_training=self.is_training,
            name='deconv')
        # 26*50*40*64 / 52*100*80*32 / 104*200*160*16
        merged = tf.concat(values=(merged, layer), axis=4, name='concat')

        # Two refinement convolutions that keep the channel count.
        for conv_name in ('conv_2', 'conv_3'):
            merged = conv_bn_prelu(merged,
                                   kernel_size=3,
                                   stride=1,
                                   output_channels=int(merged.get_shape()[-1]),
                                   name=conv_name,
                                   is_training=self.is_training)
        print(merged)
        return merged
Example #5
0
    def feature_pyramid(self, _input):
        """Atrous spatial pyramid over ``_input``.

        One 1x1x1 branch plus ``self.atrou_num`` parallel dilated 3x3x3
        branches (dilation rates 6, 12, 18, ...), all reading from the
        block input; branches are concatenated along channels and fused
        back to the input channel count with a 1x1x1 convolution.

        Returns:
            Tensor with the same channel count as ``_input``.
        """
        out_feature = int(_input.get_shape()[-1])

        # Point-wise branch.
        output_1x1 = _conv3d(_input, kernel_size=1, stride=1,
                             output_feature=out_feature, use_bias=True,
                             name='pyramid_conv_1')

        # Parallel dilated branches.
        atrous_layer = []
        for i in range(1, self.atrou_num + 1):
            branch = _conv3d(_input, kernel_size=3, stride=1,
                             output_feature=out_feature,
                             dilation_rate=6 * i,
                             name='atrous_conv%d' % i)
            atrous_layer.append(branch)

        # Concatenate every branch.  Handles any atrou_num, not just 3 as
        # the original hard-coded indices [0], [1], [2] assumed.
        output = tf.concat([output_1x1] + atrous_layer, axis=-1)
        output = _conv3d(output, kernel_size=1, stride=1,
                         output_feature=out_feature,
                         use_bias=True, name='pyramid_conv_1x1')
        return output
Example #6
0
    def _contracting_block(self, _input, option='concat'):
        """Down-sampling residual block that keeps the channel count.

        A stride-2 conv halves the spatial dims, a stride-1 conv refines,
        and the stride-2 output (the skip path) is merged back in by
        element-wise sum or channel concatenation.

        Args:
            _input: 5-D feature tensor.
            option: 'sum' adds the skip tensor, anything else concats it
                along axis 4 (channels).

        Returns:
            The activated block output.
        """
        # Cast to a plain int so it is safe to pass as a channel count
        # (consistent with the other blocks, which use int(...) here).
        out_feature = int(_input.get_shape()[-1])

        layer = []
        output = _conv3d(_input,
                         kernel_size=3,
                         stride=2,
                         output_feature=out_feature,
                         name='conv_1')
        layer.append(output)
        output = _active_layer(output,
                               name='active_layer_1',
                               is_training=self.is_training)
        output = _conv3d(output,
                         kernel_size=3,
                         stride=1,
                         output_feature=out_feature,
                         name='conv_2')

        if option == 'sum':
            output = tf.add(output, layer[-1], name='elemwise_sum')
        else:
            output = tf.concat(values=(output, layer[-1]),
                               axis=4,
                               name='concat')

        output = _active_layer(output,
                               name='active_layer_2',
                               is_training=self.is_training)
        return output
Example #7
0
def down_convolution(layer_input, in_channels):
    """Halve the spatial resolution with a 2x2x2 stride-2 convolution that
    doubles the channel count, followed by PReLU."""
    with tf.variable_scope('down_convolution'):
        down = _conv3d(layer_input,
                       output_feature=2 * in_channels,
                       kernel_size=2,
                       stride=2)
        return prelu(down)
Example #8
0
def convolution_block_2(layer_input, fine_grained_features, n_channels,
                        num_convolutions):
    """Decoder residual block.

    Concatenates the up-sampled input with the encoder skip features,
    runs ``num_convolutions`` 5x5x5 convs (PReLU between them, none after
    the last), adds the block input back, and applies a final PReLU.
    """
    merged = tf.concat((layer_input, fine_grained_features), axis=-1)

    # First conv compresses the concatenated channels back to n_channels.
    with tf.variable_scope('conv_1'):
        merged = _conv3d(merged, output_feature=n_channels, kernel_size=5, stride=1)
    # Middle convolutions, each with its own scope and activation.
    for idx in range(2, num_convolutions):
        with tf.variable_scope('conv_%d' % idx):
            merged = _conv3d(merged, output_feature=n_channels, kernel_size=5, stride=1)
            merged = prelu(merged)
    # Last conv has no activation before the residual add.
    merged = _conv3d(merged, output_feature=n_channels, kernel_size=5, stride=1)
    return prelu(merged + layer_input)
Example #9
0
    def _expanding_block(self, _input, layer, option='concat'):
        """Up-sampling block: 1x1x1 channel-halving conv, deconv, merge
        with the encoder skip tensor (sum or concat), 3x3x3 refinement.

        Args:
            _input: 5-D feature tensor from the previous decoder stage.
            layer: skip tensor from the matching encoder stage.
            option: 'sum' adds the skip tensor, anything else concats it
                along axis 4 (channels).

        Returns:
            The activated block output.
        """
        # Floor division: plain `/` yields a float under Python 3, which
        # would then be passed on as a (non-integer) channel count.
        out_feature = int(_input.get_shape()[-1]) // 2

        output = _conv3d(_input,
                         kernel_size=1,
                         stride=1,
                         output_feature=out_feature,
                         name='conv_1')
        output = _active_layer(output,
                               name='active_layer_1',
                               is_training=self.is_training)
        output = deconv3d(output,
                          output_channels=int(output.get_shape()[-1]),
                          name='deconv')
        output = _active_layer(output,
                               name='active_layer_2',
                               is_training=self.is_training)

        if option == 'sum':
            output = tf.add(output, layer, name='elemwise_sum')
        else:
            output = tf.concat(values=(output, layer), axis=4, name='concat')

        output = _conv3d(output,
                         kernel_size=3,
                         stride=1,
                         output_feature=int(output.get_shape()[-1]),
                         name='conv_2')
        output = _active_layer(output,
                               name='active_layer_3',
                               is_training=self.is_training)
        return output
Example #10
0
    def _expanding_block(self, block_num, _input, layer, option='concat'):
        """Decoder block: 1x1x1 conv, deconv, skip merge (sum or concat),
        then a 3x3x3 conv, each stage followed by an activation layer."""
        channels = self.output_channels[str(block_num)]

        # 13*25*20*32 / 26*50*40*32 / 52*100*80*16
        out = _conv3d(_input, kernel_size=1, stride=1,
                      output_feature=channels[0], use_bias=True, name='conv_1')
        print(out)
        out = _active_layer(out, name='active_layer_1', is_training=self.is_training)

        # 26*50*40*32 / 52*100*80*16 / 104*200*160*8
        out = deconv3d(out, output_channels=channels[1], name='deconv')
        print(out)
        out = _active_layer(out, name='active_layer_2', is_training=self.is_training)

        if option == 'sum':
            out = tf.add(out, layer, name='elemwise_sum')
        else:
            # 26*50*40*64 / 52*100*80*32 / 104*200*160*16
            out = tf.concat(values=(out, layer), axis=4, name='concat')

        # 26*50*40*64 / 52*100*80*32 / 104*200*160*16
        out = _conv3d(out, kernel_size=3, stride=1,
                      output_feature=int(out.get_shape()[-1]), name='conv_2')
        print(out)
        out = _active_layer(out, name='active_layer_3', is_training=self.is_training)
        return out
Example #11
0
    def _contracting_block(self, block_num, _input):
        """Xception-style contracting block.

        Three conv-BN-PReLU layers (the last with stride 2), summed with a
        stride-2 1x1x1 projection of the block input, then passed through
        the feature pyramid.

        Args:
            block_num: 1-based block index; keys ``self.output_channels``.
            _input: 5-D feature tensor.

        Returns:
            The feature-pyramid output for this block.
        """
        # 52*100*80*16 / 26*50*40*32 / 13*25*20*64
        output = conv_bn_prelu(
            _input,
            kernel_size=3,
            stride=1,
            output_channels=self.output_channels[str(block_num)][0],
            name='conv_1',
            is_training=self.is_training)
        # 52*100*80*16 / 26*50*40*32 / 13*25*20*64
        output = conv_bn_prelu(
            output,
            kernel_size=3,
            stride=1,
            output_channels=self.output_channels[str(block_num)][1],
            name='conv_2',
            is_training=self.is_training)

        output = conv_bn_prelu(
            output,
            kernel_size=3,
            stride=2,
            output_channels=self.output_channels[str(block_num)][2],
            name='conv_3',
            is_training=self.is_training)

        # Project the block input with a 1x1x1 stride-2 conv so its shape
        # matches the main path before the residual sum.  (Renamed from
        # `input`, which shadowed the builtin.)
        shortcut = _conv3d(_input,
                           kernel_size=1,
                           stride=2,
                           output_feature=self.output_channels[str(block_num)][2],
                           use_bias=True,
                           name='conv_s2')
        output = tf.add(output, shortcut, name='elemwise_sum')

        output = self.feature_pyramid(output, block_num)

        return output
Example #12
0
    def inference_op(self, _input):
        """Build the V-Net-style inference graph.

        Pads the input, runs a 5-level contracting path
        (``convolution_block`` + ``down_convolution``), then a 4-level
        expanding path (``convolution_block_2`` + ``up_convolution``) with
        skip connections, and ends with a 1x1x1 classifier.

        Returns:
            (logits, predicted_label) — logits is the cropped classifier
            output; predicted_label is its per-voxel argmax over axis 4.
        """
        input_channels = 1
        output_channels = 2
        n_channels = 16
        # Asymmetric spatial padding before the network (depth +1 front,
        # height +1 each side); undone by the crop on the logits below.
        _input = tf.pad(_input,
                        np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                        name='pad_1')

        with tf.variable_scope('contracting_path'):

            # if the input has more than 1 channel it has to be expanded because broadcasting only works for 1 input channel
            if input_channels == 1:
                # Replicate the single channel n_channels times instead of
                # learning an input convolution.
                c0 = tf.tile(_input, [1, 1, 1, 1, n_channels])
            else:
                with tf.variable_scope('level_0'):
                    #c0 = convolution_3d(_input, [5, 5, 5, input_channels, n_channels], [1, 1, 1, 1, 1])
                    c0 = _conv3d(_input,
                                 output_feature=n_channels,
                                 kernel_size=5,
                                 stride=1)
                    #c0 = v_conv3d(_input, kernel_shape=[5, 5, 5, input_channels, n_channels], stride=1)
                    c0 = prelu(c0)
            print(c0)
            with tf.variable_scope('level_1'):
                c1 = convolution_block(c0, n_channels, 1)
                c12 = down_convolution(c1, n_channels)

            with tf.variable_scope('level_2'):
                c2 = convolution_block(c12, n_channels * 2, 2)
                c22 = down_convolution(c2, n_channels * 2)

            with tf.variable_scope('level_3'):
                c3 = convolution_block(c22, n_channels * 4, 3)
                c32 = down_convolution(c3, n_channels * 4)

            with tf.variable_scope('level_4'):
                c4 = convolution_block(c32, n_channels * 8, 3)
                c42 = down_convolution(c4, n_channels * 8)

            with tf.variable_scope('level_5'):
                c5 = convolution_block(c42, n_channels * 16, 3)
                c52 = up_convolution(c5, tf.shape(c4), n_channels * 16)

        with tf.variable_scope('expanding_path'):

            with tf.variable_scope('level_4'):
                e4 = convolution_block_2(c52, c4, n_channels * 8, 3)
                e42 = up_convolution(e4, tf.shape(c3), n_channels * 8)

            with tf.variable_scope('level_3'):
                e3 = convolution_block_2(e42, c3, n_channels * 4, 3)
                e32 = up_convolution(e3, tf.shape(c2), n_channels * 4)

            with tf.variable_scope('level_2'):
                e2 = convolution_block_2(e32, c2, n_channels * 2, 2)
                e22 = up_convolution(e2, tf.shape(c1), n_channels * 2)

            with tf.variable_scope('level_1'):
                e1 = convolution_block_2(e22, c1, n_channels, 1)
                with tf.variable_scope('output_layer'):
                    #logits = convolution_3d(e1, [1, 1, 1, n_channels, output_channels], [1, 1, 1, 1, 1])
                    logits = _conv3d(e1,
                                     output_feature=output_channels,
                                     kernel_size=1,
                                     stride=1)
                    #logits = v_conv3d(e1, kernel_shape=[1, 1, 1, n_channels, output_channels], stride=1)
                    # Crop off the padding added by pad_1 (hard-coded to a
                    # 103x198 spatial extent -- presumably matches the fixed
                    # input size; verify against the caller).
                    logits = logits[:, :103, :198, :]
        with tf.variable_scope('prediction'):
            softmax_prob = tf.nn.softmax(logits=logits, name='softmax_prob')
            predicted_label = tf.argmax(input=softmax_prob,
                                        axis=4,
                                        name='predicted_label')
            #predicted_label = tf.nn.sigmoid(logits, name='predicted_label')
        #print(logits,predicted_label)
        return logits, predicted_label
Example #13
0
    def inference_op(self, _input):
        """Build the multi-device encoder/decoder inference graph.

        Pads the input, runs the contracting and expanding blocks on
        ``self.device[0]``, builds deep-supervision (auxiliary) heads and
        the final classifier on ``self.device[1]``, and the softmax
        prediction on ``self.device[2]``.

        Returns:
            (output, predicted_label, auxiliary1_prob_1x,
             auxiliary2_prob_1x, auxiliary3_prob_1x) — main logits, their
            per-voxel argmax, and the three auxiliary logits.
        """
        conv_layer = []
        dconv_layer = []

        # Asymmetric spatial padding before the network (depth +1 front,
        # height +1 each side); undone by the crop on the logits below.
        output = tf.pad(_input,
                        np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                        name='pad_1')
        with tf.device(device_name_or_function=self.device[0]):
            output = conv_bn_prelu(output,
                                   output_channels=8,
                                   kernel_size=3,
                                   stride=1,
                                   is_training=self.is_training,
                                   name='conv_1')  # 104x200x160 8
            input_layer = output
            conv_layer.append(output)

            for block_num in range(1, self.cont_block_num + 1):
                with tf.variable_scope('contract_block_%d' % block_num):
                    output = self._contracting_block(block_num, output,
                                                     input_layer)
                    conv_layer.append(output)

            for block_num in range(4, self.expand_block_num + 4):
                with tf.variable_scope('expand_block_%d' % block_num):
                    # conv_layer[2 - block_num] indexes the matching encoder
                    # skip tensor from the end of the list.
                    output = self._expanding_block(block_num, output,
                                                   conv_layer[2 - block_num])
                    dconv_layer.append(output)
        with tf.device(device_name_or_function=self.device[1]):
            '''auxiliary prediction'''

            # forth level
            auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary3_prob_4x')
            auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x,
                                          output_channels=self.output_classes,
                                          name='auxiliary3_prob_2x')
            auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x,
                                          output_channels=self.output_classes,
                                          name='auxiliary3_prob_1x')
            # third level
            auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary2_prob_2x')
            # Fixed name: this deconv previously reused the conv's name
            # 'auxiliary2_prob_2x', colliding in the TF name/variable scope.
            auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x,
                                          output_channels=self.output_classes,
                                          name='auxiliary2_prob_1x')
            # second level
            auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2],
                                         output_feature=self.output_classes,
                                         kernel_size=1,
                                         stride=1,
                                         use_bias=True,
                                         name='auxiliary1_prob_1x')

            # Final 1x1x1 classifier over the last decoder output.
            output = _conv3d(output,
                             kernel_size=1,
                             stride=1,
                             output_feature=self.output_classes,
                             use_bias=True,
                             name='fc_layer')
            # Crop off the padding added by pad_1.
            output = output[:, :103, :198, :]
        with tf.device(device_name_or_function=self.device[2]):
            with tf.variable_scope('prediction'):
                softmax_prob = tf.nn.softmax(logits=output,
                                             name='softmax_prob')
                predicted_label = tf.argmax(input=softmax_prob,
                                            axis=4,
                                            name='predicted_label')
        return output, predicted_label, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x
Example #14
0
    def inference_op(self, _input):
        """Build the encoder/decoder inference graph with deep supervision.

        Pads the input, runs the contracting and expanding blocks, builds
        three auxiliary (deep-supervision) heads from the decoder outputs,
        and fuses the decoder outputs via 1x1x1 convs + bilinear upsampling
        into a single-channel final prediction.

        Returns:
            (output, auxiliary1_prob_1x, auxiliary2_prob_1x,
             auxiliary3_prob_1x) — main logits plus the auxiliary logits.
        """
        conv_layer = []
        dconv_layer = []

        # Asymmetric spatial padding before the network (depth +1 front,
        # height +1 each side).
        output = tf.pad(_input, np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]), name='pad_1')

        output = conv_bn_prelu(output, output_channels=8, kernel_size=3, stride=1,
                               is_training=self.is_training, name='conv_1') # 104x200x160 8
        conv_layer.append(output)

        for block_num in range(1, self.cont_block_num + 1):
            with tf.variable_scope('contract_block_%d' % block_num):
                output = self._contracting_block(block_num, output)
                conv_layer.append(output)

        for block_num in range(4, self.expand_block_num + 4):
            with tf.variable_scope('expand_block_%d' % block_num):
                # conv_layer[2 - block_num] indexes the matching encoder
                # skip tensor from the end of the list.
                output = self._expanding_block(block_num, output, conv_layer[2 - block_num])
                dconv_layer.append(output)

        '''auxiliary prediction'''
        # forth level
        auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0], output_feature=1, kernel_size=1,
                                     stride=1, use_bias=True, name='auxiliary3_prob_4x')
        auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x, output_channels=1,
                                      name='auxiliary3_prob_2x')
        auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x, output_channels=1,
                                      name='auxiliary3_prob_1x')
        # third level
        auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1], output_feature=1, kernel_size=1,
                                     stride=1, use_bias=True, name='auxiliary2_prob_2x')
        # Fixed name: this deconv previously reused the conv's name
        # 'auxiliary2_prob_2x', colliding in the TF name/variable scope.
        auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x, output_channels=1,
                                      name='auxiliary2_prob_1x')
        # second level
        auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2], output_feature=1, kernel_size=1,
                                     stride=1, use_bias=True, name='auxiliary1_prob_1x')

        with tf.variable_scope('last_stage'):
            # Fuse the three decoder outputs coarsest-to-finest: each is
            # projected to 5 channels, added to the upsampled running sum.
            output1 = _conv3d(dconv_layer[0], kernel_size=1, stride=1, output_feature=5, use_bias=True,
                              name='block1_conv1x1')
            output1 = BilinearUpsample3d(output1, up_factor=2)

            output2 = _conv3d(dconv_layer[1], kernel_size=1, stride=1, output_feature=5, use_bias=True,
                               name='block2_conv1x1')
            output2 = tf.add(output1, output2)
            output2 = BilinearUpsample3d(output2, up_factor=2)

            output3 = _conv3d(dconv_layer[2], kernel_size=1, stride=1, output_feature=5, use_bias=True,
                               name='block3_conv1x1')
            output3 = tf.add(output2, output3)

            # Final single-channel classifier.
            output = _conv3d(output3, kernel_size=1, stride=1, output_feature=1, use_bias=True, name='fc_layer')

        return output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x
Example #15
0
    def inference_op(self, _input):
        """Build the encoder/decoder inference graph with deep supervision.

        Pads the input, runs the contracting and expanding blocks, builds
        three auxiliary (deep-supervision) heads from the decoder outputs,
        and fuses the decoder outputs via 1x1x1 convs + deconvs into a
        single-channel final prediction.

        Returns:
            (output, auxiliary1_prob_1x, auxiliary2_prob_1x,
             auxiliary3_prob_1x) — main logits plus the auxiliary logits.
        """
        conv_layer = []
        dconv_layer = []

        # Asymmetric spatial padding before the network (depth +1 front,
        # height +1 each side).
        output = tf.pad(_input,
                        np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0]]),
                        name='pad_1')
        output = conv_bn_prelu(output,
                               output_channels=8,
                               kernel_size=3,
                               stride=1,
                               is_training=self.is_training,
                               name='conv_1')
        conv_layer.append(output)

        for i in range(self.cont_block_num):
            with tf.variable_scope('contract_block_%d' % i):
                output = self._contracting_block(output)
                conv_layer.append(output)

        for i in range(self.expand_block_num):
            with tf.variable_scope('expand_block_%d' % i):
                # conv_layer[2 - i] indexes the matching encoder skip
                # tensor from the end of the list.
                output = self._expanding_block(output, conv_layer[2 - i])
                dconv_layer.append(output)
        '''auxiliary prediction'''
        # forth level
        auxiliary3_prob_4x = _conv3d(inputs=dconv_layer[0],
                                     output_feature=1,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary3_prob_4x')
        auxiliary3_prob_2x = deconv3d(inputs=auxiliary3_prob_4x,
                                      output_channels=1,
                                      name='auxiliary3_prob_2x')
        auxiliary3_prob_1x = deconv3d(inputs=auxiliary3_prob_2x,
                                      output_channels=1,
                                      name='auxiliary3_prob_1x')
        # third level
        auxiliary2_prob_2x = _conv3d(inputs=dconv_layer[1],
                                     output_feature=1,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary2_prob_2x')
        # Fixed name: this deconv previously reused the conv's name
        # 'auxiliary2_prob_2x', colliding in the TF name/variable scope.
        auxiliary2_prob_1x = deconv3d(inputs=auxiliary2_prob_2x,
                                      output_channels=1,
                                      name='auxiliary2_prob_1x')
        # second level
        auxiliary1_prob_1x = _conv3d(inputs=dconv_layer[2],
                                     output_feature=1,
                                     kernel_size=1,
                                     stride=1,
                                     use_bias=True,
                                     name='auxiliary1_prob_1x')

        with tf.variable_scope('last_stage'):
            # Fuse the three decoder outputs coarsest-to-finest: each is
            # projected to 32 channels, added to the running sum, which is
            # deconvolved up to the next resolution between additions.
            _output = _conv3d(dconv_layer[0],
                              kernel_size=1,
                              stride=1,
                              output_feature=32,
                              use_bias=True,
                              name='block1_conv1x1')
            _output = deconv3d(_output,
                               output_channels=int(_output.get_shape()[-1]),
                               name='block1_deconv')

            _output2 = _conv3d(dconv_layer[1],
                               kernel_size=1,
                               stride=1,
                               output_feature=32,
                               use_bias=True,
                               name='block2_conv1x1')
            _output = tf.add(_output, _output2)
            _output = deconv3d(_output,
                               output_channels=int(_output.get_shape()[-1]),
                               name='block2_deconv')

            _output3 = _conv3d(dconv_layer[2],
                               kernel_size=1,
                               stride=1,
                               output_feature=32,
                               use_bias=True,
                               name='block3_conv1x1')
            output = tf.add(_output, _output3)

            # Final single-channel classifier.
            output = _conv3d(output,
                             kernel_size=1,
                             stride=1,
                             output_feature=1,
                             use_bias=True,
                             name='fc_layer')

        return output, auxiliary1_prob_1x, auxiliary2_prob_1x, auxiliary3_prob_1x