Example #1
    def layer_op(self, images, is_training, layer_id=-1):
        # image_size is defined as the largest context, then:
        #   downsampled path size: image_size / d_factor
        #   downsampled path output: image_size / d_factor - 16

        # to make sure same size of feature maps from both pathways:
        #   normal path size: (image_size / d_factor - 16) * d_factor + 16
        #   normal path output: (image_size / d_factor - 16) * d_factor
        print('deepmedic is running')
        # where 16 is fixed by the receptive field of the conv layers
        # TODO: make sure label_size = (image_size / d_factor - 16) * d_factor

        # image_size has to be an odd number, divisible by d_factor, and
        # smaller than the smallest spatial size of the input volumes

        # label_size should be (image_size / d_factor - 16) * d_factor

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0)
        # to make the crop centered
        assert layer_util.check_spatial_dims(
            images, lambda x: x % 2 == 1)
        # required by the receptive field of the conv stack
        assert layer_util.check_spatial_dims(
            images, lambda x: x > self.d_factor * 16)
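
        # worked example (assuming d_factor = 3, matching the 57x57x57 input
        # in the comments below):
        #   downsampled path: 57 / 3 = 19, conv output 19 - 16 = 3
        #   normal path: crop to 3 * 3 + 16 = 25, conv output 3 * 3 = 9
        #   after 3x upsampling, both pathways emit 9x9x9 feature maps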

        # crop 25x25x25 from 57x57x57
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        print(crop_op)

        # downsample 19x19x19 from 57x57x57
        downsample_op = DownSampleLayer(func='CONSTANT',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor,
                                        padding='VALID',
                                        name='downsample_input')
        downsample_path = downsample_op(images)
        print(downsample_op)

        # convolutions for both pathways
        for n_features in self.conv_features:
            # normal pathway convolutions
            conv_path_1 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            normal_path = conv_path_1(normal_path, is_training)
            print(conv_path_1)

            # downsampled pathway convolutions
            conv_path_2 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            downsample_path = conv_path_2(downsample_path, is_training)
            print(conv_path_2)

        # upsampling the downsampled pathway
        downsample_path = UpSampleLayer('REPLICATE',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor)(downsample_path)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path,
                                                   downsample_path)

        # 1x1x1 convolution layers (fully-connected equivalent)
        for n_features in self.fc_features:
            conv_fc = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = conv_fc(output_tensor, is_training)
            print(conv_fc)

        return output_tensor
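
For context, the fixed border of 16 voxels in the comments above follows from the convolution stack: each VALID 3x3x3 convolution trims 2 voxels per spatial dimension, and the DeepMedic default stacks eight such layers (assumed here, i.e. len(self.conv_features) == 8). A minimal sanity check with a hypothetical helper:

def valid_conv_output_size(input_size, num_convs=8, kernel_size=3):
    # each VALID convolution trims (kernel_size - 1) voxels per dimension
    return input_size - num_convs * (kernel_size - 1)

assert valid_conv_output_size(25) == 9  # normal pathway: 25 -> 9
assert valid_conv_output_size(19) == 3  # downsampled pathway: 19 -> 3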
Example #2
File: unet2d5.py  Project: taigw/Demic
    def layer_op(self, images, is_training, bn_momentum=0.9, layer_id=-1):
        # in-plane size should be divisible by 16 and through-plane size by 2
        # (three 2x in-plane poolings plus one 2x pooling in all dimensions)

        block1 = UNetBlock((self.n_features[0], self.n_features[0]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B1')

        block2 = UNetBlock((self.n_features[1], self.n_features[1]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B2')

        block3 = UNetBlock((self.n_features[2], self.n_features[2]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B3')

        block4 = UNetBlock((self.n_features[3], self.n_features[3]),
                           ((3, 3, 3), (3, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B4')

        block5 = UNetBlock((self.n_features[4], self.n_features[4]),
                           ((3, 3, 3), (3, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B5')

        block6 = UNetBlock((self.n_features[3], self.n_features[3]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B6')

        block7 = UNetBlock((self.n_features[2], self.n_features[2]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B7')

        block8 = UNetBlock((self.n_features[1], self.n_features[1]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B8')

        block9 = UNetBlock((self.n_features[0], self.n_features[0]),
                           ((1, 3, 3), (1, 3, 3)),
                           w_initializer=self.initializers['w'],
                           w_regularizer=self.regularizers['w'],
                           acti_func=self.acti_func,
                           name='B9')

        conv = ConvLayer(n_output_chns=self.num_classes,
                         kernel_size=(1, 1, 1),
                         w_initializer=self.initializers['w'],
                         w_regularizer=self.regularizers['w'],
                         with_bias=True,
                         name='conv')
        down1 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down1')
        down2 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down2')
        down3 = DownSampleLayer('MAX',
                                kernel_size=(1, 2, 2),
                                stride=(1, 2, 2),
                                name='down3')
        down4 = DownSampleLayer('MAX',
                                kernel_size=(2, 2, 2),
                                stride=(2, 2, 2),
                                name='down4')

        up1 = DeconvolutionalLayer(n_output_chns=self.n_features[3],
                                   kernel_size=(2, 2, 2),
                                   stride=(2, 2, 2),
                                   name='up1')
        up2 = DeconvolutionalLayer(n_output_chns=self.n_features[2],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up2')
        up3 = DeconvolutionalLayer(n_output_chns=self.n_features[1],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up3')
        up4 = DeconvolutionalLayer(n_output_chns=self.n_features[0],
                                   kernel_size=(1, 2, 2),
                                   stride=(1, 2, 2),
                                   name='up4')

        centra_slice = TensorSliceLayer(margin=self.margin)
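        # forward pass: encoder blocks B1-B5 with pooling, then decoder
        # blocks B6-B9 with deconvolution and skip connections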
        f1 = block1(images, is_training)
        d1 = down1(f1)
        f2 = block2(d1, is_training)
        d2 = down2(f2)
        f3 = block3(d2, is_training)
        d3 = down3(f3)
        f4 = block4(d3, is_training)
        d4 = down4(f4)
        f5 = block5(d4, is_training)
        # dropout added relative to the original architecture
        f5 = tf.nn.dropout(f5, self.dropout)

        f5up = up1(f5, is_training)
        f4cat = tf.concat((f4, f5up), axis=-1)
        f6 = block6(f4cat, is_training)
        # dropout added relative to the original architecture
        f6 = tf.nn.dropout(f6, self.dropout)

        f6up = up2(f6, is_training)
        f3cat = tf.concat((f3, f6up), axis=-1)
        f7 = block7(f3cat, is_training)
        # dropout added relative to the original architecture
        f7 = tf.nn.dropout(f7, self.dropout)

        f7up = up3(f7, is_training)
        f2cat = tf.concat((f2, f7up), axis=-1)
        f8 = block8(f2cat, is_training)
        # dropout added relative to the original architecture
        f8 = tf.nn.dropout(f8, self.dropout)

        f8up = up4(f8, is_training)
        f1cat = tf.concat((f1, f8up), axis=-1)
        f9 = block9(f1cat, is_training)
        # dropout added relative to the original architecture
        f9 = tf.nn.dropout(f9, self.dropout)
        output = conv(f9)

        if self.margin > 0:
            output = centra_slice(output)
        return output
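
The divisibility requirement noted at the top of the method can be checked by tracing spatial shapes through the four pooling steps. A minimal sketch, assuming an input with through-plane size 8 and in-plane size 64 (sizes chosen for illustration; trace_pool_shapes is a hypothetical helper):

def trace_pool_shapes(d, h, w):
    # down1-down3 pool in-plane only; down4 pools all three dimensions
    shapes = [(d, h, w)]
    for kd, kh, kw in [(1, 2, 2), (1, 2, 2), (1, 2, 2), (2, 2, 2)]:
        d, h, w = d // kd, h // kh, w // kw
        shapes.append((d, h, w))
    return shapes

print(trace_pool_shapes(8, 64, 64))
# [(8, 64, 64), (8, 32, 32), (8, 16, 16), (8, 8, 8), (4, 4, 4)]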
Example #3
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 **unused_kwargs):
        """

        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: additional keyword arguments, not in use
        :return: fused prediction from multiple scales
        """
        layer_instances = []
        scores_instances = []
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=self.num_features[0],
            feature_normalization='batch',
            kernel_size=3,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='conv_1_1')
        flow = first_conv_layer(input_tensor, is_training)
        layer_instances.append((first_conv_layer, flow))

        # SCALE 1
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[0]):
                res_block = HighResBlock(self.num_features[0],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_1', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale1 = ScoreLayer(
            num_features=self.num_fea_score_layers[0],
            num_classes=self.num_classes)
        score_1 = score_layer_scale1(flow, is_training)
        scores_instances.append(score_1)
        # if is_training:
        #     loss_s1 = WGDL(score_1, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s1/num_scales)

        # SCALE 2
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[1]):
                res_block = HighResBlock(self.num_features[1],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_2', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale2 = ScoreLayer(
            num_features=self.num_fea_score_layers[1],
            num_classes=self.num_classes)
        score_2 = score_layer_scale2(flow, is_training)

        # score_2 = self.score_layer(flow, self.num_fea_score_layers[1])
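        # scales 1 and 2 operate at full resolution, so no upsampling needed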
        up_score_2 = score_2
        scores_instances.append(up_score_2)
        # if is_training:
        #     loss_s2 =  self.WGDL(score_2, labels)
        #     # loss_s2 = self.new_dice_loss(score_2, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s2/num_scales)

        # SCALE 3
        # downsampling factor = 2
        downsample_scale3 = DownSampleLayer(func='AVG',
                                            kernel_size=2,
                                            stride=2)
        flow = downsample_scale3(flow)
        layer_instances.append((downsample_scale3, flow))
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(self.num_res_blocks[2]):
                res_block = HighResBlock(self.num_features[2],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_3', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale3 = ScoreLayer(
            num_features=self.num_fea_score_layers[2],
            num_classes=self.num_classes)
        score_3 = score_layer_scale3(flow, is_training)

        upsample_indep_scale3 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=2,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_3 = upsample_indep_scale3(score_3)
        scores_instances.append(up_score_3)

        # up_score_3 = self.feature_indep_upsample_conv(score_3, factor=2)
        # if is_training:
        #     loss_s3 = self.WGDL(up_score_3, labels)
        #     # loss_s3 = self.new_dice_loss(up_score_3, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s3/num_scales)

        # SCALE 4
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(self.num_res_blocks[3]):
                res_block = HighResBlock(self.num_features[3],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % ('res_4', j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        score_layer_scale4 = ScoreLayer(
            num_features=self.num_fea_score_layers[3],
            num_classes=self.num_classes)
        score_4 = score_layer_scale4(flow, is_training)

        upsample_indep_scale4 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=1,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        up_score_4 = upsample_indep_scale4(score_4)
        scores_instances.append(up_score_4)

        # if is_training:
        #     loss_s4 = self.WGDL(up_score_4, labels)
        #     # loss_s4 = self.new_dice_loss(up_score_4, labels)
        #     tf.add_to_collection('multiscale_loss', loss_s4/num_scales)

        # FUSED SCALES
        merge_layer = MergeLayer('WEIGHTED_AVERAGE')
        soft_scores = [tf.nn.softmax(s) for s in scores_instances]
        fused_score = merge_layer(soft_scores)
        scores_instances.append(fused_score)
        if is_training:
            return scores_instances
        return fused_score
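
The final fusion averages per-scale softmax maps. Below is a minimal NumPy sketch of the same idea with uniform weights (the learned weighting inside MergeLayer('WEIGHTED_AVERAGE') is not reproduced here, and fuse_scores is a hypothetical helper):

import numpy as np

def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

def fuse_scores(score_maps):
    # average the per-scale class probabilities (uniform weights)
    return np.mean([softmax(s) for s in score_maps], axis=0)

scores = [np.random.randn(1, 32, 32, 32, 2) for _ in range(3)]
fused = fuse_scores(scores)  # shape (1, 32, 32, 32, 2), class probs sum to 1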