# Example #1 (score: 0)
    def _test_upsample_shape(self, rank, param_dict, output_shape):
        """Build an ``UpSampleLayer`` from *param_dict*, run it on a generated
        2D or 3D input, and assert the output shape matches *output_shape*.

        :param rank: spatial rank of the test input; must be 2 or 3
        :param param_dict: keyword arguments forwarded to ``UpSampleLayer``
        :param output_shape: expected shape of the upsampled output
        :raises ValueError: if ``rank`` is neither 2 nor 3
        """
        if rank == 3:
            input_data = self.get_3d_input()
        elif rank == 2:
            input_data = self.get_2d_input()
        else:
            # Previously any other rank fell through and crashed later with
            # an UnboundLocalError on input_data; fail fast and clearly.
            raise ValueError('rank must be 2 or 3, got {}'.format(rank))

        upsample_layer = UpSampleLayer(**param_dict)
        output_data = upsample_layer(input_data)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output_data)
            self.assertAllClose(output_shape, output.shape)
# Example #2 (score: 0)
    def layer_op(self, codes, is_training):
        """Decode latent *codes* back into feature-map space.

        Runs the fully-connected decoder stack, reshapes the flat activations
        to ``self.downsampled_shape``, then applies the transposed-convolution
        decoder stack, upsampling between layers according to
        ``self.upsampling_mode`` ('DECONV', 'CHANNELWISE_DECONV' or
        'REPLICATE').

        :param codes: tensor of latent codes (batch on the first axis)
        :param is_training: boolean, True if network is in training mode
        :return: tensor, decoded output
        """
        # Define the decoding fully-connected layers
        decoders_fc = []
        for i, fc_chns in enumerate(self.layer_sizes_decoder):
            decoders_fc.append(FullyConnectedLayer(
                n_output_chns=fc_chns,
                with_bias=True,
                with_bn=True,
                acti_func=self.acti_func_decoder[i],
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='decoder_fc_{}'.format(fc_chns)))
            print(decoders_fc[-1])

        # Define the decoding convolutional layers
        decoders_cnn = []
        decoders_upsamplers = []
        for i, out_chns in enumerate(self.trans_conv_output_channels):
            if self.upsampling_mode == 'DECONV':
                unpool = self.trans_conv_unpooling_factors[i]
                decoders_upsamplers.append(DeconvolutionalLayer(
                    n_output_chns=out_chns,
                    kernel_size=unpool,
                    stride=unpool,
                    padding='SAME',
                    with_bias=True,
                    with_bn=True,
                    w_initializer=self.initializers['w'],
                    w_regularizer=None,
                    acti_func=None,
                    name='decoder_upsampler_{}_{}'.format(unpool, unpool)))
                print(decoders_upsamplers[-1])

            decoders_cnn.append(DeconvolutionalLayer(
                n_output_chns=out_chns,
                kernel_size=self.trans_conv_kernel_sizes[i],
                stride=1,
                padding='SAME',
                with_bias=True,
                with_bn=True,
                # NOTE: batch-norm is applied on every layer, including the
                # final output layer; an earlier variant disabled it there.
                w_initializer=self.initializers['w'],
                w_regularizer=None,
                acti_func=self.acti_func_trans_conv[i],
                name='decoder_trans_conv_{}_{}'.format(
                    self.trans_conv_kernel_sizes[i], out_chns)))
            print(decoders_cnn[-1])

        # Fully-connected decoder layers
        flow = codes
        for fc_layer in decoders_fc:
            flow = fc_layer(flow, is_training)

        # Reconstitute the feature maps
        flow = tf.reshape(flow, [-1] + self.downsampled_shape)

        # Convolutional decoder layers, upsampling between stages
        for i in range(len(self.trans_conv_output_channels)):
            if self.upsampling_mode == 'DECONV':
                flow = decoders_upsamplers[i](flow, is_training)
            elif self.upsampling_mode in ('CHANNELWISE_DECONV', 'REPLICATE'):
                # Both modes take the same arguments; only the mode string
                # passed to UpSampleLayer differs.
                flow = UpSampleLayer(
                    self.upsampling_mode,
                    kernel_size=self.trans_conv_unpooling_factors[i],
                    stride=self.trans_conv_unpooling_factors[i])(flow)
            flow = decoders_cnn[i](flow, is_training)

        return flow
# Example #3 (score: 0)
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        """Run the multiscale residual network and produce per-scale scores.

        Four scales of dilated HighResBlock groups are applied; scale 3
        downsamples by 2 first, so scores from scales 3 and 4 are upsampled
        back before fusion.  The softmaxed per-scale scores are merged with a
        weighted average.

        :param input_tensor: tensor, input to the network
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :return: list of per-scale scores plus the fused score when training,
            otherwise the fused score only
        """
        layer_instances = []
        scores_instances = []

        def _res_blocks(flow, scale_idx, dilation, name_prefix):
            # Apply self.num_res_blocks[scale_idx] HighResBlocks under the
            # given dilation factor, recording each (layer, output) pair.
            with DilatedTensor(flow, dilation_factor=dilation) as dilated:
                for j in range(self.num_res_blocks[scale_idx]):
                    res_block = HighResBlock(
                        self.num_features[scale_idx],
                        acti_func=self.acti_func,
                        w_initializer=self.initializers['w'],
                        w_regularizer=self.regularizers['w'],
                        name='%s_%d' % (name_prefix, j))
                    dilated.tensor = res_block(dilated.tensor, is_training)
                    layer_instances.append((res_block, dilated.tensor))
            return dilated.tensor

        def _score(flow, scale_idx):
            # Build and apply a ScoreLayer for the given scale.
            score_layer = ScoreLayer(
                num_features=self.num_fea_score_layers[scale_idx],
                num_classes=self.num_classes)
            return score_layer(flow, is_training)

        first_conv_layer = ConvolutionalLayer(
            n_output_chns=self.num_features[0],
            with_bn=True,
            kernel_size=3,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='conv_1_1')
        flow = first_conv_layer(input_tensor, is_training)
        layer_instances.append((first_conv_layer, flow))

        # SCALE 1
        flow = _res_blocks(flow, 0, 1, 'res_1')
        scores_instances.append(_score(flow, 0))

        # SCALE 2
        flow = _res_blocks(flow, 1, 2, 'res_2')
        scores_instances.append(_score(flow, 1))

        # SCALE 3: downsample by a factor of 2 before the residual blocks
        downsample_scale3 = DownSampleLayer(func='AVG',
                                            kernel_size=2,
                                            stride=2)
        flow = downsample_scale3(flow)
        layer_instances.append((downsample_scale3, flow))
        flow = _res_blocks(flow, 2, 1, 'res_3')
        score_3 = _score(flow, 2)
        upsample_indep_scale3 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=2,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        scores_instances.append(upsample_indep_scale3(score_3))

        # SCALE 4
        flow = _res_blocks(flow, 3, 2, 'res_4')
        # Fixed: previously this call passed an extra positional argument
        # (self.num_fea_score_layers[3]) before is_training, inconsistent
        # with every other ScoreLayer invocation in this method.
        score_4 = _score(flow, 3)
        upsample_indep_scale4 = UpSampleLayer(
            func='CHANNELWISE_DECONV',
            kernel_size=1,
            stride=2,
            w_initializer=tf.constant_initializer(1.0, dtype=tf.float32))
        scores_instances.append(upsample_indep_scale4(score_4))

        # FUSED SCALES: weighted average of the softmaxed per-scale scores
        merge_layer = MergeLayer('WEIGHTED_AVERAGE')
        soft_scores = [tf.nn.softmax(s) for s in scores_instances]
        fused_score = merge_layer(soft_scores)
        scores_instances.append(fused_score)
        if is_training:
            return scores_instances
        return fused_score
# Example #4 (score: 0)
    def layer_op(self, images, is_training, layer_id=-1, **unused_kwargs):
        """Run the two-pathway network on *images*.

        A fine pathway sees a centre crop of the input at full resolution; a
        coarse pathway sees the whole input downsampled by ``d_factor``.  Both
        pathways pass through matching stacks of valid 3x3x3 convolutions,
        the coarse output is replicated back up, the two are concatenated,
        and 1x1x1 convolutions produce the output.

        :param images: tensor, input to the network, size should be divisible by d_factor
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs:
        :return: tensor, network output
        """
        # Spatial-size bookkeeping (the constant 16 is fixed by the
        # receptive field of the conv stacks):
        #   coarse path output: image_size / d_factor - 16
        #   fine path input:    (image_size / d_factor - 16) * d_factor + 16
        #   fine path output:   (image_size / d_factor - 16) * d_factor
        # image_size must be odd, divisible by d_factor, larger than
        # d_factor * 16, and no bigger than the smallest input volume;
        # label_size should be (image_size/d_factor - 16) * d_factor.

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0))
        assert (layer_util.check_spatial_dims(images, lambda x: x % 2 == 1)
                )  # to make the crop centered
        assert (layer_util.check_spatial_dims(images,
                                              lambda x: x > self.d_factor * 16)
                )  # required by receptive field

        # Fine pathway input: centre crop (e.g. 25^3 from 57^3)
        cropper = CropLayer(border=self.crop_diff, name='cropping_input')
        fine_path = cropper(images)
        print(cropper)

        # Coarse pathway input: downsample (e.g. 19^3 from 57^3)
        pooler = DownSampleLayer(func='CONSTANT',
                                 kernel_size=self.d_factor,
                                 stride=self.d_factor,
                                 padding='VALID',
                                 name='downsample_input')
        coarse_path = pooler(images)
        print(pooler)

        # Matching convolution stacks on both pathways
        for n_features in self.conv_features:
            fine_conv = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            fine_path = fine_conv(fine_path, is_training)
            print(fine_conv)

            coarse_conv = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            coarse_path = coarse_conv(coarse_path, is_training)
            print(coarse_conv)

        # Bring the coarse pathway back to fine resolution by replication
        coarse_path = UpSampleLayer('REPLICATE',
                                    kernel_size=self.d_factor,
                                    stride=self.d_factor)(coarse_path)

        # Fuse the pathways along the channel axis
        output_tensor = ElementwiseLayer('CONCAT')(fine_path, coarse_path)

        # 1x1x1 fully-convolutional head
        for n_features in self.fc_features:
            head_conv = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = head_conv(output_tensor, is_training)
            print(head_conv)

        return output_tensor