Example #1
0
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        # ADDED to prevent channel dropout at inference: force the keep
        # probability to 1 when not training.
        if not is_training:
            hp['p_channels_selected'] = 1
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        # 10-channel intermediate segmentation features; the final 2-channel
        # output is produced by the ADDED 2-D head below.
        segmentation = ConvolutionalLayer(
            10, kernel_size=hp['final_kernel'], with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        # seg_summary = tf.to_float(tf.expand_dims(tf.argmax(segmentation, -1), -1)) * (255. / self.num_classes - 1)
        # ADDED: 2-D head -- max-pool over the entire W axis, drop the singleton
        # dimension, then refine the resulting [N, D, H, C] map with two 2-D
        # convolutions.
        k = segmentation.get_shape().as_list()  # [N, D, H, W, C]
        segmentation = tf.nn.max_pool3d(segmentation, [1, 1, 1, k[3], 1],
                                        [1, 1, 1, 1, 1], 'VALID',
                                        data_format='NDHWC')
        segmentation = tf.reshape(segmentation, [k[0], k[1], k[2], k[-1]])
        # First 2-D refinement convolution.
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=10,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')
        # Final 2-D convolution producing the 2-channel output.
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=2,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')

        # End of ADDED 2-D head.
        # segmentation = tf.transpose(segmentation, [0, 3, 1, 2])  # optional NCHW layout

        return segmentation
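
Note on Example #1: the ADDED head converts the 5-D NDHWC activation into a 2-D
feature map before the final convolutions. The following standalone sketch only
traces those shapes; it assumes TensorFlow 1.x and hypothetical tensor sizes,
and is not part of the network code above.

import tensorflow as tf

x = tf.placeholder(tf.float32, [2, 24, 24, 24, 10])  # hypothetical [N, D, H, W, C]
k = x.get_shape().as_list()

# Max-pool over the full W extent with VALID padding -> [N, D, H, 1, C].
pooled = tf.nn.max_pool3d(x, [1, 1, 1, k[3], 1], [1, 1, 1, 1, 1],
                          'VALID', data_format='NDHWC')

# Drop the singleton W axis, leaving a 2-D feature map per sample: [N, D, H, C].
flat = tf.reshape(pooled, [k[0], k[1], k[2], k[-1]])

print(pooled.get_shape().as_list())  # [2, 24, 24, 1, 10]
print(flat.get_shape().as_list())    # [2, 24, 24, 10]
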
Example #2
0
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            self.num_classes,
            kernel_size=hp['final_kernel'],
            with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        # Grey-scale rendering of the argmax labels for the image summary.
        seg_summary = tf.to_float(
            tf.expand_dims(tf.argmax(segmentation, -1),
                           -1)) * (255. / self.num_classes - 1)
        # Normalise the input volume to roughly [0, 255] (mean +/- 2 std) and
        # log an axial montage of the image next to the segmentation.
        m, v = tf.nn.moments(input_tensor, axes=[1, 2, 3], keep_dims=True)
        img_summary = tf.minimum(
            255.,
            tf.maximum(0., (tf.to_float(input_tensor - m) /
                            (tf.sqrt(v) * 2.) + 1.) * 127.))
        image3_axial('imgseg', tf.concat([img_summary, seg_summary], 1), 5,
                     [tf.GraphKeys.SUMMARIES])
        return segmentation
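
Note on Example #2: the summary branch rescales the input volume so that the
per-volume mean maps to mid-grey and +/- 2 standard deviations map to the ends
of the [0, 255] range before it is tiled next to the segmentation. A quick
plain-Python check of that mapping, using assumed statistics rather than values
from the source:

# Illustrative only: hypothetical per-volume mean and standard deviation.
m, std = 100.0, 20.0

def to_grey(x):
    # Same formula as the img_summary expression above, clipped to [0, 255].
    return min(255., max(0., ((x - m) / (2. * std) + 1.) * 127.))

print(to_grey(m))            # 127.0 -> mean maps to mid-grey
print(to_grey(m + 2 * std))  # 254.0 -> +2 std maps near white
print(to_grey(m - 2 * std))  # 0.0   -> -2 std clips to black
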