Example #1
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        # image_size should be divisible by 4
        assert layer_util.check_spatial_dims(images, lambda x: x % 4 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 21)
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d0')
        pool_1, conv_1 = block_layer(images, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[1], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='d1')
        up_1, _ = block_layer(pool_1, is_training)
        print(block_layer)

        block_layer = UNetBlock(
            'NONE', (self.n_features[1], self.n_features[1], self.num_classes),
            (3, 3),
            with_downsample_branch=True,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='u0')
        crop_layer = CropLayer(border=4, name='crop-8')
        concat_1 = ElementwiseLayer('CONCAT')(crop_layer(conv_1), up_1)
        print(block_layer)

        # for the last layer, upsampling path is not used
        _, output_tensor = block_layer(concat_1, is_training)

        output_conv_op = ConvolutionalLayer(
            n_output_chns=self.num_classes,
            kernel_size=1,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=None,
            name='{}'.format(self.num_classes),
            padding='VALID',
            with_bn=False,
            with_bias=True)
        final_output_tensor = output_conv_op(output_tensor, is_training)
        print(output_conv_op)

        return final_output_tensor
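Every example in this listing opens with a `layer_util.check_spatial_dims` guard. A minimal sketch of what such a helper does, assuming the usual [batch, spatial..., channels] tensor layout (an illustration, not NiftyNet's actual implementation):

def check_spatial_dims(input_tensor, criteria):
    # Apply `criteria` to each spatial dimension and require all to pass,
    # e.g. check_spatial_dims(images, lambda x: x % 4 == 0) as above.
    spatial_dims = input_tensor.shape.as_list()[1:-1]
    return all(criteria(dim) for dim in spatial_dims)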
Example #2
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []
        input_tensor_res = images

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ###
        params = self.layers[1]
        for j in range(params['repeat']):
            conv_layer = ConvolutionalLayer(
                n_output_chns=params['n_features'],
                kernel_size=params['kernel_size'],
                with_bias=True,
                with_bn=False,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%d' % (params['name'], j))
            flow = conv_layer(flow, is_training)
            layer_instances.append((conv_layer, flow))

        ###
        params = self.layers[2]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      with_bias=True,
                                      with_bn=False,
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        output_tensor_res = ElementwiseLayer('SUM')(input_tensor_res, flow)

        # set training properties
        if is_training:
            self._print(layer_instances)
            # return layer_instances[-1][1]
            return output_tensor_res
        # return layer_instances[layer_id][1]
        return output_tensor_res
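The closing `ElementwiseLayer('SUM')` turns the whole stack into a residual block: the raw input is added back onto the convolution output. A minimal TF 1.x sketch of that pattern, using plain `tf.add` instead of the NiftyNet layer:

import tensorflow as tf

def residual_sum(input_tensor, conv_output):
    # Elementwise sum needs identical shapes: the SAME-padded, stride-1
    # convolutions above preserve the spatial size, and the final layer's
    # n_features must match the input's channel count.
    return tf.add(input_tensor, conv_output)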
Example #3
 def __init__(self, input_tensor, dilation_factor):
     assert (layer_util.check_spatial_dims(
         input_tensor, lambda x: x % dilation_factor == 0))
     self._tensor = input_tensor
     self.dilation_factor = dilation_factor
     # parameters to transform input tensor
     self.spatial_rank = layer_util.infer_spatial_rank(self._tensor)
     self.zero_paddings = [[0, 0]] * self.spatial_rank
     self.block_shape = [dilation_factor] * self.spatial_rank
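The attributes stored here are exactly the arguments that `tf.space_to_batch_nd` and `tf.batch_to_space_nd` expect, which is how a dilation context can make ordinary convolutions behave as dilated ones. A hedged sketch of the enter/exit pair built on these attributes (function names are illustrative; the real context manager lives in NiftyNet):

import tensorflow as tf

def dilate(tensor, block_shape, zero_paddings):
    # Moving spatial blocks into the batch dimension makes stride-1
    # convolutions act as dilated convolutions on the original tensor.
    return tf.space_to_batch_nd(tensor, block_shape, zero_paddings)

def undilate(tensor, block_shape, zero_paddings):
    # Inverse transform, applied when leaving the dilated context.
    return tf.batch_to_space_nd(tensor, block_shape, zero_paddings)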
Example #4
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hyper = self.hyperparameters

        # Initialize DenseVNet network layers
        net = self.create_network()

        #
        # Parameter handling
        #

        # Shape and dimension variable shortcuts
        channel_dim = len(input_tensor.shape) - 1
        input_size = input_tensor.shape.as_list()
        spatial_size = input_size[1:-1]
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Quick access to hyperparams
        pkeep = hyper['p_channels_selected']

        # Validate input dimension with dilation rates
        modulo = 2 ** (len(hyper['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        #
        # Augmentation + Downsampling + Initial Layers
        #

        # On the fly data augmentation
        if is_training and hyper['augmentation_scale'] > 0:
            if n_spatial_dims == 2:
                augmentation_class = Affine2DAugmentationLayer
            elif n_spatial_dims == 3:
                augmentation_class = Affine3DAugmentationLayer
            else:
                raise NotImplementedError(
                    'Affine augmentation only supports 2D and 3D images')

            augment_layer = augmentation_class(hyper['augmentation_scale'],
                                               'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        # Variable storing all intermediate results -- VLinks
        all_segmentation_features = []

        # Downsample input to the network
        down_tensor = self.downsample_input(input_tensor, n_spatial_dims)
        downsampled_img = net.initial_bn(down_tensor, is_training=is_training)

        # Add initial downsampled image VLink
        all_segmentation_features.append(downsampled_img)

        # All results should match the downsampled input's shape
        output_shape = downsampled_img.shape.as_list()[1:-1]

        init_features = net.initial_conv(input_tensor, is_training=is_training)

        #
        # Dense VNet Main Block
        #

        # `down` will handle the input of each Dense VNet block
        # Initialize it by stacking downsampled image and initial conv features
        down = tf.concat([downsampled_img, init_features], channel_dim)

        # Process Dense VNet Blocks
        for dblock in net.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down, is_training=is_training, keep_prob=pkeep)

            # Resize skip layer to original shape and add VLink
            skip = image_resize(skip, output_shape)
            all_segmentation_features.append(skip)

        # Concatenate all intermediate skip layers
        inter_results = tf.concat(all_segmentation_features, channel_dim)

        # Initial segmentation output
        seg_output = net.seg_layer(inter_results, is_training=is_training)

        #
        # Dense VNet End - Now postprocess outputs
        #

        # Refine segmentation with prior if any
        if self.architecture_parameters['use_prior']:
            xyz_prior = SpatialPriorBlock([12] * n_spatial_dims, output_shape)
            seg_output += xyz_prior

        # Invert augmentation if any
        if is_training and hyper['augmentation_scale'] > 0:
            inverse_aug = augment_layer.inverse()
            seg_output = inverse_aug(seg_output)

        # Resize output to original size
        seg_output = image_resize(seg_output, spatial_size)

        # Segmentation results
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(seg_output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims+1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, axis=-1, keep_dims=True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))
        if n_spatial_dims == 2:
            tf.summary.image(
                tf.get_default_graph().unique_name('imgseg'),
                tf.concat([img_summary, seg_summary], 1),
                5, [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            # Show summaries
            image3_axial(
                tf.get_default_graph().unique_name('imgseg'),
                tf.concat([img_summary, seg_summary], 1),
                5, [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return seg_output
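The divisibility assert near the top follows from the downsampling depth: with one resolution level per entry of `hyper['dilation_rates']`, every spatial dimension must be divisible by 2 raised to that count. A quick check, assuming three dilation rates:

dilation_rates = [1, 3, 9]                 # assumed example values
modulo = 2 ** len(dilation_rates)          # -> 8
assert all(dim % modulo == 0 for dim in (64, 72, 80))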
Example #5
    def layer_op(self, images, is_training, layer_id=-1):
        assert layer_util.check_spatial_dims(images['T1'],
                                             lambda x: x % 8 == 0)
        assert set(images.keys()).issubset(
            set(MODALITIES)
        ), 'image has to be a dictionary with keys in %s' % (MODALITIES)
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []
        layer_modalities = dict()

        for mod in MODALITIES:

            layer_modalities[mod] = []

            ### first convolution layer BRATS
            params = self.layers[0]
            first_conv_layer = ConvolutionalLayer(
                n_output_chns=params['n_features'],
                kernel_size=params['kernel_size'],
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%s' % (params['name'], mod))

            layer_instances.append((first_conv_layer, ''))
            layer_modalities[mod].append(first_conv_layer)

            params = self.layers[1]
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%s_%d' %
                                         (params['name'], mod, j))
                layer_instances.append((res_block, ''))
                layer_modalities[mod].append(res_block)

        flow = []
        for mod in images.keys():
            flow_mod = layer_modalities[mod][0](images[mod], is_training)
            for lay in layer_modalities[mod][1:]:
                flow_mod = lay(flow_mod, is_training)
            flow.append(flow_mod)

        flow = tf.add_n(flow) / float(len(images))  # average the modality branches

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # set training properties
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
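Each modality gets its own initial convolution and `HighResBlock` stack, and the per-modality outputs are averaged before entering the shared dilated trunk. A sketch of that fusion step, assuming a two-modality case:

import tensorflow as tf

branch_outputs = [tf.zeros([1, 24, 24, 24, 16]) for _ in ('T1', 'T2')]
fused = tf.add_n(branch_outputs) / float(len(branch_outputs))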
Example #6
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.5,
                 **unused_kwargs):
        """

        :param input_tensor: tensor to input to the network, size has to be divisible by 2*dilation_rates
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param keep_prob: double, percentage of nodes to keep for drop-out
        :param unused_kwargs:
        :return: network prediction
        """
        hyperparams = self.hyperparams

        # Validate that dilation rates are compatible with input dimensions
        modulo = 2**(len(hyperparams['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        # Perform on the fly data augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            augment_layer = AffineAugmentationLayer(
                hyperparams['augmentation_scale'], 'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        ###################
        ### Feedforward ###
        ###################

        # Initialize network components
        dense_vnet = self.create_network()

        # Store output feature maps from each component
        feature_maps = []

        # Downsample input to the network
        downsample_layer = DownSampleLayer(func='AVG', kernel_size=3, stride=2)
        downsampled_tensor = downsample_layer(input_tensor)
        bn_layer = BNLayer()
        downsampled_tensor = bn_layer(downsampled_tensor,
                                      is_training=is_training)
        feature_maps.append(downsampled_tensor)

        # All feature maps should match the downsampled tensor's shape
        feature_map_shape = downsampled_tensor.shape.as_list()[1:-1]

        # Prepare initial input to dense_vblocks
        initial_features = dense_vnet.initial_conv(input_tensor,
                                                   is_training=is_training)
        channel_dim = len(input_tensor.shape) - 1
        down = tf.concat([downsampled_tensor, initial_features], channel_dim)

        # Feed downsampled input through dense_vblocks
        for dblock in dense_vnet.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down,
                                is_training=is_training,
                                keep_prob=keep_prob)
            # Resize skip layer to original shape and add to feature maps
            skip = LinearResizeLayer(feature_map_shape)(skip)
            feature_maps.append(skip)

        # Merge feature maps
        all_features = tf.concat(feature_maps, channel_dim)

        # Perform final convolution to segment structures
        output = dense_vnet.final_conv(all_features, is_training=is_training)

        ######################
        ### Postprocessing ###
        ######################

        # Get the number of spatial dimensions of input tensor
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Refine segmentation with prior
        if hyperparams['use_prior']:
            spatial_prior_shape = [hyperparams['prior_size']] * n_spatial_dims
            # Prior shape must be 4 or 5 dim to work with linear_resize layer
            # ie to conform to shape=[batch, X, Y, Z, channels]
            prior_shape = [1] + spatial_prior_shape + [1]
            spatial_prior = SpatialPriorBlock(prior_shape, feature_map_shape)
            output += spatial_prior()

        # Invert augmentation
        if is_training and hyperparams['augmentation_scale'] > 0:
            inverse_aug = augment_layer.inverse()
            output = inverse_aug(output)

        # Resize output to original size
        input_tensor_spatial_size = input_tensor.shape.as_list()[1:-1]
        output = LinearResizeLayer(input_tensor_spatial_size)(output)

        # Segmentation summary
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims + 1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, -1, True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))

        if n_spatial_dims == 2:
            tf.summary.image(tf.get_default_graph().unique_name('imgseg'),
                             tf.concat([img_summary, seg_summary], 1), 5,
                             [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            image3_axial(tf.get_default_graph().unique_name('imgseg'),
                         tf.concat([img_summary, seg_summary], 1), 5,
                         [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return output
Example #7
    def layer_op(self,
                 input_tensor,
                 is_training=True,
                 layer_id=-1,
                 keep_prob=0.5,
                 **unused_kwargs):
        hyper = self.hyperparameters

        # Initialize DenseVNet network layers
        net = self.create_network()

        #
        # Parameter handling
        #

        # Shape and dimension variable shortcuts
        channel_dim = len(input_tensor.shape) - 1
        input_size = input_tensor.shape.as_list()
        spatial_size = input_size[1:-1]
        n_spatial_dims = input_tensor.shape.ndims - 2

        # Validate input dimension with dilation rates
        modulo = 2**(len(hyper['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        #
        # Augmentation + Downsampling + Initial Layers
        #

        # On the fly data augmentation
        augment_layer = None
        if is_training and hyper['augmentation_scale'] > 0:
            augmentation_class = AffineAugmentationLayer
            augment_layer = augmentation_class(hyper['augmentation_scale'],
                                               'LINEAR', 'ZERO')
            input_tensor = augment_layer(input_tensor)

        # Variable storing all intermediate results -- VLinks
        all_segmentation_features = []

        # Downsample input to the network
        ave_downsample_layer = DownSampleLayer(func='AVG',
                                               kernel_size=3,
                                               stride=2)
        down_tensor = ave_downsample_layer(input_tensor)
        downsampled_img = net.initial_bn(down_tensor, is_training=is_training)

        # Add initial downsampled image VLink
        all_segmentation_features.append(downsampled_img)

        # All results should match the downsampled input's shape
        output_shape = downsampled_img.shape.as_list()[1:-1]

        init_features = net.initial_conv(input_tensor, is_training=is_training)

        #
        # Dense VNet Main Block
        #

        # `down` will handle the input of each Dense VNet block
        # Initialize it by stacking downsampled image and initial conv features
        down = tf.concat([downsampled_img, init_features], channel_dim)

        # Process Dense VNet Blocks
        for dblock in net.dense_vblocks:
            # Get skip layer and activation output
            skip, down = dblock(down,
                                is_training=is_training,
                                keep_prob=keep_prob)

            # Resize skip layer to original shape and add VLink
            skip = LinearResizeLayer(output_shape)(skip)
            all_segmentation_features.append(skip)

        # Concatenate all intermediate skip layers
        inter_results = tf.concat(all_segmentation_features, channel_dim)

        # Initial segmentation output
        seg_output = net.seg_layer(inter_results, is_training=is_training)

        #
        # Dense VNet End - Now postprocess outputs
        #

        # Refine segmentation with prior if any
        if self.architecture_parameters['use_prior']:
            xyz_prior = SpatialPriorBlock([12] * n_spatial_dims, output_shape)
            seg_output += xyz_prior

        # Invert augmentation if any
        if is_training and hyper['augmentation_scale'] > 0 \
                and augment_layer is not None:
            inverse_aug = augment_layer.inverse()
            seg_output = inverse_aug(seg_output)

        # Resize output to original size
        seg_output = LinearResizeLayer(spatial_size)(seg_output)

        # Segmentation results
        seg_argmax = tf.to_float(tf.expand_dims(tf.argmax(seg_output, -1), -1))
        seg_summary = seg_argmax * (255. / self.num_classes - 1)

        # Image Summary
        norm_axes = list(range(1, n_spatial_dims + 1))
        mean, var = tf.nn.moments(input_tensor, axes=norm_axes, keep_dims=True)
        timg = tf.to_float(input_tensor - mean) / (tf.sqrt(var) * 2.)
        timg = (timg + 1.) * 127.
        single_channel = tf.reduce_mean(timg, -1, True)
        img_summary = tf.minimum(255., tf.maximum(0., single_channel))
        if n_spatial_dims == 2:
            tf.summary.image(tf.get_default_graph().unique_name('imgseg'),
                             tf.concat([img_summary, seg_summary], 1), 5,
                             [tf.GraphKeys.SUMMARIES])
        elif n_spatial_dims == 3:
            # Show summaries
            image3_axial(tf.get_default_graph().unique_name('imgseg'),
                         tf.concat([img_summary, seg_summary], 1), 5,
                         [tf.GraphKeys.SUMMARIES])
        else:
            raise NotImplementedError(
                'Image Summary only supports 2D and 3D images')

        return seg_output
Example #8
    def layer_op(self,
                 fixed_image,
                 moving_image,
                 base_grid=None,
                 is_training=True,
                 **unused_kwargs):
        """

        :param fixed_image:
        :param moving_image:
        :param base_grid:
        :param is_training:
        :return: estimated dense displacement fields
        """

        spatial_rank = infer_spatial_rank(fixed_image)
        spatial_shape = fixed_image.get_shape().as_list()[1:-1]
        check_spatial_dims(fixed_image, lambda x: x % 16 == 0)

        #  resize the moving image to match the fixed
        moving_image = Resize(spatial_shape)(moving_image)
        img = tf.concat([moving_image, fixed_image], axis=-1)
        down_res_0, conv_0_0, _ = \
            DownRes(self.fea[0], kernel_size=7, **self.down_res_param)(img, is_training)
        down_res_1, conv_0_1, _ = \
            DownRes(self.fea[1], **self.down_res_param)(down_res_0, is_training)
        down_res_2, conv_0_2, _ = \
            DownRes(self.fea[2], **self.down_res_param)(down_res_1, is_training)
        down_res_3, conv_0_3, _ = \
            DownRes(self.fea[3], **self.down_res_param)(down_res_2, is_training)

        conv_4 = Conv(n_output_chns=self.fea[4],
                      kernel_size=self.k_conv,
                      **self.down_res_param)(down_res_3, is_training)

        up_res_0 = UpRes(self.fea[3], **self.up_res_param)(conv_4, conv_0_3,
                                                           is_training)
        up_res_1 = UpRes(self.fea[2], **self.up_res_param)(up_res_0, conv_0_2,
                                                           is_training)
        up_res_2 = UpRes(self.fea[1], **self.up_res_param)(up_res_1, conv_0_1,
                                                           is_training)
        up_res_3 = UpRes(self.fea[0], **self.up_res_param)(up_res_2, conv_0_0,
                                                           is_training)

        if self.multi_scale_fusion:
            output_list = [up_res_3, up_res_2, up_res_1, up_res_0, conv_4]
        else:
            output_list = [up_res_3]

        # converting all output layers to displacement fields
        dense_fields = []
        for scale_out in output_list:
            field = Conv(n_output_chns=spatial_rank,
                         kernel_size=self.k_conv,
                         with_bias=True,
                         with_bn=False,
                         acti_func=None,
                         **self.disp_param)(scale_out)
            resized_field = Resize(new_size=spatial_shape)(field)
            dense_fields.append(resized_field)

        if base_grid is None:
            # adding a reference grid if it doesn't exist
            in_spatial_size = [None] * spatial_rank
            base_grid = _create_affine_features(output_shape=spatial_shape,
                                                source_shape=in_spatial_size)
            base_grid = np.asarray(base_grid[:-1])
            base_grid = np.reshape(base_grid.T,
                                   [-1] + spatial_shape + [spatial_rank])
            base_grid = tf.constant(base_grid, dtype=resized_field.dtype)

        if self.multi_scale_fusion and len(dense_fields) > 1:
            dense_field = tf.reduce_sum(dense_fields, axis=0)
        else:
            dense_field = dense_fields[0]

        # TODO filtering
        if self.smoothing_func is not None:
            dense_field = self.smoothing_func(dense_field, spatial_rank)

        tf.add_to_collection('bending_energy',
                             _computing_bending_energy(dense_field))
        tf.add_to_collection('gradient_norm',
                             _computing_gradient_norm(dense_field))

        dense_field = dense_field + base_grid
        return dense_field
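When `base_grid` is not supplied, the network adds an identity sampling grid to the predicted displacements, so the return value is a dense sampling grid in absolute voxel coordinates. A minimal numpy sketch of such an identity grid (the actual `_create_affine_features` helper differs in detail):

import numpy as np

def identity_grid(spatial_shape):
    # One coordinate channel per spatial axis; output shape [1, X, Y, Z, rank].
    ranges = [np.arange(s, dtype=np.float32) for s in spatial_shape]
    grid = np.stack(np.meshgrid(*ranges, indexing='ij'), axis=-1)
    return grid[np.newaxis, ...]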
Example #9
    def layer_op(self, images, is_training, layer_id=-1):
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % 8 == 0))
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(
                    params['n_features'],
                    params['kernels'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(
                    params['n_features'],
                    params['kernels'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(
                    params['n_features'],
                    params['kernels'],
                    acti_func=self.acti_func,
                    w_initializer=self.initializers['w'],
                    w_regularizer=self.regularizers['w'],
                    name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 3x3x3 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[6]
        fc_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=None,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # set training properties
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
Example #10
    def layer_op(self, thru_tensor, is_training=True, **unused_kwargs):
        """

        :param thru_tensor: the input is modified in-place as it goes through the network
        :param is_training:
        :param unused_kwargs:
        :return:
        """
        # image_size should be divisible by 16 because of four 2x2x2 max-pooling steps
        assert layer_util.check_spatial_dims(thru_tensor,
                                             lambda x: x % 16 == 0)
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[0]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L1')
        thru_tensor, conv_1 = block_layer(thru_tensor, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[1], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L2')
        thru_tensor, conv_2 = block_layer(thru_tensor, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[2], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L3')
        thru_tensor, conv_3 = block_layer(thru_tensor, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[3], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L4')
        thru_tensor, conv_4 = block_layer(thru_tensor, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[4], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='bottom')
        thru_tensor, _ = block_layer(thru_tensor, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R4')
        concat_4 = ElementwiseLayer('CONCAT')(conv_4, thru_tensor)
        thru_tensor, _ = block_layer(concat_4, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[2], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R3')
        concat_3 = ElementwiseLayer('CONCAT')(conv_3, thru_tensor)
        thru_tensor, _ = block_layer(concat_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[1], self.n_features[0]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R2')
        concat_2 = ElementwiseLayer('CONCAT')(conv_2, thru_tensor)
        thru_tensor, _ = block_layer(concat_2, is_training)
        print(block_layer)

        block_layer = UNetBlock(
            'NONE', (self.n_features[0], self.n_features[0], self.num_classes),
            (3, 3, 1),
            with_downsample_branch=False,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='R1_FC')

        concat_1 = ElementwiseLayer('CONCAT')(conv_1, thru_tensor)
        thru_tensor, _ = block_layer(concat_1, is_training)
        print(block_layer)

        return thru_tensor
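Four `DOWNSAMPLE` blocks halve each spatial dimension, which is where the divisible-by-16 assertion comes from. A quick sanity check of the size flow, assuming a 96-voxel input:

size = 96                      # assumed input size, divisible by 16
for level in range(4):         # blocks L1 .. L4
    assert size % 2 == 0
    size //= 2                 # 96 -> 48 -> 24 -> 12 -> 6
print(size)                    # 6 voxels per side at the bottom of the U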
Example #11
    def layer_op(self, images, is_training, layer_id=-1):
        # image_size is defined as the largest context, then:
        #   downsampled path size: image_size / d_factor
        #   downsampled path output: image_size / d_factor - 16

        # to make sure same size of feature maps from both pathways:
        #   normal path size: (image_size / d_factor - 16) * d_factor + 16
        #   normal path output: (image_size / d_factor - 16) * d_factor

        # where 16 is fixed by the receptive field of conv layers
        # TODO: make sure label_size = image_size/d_factor - 16

        # image_size has to be an odd number and divisible by 3 and
        # smaller than the smallest image size of the input volumes

        # label_size should be (image_size/d_factor - 16) * d_factor

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0))
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % 2 == 1))  # to make the crop centered
        assert (layer_util.check_spatial_dims(
            images,
            lambda x: x > self.d_factor * 16))  # required by receptive field

        # crop 25x25x25 from 57x57x57
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        print(crop_op)

        # downsample 19x19x19 from 57x57x57
        downsample_op = DownSampleLayer(func='CONSTANT',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor,
                                        padding='VALID',
                                        name='downsample_input')
        downsample_path = downsample_op(images)
        print(downsample_op)

        # convolutions for both pathways
        for n_features in self.conv_features:
            # normal pathway convolutions
            conv_path_1 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            normal_path = conv_path_1(normal_path, is_training)
            print(conv_path_1)

            # downsampled pathway convolutions
            conv_path_2 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            downsample_path = conv_path_2(downsample_path, is_training)
            print(conv_path_2)

        # upsampling the downsampled pathway
        downsample_path = UpSampleLayer('REPLICATE',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor)(downsample_path)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path, downsample_path)

        # 1x1x1 convolution layer
        for n_features in self.fc_features:
            conv_fc = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = conv_fc(output_tensor, is_training)
            print(conv_fc)

        return output_tensor
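The size comments at the top of this example can be verified numerically. With the 57-voxel input and the `d_factor = 3` implied by the comments, the two pathways meet exactly as described:

image_size, d_factor = 57, 3             # values from the comments above
down_in = image_size // d_factor         # 19: downsampled pathway input
down_out = down_in - 16                  # 3:  after the 16-voxel receptive field
normal_in = down_out * d_factor + 16     # 25: cropped normal pathway input
label_size = down_out * d_factor         # 9:  expected label/output size
print(down_in, normal_in, label_size)    # 19 25 9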
Example #12
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))

        layer_instances = []

        images2 = CubicResizeLayer((16, 16, 16))(images)

        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images2, is_training)
        layer_instances.append((first_conv_layer, flow))

        params = self.layers[1]
        conv_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                        kernel_size=params['kernel_size'],
                                        with_bias=True,
                                        with_bn=False,
                                        acti_func=self.acti_func,
                                        w_initializer=self.initializers['w'],
                                        w_regularizer=self.regularizers['w'],
                                        name=params['name'])
        flow = conv_layer(flow, is_training)
        layer_instances.append((conv_layer, flow))

        params = self.layers[2]
        for j in range(params['repeat']):
            conv_layer = ConvolutionalLayer(
                n_output_chns=params['n_features'],
                kernel_size=params['kernel_size'],
                with_bias=True,
                with_bn=False,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='%s_%d' % (params['name'], j))
            flow = conv_layer(flow, is_training)
            layer_instances.append((conv_layer, flow))

        params = self.layers[3]
        conv_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                        kernel_size=params['kernel_size'],
                                        with_bias=True,
                                        with_bn=False,
                                        acti_func=self.acti_func,
                                        w_initializer=self.initializers['w'],
                                        w_regularizer=self.regularizers['w'],
                                        name=params['name'])
        flow = conv_layer(flow, is_training)
        layer_instances.append((conv_layer, flow))

        params = self.layers[4]
        deconv_layer = DeconvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            stride=2,
            padding='SAME',
            with_bias=True,
            with_bn=False,
            acti_func=None,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = deconv_layer(flow, is_training)
        layer_instances.append((deconv_layer, flow))

        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
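The closing `DeconvolutionalLayer` with `stride=2` and SAME padding doubles each spatial dimension of the 16x16x16 volume produced by `CubicResizeLayer`. A one-line check of that output size:

in_size, stride = 16, 2        # resized volume and deconv stride from above
out_size = in_size * stride    # 32: SAME-padded transposed-conv output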
Example #13
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        # prevent channel drop-out at inference time
        if not is_training:
            hp['p_channels_selected'] = 1
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            10, kernel_size=hp['final_kernel'], with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        #seg_summary = tf.to_float(tf.expand_dims(tf.argmax(segmentation,-1),-1)) * (255./self.num_classes-1)
        ###########
        # =============================================================================
        k = segmentation.get_shape().as_list()
        segmentation = tf.nn.max_pool3d(segmentation, [1, 1, 1, k[3], 1],
                                        [1, 1, 1, 1, 1],
                                        'VALID',
                                        data_format='NDHWC')
        segmentation = tf.reshape(segmentation, [k[0], k[1], k[2], k[-1]])
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=10,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')
        segmentation = tf.layers.conv2d(
            segmentation,
            filters=2,
            kernel_size=(3, 3),
            strides=1,
            padding='SAME',
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=tf.nn.relu,
            data_format='channels_last')

        # =============================================================================
        ###########
        #segmentation = tf.transpose(segmentation,[0,3,1,2])

        return segmentation
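The block between the `====` markers collapses the third spatial axis by max-pooling over its full extent and then reshapes the result to 4-D, so standard 2-D convolutions can finish the job. A shape-only sketch under assumed sizes:

import tensorflow as tf

x = tf.zeros([1, 64, 64, 24, 10])            # assumed NDHWC segmentation
k = x.get_shape().as_list()
pooled = tf.nn.max_pool3d(x, [1, 1, 1, k[3], 1], [1, 1, 1, 1, 1], 'VALID')
flat = tf.reshape(pooled, [k[0], k[1], k[2], k[-1]])   # -> [1, 64, 64, 10]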
Example #14
    def layer_op(self, images, is_training, layer_id=-1):
        # image_size should be divisible by 8
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 89)
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[1]),
                                (3, 3), with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L1')
        pool_1, conv_1 = block_layer(images, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[1], self.n_features[2]),
                                (3, 3), with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L2')
        pool_2, conv_2 = block_layer(pool_1, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[2], self.n_features[3]),
                                (3, 3), with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L3')
        pool_3, conv_3 = block_layer(pool_2, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[4]),
                                (3, 3), with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L4')
        up_3, _ = block_layer(pool_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[3]),
                                (3, 3), with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R3')
        concat_3 = ElementwiseLayer('CONCAT')(conv_3, up_3)
        up_2, _ = block_layer(concat_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[2], self.n_features[2]),
                                (3, 3), with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R2')
        concat_2 = ElementwiseLayer('CONCAT')(conv_2, up_2)
        up_1, _ = block_layer(concat_2, is_training)
        print(block_layer)

        block_layer = UNetBlock('NONE',
                                (self.n_features[1],
                                 self.n_features[1],
                                 self.num_classes),
                                (3, 3, 1),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R1_FC')
        concat_1 = ElementwiseLayer('CONCAT')(conv_1, up_1)

        # for the last layer, upsampling path is not used
        _, output_tensor = block_layer(concat_1, is_training)

        crop_layer = CropLayer(border=44, name='crop-88')
        output_tensor = crop_layer(output_tensor)
        print(block_layer)
        return output_tensor
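`CropLayer(border=44)` trims 44 voxels from each side of every spatial dimension, hence the name 'crop-88' (compare 'crop-8' with `border=4` in Example #1). For an assumed 96-voxel input, which satisfies both asserts at the top:

in_size, border = 96, 44        # assumed input size; border from the code
assert in_size % 8 == 0 and in_size >= 89
out_size = in_size - 2 * border # 8: spatial size after cropping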
Example #15
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'], 'LINEAR',
                                            'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.get_shape().as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2**(len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                       downsample_channels, hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(
            tf.nn.avg_pool3d(input_tensor, [1] + [3] * spatial_rank + [1],
                             [1] + [2] * spatial_rank + [1], 'SAME'),
            is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.get_shape().as_list()[1:-1]
        initial_features = ConvolutionalLayer(hp['n_input_channels'][0],
                                              kernel_size=5,
                                              stride=2)(
                                                  input_tensor,
                                                  is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            self.num_classes,
            kernel_size=hp['final_kernel'],
            with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        # map argmax class indices onto [0, 255] greyscale for the summary
        seg_summary = tf.to_float(
            tf.expand_dims(tf.argmax(segmentation, -1),
                           -1)) * (255. / (self.num_classes - 1))
        m, v = tf.nn.moments(input_tensor, axes=[1, 2, 3], keep_dims=True)
        img_summary = tf.minimum(
            255.,
            tf.maximum(0., (tf.to_float(input_tensor - m) /
                            (tf.sqrt(v) * 2.) + 1.) * 127.))
        image3_axial('imgseg', tf.concat([img_summary, seg_summary], 1), 5,
                     [tf.GraphKeys.SUMMARIES])
        return segmentation
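For reference, a sketch of the hyperparameter shapes this method indexes; zip() then yields one (dense_ch, seg_ch, down_ch, dil_rate, idx) tuple per dense block, with None marking the last block, which has no further downsampling. The values below are illustrative, not necessarily the library defaults:

    hp = {'n_dense_channels': (4, 8, 16),
          'n_seg_channels': (12, 24, 24),
          'n_input_channels': (24, 24, 24),
          'dilation_rates': ([1] * 5, [1] * 10, [1] * 10)}
    downsample_channels = list(hp['n_input_channels'][1:]) + [None]
    v_params = zip(hp['n_dense_channels'], hp['n_seg_channels'],
                   downsample_channels, hp['dilation_rates'],
                   range(len(downsample_channels)))
    for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
        print(idx, dense_ch, seg_ch, down_ch, len(dil_rate))
    # with three blocks, every spatial dim must be divisible by 2 ** 3 = 8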
Example No. 16
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor to input to the network. Size has to be divisible by 8
        :param is_training:  boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: other conditional arguments, not in use
        :return: tensor, network output
        """
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)

        if layer_util.infer_spatial_rank(images) == 2:
            padded_images = tf.tile(images, [1, 1, 1, self.n_features[0]])
        elif layer_util.infer_spatial_rank(images) == 3:
            padded_images = tf.tile(images, [1, 1, 1, 1, self.n_features[0]])
        else:
            raise ValueError('unsupported spatial rank of the input image')
        # downsampling blocks
        res_1, down_1 = VNetBlock('DOWNSAMPLE',
                                  1,
                                  self.n_features[0],
                                  self.n_features[1],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L1')(images, padded_images)
        res_2, down_2 = VNetBlock('DOWNSAMPLE',
                                  2,
                                  self.n_features[1],
                                  self.n_features[2],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L2')(down_1, down_1)
        res_3, down_3 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[2],
                                  self.n_features[3],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L3')(down_2, down_2)
        res_4, down_4 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[3],
                                  self.n_features[4],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L4')(down_3, down_3)
        # upsampling blocks
        _, up_4 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[4],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='V_')(down_4, down_4)
        concat_r4 = ElementwiseLayer('CONCAT')(up_4, res_4)
        _, up_3 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[3],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R4')(concat_r4, up_4)
        concat_r3 = ElementwiseLayer('CONCAT')(up_3, res_3)
        _, up_2 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[3],
                            self.n_features[2],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R3')(concat_r3, up_3)
        concat_r2 = ElementwiseLayer('CONCAT')(up_2, res_2)
        _, up_1 = VNetBlock('UPSAMPLE',
                            2,
                            self.n_features[2],
                            self.n_features[1],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R2')(concat_r2, up_2)
        # final class score
        concat_r1 = ElementwiseLayer('CONCAT')(up_1, res_1)
        _, output_tensor = VNetBlock('SAME',
                                     1,
                                     self.n_features[1],
                                     self.num_classes,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='R1')(concat_r1, up_1)
        return output_tensor
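The tf.tile at the top of this example exists because the first VNetBlock sums its convolution output element-wise with its second argument, so a single-channel image has to be replicated to n_features[0] channels before the residual addition. A standalone illustration, assuming a single-channel 3D input and 16 initial features:

    import tensorflow as tf
    images = tf.zeros([2, 32, 32, 32, 1])        # [batch, x, y, z, channels]
    padded_images = tf.tile(images, [1, 1, 1, 1, 16])
    print(padded_images.shape)                   # (2, 32, 32, 32, 16)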
Example No. 17
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor to input to the network. Size has to be divisible by 8
        :param is_training: boolean, True if network is in training mode
        :param layer_id: int, index of the layer to return as output
        :param unused_kwargs: other conditional arguments, not in use
        :return: output of layer indicated by layer_id
        """
        assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            stride=2,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 3x3x3 deconvolution layer
        params = self.layers[4]
        fc_layer = DeconvolutionalLayer(n_output_chns=params['n_features'],
                                        kernel_size=3,
                                        stride=2,
                                        acti_func=self.acti_func,
                                        w_initializer=self.initializers['w'],
                                        w_regularizer=self.regularizers['w'],
                                        name='deconv')
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # print the network structure in training mode;
        # otherwise return the layer selected by layer_id
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
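This method assumes self.layers is a list of parameter dictionaries. A plausible shape, inferred only from the keys the code reads ('n_features', 'kernel_size', 'kernels', 'repeat', 'name'); the values are illustrative:

    layers = [
        {'name': 'conv_0', 'n_features': 16, 'kernel_size': 3},
        {'name': 'res_1', 'n_features': 16, 'kernels': (3, 3), 'repeat': 3},
        {'name': 'res_2', 'n_features': 32, 'kernels': (3, 3), 'repeat': 3},
        {'name': 'res_3', 'n_features': 64, 'kernels': (3, 3), 'repeat': 3},
        {'name': 'conv_1', 'n_features': 80, 'kernel_size': 1},
        {'name': 'conv_2', 'n_features': 160, 'kernel_size': 1},
    ]

Note that the deconvolution stage reuses self.layers[4], so the upsampled features keep the channel count of the preceding 1x1x1 layer.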
Example No. 18
    def layer_op(self, images, is_training=True, layer_id=-1, **unused_kwargs):
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []

        input_tensor_T1, input_tensor_T2, input_tensor_PD = tf.split(
            value=images, num_or_size_splits=3, axis=-1, num=None)

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            with_bias=True,
            with_bn=False,
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 1x1x1 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      with_bias=True,
                                      with_bn=False,
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      with_bias=True,
                                      with_bn=False,
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        output_tensor_res = ElementwiseLayer('SUM')(input_tensor_T1, flow)

        # print the network structure in training mode
        if is_training:
            self._print(layer_instances)
        return output_tensor_res
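Two details are worth noting in this variant: the input is split into its three modalities, but only input_tensor_T1 is used, as the target of the final residual sum; and because ElementwiseLayer('SUM') adds the network output to that single-channel split, the last 1x1x1 layer presumably emits one channel. A standalone illustration of the split, assuming the three modalities are stacked along the last axis:

    import tensorflow as tf
    images = tf.zeros([1, 24, 24, 24, 3])        # T1, T2, PD stacked
    t1, t2, pd = tf.split(value=images, num_or_size_splits=3, axis=-1)
    print(t1.shape)                              # (1, 24, 24, 24, 1)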
Example No. 19
    def layer_op(self, images, is_training, layer_id=-1):
        assert (layer_util.check_spatial_dims(images, lambda x: x % 8 == 0))
        # go through self.layers, create an instance of each layer
        # and plugin data
        layer_instances = []

        ### first convolution layer
        params = self.layers[0]
        first_conv_layer = ConvolutionalLayer(
            n_output_chns=params['n_features'],
            kernel_size=params['kernel_size'],
            acti_func=self.acti_func,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            name=params['name'])
        flow = first_conv_layer(images, is_training)
        layer_instances.append((first_conv_layer, flow))

        ### resblocks, all kernels dilated by 1 (normal convolution)
        params = self.layers[1]
        with DilatedTensor(flow, dilation_factor=1) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 2
        params = self.layers[2]
        with DilatedTensor(flow, dilation_factor=2) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### resblocks, all kernels dilated by 4
        params = self.layers[3]
        with DilatedTensor(flow, dilation_factor=4) as dilated:
            for j in range(params['repeat']):
                res_block = HighResBlock(params['n_features'],
                                         params['kernels'],
                                         acti_func=self.acti_func,
                                         w_initializer=self.initializers['w'],
                                         w_regularizer=self.regularizers['w'],
                                         name='%s_%d' % (params['name'], j))
                dilated.tensor = res_block(dilated.tensor, is_training)
                layer_instances.append((res_block, dilated.tensor))
        flow = dilated.tensor

        ### 3x3x3 convolution layer
        params = self.layers[4]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[5]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=self.acti_func,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        ### 1x1x1 convolution layer
        params = self.layers[6]
        fc_layer = ConvolutionalLayer(n_output_chns=params['n_features'],
                                      kernel_size=params['kernel_size'],
                                      acti_func=None,
                                      w_initializer=self.initializers['w'],
                                      w_regularizer=self.regularizers['w'],
                                      name=params['name'])
        flow = fc_layer(flow, is_training)
        layer_instances.append((fc_layer, flow))

        # print the network structure in training mode;
        # otherwise return the layer selected by layer_id
        if is_training:
            self._print(layer_instances)
            return layer_instances[-1][1]
        return layer_instances[layer_id][1]
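The three dilation stages grow the receptive field without any pooling. A back-of-envelope calculation, assuming 3x3x3 kernels, two convolutions per HighResBlock, and 'repeat' equal to 3 at every stage:

    receptive_field = 1 + 2          # first 3x3x3 convolution
    for dilation, repeat in ((1, 3), (2, 3), (4, 3)):
        # each 3x3x3 conv widens the field by 2 * dilation voxels
        receptive_field += repeat * 2 * (2 * dilation)
    print(receptive_field)           # 87 under these assumptions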
Example No. 20
    def layer_op(self,
                 fixed_image,
                 moving_image,
                 base_grid=None,
                 is_training=True):
        """

        :param fixed_image:
        :param moving_image:
        :param base_grid:
        :param is_training:
        :return: estimated dense displacement fields
        """

        spatial_rank = infer_spatial_rank(fixed_image)
        spatial_shape = fixed_image.get_shape().as_list()[1:-1]
        check_spatial_dims(fixed_image, lambda x: x % 16 == 0)

        # resize the moving image to match the fixed image
        moving_image = Resize(spatial_shape)(moving_image)
        img = tf.concat([moving_image, fixed_image], axis=-1)
        down_res_0, conv_0_0, _ = \
            DownRes(self.fea[0], kernel_size=7, **self.down_res_param)(img, is_training)
        down_res_1, conv_0_1, _ = \
            DownRes(self.fea[1], **self.down_res_param)(down_res_0, is_training)
        down_res_2, conv_0_2, _ = \
            DownRes(self.fea[2], **self.down_res_param)(down_res_1, is_training)
        down_res_3, conv_0_3, _ = \
            DownRes(self.fea[3], **self.down_res_param)(down_res_2, is_training)

        conv_4 = Conv(n_output_chns=self.fea[4],
                      kernel_size=self.k_conv,
                      **self.down_res_param)(down_res_3, is_training)

        up_res_0 = UpRes(self.fea[3], **self.up_res_param)(
            conv_4, conv_0_3, is_training)
        up_res_1 = UpRes(self.fea[2], **self.up_res_param)(
            up_res_0, conv_0_2, is_training)
        up_res_2 = UpRes(self.fea[1], **self.up_res_param)(
            up_res_1, conv_0_1, is_training)
        up_res_3 = UpRes(self.fea[0], **self.up_res_param)(
            up_res_2, conv_0_0, is_training)

        if self.multi_scale_fusion:
            output_list = [up_res_3, up_res_2, up_res_1, up_res_0, conv_4]
        else:
            output_list = [up_res_3]

        # convert every output scale to a dense displacement field
        dense_fields = []
        for scale_out in output_list:
            field = Conv(n_output_chns=spatial_rank,
                         kernel_size=self.k_conv,
                         with_bias=True,
                         with_bn=False,
                         acti_func=None,
                         **self.disp_param)(scale_out)
            resized_field = Resize(new_size=spatial_shape)(field)
            dense_fields.append(resized_field)

        if base_grid is None:
            # adding a reference grid if it doesn't exist
            in_spatial_size = [None] * spatial_rank
            base_grid = _create_affine_features(output_shape=spatial_shape,
                                                source_shape=in_spatial_size)
            base_grid = np.asarray(base_grid[:-1])
            base_grid = np.reshape(
                base_grid.T, [-1] + spatial_shape + [spatial_rank])
            base_grid = tf.constant(base_grid, dtype=resized_field.dtype)

        if self.multi_scale_fusion and len(dense_fields) > 1:
            dense_field = tf.reduce_sum(dense_fields, axis=0)
        else:
            dense_field = dense_fields[0]

        # TODO filtering
        if self.smoothing_func is not None:
            dense_field = self.smoothing_func(dense_field, spatial_rank)

        tf.add_to_collection('bending_energy',
                             _computing_bending_energy(dense_field))
        tf.add_to_collection('gradient_norm',
                             _computing_gradient_norm(dense_field))

        dense_field = dense_field + base_grid
        return dense_field
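The returned dense_field is the predicted displacement added to a fixed reference grid, i.e. absolute sampling coordinates for a resampler. A minimal identity grid in voxel coordinates, as a sketch of the role base_grid plays (the actual _create_affine_features helper may normalise coordinates differently):

    import numpy as np
    spatial_shape = [8, 8, 8]
    ranges = [np.arange(s, dtype=np.float32) for s in spatial_shape]
    grid = np.stack(np.meshgrid(*ranges, indexing='ij'), axis=-1)
    grid = grid[np.newaxis]          # [1, 8, 8, 8, 3], analogous to base_grid
    print(grid.shape)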
Example No. 21
    def layer_op(self, images, is_training, layer_id=-1):
        # image_size should be divisible by 8
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)
        assert layer_util.check_spatial_dims(images, lambda x: x >= 89)
        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[0], self.n_features[1]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L1')
        pool_1, conv_1 = block_layer(images, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[1], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L2')
        pool_2, conv_2 = block_layer(pool_1, is_training)
        print(block_layer)

        block_layer = UNetBlock('DOWNSAMPLE',
                                (self.n_features[2], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=True,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L3')
        pool_3, conv_3 = block_layer(pool_2, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[4]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='L4')
        up_3, _ = block_layer(pool_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[3], self.n_features[3]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R3')
        concat_3 = ElementwiseLayer('CONCAT')(conv_3, up_3)
        up_2, _ = block_layer(concat_3, is_training)
        print(block_layer)

        block_layer = UNetBlock('UPSAMPLE',
                                (self.n_features[2], self.n_features[2]),
                                (3, 3),
                                with_downsample_branch=False,
                                w_initializer=self.initializers['w'],
                                w_regularizer=self.regularizers['w'],
                                acti_func=self.acti_func,
                                name='R2')
        concat_2 = ElementwiseLayer('CONCAT')(conv_2, up_2)
        up_1, _ = block_layer(concat_2, is_training)
        print(block_layer)

        block_layer = UNetBlock(
            'NONE', (self.n_features[1], self.n_features[1], self.num_classes),
            (3, 3, 1),
            with_downsample_branch=True,
            w_initializer=self.initializers['w'],
            w_regularizer=self.regularizers['w'],
            acti_func=self.acti_func,
            name='R1_FC')
        concat_1 = ElementwiseLayer('CONCAT')(conv_1, up_1)

        # for the last layer, upsampling path is not used
        _, output_tensor = block_layer(concat_1, is_training)

        crop_layer = CropLayer(border=44, name='crop-88')
        output_tensor = crop_layer(output_tensor)
        print(block_layer)
        return output_tensor
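The pair of assertions at the top (divisible by 8, at least 89 voxels per dimension) guarantees that something survives the 88-voxel border loss computed earlier. The smallest admissible input is therefore 96:

    candidates = [x for x in range(1, 200) if x % 8 == 0 and x >= 89]
    print(candidates[0])             # 96 -> output spatial size 96 - 88 = 8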
Example No. 22
    def layer_op(self, images, is_training, layer_id=-1, **unused_kwargs):
        """

        :param images: tensor, input to the network, size should be divisible by d_factor
        :param is_training: boolean, True if network is in training mode
        :param layer_id: not in use
        :param unused_kwargs: other conditional arguments, not in use
        :return: tensor, network output
        """
        # image_size is defined as the largest context, then:
        #   downsampled path size: image_size / d_factor
        #   downsampled path output: image_size / d_factor - 16

        # to make sure same size of feature maps from both pathways:
        #   normal path size: (image_size / d_factor - 16) * d_factor + 16
        #   normal path output: (image_size / d_factor - 16) * d_factor

        # where 16 is fixed by the receptive field of conv layers
        # TODO: make sure label_size = image_size/d_factor - 16

        # image_size has to be an odd number, divisible by d_factor (3 by
        # default), and smaller than the smallest input volume

        # label_size should be (image_size/d_factor - 16) * d_factor

        assert self.d_factor % 2 == 1  # to make the downsampling centered
        assert (layer_util.check_spatial_dims(
            images, lambda x: x % self.d_factor == 0))
        assert (layer_util.check_spatial_dims(images, lambda x: x % 2 == 1)
                )  # to make the crop centered
        assert (layer_util.check_spatial_dims(images,
                                              lambda x: x > self.d_factor * 16)
                )  # required by receptive field

        # crop 25x25x25 from 57x57x57
        crop_op = CropLayer(border=self.crop_diff, name='cropping_input')
        normal_path = crop_op(images)
        print(crop_op)

        # downsample 19x19x19 from 57x57x57
        downsample_op = DownSampleLayer(func='CONSTANT',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor,
                                        padding='VALID',
                                        name='downsample_input')
        downsample_path = downsample_op(images)
        print(downsample_op)

        # convolutions for both pathways
        for n_features in self.conv_features:
            # normal pathway convolutions
            conv_path_1 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='normal_conv')
            normal_path = conv_path_1(normal_path, is_training)
            print(conv_path_1)

            # downsampled pathway convolutions
            conv_path_2 = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=3,
                padding='VALID',
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                acti_func=self.acti_func,
                name='downsample_conv')
            downsample_path = conv_path_2(downsample_path, is_training)
            print(conv_path_2)

        # upsampling the downsampled pathway
        downsample_path = UpSampleLayer('REPLICATE',
                                        kernel_size=self.d_factor,
                                        stride=self.d_factor)(downsample_path)

        # concatenate both pathways
        output_tensor = ElementwiseLayer('CONCAT')(normal_path,
                                                   downsample_path)

        # 1x1x1 convolution layer
        for n_features in self.fc_features:
            conv_fc = ConvolutionalLayer(
                n_output_chns=n_features,
                kernel_size=1,
                acti_func=self.acti_func,
                w_initializer=self.initializers['w'],
                w_regularizer=self.regularizers['w'],
                name='conv_1x1x1_{}'.format(n_features))
            output_tensor = conv_fc(output_tensor, is_training)
            print(conv_fc)

        return output_tensor
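The sizes quoted in the comments can be checked directly. With image_size = 57, d_factor = 3, and 16 voxels lost to the convolutional receptive field, the arithmetic reproduces the "crop 25x25x25" and "downsample 19x19x19" figures above:

    image_size, d_factor = 57, 3
    downsample_in = image_size // d_factor          # 19, downsampled input
    downsample_out = downsample_in - 16             # 3
    normal_in = downsample_out * d_factor + 16      # 25, the cropped input
    label_size = downsample_out * d_factor          # 9, the network output
    print(downsample_in, normal_in, label_size)     # 19 25 9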
Example No. 23
    def layer_op(self, images, is_training, layer_id=-1):
        assert layer_util.check_spatial_dims(images, lambda x: x % 8 == 0)

        if layer_util.infer_spatial_rank(images) == 2:
            padded_images = tf.tile(images, [1, 1, 1, self.n_features[0]])
        elif layer_util.infer_spatial_rank(images) == 3:
            padded_images = tf.tile(images, [1, 1, 1, 1, self.n_features[0]])
        else:
            raise ValueError('unsupported spatial rank of the input image')
        # downsampling blocks
        res_1, down_1 = VNetBlock('DOWNSAMPLE',
                                  1,
                                  self.n_features[0],
                                  self.n_features[1],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L1')(images, padded_images)
        res_2, down_2 = VNetBlock('DOWNSAMPLE',
                                  2,
                                  self.n_features[1],
                                  self.n_features[2],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L2')(down_1, down_1)
        res_3, down_3 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[2],
                                  self.n_features[3],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L3')(down_2, down_2)
        res_4, down_4 = VNetBlock('DOWNSAMPLE',
                                  3,
                                  self.n_features[3],
                                  self.n_features[4],
                                  w_initializer=self.initializers['w'],
                                  w_regularizer=self.regularizers['w'],
                                  acti_func=self.acti_func,
                                  name='L4')(down_3, down_3)
        # upsampling blocks
        _, up_4 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[4],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='V_')(down_4, down_4)
        concat_r4 = ElementwiseLayer('CONCAT')(up_4, res_4)
        _, up_3 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[4],
                            self.n_features[3],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R4')(concat_r4, up_4)
        concat_r3 = ElementwiseLayer('CONCAT')(up_3, res_3)
        _, up_2 = VNetBlock('UPSAMPLE',
                            3,
                            self.n_features[3],
                            self.n_features[2],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R3')(concat_r3, up_3)
        concat_r2 = ElementwiseLayer('CONCAT')(up_2, res_2)
        _, up_1 = VNetBlock('UPSAMPLE',
                            2,
                            self.n_features[2],
                            self.n_features[1],
                            w_initializer=self.initializers['w'],
                            w_regularizer=self.regularizers['w'],
                            acti_func=self.acti_func,
                            name='R2')(concat_r2, up_2)
        # final class score
        concat_r1 = ElementwiseLayer('CONCAT')(up_1, res_1)
        _, output_tensor = VNetBlock('SAME',
                                     1,
                                     self.n_features[1],
                                     self.num_classes,
                                     w_initializer=self.initializers['w'],
                                     w_regularizer=self.regularizers['w'],
                                     b_initializer=self.initializers['b'],
                                     b_regularizer=self.regularizers['b'],
                                     acti_func=self.acti_func,
                                     name='R1')(concat_r1, up_1)
        return output_tensor
Example No. 24
    def layer_op(self, input_tensor, is_training, layer_id=-1):
        hp = self.hyperparameters
        if is_training and hp['augmentation_scale'] > 0:
            aug = Affine3DAugmentationLayer(hp['augmentation_scale'],
                                            'LINEAR', 'ZERO')
            input_tensor = aug(input_tensor)
        channel_dim = len(input_tensor.get_shape()) - 1
        input_size = input_tensor.shape.as_list()
        spatial_rank = len(input_size) - 2

        modulo = 2 ** (len(hp['dilation_rates']))
        assert layer_util.check_spatial_dims(input_tensor,
                                             lambda x: x % modulo == 0)

        downsample_channels = list(hp['n_input_channels'][1:]) + [None]
        v_params = zip(hp['n_dense_channels'],
                       hp['n_seg_channels'],
                       downsample_channels,
                       hp['dilation_rates'],
                       range(len(downsample_channels)))

        downsampled_img = BNLayer()(tf.nn.avg_pool3d(input_tensor,
                                                     [1] + [3] * spatial_rank + [1],
                                                     [1] + [2] * spatial_rank + [1],
                                                     'SAME'), is_training=is_training)
        all_segmentation_features = [downsampled_img]
        output_shape = downsampled_img.shape.as_list()[1:-1]
        initial_features = ConvolutionalLayer(
            hp['n_input_channels'][0],
            kernel_size=5, stride=2)(input_tensor, is_training=is_training)

        down = tf.concat([downsampled_img, initial_features], channel_dim)
        for dense_ch, seg_ch, down_ch, dil_rate, idx in v_params:
            sd = DenseFeatureStackBlockWithSkipAndDownsample(
                dense_ch,
                3,
                dil_rate,
                seg_ch,
                down_ch,
                self.architecture_parameters['use_bdo'],
                acti_func='relu')
            skip, down = sd(down,
                            is_training=is_training,
                            keep_prob=hp['p_channels_selected'])
            all_segmentation_features.append(image_resize(skip, output_shape))
        segmentation = ConvolutionalLayer(
            self.num_classes,
            kernel_size=hp['final_kernel'],
            with_bn=False,
            with_bias=True)(tf.concat(all_segmentation_features, channel_dim),
                            is_training=is_training)
        if self.architecture_parameters['use_prior']:
            segmentation = segmentation + \
                           SpatialPriorBlock([12] * spatial_rank, output_shape)
        if is_training and hp['augmentation_scale'] > 0:
            inverse_aug = aug.inverse()
            segmentation = inverse_aug(segmentation)
        segmentation = image_resize(segmentation, input_size[1:-1])
        # map argmax class indices onto [0, 255] greyscale for the summary
        seg_summary = tf.to_float(
            tf.expand_dims(tf.argmax(segmentation, -1),
                           -1)) * (255. / (self.num_classes - 1))
        m, v = tf.nn.moments(input_tensor, axes=[1, 2, 3], keep_dims=True)
        img_summary = tf.minimum(
            255.,
            tf.maximum(0., (tf.to_float(input_tensor - m) /
                            (tf.sqrt(v) * 2.) + 1.) * 127.))
        image3_axial('imgseg', tf.concat([img_summary, seg_summary], 1), 5,
                     [tf.GraphKeys.SUMMARIES])
        return segmentation