Example 1
 def layer_op(self, input_tensor):
     spatial_rank = layer_util.infer_spatial_rank(input_tensor)
     look_up_operations(self.func, SUPPORTED_OP)
     kernel_size_all_dims = layer_util.expand_spatial_params(
         self.kernel_size, spatial_rank)
     stride_all_dims = layer_util.expand_spatial_params(
         self.stride, spatial_rank)
     if self.func == 'CONSTANT':
         full_kernel_size = kernel_size_all_dims + (1, 1)
         np_kernel = layer_util.trivial_kernel(full_kernel_size)
         kernel = tf.constant(np_kernel, dtype=tf.float32)
         output_tensor = [tf.expand_dims(x, -1)
                          for x in tf.unstack(input_tensor, axis=-1)]
         output_tensor = [
             tf.nn.convolution(
                 input=inputs,
                 filter=kernel,
                 strides=stride_all_dims,
                 padding=self.padding,
                 name='conv')
             for inputs in output_tensor]
         output_tensor = tf.concat(output_tensor, axis=-1)
     else:
         output_tensor = tf.nn.pool(
             input=input_tensor,
             window_shape=kernel_size_all_dims,
             pooling_type=self.func,
             padding=self.padding,
             dilation_rate=[1] * spatial_rank,
             strides=stride_all_dims,
             name=self.layer_name)
     return output_tensor
Example 2
def param_to_dict(input_data_param):
    """
    Validate the user input ``input_data_param``
    and raise an error if it is invalid.

    :param input_data_param:
    :return: input data specifications as a nested dictionary
    """
    error_msg = 'Unknown ``data_param`` type. ' \
                'It should be a nested dictionary: '\
                '{"modality_name": {"input_property": value}} '\
                'or a dictionary of: {"modality_name": '\
                'niftynet.utilities.util_common.ParserNamespace}'
    data_param = deepcopy(input_data_param)
    if isinstance(data_param, (ParserNamespace, argparse.Namespace)):
        data_param = vars(data_param)
    if not isinstance(data_param, dict):
        raise ValueError(error_msg)
    for mod in data_param:
        mod_param = data_param[mod]
        if isinstance(mod_param, (ParserNamespace, argparse.Namespace)):
            dict_param = vars(mod_param)
        elif isinstance(mod_param, dict):
            dict_param = mod_param
        else:
            raise ValueError(error_msg)
        for data_key in dict_param:
            look_up_operations(data_key, SUPPORTED_DATA_SPEC)
        data_param[mod] = dict_param
    return data_param
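A minimal call sketch for the two accepted input forms (the modality name 'T1' and the property 'path_to_search' are illustrative; every property key must be listed in SUPPORTED_DATA_SPEC):

    # nested-dictionary form
    spec = param_to_dict({'T1': {'path_to_search': './data/T1'}})

    # ParserNamespace form; vars() converts it to the same nested dictionary
    spec = param_to_dict({'T1': ParserNamespace(path_to_search='./data/T1')})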
Example 3
def param_to_dict(input_data_param):
    """
    Validate the user input ``input_data_param``
    and raise an error if it is invalid.

    :param input_data_param:
    :return: input data specifications as a nested dictionary
    """
    error_msg = 'Unknown ``data_param`` type. ' \
                'It should be a nested dictionary: '\
                '{"modality_name": {"input_property": value}} '\
                'or a dictionary of: {"modality_name": '\
                'niftynet.utilities.util_common.ParserNamespace}'
    data_param = deepcopy(input_data_param)
    if isinstance(data_param, (ParserNamespace, argparse.Namespace)):
        data_param = vars(data_param)
    if not isinstance(data_param, dict):
        raise ValueError(error_msg)
    for mod in data_param:
        mod_param = data_param[mod]
        if isinstance(mod_param, (ParserNamespace, argparse.Namespace)):
            dict_param = vars(mod_param)
        elif isinstance(mod_param, dict):
            dict_param = mod_param
        else:
            raise ValueError(error_msg)
        for data_key in dict_param:
            look_up_operations(data_key, SUPPORTED_DATA_SPEC)
        data_param[mod] = dict_param
    return data_param
Example 4
 def layer_op(self, input_tensor):
     spatial_rank = layer_util.infer_spatial_rank(input_tensor)
     look_up_operations(self.func, SUPPORTED_OP)
     kernel_size_all_dims = layer_util.expand_spatial_params(
         self.kernel_size, spatial_rank)
     stride_all_dims = layer_util.expand_spatial_params(
         self.stride, spatial_rank)
     if self.func == 'CONSTANT':
         full_kernel_size = kernel_size_all_dims + (1, 1)
         np_kernel = layer_util.trivial_kernel(full_kernel_size)
         kernel = tf.constant(np_kernel, dtype=tf.float32)
         output_tensor = [
             tf.expand_dims(x, -1)
             for x in tf.unstack(input_tensor, axis=-1)
         ]
         output_tensor = [
             tf.nn.convolution(input=inputs,
                               filter=kernel,
                               strides=stride_all_dims,
                               padding=self.padding,
                               name='conv') for inputs in output_tensor
         ]
         output_tensor = tf.concat(output_tensor, axis=-1)
     else:
         output_tensor = tf.nn.pool(input=input_tensor,
                                    window_shape=kernel_size_all_dims,
                                    pooling_type=self.func,
                                    padding=self.padding,
                                    dilation_rate=[1] * spatial_rank,
                                    strides=stride_all_dims,
                                    name=self.layer_name)
     return output_tensor
Example 5
    def get_file_list(self, phase=ALL, *section_names):
        """
        get file names as a dataframe, by partitioning phase and section names;
        set phase to ALL to load all subsets.

        :param phase: the label of the subset generated by self._partition_ids
                    should be one of the SUPPORTED_PHASES
        :param section_names: one or multiple input section names
        :return: a pandas.dataframe of file names
        """
        if self._file_list is None:
            tf.logging.warning('Empty file list, please initialise '
                               'ImageSetsPartitioner first.')
            return []
        try:
            look_up_operations(phase, SUPPORTED_PHASES)
        except ValueError:
            tf.logging.fatal('Unknown phase argument.')
            raise
        for name in section_names:
            try:
                look_up_operations(name, set(self._file_list))
            except ValueError:
                tf.logging.fatal(
                    'Requesting files under input section [%s],\n'
                    'however the section does not exist in the config.', name)
                raise
        if phase == ALL:
            self._file_list = self._file_list.sort_index()
            if section_names:
                section_names = [COLUMN_UNIQ_ID] + list(section_names)
                return self._file_list[section_names]
            return self._file_list
        if self._partition_ids is None:
            tf.logging.fatal('No partition ids available.')
            if self.new_partition:
                tf.logging.fatal(
                    'Unable to create new partitions, '
                    'splitting ratios: %s, writing file %s', self.ratios,
                    self.data_split_file)
            elif os.path.isfile(self.data_split_file):
                tf.logging.fatal(
                    'Unable to load %s, initialise the '
                    'ImageSetsPartitioner with `new_partition=True` '
                    'to overwrite the file.', self.data_split_file)
            raise ValueError

        selector = self._partition_ids[COLUMN_PHASE] == phase
        selected = self._partition_ids[selector][[COLUMN_UNIQ_ID]]
        if selected.empty:
            tf.logging.warning(
                'Empty subset for phase [%s], returning None as file list. '
                'Please adjust splitting fractions.', phase)
            return None
        subset = pandas.merge(self._file_list, selected, on=COLUMN_UNIQ_ID)
        if section_names:
            section_names = [COLUMN_UNIQ_ID] + list(section_names)
            return subset[list(section_names)]
        return subset
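A minimal usage sketch (assuming `partitioner` is an already-initialised ImageSetsPartitioner and that TRAIN is one of SUPPORTED_PHASES; the section name 'image' is illustrative):

    train_files = partitioner.get_file_list(TRAIN, 'image')  # subject ids plus the 'image' column
    all_files = partitioner.get_file_list()                   # phase defaults to ALL: every subject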
Example 6
    def __init__(self,
                 func='AVG',
                 reduction_ratio=16,
                 name='channel_squeeze_excitation'):
        self.func = func.upper()
        self.reduction_ratio = reduction_ratio
        super(ChannelSELayer, self).__init__(name=name)

        look_up_operations(self.func, SUPPORTED_OP)
Example 7
    def __init__(self,
                 interpolation="LINEAR",
                 boundary="REPLICATE",
                 name="resampler",
                 implementation="Fast"):
        super(ResamplerLayer, self).__init__(name=name)
        self.boundary = boundary.upper()
        self.boundary_func = look_up_operations(self.boundary,
                                                SUPPORTED_BOUNDARY)
        self.interpolation = look_up_operations(interpolation.upper(),
                                                SUPPORTED_INTERPOLATION)

        if self.boundary == 'ZERO' and self.interpolation == 'BSPLINE':
            tf.logging.fatal('Zero padding is not supported for BSPLINE mode')
            raise NotImplementedError

        if self.boundary == 'ZERO' and self.interpolation == 'IDW':
            tf.logging.warning('Zero padding is not supported for IDW mode')
            # raise NotImplementedError

        self.FastResamplerLayer = None  # set below if the NiftyReg backend is available
        if implementation.lower() in ['niftyreg', 'fast']:
            # check if niftyreg_resampling_layer is installed
            try:
                from niftyreg_image_resampling import NiftyregImageResamplingLayer
                import niftyreg_image_resampling as resampler_module
            except ImportError:
                tf.logging.warning('''
                    niftyreg_image_resampling is not installed; falling back onto
                    niftynet.layer.resampler.ResamplerLayer. To allow fast resampling,
                    please see installation instructions in
                    niftynet/contrib/niftyreg_image_resampling/README.md
                    ''')
                return

            # Passthrough of supported boundary types for NiftyReg resampling
            SUPPORTED_BOUNDARY_FAST = resampler_module.SUPPORTED_BOUNDARY

            # Passthrough of supported interpolation types for NiftyReg resampling
            SUPPORTED_INTERPOLATION_FAST = resampler_module.SUPPORTED_INTERPOLATION
            # check compatibility of the resampling options with niftyreg_image_resampling
            try:
                boundary_fast = look_up_operations(self.boundary,
                                                   SUPPORTED_BOUNDARY_FAST)
                interp_fast = look_up_operations(self.interpolation,
                                                 SUPPORTED_INTERPOLATION_FAST)
                self.FastResamplerLayer = NiftyregImageResamplingLayer(
                    interp_fast, boundary_fast)
                tf.logging.info('''NiftyReg image resampling is used.''')
            except ValueError as e:
                tf.logging.warning(e)
                tf.logging.warning(
                    '''Falling back onto niftynet.layer.resampler.ResamplerLayer.'''
                )
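A minimal construction sketch (whether the NiftyReg backend is actually used depends on niftyreg_image_resampling being importable; the commented call signature is assumed, not shown above):

    resampler = ResamplerLayer(interpolation='LINEAR',
                               boundary='REPLICATE',
                               implementation='Fast')
    # resampled = resampler(image_tensor, sample_coords)  # assumed layer_op signature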
Example 8
    def __init__(self,
                 type_str='otsu_plus',
                 multimod_fusion='or',
                 threshold=0.0):

        super(BinaryMaskingLayer, self).__init__(name='binary_masking')
        self.type_str = look_up_operations(
            type_str.lower(), SUPPORTED_MASK_TYPES)
        self.multimod_fusion = look_up_operations(
            multimod_fusion.lower(), SUPPORTED_MULTIMOD_MASK_TYPES)

        self.threshold = threshold
Example 9
    def __init__(self,
                 func,
                 kernel_size=3,
                 stride=2,
                 padding='SAME',
                 name='pooling'):
        self.func = func.upper()
        self.layer_name = '{}_{}'.format(self.func.lower(), name)
        super(DownSampleLayer, self).__init__(name=self.layer_name)

        self.padding = padding.upper()
        look_up_operations(self.padding, SUPPORTED_PADDING)

        self.kernel_size = kernel_size
        self.stride = stride
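A minimal usage sketch combining this constructor with the layer_op shown in Example 1 (assuming the usual NiftyNet convention that a layer instance is callable and dispatches to layer_op; the tensor shape is made up):

    import tensorflow as tf

    volumes = tf.placeholder(tf.float32, shape=(2, 32, 32, 32, 1))   # NDHWC 3D batch
    pool = DownSampleLayer(func='MAX', kernel_size=2, stride=2, padding='SAME')
    pooled = pool(volumes)   # spatial dimensions halved to 16x16x16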
Example 10
    def __init__(self,
                 func,
                 kernel_size=3,
                 stride=2,
                 padding='SAME',
                 name='pooling'):
        self.func = func.upper()
        self.layer_name = '{}_{}'.format(self.func.lower(), name)
        super(DownSampleLayer, self).__init__(name=self.layer_name)

        self.padding = padding.upper()
        look_up_operations(self.padding, SUPPORTED_PADDING)

        self.kernel_size = kernel_size
        self.stride = stride
Example 11
    def initialise_dataset_loader(self,
                                  data_param=None,
                                  task_param=None,
                                  data_partitioner=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param, task_param, file_lists[0])
        elif self._infer_type == 'sample':
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param, task_param, [file_lists])
Example 12
    def __init__(self,
                 n_output_chns,
                 kernel_size=3,
                 stride=1,
                 dilation=1,
                 padding='SAME',
                 with_bias=False,
                 w_initializer=None,
                 w_regularizer=None,
                 b_initializer=None,
                 b_regularizer=None,
                 padding_constant=0,
                 name='conv'):
        """
        :param padding_constant: a constant applied in padded convolution
        (see also tf.pad)
        """

        super(ConvLayer, self).__init__(name=name)

        self.padding = look_up_operations(padding.upper(), SUPPORTED_PADDING)
        self.n_output_chns = int(n_output_chns)
        self.kernel_size = kernel_size
        self.stride = stride
        self.dilation = dilation
        self.with_bias = with_bias
        self.padding_constant = padding_constant

        self.initializers = {
            'w': w_initializer if w_initializer else default_w_initializer(),
            'b': b_initializer if b_initializer else default_b_initializer()
        }

        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}
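A minimal usage sketch (same callable-layer assumption as above; `volumes` is any 5D NDHWC tensor):

    conv = ConvLayer(n_output_chns=16, kernel_size=3, stride=1, padding='SAME')
    features = conv(volumes)   # 3D convolution producing 16 output channels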
Example 13
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        file_lists = self.get_file_lists(data_partitioner)
        # read each line of csv files into an instance of Subject
        if self.is_evaluation:
            raise NotImplementedError('Evaluation is not yet '
                                      'supported in this application.')
        if self.is_training:
            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       file_lists[0])
        elif self._infer_type == 'sample':
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       [file_lists])
Example 14
 def interpret_output(self, batch_output):
     if self.is_training:
         return True
     else:
         infer_type = look_up_operations(
             self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
         if infer_type == 'encode':
             return self.output_decoder.decode_batch(
                 {'window_embedded': batch_output['embedded']},
                 batch_output['location'][:, 0:1])
         if infer_type == 'encode-decode':
             return self.output_decoder.decode_batch(
                 {
                     'window_generated_image':
                     batch_output['generated_image']
                 }, batch_output['location'][:, 0:1])
         if infer_type == 'sample':
             return self.output_decoder.decode_batch(
                 {
                     'window_generated_image':
                     batch_output['generated_image']
                 }, None)
         if infer_type == 'linear_interpolation':
             return self.output_decoder.decode_batch(
                 {
                     'window_generated_image':
                     batch_output['generated_image']
                 }, batch_output['location'][:, :2])
Example 15
    def __init__(self,
                 n_output_chns,
                 kernel_size=3,
                 stride=1,
                 padding='SAME',
                 with_bias=False,
                 w_initializer=None,
                 w_regularizer=None,
                 b_initializer=None,
                 b_regularizer=None,
                 name='deconv'):

        super(DeconvLayer, self).__init__(name=name)

        self.padding = look_up_operations(padding.upper(), SUPPORTED_PADDING)
        self.n_output_chns = int(n_output_chns)
        self.kernel_size = kernel_size
        self.stride = stride
        self.with_bias = with_bias

        self.initializers = {
            'w': w_initializer if w_initializer else default_w_initializer(),
            'b': b_initializer if b_initializer else default_b_initializer()}

        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}
Example 16
    def __init__(self,
                 func,
                 n_conv,
                 n_feature_chns,
                 n_output_chns,
                 w_initializer=None,
                 w_regularizer=None,
                 b_regularizer=None,
                 b_initializer=None,
                 acti_func='relu',
                 name='vnet_block'):
        """

        :param func: string, defines final block operation (Downsampling, upsampling, same)
        :param n_conv: int, number of conv layers to apply
        :param n_feature_chns: int, number of feature channels (output channels) for each conv layer
        :param n_output_chns: int, number of output channels of the final block operation (func)
        :param w_initializer: weight initialisation of convolutional layers
        :param w_regularizer: weight regularisation of convolutional layers
        :param b_initializer: bias initialisation of convolutional layers
        :param b_regularizer: bias regularisation of convolutional layers
        :param acti_func: activation function to use
        :param name: layer name
        """

        super(VNetBlock, self).__init__(name=name)

        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.n_conv = n_conv
        self.n_feature_chns = n_feature_chns
        self.n_output_chns = n_output_chns
        self.acti_func = acti_func

        self.initializers = {'w': w_initializer, 'b': b_initializer}
        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}
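A minimal construction sketch (the option string 'DOWNSAMPLE' is a guess consistent with the docstring above, and the channel counts are illustrative):

    block = VNetBlock('DOWNSAMPLE',       # assumed SUPPORTED_OP entry
                      n_conv=2,
                      n_feature_chns=32,
                      n_output_chns=64,
                      acti_func='prelu')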
Example 17
    def __init__(self,
                 interpolation="LINEAR",
                 boundary="REPLICATE",
                 name="resampler"):
        super(ResamplerLayer, self).__init__(name=name)
        self.boundary = boundary.upper()
        self.boundary_func = look_up_operations(
            self.boundary, SUPPORTED_BOUNDARY)
        self.interpolation = look_up_operations(
            interpolation.upper(), SUPPORTED_INTERPOLATION)

        if self.boundary == 'ZERO' and self.interpolation == 'BSPLINE':
            tf.logging.fatal('Zero padding is not supported for BSPLINE mode')
            raise NotImplementedError

        if self.boundary == 'ZERO' and self.interpolation == 'IDW':
            tf.logging.warning('Zero padding is not supported for IDW mode')
Example 18
    def __init__(self,
                 n_output_chns=1,
                 kernel_size=3,
                 dilation=1,
                 acti_func='relu',
                 w_initializer=None,
                 w_regularizer=None,
                 moving_decay=0.9,
                 eps=1e-5,
                 type_string='bn_acti_conv',
                 name='res-downsample'):
        """
        Implementation of residual unit presented in:

            [1] He et al., Identity mapping in deep residual networks, ECCV 2016
            [2] He et al., Deep residual learning for image recognition, CVPR 2016

        The possible types of connections are::

            'original': residual unit presented in [2]
            'conv_bn_acti': ReLU before addition presented in [1]
            'acti_conv_bn': ReLU-only pre-activation presented in [1]
            'bn_acti_conv': full pre-activation presented in [1]

        [1] recommends 'bn_acti_conv'

        :param n_output_chns: number of output feature channels
            if this doesn't match the input, a 1x1 projection will be created.
        :param kernel_size:
        :param dilation:
        :param acti_func:
        :param w_initializer:
        :param w_regularizer:
        :param moving_decay:
        :param eps:
        :param type_string:
        :param name:
        """

        super(TrainableLayer, self).__init__(name=name)
        self.type_string = look_up_operations(type_string.lower(),
                                              SUPPORTED_OP)
        self.acti_func = acti_func
        self.conv_param = {
            'w_initializer': w_initializer,
            'w_regularizer': w_regularizer,
            'kernel_size': kernel_size,
            'dilation': dilation,
            'n_output_chns': n_output_chns
        }
        self.bn_param = {
            'regularizer': w_regularizer,
            'moving_decay': moving_decay,
            'eps': eps
        }
Example 19
    def __init__(self,
                 func,
                 initializer=None,
                 regularizer=None,
                 name='residual'):

        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.layer_name = '{}_{}'.format(name, self.func.lower())

        super(ElementwiseLayer, self).__init__(name=self.layer_name)
        self.initializers = {'w': initializer}
        self.regularizers = {'w': regularizer}
Example 20
    def __init__(self,
                 func,
                 initializer=None,
                 regularizer=None,
                 name='residual'):

        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.layer_name = '{}_{}'.format(name, self.func.lower())

        super(ElementwiseLayer, self).__init__(name=self.layer_name)
        self.initializers = {'w': initializer}
        self.regularizers = {'w': regularizer}
Example 21
    def __init__(self, sigma=1, truncate=3.0, type_str='gaussian'):
        """

        :param sigma: standard deviation
        :param truncate: Truncate the filter at this many standard deviations
        :param type_str: type of kernels
        """
        Layer.__init__(self, name='approximated_smoothing')
        self.kernel_func = look_up_operations(
            type_str.lower(), SUPPORTED_KERNELS)
        self.sigma = sigma
        self.truncate = truncate
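As a worked example of the truncation parameter: if the kernel extends truncate * sigma voxels on each side (the usual convention, assumed rather than shown here), then sigma=1 and truncate=3.0 give a half-width of 3 and therefore a 7-tap smoothing kernel per spatial dimension.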
Example 22
    def __init__(self,
                 func,
                 n_layers=1,
                 w_initializer=None,
                 w_regularizer=None,
                 acti_func='relu',
                 name='scaleblock'):
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        super(ScaleBlock, self).__init__(name=name)
        self.n_layers = n_layers
        self.acti_func = acti_func

        self.initializers = {'w': w_initializer}
        self.regularizers = {'w': w_regularizer}
Example 23
    def __init__(self,
                 func,
                 n_layers=1,
                 w_initializer=None,
                 w_regularizer=None,
                 acti_func='relu',
                 name='scaleblock'):
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        super(ScaleBlock, self).__init__(name=name)
        self.n_layers = n_layers
        self.acti_func = acti_func

        self.initializers = {'w': w_initializer}
        self.regularizers = {'w': w_regularizer}
Example 24
    def __init__(self,
                 n_output_chns=1,
                 kernel_size=3,
                 dilation=1,
                 acti_func='relu',
                 w_initializer=None,
                 w_regularizer=None,
                 moving_decay=0.9,
                 eps=1e-5,
                 type_string='bn_acti_conv',
                 name='res-downsample'):
        """
        Implementation of residual unit presented in:

            [1] He et al., Identity mapping in deep residual networks, ECCV 2016
            [2] He et al., Deep residual learning for image recognition, CVPR 2016

        The possible types of connections are::

            'original': residual unit presented in [2]
            'conv_bn_acti': ReLU before addition presented in [1]
            'acti_conv_bn': ReLU-only pre-activation presented in [1]
            'bn_acti_conv': full pre-activation presented in [1]

        [1] recommends 'bn_acti_conv'

        :param n_output_chns: number of output feature channels
            if this doesn't match the input, a 1x1 projection will be created.
        :param kernel_size:
        :param dilation:
        :param acti_func:
        :param w_initializer:
        :param w_regularizer:
        :param moving_decay:
        :param eps:
        :param type_string:
        :param name:
        """

        super(TrainableLayer, self).__init__(name=name)
        self.type_string = look_up_operations(type_string.lower(), SUPPORTED_OP)
        self.acti_func = acti_func
        self.conv_param = {'w_initializer': w_initializer,
                           'w_regularizer': w_regularizer,
                           'kernel_size': kernel_size,
                           'dilation': dilation,
                           'n_output_chns': n_output_chns}
        self.bn_param = {'regularizer': w_regularizer,
                         'moving_decay': moving_decay,
                         'eps': eps}
Example 25
 def number_of_subjects(self, phase=ALL):
     """
     query number of images according to phase
     :param phase:
     :return:
     """
     if self._file_list is None:
         return 0
     phase = look_up_operations(phase, SUPPORTED_PHASES)
     if phase == ALL:
         return self._file_list[COLUMN_UNIQ_ID].count()
     if self._partition_ids is None:
         return 0
     selector = self._partition_ids[COLUMN_PHASE] == phase
     return self._partition_ids[selector].count()[COLUMN_UNIQ_ID]
Example 26
 def layer_op(self, input_tensor, keep_prob=None):
     func_ = look_up_operations(self.func, SUPPORTED_OP)
     if self.func == 'prelu':
         alphas = tf.get_variable('alpha',
                                  input_tensor.shape[-1],
                                  initializer=self.initializers['alpha'],
                                  regularizer=self.regularizers['alpha'])
         output_tensor = func_(input_tensor, alphas)
     elif self.func == 'dropout':
         output_tensor = func_(input_tensor,
                               keep_prob=keep_prob,
                               name='dropout')
     else:
         output_tensor = func_(input_tensor, name='acti')
     return output_tensor
Example 27
    def number_of_subjects(self, phase=ALL):
        """
        query number of images according to phase.

        :param phase:
        :return:
        """
        if self._file_list is None:
            return 0
        phase = look_up_operations(phase, SUPPORTED_PHASES)
        if phase == ALL:
            return self._file_list[COLUMN_UNIQ_ID].count()
        if self._partition_ids is None:
            return 0
        selector = self._partition_ids[COLUMN_PHASE] == phase
        return self._partition_ids[selector].count()[COLUMN_UNIQ_ID]
Example 28
 def layer_op(self, input_tensor, keep_prob=None):
     func_ = look_up_operations(self.func, SUPPORTED_OP)
     if self.func == 'prelu':
         alphas = tf.get_variable(
             'alpha', input_tensor.shape[-1],
             initializer=self.initializers['alpha'],
             regularizer=self.regularizers['alpha'])
         output_tensor = func_(input_tensor, alphas)
     elif self.func == 'dropout':
         assert keep_prob > 0.0
         assert keep_prob <= 1.0
         output_tensor = func_(input_tensor,
                               keep_prob=keep_prob,
                               name='dropout')
     else:
         output_tensor = func_(input_tensor, name='acti')
     return output_tensor
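A minimal usage sketch of the dropout branch (the class name ActiLayer and its constructor are assumed from NiftyNet's activation layer; keep_prob must satisfy the asserts above):

    drop = ActiLayer(func='dropout')             # assumed constructor
    regularised = drop(features, keep_prob=0.5)  # dispatches to layer_op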
Example 29
    def initialise_dataset_loader(self, data_param=None, task_param=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.reader = ImageReader(['image'])
        if self._infer_type in ('encode', 'encode-decode'):
            self.reader = ImageReader(['image'])
        elif self._infer_type == 'sample':
            self.reader = ()
        elif self._infer_type == 'linear_interpolation':
            self.reader = ImageReader(['feature'])

        if self.reader:
            self.reader.initialise_reader(data_param, task_param)

            augmentation_layers = []
            if self.is_training:
                if self.action_param.random_flipping_axes != -1:
                    augmentation_layers.append(
                        RandomFlipLayer(
                            flip_axes=self.action_param.random_flipping_axes))
                if self.action_param.scaling_percentage:
                    augmentation_layers.append(
                        RandomSpatialScalingLayer(
                            min_percentage=self.action_param.
                            scaling_percentage[0],
                            max_percentage=self.action_param.
                            scaling_percentage[1]))
                if self.action_param.rotation_angle:
                    augmentation_layers.append(
                        RandomRotationLayer(
                            min_angle=self.action_param.rotation_angle[0],
                            max_angle=self.action_param.rotation_angle[1]))
            self.reader.add_preprocessing_layers(augmentation_layers)
Example 30
    def __init__(self,
                 func,
                 kernel_size=3,
                 stride=2,
                 w_initializer=None,
                 w_regularizer=None,
                 with_bias=False,
                 b_initializer=None,
                 b_regularizer=None,
                 name='upsample'):
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.layer_name = '{}_{}'.format(self.func.lower(), name)
        super(UpSampleLayer, self).__init__(name=self.layer_name)

        self.kernel_size = kernel_size
        self.stride = stride
        self.with_bias = with_bias

        self.initializers = {'w': w_initializer, 'b': b_initializer}
        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}
Example 31
    def __init__(self,
                 func,
                 kernel_size=3,
                 stride=2,
                 w_initializer=None,
                 w_regularizer=None,
                 with_bias=False,
                 b_initializer=None,
                 b_regularizer=None,
                 name='upsample'):
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        self.layer_name = '{}_{}'.format(self.func.lower(), name)
        super(UpSampleLayer, self).__init__(name=self.layer_name)

        self.kernel_size = kernel_size
        self.stride = stride
        self.with_bias = with_bias

        self.initializers = {'w': w_initializer, 'b': b_initializer}
        self.regularizers = {'w': w_regularizer, 'b': b_regularizer}
Example 32
    def __init__(self,
                 func,
                 n_chns,
                 kernels,
                 w_initializer=None,
                 w_regularizer=None,
                 with_downsample_branch=False,
                 acti_func='relu',
                 name='UNet_block'):

        super(UNetBlock, self).__init__(name=name)

        self.func = look_up_operations(func.upper(), SUPPORTED_OP)

        self.kernels = kernels
        self.n_chns = n_chns
        self.with_downsample_branch = with_downsample_branch
        self.acti_func = acti_func

        self.initializers = {'w': w_initializer}
        self.regularizers = {'w': w_regularizer}
Example 33
    def get_file_lists(self, data_partitioner):
        """This function pulls the correct file_lists from the data partitioner
        depending on the phase.
        :param data_partitioner:
                           specifies train/valid/infer splitting if needed
        :return:           list of file lists of length 2 if validation is
                           needed otherwise 1"""
        if self.is_training:
            if self.action_param.validation_every_n > 0 and\
                data_partitioner.has_validation:
                return [data_partitioner.train_files,
                        data_partitioner.validation_files]
            else:
                return [data_partitioner.train_files]

        dataset = self.action_param.dataset_to_infer
        if dataset:
            dataset = look_up_operations(dataset, SUPPORTED_PHASES)
            return [data_partitioner.get_file_list(dataset)]

        return [data_partitioner.inference_files]
Example 34
    def initialise_dataset_loader(self, data_param=None, task_param=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            self.reader = ImageReader(['image'])
        if self._infer_type in ('encode', 'encode-decode'):
            self.reader = ImageReader(['image'])
        elif self._infer_type == 'sample':
            self.reader = ()
        elif self._infer_type == 'linear_interpolation':
            self.reader = ImageReader(['feature'])

        if self.reader:
            self.reader.initialise_reader(data_param, task_param)
Example 35
    def __init__(self,
                 func,
                 n_layers=1,
                 w_initializer=None,
                 w_regularizer=None,
                 acti_func='relu',
                 name='scaleblock'):
        """
        :param func: merging function (SUPPORTED_OP: MAX, AVERAGE)
        :param n_layers: int, number of layers
        :param w_initializer: weight initialisation for network
        :param w_regularizer: weight regularisation for network
        :param acti_func: activation function to use
        :param name: layer name
        """
        self.func = look_up_operations(func.upper(), SUPPORTED_OP)
        super(ScaleBlock, self).__init__(name=name)
        self.n_layers = n_layers
        self.acti_func = acti_func

        self.initializers = {'w': w_initializer}
        self.regularizers = {'w': w_regularizer}
Example 36
 def interpret_output(self, batch_output):
     if self.is_training:
         return True
     else:
         infer_type = look_up_operations(
             self.autoencoder_param.inference_type,
             SUPPORTED_INFERENCE)
         if infer_type == 'encode':
             return self.output_decoder.decode_batch(
                 batch_output['embedded'],
                 batch_output['location'][:, 0:1])
         if infer_type == 'encode-decode':
             return self.output_decoder.decode_batch(
                 batch_output['generated_image'],
                 batch_output['location'][:, 0:1])
         if infer_type == 'sample':
             return self.output_decoder.decode_batch(
                 batch_output['generated_image'],
                 None)
         if infer_type == 'linear_interpolation':
             return self.output_decoder.decode_batch(
                 batch_output['generated_image'],
                 batch_output['location'][:, :2])
Example 37
    def initialise_dataset_loader(
            self, data_param=None, task_param=None, data_partitioner=None):
        self.data_param = data_param
        self.autoencoder_param = task_param

        if not self.is_training:
            self._infer_type = look_up_operations(
                self.autoencoder_param.inference_type, SUPPORTED_INFERENCE)
        else:
            self._infer_type = None

        # read each line of csv files into an instance of Subject
        if self.is_training:
            file_lists = []
            if self.action_param.validation_every_n > 0:
                file_lists.append(data_partitioner.train_files)
                file_lists.append(data_partitioner.validation_files)
            else:
                file_lists.append(data_partitioner.train_files)

            self.readers = []
            for file_list in file_lists:
                reader = ImageReader(['image'])
                reader.initialise(data_param, task_param, file_list)
                self.readers.append(reader)
        if self._infer_type in ('encode', 'encode-decode'):
            self.readers = [ImageReader(['image'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       data_partitioner.inference_files)
        elif self._infer_type == 'sample':
            self.readers = []
        elif self._infer_type == 'linear_interpolation':
            self.readers = [ImageReader(['feature'])]
            self.readers[0].initialise(data_param,
                                       task_param,
                                       data_partitioner.inference_files)
Example 38
 def action(self, value):
     self._action = look_up_operations(value, self.SUPPORTED_ACTIONS)
Example 39
 def __init__(self, func='', num_classes=0, name='post_processing'):
     super(PostProcessingLayer, self).__init__(name=name)
     self.func = look_up_operations(func.upper(), SUPPORTED_OPS)
     self.num_classes = num_classes
Example 40
    def get_file_list(self, phase=ALL, *section_names):
        """
        get file names as a dataframe, by partitioning phase and section names;
        set phase to ALL to load all subsets.

        :param phase: the label of the subset generated by self._partition_ids
                    should be one of the SUPPORTED_PHASES
        :param section_names: one or multiple input section names
        :return: a pandas.dataframe of file names
        """
        if self._file_list is None:
            tf.logging.warning('Empty file list, please initialise '
                               'ImageSetsPartitioner first.')
            return []
        try:
            look_up_operations(phase, SUPPORTED_PHASES)
        except ValueError:
            tf.logging.fatal('Unknown phase argument.')
            raise
        for name in section_names:
            try:
                look_up_operations(name, set(self._file_list))
            except ValueError:
                tf.logging.fatal(
                    'Requesting files under input section [%s],\n'
                    'however the section does not exist in the config.', name)
                raise
        if phase == ALL:
            self._file_list = self._file_list.sort_index()
            if section_names:
                section_names = [COLUMN_UNIQ_ID] + list(section_names)
                return self._file_list[section_names]
            return self._file_list
        if self._partition_ids is None or self._partition_ids.empty:
            tf.logging.fatal('No partition ids available.')
            if self.new_partition:
                tf.logging.fatal('Unable to create new partitions, '
                                 'splitting ratios: %s, writing file %s',
                                 self.ratios, self.data_split_file)
            elif os.path.isfile(self.data_split_file):
                tf.logging.fatal(
                    'Unable to load %s, initialise the '
                    'ImageSetsPartitioner with `new_partition=True` '
                    'to overwrite the file.',
                    self.data_split_file)
            raise ValueError

        selector = self._partition_ids[COLUMN_PHASE] == phase
        selected = self._partition_ids[selector][[COLUMN_UNIQ_ID]]
        if selected.empty:
            tf.logging.warning(
                'Empty subset for phase [%s], returning None as file list. '
                'Please adjust splitting fractions.', phase)
            return None
        subset = pandas.merge(self._file_list, selected, on=COLUMN_UNIQ_ID)
        if subset.empty:
            tf.logging.warning(
                'No subject id matched in between file names and '
                'partition files.\nPlease check the partition files %s,\nor '
                'removing it to generate a new file automatically.',
                self.data_split_file)
        if section_names:
            section_names = [COLUMN_UNIQ_ID] + list(section_names)
            return subset[list(section_names)]
        return subset
Example 41
def transform_by_mapping(img, mask, mapping, cutoff, type_hist='quartile'):
    """
    Performs the standardisation of a given image.

    :param img: image to standardise
    :param mask: mask over which to determine the landmarks
    :param mapping: mapping landmarks to use for the piecewise linear
        transformations
    :param cutoff: cutoff points for the mapping
    :param type_hist: Type of landmark scheme to use: choice between
        quartile, percentile and median
    :return new_img: the standardised image
    """
    image_shape = img.shape
    img = img.reshape(-1)
    mask = mask.reshape(-1)

    type_hist = look_up_operations(type_hist.lower(), SUPPORTED_CUTPOINTS)
    if type_hist == 'quartile':
        range_to_use = [0, 3, 6, 9, 12]
    elif type_hist == 'percentile':
        range_to_use = [0, 1, 2, 4, 5, 6, 7, 8, 10, 11, 12]
    elif type_hist == 'median':
        range_to_use = [0, 6, 12]
    else:
        raise ValueError('unknown cutting points type_str')
    assert len(mapping) >= len(range_to_use), \
        "wrong mapping format, please check the histogram reference file"
    mapping = np.asarray(mapping)
    cutoff = __standardise_cutoff(cutoff, type_hist)
    perc = __compute_percentiles(img, mask, cutoff)
    # Apply linear histogram standardisation
    range_mapping = mapping[range_to_use]
    range_perc = perc[range_to_use]
    diff_mapping = range_mapping[1:] - range_mapping[:-1]
    diff_perc = range_perc[1:] - range_perc[:-1]

    # handling the case where two landmarks are the same
    # for a given input image.  This usually happens when
    # the image background has not been removed from the image.
    diff_perc[diff_perc == 0] = np.inf

    affine_map = np.zeros([2, len(range_to_use) - 1])
    # compute slopes of the linear models
    affine_map[0] = diff_mapping / diff_perc
    # compute intercepts of the linear models
    affine_map[1] = range_mapping[:-1] - affine_map[0] * range_perc[:-1]

    bin_id = np.digitize(img, range_perc[1:-1], right=False)
    lin_img = affine_map[0, bin_id]
    aff_img = affine_map[1, bin_id]
    # handling below cutoff[0] over cutoff[1]
    # values are mapped linearly and then smoothed
    new_img = lin_img * img + aff_img

    # Apply smooth thresholding (exponential)
    # below cutoff[0] and over cutoff[1]
    # this might not guarantee one to one mapping

    # lowest_values = img <= range_perc[0]
    # highest_values = img >= range_perc[-1]
    # new_img[lowest_values] = smooth_threshold(
    #     new_img[lowest_values], mode='low')
    # new_img[highest_values] = smooth_threshold(
    #     new_img[highest_values], mode='high')

    # Apply mask and set background to zero
    # new_img[mask == False] = 0.
    new_img = new_img.reshape(image_shape)
    return new_img
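A scalar sketch of the per-bin affine map that the vectorised code above computes (the landmark values below are made up; in practice range_perc comes from __compute_percentiles and range_mapping from the reference mapping):

    import numpy as np

    range_perc = np.array([10., 60., 120., 200., 255.])     # image percentiles at the cut points
    range_mapping = np.array([0., 25., 50., 75., 100.])     # reference landmarks
    v = 90.0                                                 # one input intensity
    k = np.digitize([v], range_perc[1:-1], right=False)[0]  # bin index, here 1
    slope = (range_mapping[k + 1] - range_mapping[k]) / (range_perc[k + 1] - range_perc[k])
    new_v = range_mapping[k] + slope * (v - range_perc[k])   # -> 37.5, the standardised intensity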
Example 42
 def phase(self, value):
     self._phase = look_up_operations(value, PHASES)