Example #1
0
 def load_maps(map_list):
     """Load the given maps, resolving filenames to nifti data.

     Args:
         map_list (list): a list of filenames (str) and/or ndarrays.

     Returns:
         list: the loaded maps; filename entries are replaced by the data of the
             loaded nifti file, ndarray entries are passed through unchanged.
     """
     loaded = []
     for data in map_list:
         # Python 3: plain ``str`` replaces six's ``string_types``, consistent
         # with the other functions in this file.
         if isinstance(data, str):
             loaded.append(load_nifti(data).get_data())
         else:
             loaded.append(data)
     return loaded
Example #2
0
def sort_maps(maps_to_sort_on,
              extra_maps_to_sort=None,
              reversed_sort=False,
              sort_index_map=None):
    """Sort the given maps on the maps to sort on.

    This first creates a sort matrix to index the maps in sorted order per voxel. Next, it creates the output
    maps for the maps we sort on. If extra_maps_to_sort is given it should be of the same length as the maps_to_sort_on.

    Args:
        maps_to_sort_on (:class:`list`): a list of string (filenames) or ndarrays we will use and compare
        extra_maps_to_sort (:class:`list`): an additional list we will sort based on the indices in maps_to_sort.
            This should be of the same length as maps_to_sort_on.
        reversed_sort (boolean): if we want to sort from large to small instead of small to large.
        sort_index_map (ndarray): if given we use this sort index map instead of generating one by sorting the
            maps_to_sort_on.

    Returns:
        tuple: the first element is the list of sorted volumes, the second the list of extra sorted maps and the
            last is the sort index map used.

    Raises:
        ValueError: if extra_maps_to_sort is given but its length does not match maps_to_sort_on.
    """
    def load_maps(map_list):
        """Load filename entries as nifti data; pass ndarray entries through."""
        loaded = []
        for data in map_list:
            # Python 3: use ``str`` instead of six's ``string_types``, consistent
            # with the other functions in this file.
            if isinstance(data, str):
                loaded.append(load_nifti(data).get_data())
            else:
                loaded.append(data)
        return loaded

    maps_to_sort_on = load_maps(maps_to_sort_on)
    if extra_maps_to_sort:
        extra_maps_to_sort = load_maps(extra_maps_to_sort)

        if len(extra_maps_to_sort) != len(maps_to_sort_on):
            raise ValueError(
                'The length of the maps to sort on and the extra maps to sort do not match.'
            )

    from mdt.utils import create_sort_matrix, sort_volumes_per_voxel

    if sort_index_map is None:
        # Stack the maps along the 4th (volume) dimension and derive the
        # per-voxel sort order from the stacked data.
        sort_index_map = create_sort_matrix(np.concatenate(maps_to_sort_on, axis=3),
                                            reversed_sort=reversed_sort)
    elif isinstance(sort_index_map, str):
        # A filename was given: load it and round to integer indices.
        sort_index_map = np.round(
            load_nifti(sort_index_map).get_data()).astype(np.int64)

    sorted_maps = sort_volumes_per_voxel(maps_to_sort_on, sort_index_map)
    if extra_maps_to_sort:
        sorted_extra_maps = sort_volumes_per_voxel(extra_maps_to_sort,
                                                   sort_index_map)
        return sorted_maps, sorted_extra_maps, sort_index_map
    return sorted_maps, [], sort_index_map
Example #3
0
    def _preload_extra_protocol_items(self, extra_protocol):
        """Load all the extra protocol items that were defined by a filename."""
        if extra_protocol is None:
            return {}

        return_items = {}
        for key, value in extra_protocol.items():
            if isinstance(value, str):
                if value.endswith('.nii') or value.endswith('.nii.gz'):
                    loaded_val = load_nifti(value).get_data()
                else:
                    loaded_val = np.genfromtxt(value)
            else:
                loaded_val = value
            return_items[key] = loaded_val
        return return_items
Example #4
0
        def _compute_noise_std():
            """Resolve the configured noise std into a usable value.

            Resolution order: ``None`` triggers automatic estimation (falling
            back to 1 when estimation fails); numbers and ndarrays are returned
            as-is; strings are read as a ``.txt`` scalar file or loaded as a
            nifti volume. Any other type falls back to 1 with a warning.
            """
            if self._noise_std is None:
                # No std configured: try to estimate one from the data itself.
                try:
                    return estimate_noise_std(self)
                except NoiseStdEstimationNotPossible:
                    self._logger.warning('Failed to obtain a noise std for this subject. '
                                         'We will continue with an std of 1.')
                    return 1

            if isinstance(self._noise_std, (numbers.Number, np.ndarray)):
                return self._noise_std

            if isinstance(self._noise_std, str):
                filename = str(self._noise_std)
                if filename.endswith('.txt'):
                    # A text file holds a single scalar std value.
                    with open(filename, 'r') as f:
                        return float(f.read())
                # Any other filename is assumed to be a per-voxel std volume.
                return load_nifti(filename).get_data()

            self._logger.warning('Failed to obtain a noise std for this subject. We will continue with an std of 1.')
            return 1
Example #5
0
def sort_maps(input_maps, reversed_sort=False, sort_index_matrix=None):
    """Sort the values of the given maps voxel by voxel.

    This first creates a sort matrix to index the maps in sorted order per voxel. Next, it creates the output
    maps for the maps we sort on.

    Args:
        input_maps (:class:`list`): a list of string (filenames) or ndarrays we will sort
        reversed_sort (boolean): if we want to sort from large to small instead of small to large.
            This is not used if a sort index matrix is provided.
        sort_index_matrix (ndarray): if given we use this sort index map instead of generating one by sorting the
            maps_to_sort_on. Supposed to be a integer matrix.

    Returns:
        list: the list of sorted volumes
    """
    if isinstance(sort_index_matrix, str):
        # A filename was given: load it and round to integer indices.
        index_matrix = np.round(load_nifti(sort_index_matrix).get_data()).astype(np.int64)
    elif sort_index_matrix is None:
        # Nothing given: derive the per-voxel sort order from the input maps.
        index_matrix = create_sort_matrix(input_maps, reversed_sort=reversed_sort)
    else:
        index_matrix = sort_index_matrix
    return sort_volumes_per_voxel(input_maps, index_matrix)
Example #6
0
def load_input_data(volume_info, protocol, mask, extra_protocol=None, gradient_deviations=None,
                    noise_std=None, volume_weights=None):
    """Load and create the input data object for diffusion MRI modeling.

    Args:
        volume_info (string or tuple): either the full path to the volume (4d signal data),
            or an (ndarray, img_header) tuple.
        protocol (:class:`~mdt.protocols.Protocol` or str): a protocol object with the right protocol for the
            given data, or a string with a filename to load the protocol from.
        mask (ndarray, str): a full path to a mask file or a 3d ndarray containing the mask
        extra_protocol (Dict[str, val]): additional protocol items, overriding duplicated names from the
            ``protocol``. Where the ``protocol`` holds only scalars and vectors, these values may also be
            volumes, e.g. per-voxel gradient volumes::

                extra_protocol = {'g': np.array(...)}

            Per element the value can be a scalar, vector, array, or a filename; filenames are interpreted
            again as a scalar, vector or array.
        gradient_deviations (str or ndarray): a gradient deviations matrix. A string is interpreted as a
            Nifti filename. Supported formats:

            - (x, y, z, 9): per voxel 9 values constituting the gradient non-linearities per the HCP
                guidelines (see
                ``www.humanconnectome.org/storage/app/media/documentation/data_release/Q1_Release_Appendix_II.pdf``);
                the identity matrix is added automatically as the guidelines specify.
            - (x, y, z, 3, 3): per voxel a deformation matrix, used as given (no identity added).
            - (x, y, z, m, 3, 3): per voxel and per volume a deformation matrix, used as given.

        noise_std (number or ndarray): None for automatic detection, a scalar, or a 3d matrix with one
            value per voxel.
        volume_weights (ndarray): if given, a float matrix of the same size as the volume with per voxel
            and volume a weight in [0, 1], used during fitting to weigh the objective function values
            per observation.

    Returns:
        SimpleMRIInputData: the input data object containing all the info needed for diffusion MRI model fitting
    """
    def _as_volume(value):
        # Filename inputs are loaded as nifti data; everything else passes through.
        if isinstance(value, str):
            return load_nifti(value).get_data()
        return value

    protocol = load_protocol(protocol)
    mask = load_brain_mask(mask)

    if isinstance(volume_info, str):
        nifti = load_nifti(volume_info)
        signal4d = nifti.get_data()
        img_header = nifti.header
    else:
        signal4d, img_header = volume_info

    return SimpleMRIInputData(protocol, signal4d, mask, img_header,
                              extra_protocol=extra_protocol,
                              noise_std=noise_std,
                              gradient_deviations=_as_volume(gradient_deviations),
                              volume_weights=_as_volume(volume_weights))