Example #1
def test_determine_num_processes():
    # Test that the correct number of effective num_processes is returned

    # 0 should raise an error
    assert_raises(ValueError, determine_num_processes, 0)

    # A string should raise an error
    assert_raises(TypeError, determine_num_processes, "0")

    # 1 should be 1
    assert_equal(determine_num_processes(1), 1)

    # A positive integer should not change
    assert_equal(determine_num_processes(4), 4)

    # None and -1 should be equal (all cores)
    assert_equal(determine_num_processes(None), determine_num_processes(-1))

    # A big negative number should be 1
    assert_equal(determine_num_processes(-10000), 1)

    # -2 should be one less than -1 (if there are more than 1 cores)
    if determine_num_processes(-1) > 1:
        assert_equal(determine_num_processes(-1),
                     determine_num_processes(-2) + 1)
Example #2
def gibbs_removal(vol,
                  slice_axis=2,
                  n_points=3,
                  inplace=True,
                  num_processes=1):
    """Suppresses Gibbs ringing artefacts of images volumes.

    Parameters
    ----------
    vol : ndarray ([X, Y]), ([X, Y, Z]) or ([X, Y, Z, g])
        Matrix containing one volume (3D) or multiple (4D) volumes of images.
    slice_axis : int (0, 1, or 2)
        Data axis corresponding to the number of acquired slices.
        Default is set to the third axis.
    n_points : int, optional
        Number of neighbour points to assess local TV (see note).
        Default is set to 3.
    inplace : bool, optional
        If True, the input data is replaced with its corrected version.
        Otherwise, a new array is returned.
        Default is set to True.
    num_processes : int or None, optional
        Split the calculation to a pool of children processes. This only
        applies to 3D or 4D `data` arrays. Default is 1. If < 0 the maximal
        number of cores minus ``|num_processes + 1|`` is used (enter -1 to
        use as many cores as possible). 0 raises an error.

    Returns
    -------
    vol : ndarray ([X, Y]), ([X, Y, Z]) or ([X, Y, Z, g])
        Matrix containing one volume (3D) or multiple (4D) volumes of corrected
        images.

    Notes
    -----
    For a 4D matrix, the last dimension should always correspond to the
    number of diffusion gradient directions.

    References
    ----------
    Please cite the following articles:
    .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI Data
           Analysis and their Application to the Healthy Ageing Brain
           (Doctoral thesis). https://doi.org/10.17863/CAM.29356
    .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing artifact
           removal based on local subvoxel-shifts. Magn Reson Med. 2016
           doi: 10.1002/mrm.26054.

    """
    nd = vol.ndim

    # check matrix dimension
    if nd > 4:
        raise ValueError("Data have to be a 4D, 3D or 2D matrix")
    elif nd < 2:
        raise ValueError("Data is not an image")

    if not isinstance(inplace, bool):
        raise TypeError("inplace must be a boolean.")

    num_processes = determine_num_processes(num_processes)

    # check the axis corresponding to different slices
    # 1) This axis cannot be larger than 2
    if slice_axis > 2:
        raise ValueError("Different slices have to be organized along" +
                         "one of the 3 first matrix dimensions")

    # 2) Reorder axis to allow iteration over the first axis
    elif nd == 3:
        vol = np.moveaxis(vol, slice_axis, 0)
    elif nd == 4:
        vol = np.moveaxis(vol, (slice_axis, 3), (0, 1))

    if nd == 4:
        inishap = vol.shape
        vol = vol.reshape((inishap[0] * inishap[1], inishap[2], inishap[3]))

    # Produce weighting functions for 2D Gibbs removal
    shap = vol.shape
    G0, G1 = _weights(shap[-2:])

    # Copy data if not inplace
    if not inplace:
        vol = vol.copy()

    # Run Gibbs removal of 2D images
    if nd == 2:
        vol[:, :] = _gibbs_removal_2d(vol, n_points=n_points, G0=G0, G1=G1)
    else:
        pool = Pool(num_processes)

        partial_func = partial(_gibbs_removal_2d,
                               n_points=n_points,
                               G0=G0,
                               G1=G1)
        vol[:, :, :] = pool.map(partial_func, vol)
        pool.close()
        pool.join()

    # Reshape data to original format
    if nd == 3:
        vol = np.moveaxis(vol, 0, slice_axis)
    if nd == 4:
        vol = vol.reshape(inishap)
        vol = np.moveaxis(vol, (0, 1), (slice_axis, 3))

    return vol
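A short usage sketch for the function above on synthetic data; the import path dipy.denoise.gibbs matches current DIPY, but verify it for your version:

import numpy as np
from dipy.denoise.gibbs import gibbs_removal

# Synthetic 3D volume: 32 axial slices of 64x64 images
vol = np.random.rand(64, 64, 32)

# Correct all slices with 4 worker processes, leaving the input intact
corrected = gibbs_removal(vol, slice_axis=2, n_points=3,
                          inplace=False, num_processes=4)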
Example #3
def reslice(data, affine, zooms, new_zooms, order=1, mode='constant', cval=0,
            num_processes=1):
    """Reslice data with new voxel resolution defined by ``new_zooms``

    Parameters
    ----------
    data : array, shape (I,J,K) or (I,J,K,N)
        3d volume or 4d volume with datasets
    affine : array, shape (4,4)
        mapping from voxel coordinates to world coordinates
    zooms : tuple, shape (3,)
        voxel size for (i,j,k) dimensions
    new_zooms : tuple, shape (3,)
        new voxel size for (i,j,k) after resampling
    order : int, from 0 to 5
        Order of interpolation for resampling/reslicing:
        0 for nearest-neighbour interpolation, 1 for trilinear, etc.
        If you do not want any smoothing, 0 is the option you need.
    mode : string ('constant', 'nearest', 'reflect' or 'wrap')
        Points outside the boundaries of the input are filled according
        to the given mode.
    cval : float
        Value used for points outside the boundaries of the input if
        mode='constant'.
    num_processes : int, optional
        Split the calculation to a pool of children processes. This only
        applies to 4D `data` arrays. Default is 1. If < 0 the maximal number
        of cores minus ``|num_processes + 1|`` is used (enter -1 to use as
        many
        cores as possible). 0 raises an error.

    Returns
    -------
    data2 : array, shape (I,J,K) or (I,J,K,N)
        datasets resampled into isotropic voxel size
    affine2 : array, shape (4,4)
        new affine for the resampled image

    Examples
    --------
    >>> from dipy.io.image import load_nifti
    >>> from dipy.align.reslice import reslice
    >>> from dipy.data import get_fnames
    >>> f_name = get_fnames('aniso_vox')
    >>> data, affine, zooms = load_nifti(f_name, return_voxsize=True)
    >>> data.shape == (58, 58, 24)
    True
    >>> zooms
    (4.0, 4.0, 5.0)
    >>> new_zooms = (3.,3.,3.)
    >>> new_zooms
    (3.0, 3.0, 3.0)
    >>> data2, affine2 = reslice(data, affine, zooms, new_zooms)
    >>> data2.shape == (77, 77, 40)
    True
    """
    num_processes = determine_num_processes(num_processes)

    # We are suppressing warnings emitted by scipy >= 0.18,
    # described in https://github.com/dipy/dipy/issues/1107.
    # These warnings are not relevant to us, as long as our offset
    # input to scipy's affine_transform is [0, 0, 0]
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=".*scipy.*18.*",
                                category=UserWarning)
        new_zooms = np.array(new_zooms, dtype='f8')
        zooms = np.array(zooms, dtype='f8')
        R = new_zooms / zooms
        new_shape = zooms / new_zooms * np.array(data.shape[:3])
        new_shape = tuple(np.round(new_shape).astype('i8'))
        kwargs = {'matrix': R, 'output_shape': new_shape, 'order': order,
                  'mode': mode, 'cval': cval}
        if data.ndim == 3:
            data2 = affine_transform(input=data, **kwargs)
        if data.ndim == 4:
            data2 = np.zeros(new_shape+(data.shape[-1],), data.dtype)

            if num_processes == 1:
                for i in range(data.shape[-1]):
                    affine_transform(input=data[..., i], output=data2[..., i],
                                     **kwargs)
            else:
                params = []
                for i in range(data.shape[-1]):
                    _kwargs = {'input': data[..., i]}
                    _kwargs.update(kwargs)
                    params.append(_kwargs)

                pool = Pool(num_processes)

                for i, res in enumerate(pool.imap(_affine_transform, params)):
                    data2[..., i] = res
                pool.close()

        Rx = np.eye(4)
        Rx[:3, :3] = np.diag(R)
        affine2 = np.dot(affine, Rx)
    return data2, affine2
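The docstring example exercises the serial 3D path; a 4D call that goes through the process pool could look like the following sketch (synthetic data; the actual speedup depends on the number of volumes and cores):

import numpy as np
from dipy.align.reslice import reslice

# Synthetic 4D series: 10 volumes of 32x32x16 voxels at 2x2x3 mm
data = np.random.rand(32, 32, 16, 10).astype(np.float32)
affine = np.diag([2., 2., 3., 1.])

# Resample every volume to isotropic 2 mm using all available cores
data2, affine2 = reslice(data, affine, (2., 2., 3.), (2., 2., 2.),
                         num_processes=-1)
print(data2.shape)  # (32, 32, 24, 10)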
Example #4
def peaks_from_model(model, data, sphere, relative_peak_threshold,
                     min_separation_angle, mask=None, return_odf=False,
                     return_sh=True, gfa_thr=0, normalize_peaks=False,
                     sh_order=8, sh_basis_type=None, npeaks=5, B=None,
                     invB=None, parallel=False, num_processes=None):
    """Fit the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    data : ndarray
        Diffusion data.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf is returned as spherical harmonics coefficients.
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If True, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'tournier07', 'descoteaux07'}
        ``None`` for the default DIPY basis,
        ``tournier07`` for the Tournier 2007 [2]_ basis, and
        ``descoteaux07`` for the Descoteaux 2007 [1]_ basis
        (``None`` defaults to ``descoteaux07``).
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel : bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    num_processes : int, optional
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()). If < 0 the maximal number of
        cores minus ``|num_processes + 1|`` is used (enter -1 to use as many
        cores as possible). 0 raises an error.

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes

    References
    ----------
    .. [1] Descoteaux, M., Angelino, E., Fitzgibbons, S. and Deriche, R.
           Regularized, Fast, and Robust Analytical Q-ball Imaging.
           Magn. Reson. Med. 2007;58:497-510.
    .. [2] Tournier J.D., Calamante F. and Connelly A. Robust determination
           of the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical deconvolution.
           NeuroImage. 2007;35(4):1459-1472.

    """
    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(
            sphere, sh_order, sh_basis_type, return_inv=True)

    num_processes = determine_num_processes(num_processes)

    if parallel and num_processes > 1:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess,
        # which makes it time out on some systems.
        # see https://github.com/dipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(model,
                                          data, sphere,
                                          relative_peak_threshold,
                                          min_separation_angle,
                                          mask, return_odf,
                                          return_sh,
                                          gfa_thr,
                                          normalize_peaks,
                                          sh_order,
                                          sh_basis_type,
                                          npeaks,
                                          B,
                                          invB,
                                          num_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks,)))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks,)))
    peak_indices = np.zeros((shape + (npeaks,)), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff,)))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices),)))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return _pam_from_attrs(PeaksAndMetrics,
                           sphere,
                           peak_indices,
                           peak_values,
                           peak_dirs,
                           gfa_array,
                           qa_array,
                           shm_coeff if return_sh else None,
                           B if return_sh else None,
                           odf_array if return_odf else None)
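A runnable sketch that exercises peaks_from_model end to end on synthetic data. CsaOdfModel, default_sphere and the import paths are standard DIPY names, but treat them as assumptions for your version (newer releases rename sh_order to sh_order_max):

import numpy as np
from dipy.core.gradients import gradient_table
from dipy.data import default_sphere
from dipy.direction import peaks_from_model
from dipy.reconst.shm import CsaOdfModel

# Synthetic gradient scheme: one b0 plus 64 random unit directions
rng = np.random.default_rng(0)
dirs = rng.normal(size=(64, 3))
dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
bvals = np.r_[0., np.full(64, 1000.)]
gtab = gradient_table(bvals, np.vstack([[0., 0., 0.], dirs]))

# Random "signal", only meant to exercise the API
data = rng.uniform(0.1, 1.0, size=(5, 5, 5, 65))

csa_model = CsaOdfModel(gtab, sh_order=6)
pam = peaks_from_model(csa_model, data, default_sphere,
                       relative_peak_threshold=0.5,
                       min_separation_angle=25,
                       parallel=True, num_processes=-1)
print(pam.peak_dirs.shape)  # (5, 5, 5, 5, 3)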
Example #5
    def fit(self,
            data,
            mask=None,
            num_processes=1,
            parallel_backend='multiprocessing'):
        """
        Fit the SparseFascicleModel object to data.

        Parameters
        ----------
        data : array
            The measured signal.

        mask : array, optional
            A boolean array used to mark the coordinates in the data that
            should be analyzed. Has the shape `data.shape[:-1]`. Default: None,
            which implies that all points should be analyzed.

        num_processes : int, optional
            Split the `fit` calculation to a pool of children processes using
            joblib. This only applies to 4D `data` arrays. Default is 1,
            which does not require joblib and will run `fit` serially.
            If < 0 the maximal number of cores minus ``|num_processes + 1|``
            is used (enter -1 to use as many cores as possible).
            0 raises an error.

        parallel_backend : str, ParallelBackendBase instance or None
            Specify the parallelization backend implementation.
            Supported backends are:
            - "loky" (joblib's default backend) can induce some
              communication and memory overhead when exchanging input and
              output data with the worker Python processes.
            - "multiprocessing" is the previous process-based backend,
              based on `multiprocessing.Pool`. It is less robust than
              "loky".
            - "threading" is a very low-overhead backend but it suffers
              from the Python Global Interpreter Lock if the called function
              relies a lot on Python objects. "threading" is mostly useful
              when the execution bottleneck is a compiled extension that
              explicitly releases the GIL (for instance a Cython loop wrapped
              in a "with nogil" block or an expensive call to a library such
              as NumPy).
            Default: 'multiprocessing'.

        Returns
        -------
        SparseFascicleFit object
        """

        if mask is None:
            # Flatten it to 2D either way:
            data_in_mask = np.reshape(data, (-1, data.shape[-1]))
        else:
            # Check for valid shape of the mask
            if mask.shape != data.shape[:-1]:
                raise ValueError("Mask is not the same shape as data.")
            mask = np.array(mask, dtype=bool, copy=False)
            data_in_mask = np.reshape(data[mask], (-1, data.shape[-1]))

        # Fitting is done on the relative signal (S/S0):
        flat_S0 = np.mean(data_in_mask[..., self.gtab.b0s_mask], -1)
        if not flat_S0.size or not flat_S0.max():
            flat_S = np.zeros(data_in_mask[..., ~self.gtab.b0s_mask].shape)
        else:
            flat_S = (data_in_mask[..., ~self.gtab.b0s_mask] /
                      flat_S0[..., None])
        isotropic = self.isotropic(self.gtab).fit(data, mask)
        flat_params = np.zeros(
            (data_in_mask.shape[0], self.design_matrix.shape[-1]))
        del data_in_mask
        gc.collect()

        isopredict = isotropic.predict()
        if mask is None:
            isopredict = np.reshape(isopredict, (-1, isopredict.shape[-1]))
        else:
            isopredict = isopredict[mask]

        # Normalize num_processes: None and negative values resolve to an
        # effective core count, 0 raises an error
        num_processes = determine_num_processes(num_processes)

        if num_processes > 1 and has_joblib:
            with joblib.Parallel(n_jobs=num_processes,
                                 backend=parallel_backend,
                                 mmap_mode='r+') as parallel:
                out = parallel(
                    joblib.delayed(self._fit_solver2voxels)(
                        isopredict, vox_data, vox, True)
                    for vox, vox_data in enumerate(flat_S))

            del parallel

            # Collect the per-voxel results back into the preallocated
            # array, keyed by voxel index
            flat_params_dict = {}
            for d in out:
                flat_params_dict.update(d)
            for vox, params in flat_params_dict.items():
                flat_params[int(vox)] = params
        else:
            for vox, vox_data in enumerate(flat_S):
                flat_params[vox] = self._fit_solver2voxels(
                    isopredict, vox_data, vox, False)

        del isopredict, flat_S
        gc.collect()

        if mask is None:
            out_shape = data.shape[:-1] + (-1, )
            beta = flat_params.reshape(out_shape)
            S0 = flat_S0.reshape(data.shape[:-1])
        else:
            beta = np.zeros(data.shape[:-1] + (self.design_matrix.shape[-1], ))
            beta[mask, :] = flat_params
            S0 = np.zeros(data.shape[:-1])
            S0[mask] = flat_S0

        return SparseFascicleFit(self, beta, S0, isotropic)
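Finally, a call sketch for the fit method above on synthetic data. SparseFascicleModel lives in dipy.reconst.sfm; parallel fitting additionally requires joblib to be installed:

import numpy as np
from dipy.core.gradients import gradient_table
from dipy.reconst.sfm import SparseFascicleModel

# Synthetic scheme: two b0s plus 30 random unit directions at b=1000
rng = np.random.default_rng(1)
dirs = rng.normal(size=(30, 3))
dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
bvals = np.r_[0., 0., np.full(30, 1000.)]
gtab = gradient_table(bvals, np.vstack([np.zeros((2, 3)), dirs]))

data = rng.uniform(0.1, 1.0, size=(4, 4, 4, 32))

sfm = SparseFascicleModel(gtab)
# num_processes=1 fits serially; -1 would use every core via joblib
sf_fit = sfm.fit(data, num_processes=1)
print(sf_fit.beta.shape)  # fascicle weights per voxel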