def test_check_niimg_3d():
    # check error when the input is not in a nibabel-compatible format
    assert_raises_regex(TypeError, 'nibabel format',
                        _utils.check_niimg, 0)

    # check error when the input is an empty object
    assert_raises_regex(TypeError, 'empty object',
                        _utils.check_niimg, [])

    # Test dimensionality error
    img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    assert_raises_regex(TypeError,
                        "Input data has incompatible dimensionality: "
                        "Expected dimension is 3D and you provided a list "
                        "of 3D images \(4D\).",
                        _utils.check_niimg_3d, [img, img])

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 1))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg_3d(filename)

    # check that the data dtype is preserved with dtype='auto'
    img_check = _utils.check_niimg_3d(img, dtype='auto')
    assert_equal(img.get_data().dtype.kind, img_check.get_data().dtype.kind)
def _apply_mask(imgs, mask_img):
    # Validate the mask as a 3D niimg and binarize its data.
    mask_img = _utils.check_niimg_3d(mask_img)
    mask = mask_img.get_data()
    mask = _utils.as_ndarray(mask, dtype=bool)

    mask_img = new_img_like(mask_img, mask, mask_img.affine)

    return _apply_mask_fmri(imgs, mask_img, dtype='f', smoothing_fwhm=None,
                            ensure_finite=True)
Example #4
    def transform(self, imgs, confounds=None):
        if self.smoothing_fwhm is not None or self.target_affine is not None:
            if self.smoothing_fwhm is not None:
                imgs = smooth_img(imgs, self.smoothing_fwhm)
            if self.target_affine is not None:
                imgs = resample_img(imgs, target_affine=self.target_affine)
        else:
            imgs = [check_niimg_3d(img) for img in imgs] if isinstance(
                imgs, list) else check_niimg_3d(imgs)

        return imgs
def test_check_niimg_3d():
    # check error when the input is not in a nibabel-compatible format
    assert_raises_regex(TypeError, 'nibabel format', _utils.check_niimg, 0)

    # check error when the input is an empty object
    assert_raises_regex(TypeError, 'empty object', _utils.check_niimg, [])

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 1))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg_3d(filename)
Example #6
def concat_3D_imgs(in_files, out_file=None):
    """ Use nilearn.image.concat_imgs to concat 3D volumes into one 4D volume.

    If `in_files` is a list of 3D volumes, the return value is one 4D volume
    concatenating them. Otherwise (e.g., a list of 4D volumes), `in_files` is
    returned unchanged.

    Returns
    -------
    out_file: 4D niimg or list
        The concatenated 4D image, or the original `in_files` when not all
        inputs are 3D.
    """
    import nilearn.image as niimg

    from nilearn._utils import check_niimg_3d

    all_3D = True
    for idx, img in enumerate(in_files):
        try:
            _ = check_niimg_3d(img)
        except Exception:
            all_3D = False
            break

    if not all_3D:
        # raise AttributeError('Expected all input images to be 3D volumes, but '
        #                     ' at least the {}th is not.'.format(idx))
        return in_files
    else:
        return niimg.concat_imgs(in_files)
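A minimal usage sketch (the file paths below are hypothetical placeholders; assumes nilearn is installed and the function above is importable):

in_files = ['/tmp/vol_01.nii.gz', '/tmp/vol_02.nii.gz', '/tmp/vol_03.nii.gz']

# If every input is 3D, one concatenated 4D image is returned.
img_4d = concat_3D_imgs(in_files)
print(img_4d.shape)   # e.g. (x, y, z, 3)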
Example #8
    def transform(self, imgs, confounds=None):
        """Extract ROI-wise signals (and optional summary features) from imgs.

        Parameters
        ----------
        imgs: list of Niimg-like objects
        """
        self._check_fitted()

        if self.smoothing_fwhm:
            imgs = smooth_img(imgs, self.smoothing_fwhm)

        imgs = [_utils.check_niimg_3d(img) for img in imgs]

        for i, roi in enumerate(self.mask_img_):
            masker = NiftiMasker(mask_img=roi)
            x = masker.fit_transform(imgs)
            if self.extract_funcs is not None:
                x = np.array([
                    FDICT[f][0](x, **FDICT[f][1]) for f in self.extract_funcs
                ])
            if i == 0:
                X = x
            else:
                X = np.concatenate((X, x), axis=0)

        return X.swapaxes(0, 1)
Example #9
    def transform(self, imgs, confounds=None):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None:
                if not os.path.exists(path_first_resampled) and not os.path.exists(path_first_smoothed):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = []
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = []
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return self.masker.transform(imgs)
Example #12
    def preprocess(self, imgs):

        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None or self.smoothing_fwhm is not None:
            if self.resampling is not None and not os.path.exists(path_first_smoothed):
                if not os.path.exists(path_first_resampled):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = [os.path.join(os.path.dirname(img), resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = [os.path.join(os.path.dirname(img), smooth_prefix + resample_prefix + os.path.basename(img)) if isinstance(img, str)
                            else os.path.join(os.path.dirname(img.get_filename()), smooth_prefix + resample_prefix + os.path.basename(img.get_filename())) for img in imgs]
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return imgs
def test_check_niimg_3d():
    # check error when the input is not in a nibabel-compatible format
    assert_raises_regex(TypeError, "nibabel format", _utils.check_niimg, 0)

    # check error when the input is an empty object
    assert_raises_regex(TypeError, "empty object", _utils.check_niimg, [])

    # Test dimensionality error
    img = Nifti1Image(np.zeros((10, 10, 10)), np.eye(4))
    assert_raises_regex(TypeError, "Data must be a 3D", _utils.check_niimg_3d, [img, img])

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 1))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg_3d(filename)
Example #14
def check_atlas_info(atlas, atlas_info, labels=None):
    """
    Checks whether provided `atlas_info` for `atlas` is sufficient for processing

    Parameters
    ----------
    atlas : niimg-like object
        Parcel image, where each parcel should be identified with a unique
        integer ID
    atlas_info : str or pandas.DataFrame
        Filepath or dataframe containing information about `atlas`. Must have
        at least columns 'id', 'hemisphere', and 'structure' containing
        information mapping atlas IDs to hemisphere and broad structural class
        (e.g., "cortex", "subcortex", "cerebellum").
    labels : array_like, optional
        List of labels to compare between `atlas` and `atlas_info`. If not
        specified this function will attempt to confirm that all IDs present
        in `atlas` have entries in `atlas_info` and vice versa. Default: None

    Returns
    -------
    atlas_info : pandas.DataFrame
        Loaded dataframe with information on atlas
    """

    atlas = check_niimg_3d(atlas)

    # load info, if not already
    try:
        atlas_info = pd.read_csv(atlas_info)
    except ValueError:
        pass

    try:
        if 'id' in atlas_info.columns:
            atlas_info = atlas_info.set_index('id')
    except AttributeError:
        raise ValueError('Provided atlas_info must be a filepath or pandas.'
                         'DataFrame. Please confirm inputs and try again.')

    ids = get_unique_labels(atlas) if labels is None else labels
    cols = ['hemisphere', 'structure']
    try:
        assert all(c in atlas_info.columns for c in cols)
        assert ('id' == atlas_info.index.name)
        assert len(np.intersect1d(ids, atlas_info.index)) == len(ids)
    except AssertionError:
        raise ValueError('Provided `atlas_info` does not have adequate '
                         'information on supplied `atlas`. Please confirm '
                         'that `atlas_info` has columns [\'id\', '
                         '\'hemisphere\', \'structure\'], and that the region '
                         'IDs listed in `atlas_info` account for all those '
                         'found in `atlas`.')

    return atlas_info
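A brief usage sketch (the atlas path and metadata are hypothetical; assumes pandas is imported as pd):

atlas = '/tmp/atlas.nii.gz'
info = pd.DataFrame({'id': [1, 2],
                     'hemisphere': ['L', 'R'],
                     'structure': ['cortex', 'cortex']})

# Returns the validated DataFrame indexed by 'id', or raises ValueError.
atlas_info = check_atlas_info(atlas, info)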
Example #16
    def fit(self, imgs=None, y=None):
        """Compute the mask corresponding to the data

        Parameters
        ----------
        imgs: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            Data on which the mask must be calculated. If this is a list,
            the affine is considered the same for all.
        """
        # y=None is for scikit-learn compatibility (unused here).

        # Load data (if filenames are given, load them)
        if self.verbose > 0:
            print("[%s.fit] Loading data from %s" % (
                self.__class__.__name__,
                _utils._repr_niimgs(imgs)[:200]))

        # Compute the mask if not given by the user
        if self.mask_img is None:
            mask_args = (self.mask_args if self.mask_args is not None
                         else {})
            if self.mask_strategy == 'background':
                compute_mask = masking.compute_background_mask
            elif self.mask_strategy == 'epi':
                compute_mask = masking.compute_epi_mask
            else:
                raise ValueError("Unknown value of mask_strategy '%s'. "
                                 "Acceptable values are 'background' and "
                                 "'epi'." % self.mask_strategy)
            if self.verbose > 0:
                print("[%s.fit] Computing the mask" % self.__class__.__name__)
            self.mask_img_ = self._cache(compute_mask, ignore=['verbose'])(
                imgs, verbose=max(0, self.verbose - 1), **mask_args)
        else:
            self.mask_img_ = _utils.check_niimg_3d(self.mask_img)

        # If resampling is requested, resample also the mask
        # Resampling: allows the user to change the affine, the shape or both
        if self.verbose > 0:
            print("[%s.fit] Resampling mask" % self.__class__.__name__)
        self.mask_img_ = self._cache(image.resample_img)(
            self.mask_img_,
            target_affine=self.target_affine,
            target_shape=self.target_shape,
            copy=False)
        if self.target_affine is not None:
            self.affine_ = self.target_affine
        else:
            self.affine_ = self.mask_img_.affine
        # Load data in memory
        self.mask_img_.get_data()
        if self.verbose > 10:
            print("[%s.fit] Finished fit" % self.__class__.__name__)
        return self
Example #17
def _get_volume(img,
                threshold=0,
                atlas=None,
                stride=1,
                t_start=0,
                t_end=-1,
                n_t=50,
                t_r=None,
                marker_size=3,
                cmap=cm.cold_hot,
                symmetric_cmap=True,
                vmax=None,
                vmin=None):
    connectome = {}
    img = check_niimg_4d(img)
    t_unit = "" if not t_r else " s"
    if not t_r:
        t_r = 1
    if t_end < 0:
        t_end = img.shape[3] + t_end
    if not n_t:
        n_t = t_end - t_start
    t_idx = np.round(np.linspace(t_start, t_end, n_t)).astype(int)
    t_labels = [str(t_r * t) + t_unit for t in t_idx]
    data = _safe_get_data(img)[::stride, ::stride, ::stride, t_idx]
    mask = np.abs(data[:, :, :, 0]) > threshold
    i, j, k = mask.nonzero()
    x, y, z = coord_transform(i * stride, j * stride, k * stride, img.affine)
    for coord, cname in [(x, "x"), (y, "y"), (z, "z")]:
        connectome["_con_{}".format(cname)] = encode(
            np.asarray(coord, dtype='<f4'))
    colors = colorscale(cmap,
                        data.ravel(),
                        symmetric_cmap=symmetric_cmap,
                        vmax=vmax,
                        vmin=vmin)
    if atlas:
        atlas = check_niimg_3d(atlas)
        atlas_data = _safe_get_data(atlas)[::stride, ::stride, ::stride]
        connectome['atlas'] = encode(
            np.asarray(atlas_data[i, j, k], dtype='<f4'))
        connectome['atlas_nb'] = int(np.max(atlas_data))
    connectome['colorscale'] = colors['colors']
    connectome['cmin'] = float(colors['vmin'])
    connectome['cmax'] = float(colors['vmax'])
    connectome['n_time'] = n_t
    connectome['t_labels'] = t_labels
    values = [
        encode(np.asarray(data[i, j, k, t], dtype='<f4'))
        for t in range(data.shape[3])
    ]
    connectome['values'] = values

    return connectome
Example #19
    def apply_mask(self, series, mask_img):
        """
        Apply mask on series.

        :param series: np.ndarray, data to mask (first three axes must match
            the mask shape)
        :param mask_img: niimg-like, 3D binary mask image
        :return: np.ndarray, masked series with time on the first axis
        """
        mask_img = _utils.check_niimg_3d(mask_img)
        mask, mask_affine = masking._load_mask_img(mask_img)
        mask_img = image.new_img_like(mask_img, mask, mask_affine)
        mask_data = _utils.as_ndarray(mask_img.get_fdata(),
                                      dtype=bool)
        return series[mask_data].T
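A hedged usage sketch (shapes and mask path are hypothetical; assumes an instance `extractor` exposing the method above):

import numpy as np

series = np.random.rand(40, 40, 40, 100)   # x, y, z, time
masked = extractor.apply_mask(series, '/tmp/mask.nii.gz')
print(masked.shape)   # (100, n_voxels_in_mask)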
Example #20
def compute_confounds(imgs, mask_img, n_confounds=5, get_randomized_svd=False,
                      compute_not_mask=False):
    """
    """
    confounds = []
    if not isinstance(imgs, collections.abc.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = get_affine(img)

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    if compute_not_mask:
        print("Non-mask-based confounds extraction")
        not_mask_data = np.logical_not(mask_img.get_data().astype(int))
        whole_brain_mask = masking.compute_multi_epi_mask(imgs)
        not_mask = np.logical_and(not_mask_data, whole_brain_mask.get_data())
        mask_img = new_img_like(img, not_mask.astype(int), affine)

    for img in imgs:
        print("[Confounds Extraction] {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("[Confounds Extraction] high ariance confounds computation]")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        if compute_not_mask and get_randomized_svd:
            signals = masking.apply_mask(img, mask_img)
            non_constant = np.any(np.diff(signals, axis=0) != 0, axis=0)
            signals = signals[:, non_constant]
            signals = signal.clean(signals, detrend=True)
            print("[Confounds Extraction] Randomized SVD computation")
            U, s, V = randomized_svd(signals, n_components=n_confounds,
                                     random_state=0)
            if high_variance is not None:
                confound_ = np.hstack((U, high_variance))
            else:
                confound_ = U
        else:
            confound_ = high_variance
        confounds.append(confound_)

    return confounds
def extract_confounds(imgs, mask_img, n_confounds=10):
    """To extract confounds on list of subjects

    See nilearn.image.high_variance_confounds for technical details.

    Parameters
    ----------
    imgs : list of Nifti images, either str or nibabel.Nifti1Image
        Functional images on which confounds should be extracted

    mask_img : str or nibabel.Nifti1Image
        Mask image with binary values. This mask is applied to each
        functional image for confounds extraction.

    n_confounds : int, optional
        Number of high variance confounds to extract. Default: 10.

    Returns
    -------
    confounds : list of numpy arrays
        One confound array per image in `imgs`, each of shape
        (n_timepoints, n_confounds).
    """
    confounds = []
    if not isinstance(imgs, collections.abc.Iterable) or \
            isinstance(imgs, _basestring):
        imgs = [imgs, ]

    img = _utils.check_niimg_4d(imgs[0])
    shape = img.shape[:3]
    affine = img.affine

    if isinstance(mask_img, _basestring):
        mask_img = _utils.check_niimg_3d(mask_img)

    if not _check_same_fov(img, mask_img):
        mask_img = resample_img(
            mask_img, target_shape=shape, target_affine=affine,
            interpolation='nearest')

    for img in imgs:
        print("[Confounds Extraction] Image selected {0}".format(img))
        img = _utils.check_niimg_4d(img)
        print("Extracting high variance confounds")
        high_variance = high_variance_confounds(img, mask_img=mask_img,
                                                n_confounds=n_confounds)
        confounds.append(high_variance)
    return confounds
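A short usage sketch (the functional runs and mask below are hypothetical placeholders):

func_imgs = ['/tmp/sub-01_func.nii.gz', '/tmp/sub-02_func.nii.gz']
mask = '/tmp/brain_mask.nii.gz'

confounds = extract_confounds(func_imgs, mask, n_confounds=10)
print(confounds[0].shape)   # (n_timepoints, 10)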
Example #22
def get_unique_labels(label_image):
    """
    Returns all possible ROI labels from ``label_image``

    Parameters
    ----------
    label_image : niimg-like object
        ROI image, where each ROI is identified with a unique integer ID

    Returns
    -------
    labels : np.ndarray
        Integer labels of all ROIs found within ``label_image``
    """

    label_image = check_niimg_3d(label_image)
    return np.trim_zeros(np.unique(label_image.get_data())).astype(int)
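A self-contained usage sketch with a toy parcellation built in memory:

import numpy as np
import nibabel as nib

data = np.zeros((4, 4, 4))   # background label 0
data[0, 0, 0] = 1
data[1, 1, 1] = 2
label_img = nib.Nifti1Image(data, np.eye(4))

print(get_unique_labels(label_img))   # [1 2]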
Example #23
    def inverse_transform(self, img, mask):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        img : 4D niimg_like
            Component weight maps.
        mask : 3D niimg_like
            Mask to apply on ``img``.

        Returns
        -------
        img_orig : 4D niimg_like
            Reconstructed original data, with fourth axis corresponding to time.

        Notes
        -----
        This is different from scikit-learn's approach, which ignores explained variance.
        """
        img = check_niimg_4d(img)
        mask = check_niimg_3d(mask)
        data = img.get_fdata()
        mask = mask.get_fdata()

        [n_x, n_y, n_z, n_components] = data.shape
        data_nib_V = np.reshape(data, (n_x * n_y * n_z, n_components),
                                order="F")
        mask_vec = np.reshape(mask, n_x * n_y * n_z, order="F")
        X = data_nib_V[mask_vec == 1, :]

        X_orig = np.dot(np.dot(X, np.diag(self.explained_variance_)),
                        self.components_)
        if self.normalize:
            X_orig = self.scaler_.inverse_transform(X_orig.T).T

        n_t = X_orig.shape[1]
        out_data = np.zeros((n_x * n_y * n_z, n_t))
        out_data[mask_vec == 1, :] = X_orig
        out_data = np.reshape(out_data, (n_x, n_y, n_z, n_t), order="F")
        img_orig = nib.Nifti1Image(out_data, img.affine, img.header)
        return img_orig
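A hedged usage sketch (assumes a fitted estimator `pca` with `components_`, `explained_variance_`, and, when `normalize` is set, `scaler_`; the paths are placeholders):

# 4D component weight maps and a 3D binary mask.
img_orig = pca.inverse_transform('/tmp/components.nii.gz', '/tmp/mask.nii.gz')
print(img_orig.shape)   # (x, y, z, n_timepoints)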
Example #25
def get_centroids(img, labels=None, image_space=False):
    """
    Finds centroids of `labels` in `img`

    Parameters
    ----------
    img : niimg-like object
        3D image containing integer label at each point
    labels : array_like, optional
        List of labels for which to find centroids. If not specified all
        labels present in `img` will be used. Zero will be ignored as it is
        considered "background." Default: None
    image_space : bool, optional
        Whether to return xyz (image space) coordinates for centroids based
        on transformation in `img.affine`. Default: False

    Returns
    -------
    centroids : (N, 3) np.ndarray
        Coordinates of centroids for ROIs in input data
    """

    from nilearn._utils import check_niimg_3d

    img = check_niimg_3d(img)
    data = img.get_data()

    if labels is None:
        labels = np.trim_zeros(np.unique(data))

    centroids = np.row_stack(
        ndimage.center_of_mass(data, labels=data, index=labels))

    if image_space:
        centroids = nib.affines.apply_affine(img.affine, centroids)

    return centroids
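A self-contained usage sketch with a toy image holding a single ROI:

import numpy as np
import nibabel as nib

data = np.zeros((5, 5, 5))
data[2, 2, 2] = 1
img = nib.Nifti1Image(data, np.eye(4))

print(get_centroids(img))                    # ijk coordinates: [[2. 2. 2.]]
print(get_centroids(img, image_space=True))  # xyz coordinates via img.affine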
Example #26
def get_centroids(image, labels=None, image_space=False):
    """
    Finds centroids of ``labels`` in ``image``

    Parameters
    ----------
    image : niimg-like object
        3D image containing integer label at each point
    labels : array_like, optional
        List of values containing labels of which to find centroids. Default:
        all possible labels
    image_space : bool, optional
        Whether to return xyz (image space) coordinates for centroids based
        on transformation in ``image.affine``. Default: False

    Returns
    -------
    centroids : (N, 3) np.ndarray
        Coordinates of centroids for ROIs in input data
    """

    image = check_niimg_3d(image)
    data = image.get_data()

    # if no labels of interest provided, get all possible labels
    if labels is None:
        labels = np.trim_zeros(np.unique(data))

    # get centroids for all possible labels
    centroids = np.row_stack(center_of_mass(data, labels=data, index=labels))

    # return xyz if desired; otherwise, ijk
    if image_space:
        centroids = ijk_to_xyz(centroids, image.affine)

    return centroids
Example #27
def plot_carpet(img, mask_img=None, detrend=True, output_file=None,
                figure=None, axes=None, title=None):
    """Plot an image representation of voxel intensities across time also know
        as the "carpet plot" or "Power plot". See Jonathan Power Neuroimage
        2017 Jul 1; 154:150-158.
        Parameters
        ----------
        img : Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
            4D input image
        mask_img : Niimg-like object, optional
            See http://nilearn.github.io/manipulating_images/input_output.html
            Limit plotted voxels to those inside the provided mask. If not
            specified a new mask will be derived from data.
        detrend : boolean, optional
            Detrend and standardize the data prior to plotting.
        output_file : string, or None, optional
            The name of an image file to export the plot to. Valid extensions
            are .png, .pdf, .svg. If output_file is not None, the plot
            is saved to a file, and the display is closed.
        figure : matplotlib figure, optional
            Matplotlib figure used. If None is given, a
            new figure is created.
        axes : matplotlib axes, optional
            The axes used to display the plot. If None, the complete
            figure is used.
        title : string, optional
            The title displayed on the figure.
    """
    img_nii = _utils.check_niimg_4d(img, dtype='auto')
    img_data = _safe_get_data(img_nii, ensure_finite=True)

    # Define TR and number of frames
    tr = img_nii.header.get_zooms()[-1]
    ntsteps = img_nii.shape[-1]

    if not mask_img:
        nifti_masker = NiftiMasker(mask_strategy='epi', standardize=False)
        nifti_masker.fit(img_nii)
        mask_data = nifti_masker.mask_img_.get_data().astype(bool)
    else:
        mask_nii = _utils.check_niimg_3d(mask_img, dtype='auto')
        mask_data = _safe_get_data(mask_nii, ensure_finite=True)

    data = img_data[mask_data > 0].reshape(-1, ntsteps)
    # Detrend data
    if detrend:
        data = clean(data.T, t_r=tr).T

    if not figure:
        if not axes:
            figure = plt.figure()
        else:
            figure = axes.figure

    if not axes:
        axes = figure.add_subplot(1, 1, 1)
    else:
        assert axes.figure is figure, ("The axes passed are not "
                                       "in the figure")

    # Avoid segmentation faults for long acquisitions by decimating the input
    # data
    long_cutoff = 800
    if data.shape[1] > long_cutoff:
        data = data[:, ::2]

    axes.imshow(data, interpolation='nearest',
                aspect='auto', cmap='gray', vmin=-2, vmax=2)

    axes.grid(False)
    axes.set_yticks([])
    axes.set_yticklabels([])

    # Set 10 frame markers in X axis
    interval = max(
        (int(data.shape[-1] + 1) // 10, int(data.shape[-1] + 1) // 5, 1))
    xticks = list(range(0, data.shape[-1])[::interval])
    axes.set_xticks(xticks)

    axes.set_xlabel('time (s)')
    axes.set_ylabel('voxels')
    if title:
        axes.set_title(title)
    labels = tr * (np.array(xticks))
    if data.shape[1] > long_cutoff:
        labels *= 2
    axes.set_xticklabels(['%.02f' % t for t in labels.tolist()])

    # Remove and redefine spines
    for side in ["top", "right"]:
        # Toggle the spine objects
        axes.spines[side].set_color('none')
        axes.spines[side].set_visible(False)

    axes.yaxis.set_ticks_position('left')
    axes.xaxis.set_ticks_position('bottom')
    axes.spines["bottom"].set_position(('outward', 20))
    axes.spines["left"].set_position(('outward', 20))

    if output_file is not None:
        figure.savefig(output_file)
        plt.close(figure)
        figure = None

    return figure
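A hedged usage sketch with synthetic data (random values stand in for a real 4D acquisition):

import numpy as np
import nibabel as nib

func = nib.Nifti1Image(np.random.rand(10, 10, 10, 50), np.eye(4))
fig = plot_carpet(func, detrend=True, title='Carpet plot')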
def check_embedded_atlas_masker(estimator, atlas_type=None, img=None,
                                multi_subject=True, seeds=None, radius=None,
                                t_r=None, low_pass=None, high_pass=None):
    """Base function to return masker type and its parameters

    Accepts all Nilearn masker types but returns only Maps or Labels masker.
    The idea being that this function returns an object with essential
    parameters embedded in it such as repetition time, low pass, high pass,
    standardize, detrend for resting state fMRI data analysis.

    Mostly useful for pipelined analysis.

    Parameters
    ----------
    estimator : object, instance of all masker types
        Accepts any instance masker type from nilearn.input_data

    img : maps_img or labels_img
        Used in initialization of instance masker object depending upon
        the length/type of the image. If maps_img related then used in
        NiftiMapsMasker instance or labels_img then NiftiLabelsMasker.

    seeds : List of triplet of coordinates in native space
        Used in NiftiSpheresMasker initialization.

    atlas_type : str {'maps', 'labels'}
        'maps' implies NiftiMapsMasker
        'labels' implies NiftiLabelsMasker

    multi_subject : bool, optional
        Indicates whether to return masker of multi subject type.
        List of subjects.

    Returns
    -------
    masker : NiftiMapsMasker, NiftiLabelsMasker
        Depending upon atlas type.

    params : dict
        Masker parameters
    """
    if atlas_type is not None:
        if atlas_type not in ['maps', 'labels', 'spheres', 'auto']:
            raise ValueError(
                "Unsupported masker type for atlas_type={0}. Choose one "
                "among ['maps', 'labels', 'spheres', 'auto'] for an atlas "
                "type masker, or atlas_type=None for general Nifti or "
                "MultiNifti maskers.".format(atlas_type))

    if not isinstance(estimator, (NiftiMasker, MultiNiftiMasker, NiftiMapsMasker,
                                  NiftiLabelsMasker, NiftiSpheresMasker)):
        raise ValueError("Unsupported 'estimator' instance of masker is "
                         "provided".format(estimator))

    if atlas_type == 'spheres' and seeds is None:
        raise ValueError("'seeds' must be specified for atlas_type='spheres'."
                         "See documentation nilearn.input_data.NiftiSpheresMasker.")

    if (atlas_type == 'maps' or atlas_type == 'labels') and img is None:
        raise ValueError("'img' should not be None for atlas_type={0} related "
                         "instance of masker. Atlas related maskers is created "
                         "by provided a valid atlas image. See documenation in "
                         "nilearn.input_data for specific "
                         "masker related either maps or labels".format(atlas_type))

    if atlas_type == 'auto' and img is not None:
        img = check_niimg(img)
        if len(img.shape) > 3:
            atlas_type = 'maps'
        else:
            atlas_type = 'labels'

    if atlas_type == 'maps' and img is not None:
        img = check_niimg_4d(img)

    if atlas_type == 'labels' and img is not None:
        img = check_niimg_3d(img)

    new_masker = check_embedded_nifti_masker(estimator,
                                             multi_subject=multi_subject)
    mask = getattr(new_masker, 'mask_img', None)
    estimator_mask = getattr(estimator, 'mask_img', None)
    if mask is None and estimator_mask is not None:
        new_masker.mask_img = estimator.mask_img

    if atlas_type is None:
        return new_masker
    else:
        masker_params = new_masker.get_params()
        new_masker_params = dict()
        _ignore = set(('mask_strategy', 'mask_args', 'n_jobs', 'target_affine',
                       'target_shape'))
        for param in masker_params:
            if param in _ignore:
                continue
            if hasattr(new_masker, param):
                new_masker_params[param] = getattr(new_masker, param)
        # Append atlas extraction related parameters
        if t_r is not None:
            new_masker_params['t_r'] = t_r

        if low_pass is not None:
            new_masker_params['low_pass'] = low_pass

        if high_pass is not None:
            new_masker_params['high_pass'] = high_pass

        if atlas_type is not None:
            if len(img.shape) > 3 and atlas_type == 'maps':
                new_masker_params['maps_img'] = img
                new_masker = NiftiMapsMasker(**new_masker_params)
            elif len(img.shape) == 3 and atlas_type == 'labels':
                new_masker_params['labels_img'] = img
                new_masker = NiftiLabelsMasker(**new_masker_params)

            return new_masker

        if seeds is not None and atlas_type == 'spheres':
            if radius is not None:
                new_masker_params['radius'] = radius
            new_masker_params['seeds'] = seeds
            new_masker = NiftiSpheresMasker(**new_masker_params)

        return new_masker
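A hedged usage sketch (the atlas path is a hypothetical placeholder):

from nilearn.input_data import NiftiMasker

estimator = NiftiMasker(standardize=True, detrend=True)
masker = check_embedded_atlas_masker(estimator, atlas_type='labels',
                                     img='/tmp/atlas_labels.nii.gz',
                                     t_r=2.0, low_pass=0.1)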
Example #29
def get_expression_data(atlas,
                        atlas_info=None,
                        *,
                        exact=True,
                        tolerance=2,
                        metric='mean',
                        ibf_threshold=0.5,
                        corrected_mni=True,
                        reannotated=True,
                        return_counts=False,
                        return_donors=False,
                        donors='all',
                        data_dir=None):
    """
    Assigns microarray expression data to ROIs defined in `atlas`

    This function aims to provide a workflow for generating pre-processed,
    microarray expression data for arbitrary `atlas` designations. First, some
    basic filtering of genetic probes is performed, including:

        1. Intensity-based filtering of microarray probes to remove probes that
           do not exceed a certain level of background noise (specified via the
           `ibf_threshold` parameter), and
        2. Selection of a single, representative probe for each gene via a
           differential stability metric, wherein the probe that has the most
           consistent regional variation across donors is retained.

    Tissue samples are then matched to parcels in the defined `atlas` for each
    donor. If `atlas_info` is provided then this matching is constrained by
    both hemisphere and tissue class designation (e.g., cortical samples from
    the left hemisphere are only matched to ROIs in the left cortex,
    subcortical samples from the right hemisphere are only matched to ROIs in
    the right subcortex); see the `atlas_info` parameter description for more
    information.

    Matching of microarray samples to parcels in `atlas` is done via a multi-
    step process:

        1. Determine if the sample falls directly within a parcel,
        2. Check to see if there are nearby parcels by slowly expanding the
           search space to include nearby voxels, up to a specified distance
           (specified via the `tolerance` parameter),
        3. If there are multiple nearby parcels, the sample is assigned to the
           closest parcel, as determined by the parcel centroid.

    If at any step a sample can be assigned to a parcel the matching process is
    terminated. If multiple samples are assigned to the same parcel they are
    aggregated with the metric specified via the `metric` parameter. More
    control over the sample matching can be obtained by setting the `exact`
    parameter; see the parameter description for more information.

    Once all samples have been matched to parcels for all supplied donors, the
    microarray expression data are normalized within-donor via a scaled robust
    sigmoid (SRS) procedure before being combined across donors via the
    supplied `metric`.

    Parameters
    ----------
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified by a
        unique integer ID
    atlas_info : str or :class:`pandas.DataFrame`, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have at least columns 'id', 'hemisphere', and 'structure'
        containing information mapping atlas IDs to hemisphere (i.e., "L", "R")
        and broad structural class (i.e., "cortex", "subcortex", "cerebellum").
        Default: None
    exact : bool, optional
        Whether to use exact matching of donor tissue samples to parcels in
        `atlas`. If True, this function will match tissue samples to parcels
        within `tolerance` mm of the sample; any samples that are beyond
        `tolerance` mm of a parcel will be discarded. This may result in some
        parcels having no assigned sample / expression data. If False, the
        default matching procedure will be performed and followed by a check
        for parcels with no assigned samples; any such parcels will be matched
        to the nearest sample (nearest defined as the sample with the closest
        Euclidean distance to the parcel centroid). Default: True
    tolerance : int, optional
        Distance (in mm) that a sample must be from a parcel for it to be
        matched to that parcel. This is only considered if the sample is not
        directly within a parcel. Default: 2
    metric : str or func, optional
        Mechanism by which to collapse across donors, if input `files` provides
        multiple donor datasets. If a str, should be in ['mean', 'median']; if
        a function, should be able to accept an `N`-dimensional input and the
        `axis` keyword argument and return an `N-1`-dimensional output.
        Default: 'mean'
    ibf_threshold : [0, 1] float, optional
        Threshold for intensity-based filtering. This number should
        specify the ratio of samples, across all supplied donors, for which a
        probe must have signal above background noise in order to be retained.
        Default: 0.5
    corrected_mni : bool, optional
        Whether to use the "corrected" MNI coordinates shipped with the
        `alleninf` package instead of the coordinates provided with the AHBA
        data when matching tissue samples to anatomical regions. Default: True
    reannotated : bool, optional
        Whether to use reannotated probe information provided by [1]_ instead
        of the default probe information from the AHBA dataset. Using
        reannotated information will discard probes that could not be reliably
        matched to genes. Default: True
    return_counts : bool, optional
        Whether to return how many samples were assigned to each parcel in
        `atlas` for each donor. Default: False
    return_donors : bool, optional
        Whether to return donor-level expression arrays instead of aggregating
        expression across donors with provided `metric`. Default: False
    donors : list, optional
        List of donors to use as sources of expression data. Can be either
        donor numbers or UID. If not specified will use all available donors.
        Default: 'all'
    data_dir : str, optional
        Directory where expression data should be downloaded (if it does not
        already exist) / loaded. If not specified will use the current
        directory. Default: None

    Returns
    -------
    expression : (R, G) :class:`pandas.DataFrame`
        Microarray expression for `R` regions in `atlas` for `G` genes,
        aggregated across donors, where the index corresponds to the unique
        integer IDs of `atlas` and the columns are gene names.
    counts : (R, D) :class:`pandas.DataFrame`
        Number of samples assigned to each of `R` regions in `atlas` for each
        of `D` donors (if multiple donors were specified); only returned if
        `return_counts=True`.

    References
    ----------
    .. [1] Arnatkevic̆iūtė, A., Fulcher, B. D., & Fornito, A. (2019). A
       practical guide to linking brain-wide gene expression and neuroimaging
       data. NeuroImage, 189, 353-367.
    .. [2] Hawrylycz, M.J. et al. (2012) An anatomically comprehensive atlas of
       the adult human transcriptome. Nature, 489, 391-399.
    """

    # fetch files
    files = datasets.fetch_microarray(data_dir=data_dir, donors=donors)
    for key in ['microarray', 'probes', 'annotation', 'pacall', 'ontology']:
        if key not in files:
            raise KeyError('Provided `files` dictionary is missing {}. '
                           'Please check inputs.'.format(key))

    # load atlas_info, if provided
    atlas = check_niimg_3d(atlas)
    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info)

    # get combination functions
    metric = utils.check_metric(metric)

    # get some info on the number of subjects, labels in `atlas_img`
    num_subj = len(files.microarray)
    all_labels = utils.get_unique_labels(atlas)
    if not exact:
        centroids = utils.get_centroids(atlas, labels=all_labels)

    # reannotate probes based on updates from Arnatkeviciute et al., 2018 then
    # perform intensity-based filter of probes and select probe with highest
    # differential stability for each gene amongst remaining probes
    if reannotated:
        probes = process.reannotate_probes(files.probes[0])
    else:
        probes = io.read_probes(files.probes[0])
    probes = process.filter_probes(files.pacall,
                                   probes,
                                   threshold=ibf_threshold)
    probes = process.get_stable_probes(files.microarray, files.annotation,
                                       probes)

    expression, missing = [], []
    counts = pd.DataFrame(np.zeros((len(all_labels) + 1, num_subj)),
                          index=np.append([0], all_labels))
    for subj in range(num_subj):
        # get rid of samples whose coordinates don't match ontological profile
        annotation = process.drop_mismatch_samples(files.annotation[subj],
                                                   files.ontology[subj],
                                                   corrected=corrected_mni)

        # subset representative probes + samples from microarray data
        microarray = io.read_microarray(files.microarray[subj])
        samples = microarray.loc[probes.index, annotation.index].T
        samples.columns = probes.gene_symbol

        # assign samples to regions and aggregate samples w/i the same region
        sample_labels = label_samples(annotation,
                                      atlas,
                                      atlas_info=atlas_info,
                                      tolerance=tolerance)
        expression += [
            group_by_label(samples, sample_labels, all_labels, metric=metric)
        ]

        # get counts of samples collapsed into each ROI
        labs, num = np.unique(sample_labels, return_counts=True)
        counts.loc[labs, subj] = num

        # if we don't want to do exact matching then cache which parcels are
        # missing data and the expression data for the closest sample to that
        # parcel; we'll use this once we've iterated through all donors
        if not exact:
            coords = utils.xyz_to_ijk(annotation[['mni_x', 'mni_y', 'mni_z']],
                                      atlas.affine)
            empty = ~np.in1d(all_labels, labs)
            closest, dist = utils.closest_centroid(coords,
                                                   centroids[empty],
                                                   return_dist=True)
            closest = samples.loc[annotation.iloc[closest].index]
            empty = all_labels[empty]
            closest.index = pd.Series(empty, name='label')
            missing += [(closest, dict(zip(empty, np.diag(dist))))]

    # check for missing ROIs and fill in, as needed
    if not exact:
        # find labels that are missing across all donors
        empty = reduce(set.intersection, [set(f.index) for f, d in missing])
        for roi in empty:
            # find donor with sample closest to centroid of empty parcel
            ind = np.argmin([d.get(roi) for f, d in missing])
            # assign expression data from that sample and add to count
            expression[ind].loc[roi] = missing[ind][0].loc[roi]
            counts.loc[roi, ind] += 1

    # normalize data with SRS and aggregate across donors
    expression = [process.normalize_expression(e) for e in expression]
    if not return_donors:
        expression = process.aggregate_donors(expression, metric)

    if return_counts:
        return expression, counts.iloc[1:]

    return expression
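A hedged usage sketch (the MNI-space atlas path is a hypothetical placeholder; fetching the AHBA microarray data may take a while):

expression = get_expression_data('/tmp/atlas_mni.nii.gz', tolerance=2,
                                 metric='mean', donors='all')
print(expression.shape)   # (n_regions, n_genes)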
Example #30
def plot_img_on_surf(stat_map,
                     surf_mesh='fsaverage5',
                     mask_img=None,
                     hemispheres=['left', 'right'],
                     inflate=False,
                     views=['lateral', 'medial'],
                     output_file=None,
                     title=None,
                     colorbar=True,
                     vmax=None,
                     threshold=None,
                     cmap='cold_hot',
                     aspect_ratio=1.4,
                     **kwargs):
    """Convenience function to plot multiple views of plot_surf_stat_map
    in a single figure. It projects stat_map into meshes and plots views of
    left and right hemispheres. The *views* argument defines the views
    that are shown. This function returns the fig, axes elements from
    matplotlib unless kwargs sets and output_file, in which case nothing
    is returned.

    Parameters
    ----------
    stat_map : str or 3D Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html

    surf_mesh : str, dict, or None, optional
        If str, either one of the two:
        'fsaverage5': the low-resolution fsaverage5 mesh (10242 nodes)
        'fsaverage': the high-resolution fsaverage mesh (163842 nodes)
        If dict, a dictionary with keys: ['infl_left', 'infl_right',
        'pial_left', 'pial_right', 'sulc_left', 'sulc_right'], where
        values are surface mesh geometries as accepted by plot_surf_stat_map.
        Default='fsaverage5'.

    mask_img : Niimg-like object or None, optional
        The mask is passed to vol_to_surf.
        Samples falling out of this mask or out of the image are ignored
        during projection of the volume to the surface.
        If ``None``, don't apply any mask.

    inflate : bool, optional
        If True, display images in inflated brain.
        If False, display images in pial surface.
        Default=False.

    views : list of strings, optional
        A list containing all views to display.
        The montage will contain as many rows as views specified by
        display mode. Order is preserved, and left and right hemispheres
        are shown on the left and right sides of the figure.
        Default=['lateral', 'medial'].

    hemispheres : list of strings, optional
        Hemispheres to display. Default=['left', 'right'].

    output_file : str, optional
        The name of an image file to export plot to. Valid extensions
        are: *.png*, *.pdf*, *.svg*. If output_file is not None,
        the plot is saved to a file, and the display is closed. Return
        value is None.

    title : str, optional
        Place a title on the upper center of the figure.

    colorbar : bool, optional
        If *True*, a symmetric colorbar of the statistical map is displayed.
        Default=True.

    vmax : float, optional
        Upper bound for plotting of stat_map values.

    threshold : float, optional
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image,
        values below the threshold (in absolute value) are plotted
        as transparent.

    cmap : str, optional
        The name of a matplotlib or nilearn colormap. Default='cold_hot'.

    kwargs : dict, optional
        keyword arguments passed to plot_surf_stat_map.

    See Also
    --------
    nilearn.datasets.fetch_surf_fsaverage : For surface data object to be
        used as the default background map for this plotting function.

    nilearn.surface.vol_to_surf : For info on the generation of surfaces.

    nilearn.plotting.plot_surf_stat_map : For info on kwargs options
        accepted by plot_img_on_surf.

    """
    for arg in ('figure', 'axes'):
        if arg in kwargs:
            raise ValueError(('plot_img_on_surf does not'
                              ' accept %s as an argument' % arg))

    stat_map = check_niimg_3d(stat_map, dtype='auto')
    modes = _check_views(views)
    hemis = _check_hemispheres(hemispheres)
    surf_mesh = _check_mesh(surf_mesh)

    mesh_prefix = "infl" if inflate else "pial"
    surf = {
        'left': surf_mesh[mesh_prefix + '_left'],
        'right': surf_mesh[mesh_prefix + '_right'],
    }

    texture = {
        'left': vol_to_surf(stat_map,
                            surf_mesh['pial_left'],
                            mask_img=mask_img),
        'right': vol_to_surf(stat_map,
                             surf_mesh['pial_right'],
                             mask_img=mask_img)
    }

    figsize = plt.figaspect(len(modes) / (aspect_ratio * len(hemispheres)))
    fig, axes = plt.subplots(nrows=len(modes),
                             ncols=len(hemis),
                             figsize=figsize,
                             subplot_kw={'projection': '3d'})

    axes = np.atleast_2d(axes)

    if len(hemis) == 1:
        axes = axes.T

    for index_mode, mode in enumerate(modes):
        for index_hemi, hemi in enumerate(hemis):
            bg_map = surf_mesh['sulc_%s' % hemi]
            plot_surf_stat_map(
                surf[hemi],
                texture[hemi],
                view=mode,
                hemi=hemi,
                bg_map=bg_map,
                axes=axes[index_mode, index_hemi],
                colorbar=False,  # Colorbar created externally.
                vmax=vmax,
                threshold=threshold,
                cmap=cmap,
                **kwargs)

    for ax in axes.flatten():
        # We increase this value to better position the camera of the
        # 3D projection plot. The default value makes meshes look too small.
        ax.dist = 6

    if colorbar:
        sm = _colorbar_from_array(image.get_data(stat_map),
                                  vmax,
                                  threshold,
                                  kwargs,
                                  cmap=get_cmap(cmap))

        cbar_ax = fig.add_subplot(32, 1, 32)
        fig.colorbar(sm, cax=cbar_ax, orientation='horizontal')

    fig.subplots_adjust(wspace=-0.02, hspace=0.0)

    if title is not None:
        fig.suptitle(title)

    if output_file is not None:
        fig.savefig(output_file)
        plt.close(fig)
    else:
        return fig, axes
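A minimal usage sketch for the function above (hedged: the file path and threshold are illustrative; the same function is exposed publicly as nilearn.plotting.plot_img_on_surf):

from nilearn import plotting

# Hypothetical 3D statistical map on disk.
fig, axes = plotting.plot_img_on_surf(
    'stat_img.nii.gz',
    surf_mesh='fsaverage5',
    views=['lateral', 'medial'],
    hemispheres=['left', 'right'],
    inflate=False,
    threshold=2.0,
    colorbar=True,
    title='Stat map on fsaverage5')
plotting.show()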
Example #31
    def apply_mask(self, series, mask_img):
        mask_img = _utils.check_niimg_3d(mask_img)
        mask, mask_affine = masking._load_mask_img(mask_img)
        mask_img = image.new_img_like(mask_img, mask, mask_affine)
        mask_data = _utils.as_ndarray(mask_img.get_data(), dtype=bool)
        return series[mask_data].T
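A toy exercise of this method (illustrative only: `series`, `mask_img` and the instance `obj` are assumed names, not part of the original snippet):

import numpy as np
from nibabel import Nifti1Image

series = np.random.rand(4, 4, 4, 10)      # (x, y, z, n_timepoints)
mask = np.zeros((4, 4, 4), dtype=np.int8)
mask[1:3, 1:3, 1:3] = 1                   # 8 voxels inside the mask
mask_img = Nifti1Image(mask, np.eye(4))

# obj is assumed to be an instance of the class defining apply_mask:
# obj.apply_mask(series, mask_img) -> array of shape (10, 8)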
Example #32
def get_clusters_table(stat_img,
                       stat_threshold,
                       cluster_threshold=None,
                       min_distance=8.):
    """Creates pandas dataframe with img cluster statistics.

    Parameters
    ----------
    stat_img : Niimg-like object,
       Statistical image (presumably in z- or p-scale).

    stat_threshold : `float`
        Cluster forming threshold in same scale as `stat_img` (either a
        p-value or z-scale value).

    cluster_threshold : `int` or `None`, optional
        Cluster size threshold, in voxels.

    min_distance : `float`, optional
        Minimum distance between subpeaks in mm. Default=8mm.

    Returns
    -------
    df : `pandas.DataFrame`
        Table with peaks and subpeaks from thresholded `stat_img`. For binary
        clusters (clusters with >1 voxel containing only one value), the table
        reports the center of mass of the cluster,
        rather than any peaks/subpeaks.

    """
    cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']

    # check that stat_img is niimg-like object and 3D
    stat_img = check_niimg_3d(stat_img)
    # If cluster threshold is used, there is chance that stat_map will be
    # modified, therefore copy is needed
    if cluster_threshold is None:
        stat_map = get_data(stat_img)
    else:
        stat_map = get_data(stat_img).copy()

    conn_mat = np.zeros((3, 3, 3), int)  # 6-connectivity, aka NN1 or "faces"
    conn_mat[1, 1, :] = 1
    conn_mat[1, :, 1] = 1
    conn_mat[:, 1, 1] = 1
    voxel_size = np.prod(stat_img.header.get_zooms())

    # Binarize using CDT
    binarized = stat_map > stat_threshold
    binarized = binarized.astype(int)

    # If the stat threshold is too high simply return an empty dataframe
    if np.sum(binarized) == 0:
        warnings.warn('Attention: No clusters with stat higher than %f' %
                      stat_threshold)
        return pd.DataFrame(columns=cols)

    # Extract connected components above cluster size threshold
    label_map = ndimage.label(binarized, conn_mat)[0]
    clust_ids = sorted(list(np.unique(label_map)[1:]))
    for c_val in clust_ids:
        if cluster_threshold is not None and np.sum(
                label_map == c_val) < cluster_threshold:
            stat_map[label_map == c_val] = 0
            binarized[label_map == c_val] = 0

    # If the cluster threshold is too high simply return an empty dataframe
    # this checks for stats higher than threshold after small clusters
    # were removed from stat_map
    if np.sum(stat_map > stat_threshold) == 0:
        warnings.warn('Attention: No clusters with more than %d voxels' %
                      cluster_threshold)
        return pd.DataFrame(columns=cols)

    # Now re-label and create table
    label_map = ndimage.label(binarized, conn_mat)[0]
    clust_ids = sorted(list(np.unique(label_map)[1:]))
    peak_vals = np.array(
        [np.max(stat_map * (label_map == c)) for c in clust_ids])
    # Sort clusters by descending max value
    clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]

    rows = []
    for c_id, c_val in enumerate(clust_ids):
        cluster_mask = label_map == c_val
        masked_data = stat_map * cluster_mask

        cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)

        # Get peaks, subpeaks and associated statistics
        subpeak_ijk, subpeak_vals = _local_max(masked_data,
                                               stat_img.affine,
                                               min_distance=min_distance)
        subpeak_xyz = np.asarray(
            coord_transform(subpeak_ijk[:, 0], subpeak_ijk[:, 1],
                            subpeak_ijk[:, 2], stat_img.affine)).tolist()
        subpeak_xyz = np.array(subpeak_xyz).T

        # Only report peak and, at most, top 3 subpeaks.
        n_subpeaks = np.min((len(subpeak_vals), 4))
        for subpeak in range(n_subpeaks):
            if subpeak == 0:
                row = [
                    c_id + 1,
                    subpeak_xyz[subpeak, 0],
                    subpeak_xyz[subpeak, 1],
                    subpeak_xyz[subpeak, 2],
                    subpeak_vals[subpeak],
                    cluster_size_mm,
                ]
            else:
                # Subpeak naming convention is cluster num+letter: 1a, 1b, etc.
                sp_id = '{0}{1}'.format(c_id + 1, ascii_lowercase[subpeak - 1])
                row = [
                    sp_id,
                    subpeak_xyz[subpeak, 0],
                    subpeak_xyz[subpeak, 1],
                    subpeak_xyz[subpeak, 2],
                    subpeak_vals[subpeak],
                    '',
                ]
            rows += [row]
    df = pd.DataFrame(columns=cols, data=rows)
    return df
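A small runnable sketch (the random "z-map" is purely illustrative; with noise data the function may simply warn and return an empty table):

import numpy as np
from nibabel import Nifti1Image

rng = np.random.RandomState(42)
z_map = Nifti1Image(rng.standard_normal((10, 10, 10)), np.eye(4))

# Clusters above z > 2.5 with at least 2 voxels; subpeaks >= 8 mm apart.
table = get_clusters_table(z_map, stat_threshold=2.5,
                           cluster_threshold=2, min_distance=8.)
print(table.head())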
Example #33
def get_stat_map(stat_map_img,
                 bg_img,
                 cut_coords=None,
                 colorbar=True,
                 title=None,
                 threshold=None,
                 annotate=True,
                 draw_cross=True,
                 black_bg='auto',
                 cmap=cm.cold_hot,
                 symmetric_cbar='auto',
                 dim='auto',
                 vmax=None,
                 resampling_interpolation='continuous',
                 n_colors=256,
                 opacity=1,
                 **kwargs):
    """
    Interactive viewer of a statistical map, with an optional background.

    Parameters
    ----------
    stat_map_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The statistical map image.
    bg_img : Niimg-like object (default='MNI152')
        See http://nilearn.github.io/manipulating_images/input_output.html
        The background image that the stat map will be plotted on top of.
        If nothing is specified, the MNI152 template will be used.
        To turn off background image, just pass "bg_img=False".
    cut_coords : None, a tuple of floats, or an integer (default None)
        The MNI coordinates of the point where the cut is performed
        as a 3-tuple: (x, y, z). If None is given, the cuts are calculated
        automatically.
        This parameter is not currently supported.
    colorbar : boolean, optional (default True)
        If True, display a colorbar next to the plots.
    title : string or None (default=None)
        The title displayed on the figure (or None: no title).
        This parameter is not currently supported.
    threshold : str, number or None  (default=None)
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    annotate : boolean (default=True)
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean (default=True)
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean (default='auto')
        If True, the background of the image is set to be black.
        Otherwise, a white background is used.
        If set to auto, an educated guess is made to find if the background
        is white or black.
    cmap : matplotlib colormap, optional
        The colormap for specified image. The colormap *must* be
        symmetrical.
    symmetric_cbar : boolean or 'auto' (default='auto')
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter if
        the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.
    dim : float, 'auto' (default='auto')
        Dimming factor applied to background image. By default, automatic
        heuristics are applied based upon the background image intensity.
        Accepted float values, where a typical scan is between -2 and 2
        (-2 = increase contrast; 2 = decrease contrast), but larger values
        can be used for a more pronounced effect. 0 means no dimming.
    vmax : float, or None (default=None)
        max value for mapping colors.
    resampling_interpolation : string, optional (default='continuous')
        The interpolation method for resampling
        See nilearn.image.resample_img
    n_colors : integer (default=256)
        The number of discrete colors to use in the colormap, if it is
        generated.
    opacity : float in [0,1] (default 1)
        The level of opacity of the overlay (0: transparent, 1: opaque)

    Returns
    -------
    StatMapView : plot of the stat map.
        It can be saved as an html page or rendered (transparently) by the
        Jupyter notebook.
    """

    # Load stat map
    stat_map_img = check_niimg_3d(stat_map_img, dtype='auto')

    _, _, vmin, vmax = _get_colorbar_and_data_ranges(
        _safe_get_data(stat_map_img, ensure_finite=True), vmax, symmetric_cbar,
        kwargs)

    # load background image, and resample stat map
    if bg_img is not None and bg_img is not False:
        bg_img, black_bg, bg_min, bg_max = _load_anat(bg_img,
                                                      dim=dim,
                                                      black_bg=black_bg)
        bg_img = _resample_to_self(bg_img,
                                   interpolation=resampling_interpolation)
        stat_map_img = image.resample_to_img(
            stat_map_img, bg_img, interpolation=resampling_interpolation)

    else:
        stat_map_img = _resample_to_self(
            stat_map_img, interpolation=resampling_interpolation)
        bg_img = image.new_img_like(stat_map_img, np.zeros(stat_map_img.shape),
                                    stat_map_img.affine)
        bg_min = 0
        bg_max = 0
        if black_bg == 'auto':
            black_bg = False

    # Select coordinates for the cut
    # https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/displays.py#L943
    if isinstance(cut_coords, numbers.Number):
        raise ValueError(
            "The input given for display_mode='ortho' needs to be "
            "a list of 3d world coordinates in (x, y, z). "
            "You provided single cut, cut_coords={0}".format(cut_coords))
    if cut_coords is None:
        cut_coords = find_xyz_cut_coords(stat_map_img,
                                         activation_threshold=threshold)

    # Create a base64 sprite for the background
    bg_sprite = BytesIO()
    save_sprite(bg_img,
                output_sprite=bg_sprite,
                cmap='gray',
                format='jpg',
                resample=False,
                vmin=bg_min,
                vmax=bg_max)
    bg_sprite.seek(0)
    bg_base64 = encodebytes(bg_sprite.read()).decode('utf-8')
    bg_sprite.close()

    # Create a base64 sprite for the stat map
    # Possibly, also generate a file with the colormap
    stat_map_sprite = BytesIO()
    stat_map_json = StringIO()
    if colorbar:
        stat_map_cm = BytesIO()
    else:
        stat_map_cm = None
    cmap_c = _custom_cmap(cmap, vmin, vmax, threshold)
    save_sprite(stat_map_img, stat_map_sprite, stat_map_cm, stat_map_json,
                vmax, vmin, cmap_c, threshold, n_colors, 'png', False)

    # Convert the sprite and colormap to base64
    stat_map_sprite.seek(0)
    stat_map_base64 = encodebytes(stat_map_sprite.read()).decode('utf-8')
    stat_map_sprite.close()

    if colorbar:
        stat_map_cm.seek(0)
        cm_base64 = encodebytes(stat_map_cm.read()).decode('utf-8')
        stat_map_cm.close()
    else:
        cm_base64 = ''
    # Load the sprite meta-data from the json dump
    stat_map_json.seek(0)
    params = json.load(stat_map_json)
    stat_map_json.close()

    # Convert cut coordinates into cut slices
    cut_slices = np.round(
        nb.affines.apply_affine(np.linalg.inv(stat_map_img.affine),
                                cut_coords))

    # Create a json-like structure
    # with all the brain sprite parameters
    sprite_params = {
        'canvas': '3Dviewer',
        'sprite': 'spriteImg',
        'nbSlice': params['nbSlice'],
        'overlay': {
            'sprite': 'overlayImg',
            'nbSlice': params['nbSlice'],
            'opacity': opacity
        },
        'colorBackground': '#000000',
        'colorFont': '#ffffff',
        'colorCrosshair': '#de101d',
        'crosshair': draw_cross,
        'affine': params['affine'],
        'flagCoordinates': annotate,
        'title': title,
        'flagValue': annotate,
        'numSlice': {
            'X': cut_slices[0],
            'Y': cut_slices[1],
            'Z': cut_slices[2]
        },
    }
    if colorbar:
        sprite_params['colorMap'] = {
            'img': 'colorMap',
            'min': params['min'],
            'max': params['max']
        }

    return sprite_params, bg_base64, stat_map_base64, cm_base64
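A hedged, illustrative call of the viewer above (toy data; assumes the module-level helpers this function relies on, such as save_sprite and _resample_to_self, are importable alongside it):

import numpy as np
from nibabel import Nifti1Image

stat_img = Nifti1Image(np.random.rand(8, 8, 8), np.eye(4))
params, bg_b64, stat_b64, cm_b64 = get_stat_map(stat_img, bg_img=False)
print(sorted(params))   # brainsprite parameters: 'affine', 'canvas', ...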
Example #34
def save_sprite(img,
                output_sprite,
                output_cmap=None,
                output_json=None,
                vmax=None,
                vmin=None,
                cmap='Greys',
                threshold=None,
                n_colors=256,
                format='png',
                resample=True,
                interpolation='nearest'):
    """ Generate a sprite from a 3D Niimg-like object.

        Parameters
        ----------
        img :  Niimg-like object
            See http://nilearn.github.io/manipulating_images/input_output.html
        output_sprite : string or file-like
            Path string to a filename, or a Python file-like object.
            If *format* is *None* and *fname* is a string, the output
            format is deduced from the extension of the filename.
        output_cmap : string, file-like or None, optional (default None)
            Path string to a filename, or a Python file-like object.
            The color map will be saved in that file (unless it is None).
            If *format* is *None* and *fname* is a string, the output format is
            deduced from the extension of the filename.
        output_json : string, file-like or None, optional (default None)
            Path string to a filename, or a Python file-like object.
            The parameters of the sprite will be saved in that file
            (unless it is None): Y and Z sizes, vmin, vmax, affine transform.
        vmax : float, or None, optional (default None)
            max value for mapping colors.
        vmin : float, or None, optional (default None)
            min value for mapping color.
        cmap : name of a matplotlib colormap, optional (default 'Greys')
            The colormap for the sprite. A matplotlib colormap can also
            be passed directly in cmap.
        threshold : a number, None, or 'auto', optional (default None)
            If None is given, the image is not thresholded.
            If a number is given, it is used to threshold the image:
            values below the threshold (in absolute value) are plotted
            as transparent. If auto is given, the threshold is determined
            magically by analysis of the image.
        n_colors : integer, optional (default 256)
            The number of discrete colors to use in the colormap, if it is
            generated.
        format : string, optional (default 'png')
            One of the file extensions supported by the active backend.  Most
            backends support png, pdf, ps, eps and svg.
        resample : boolean, optional (default True)
            Resample to isotropic voxels, with a LR/AP/VD orientation.
            This is necessary for proper rendering of arbitrary Niimg volumes,
            but not necessary if the image is in an isotropic standard space.
        interpolation : string, optional (default nearest)
            The interpolation method for resampling
            See nilearn.image.resample_img

        Returns
        -------
        sprite : numpy array with the sprite
    """

    # Get cmap
    if isinstance(cmap, str):
        cmap = plt.cm.get_cmap(cmap)

    img = check_niimg_3d(img, dtype='auto')

    # resample to isotropic voxel with standard orientation
    if resample:
        img = _resample_to_self(img, interpolation)

    # Read data
    data = _safe_get_data(img, ensure_finite=True)
    if np.isnan(np.sum(data)):
        data = np.nan_to_num(data)

    # Deal with automatic settings of plot parameters
    if threshold == 'auto':
        # Threshold epsilon below a percentile value, to be sure that some
        # voxels pass the threshold
        threshold = fast_abs_percentile(data) - 1e-5

    # threshold
    threshold = float(threshold) if threshold is not None else None

    # Get vmin vmax
    show_nan_msg = False
    if vmax is not None and np.isnan(vmax):
        vmax = None
        show_nan_msg = True
    if vmin is not None and np.isnan(vmin):
        vmin = None
        show_nan_msg = True
    if show_nan_msg:
        nan_msg = ('NaN is not permitted for the vmax and vmin arguments.\n'
                   'Tip: Use np.nanmax() instead of np.max().')
        warnings.warn(nan_msg)

    if vmax is None:
        vmax = np.nanmax(data)
    if vmin is None:
        vmin = np.nanmin(data)

    # Create sprite
    sprite = _data2sprite(data)

    # Mask sprite
    if threshold is not None:
        if threshold == 0:
            sprite = np.ma.masked_equal(sprite, 0, copy=False)
        else:
            sprite = np.ma.masked_inside(sprite,
                                         -threshold,
                                         threshold,
                                         copy=False)
    # Save the sprite
    imsave(output_sprite,
           sprite,
           vmin=vmin,
           vmax=vmax,
           cmap=cmap,
           format=format)

    # Save the parameters
    if type(vmin).__module__ == 'numpy':
        vmin = vmin.tolist()  # json does not deal with numpy array
    if type(vmax).__module__ == 'numpy':
        vmax = vmax.tolist()  # json does not deal with numpy array

    if output_json is not None:
        params = {
            'nbSlice': {
                'X': data.shape[0],
                'Y': data.shape[1],
                'Z': data.shape[2]
            },
            'min': vmin,
            'max': vmax,
            'affine': img.affine.tolist()
        }
        if isinstance(output_json, str):
            with open(output_json, 'w') as f:
                f.write(json.dumps(params))
        else:
            output_json.write(json.dumps(params))

    # save the colormap
    if output_cmap is not None:
        data = np.arange(0, n_colors) / (n_colors - 1)
        data = data.reshape([1, n_colors])
        imsave(output_cmap, data, cmap=cmap, format=format)

    return sprite
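A short usage sketch for save_sprite (toy volume; the output filenames are hypothetical):

import numpy as np
from nibabel import Nifti1Image

img = Nifti1Image(np.random.rand(8, 8, 8), np.eye(4))
# Write the slice mosaic as a PNG plus its JSON metadata; resampling is
# skipped because the toy volume is already isotropic.
save_sprite(img, 'sprite.png', output_json='sprite.json',
            cmap='Greys', resample=False)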
Example #35
    def fit(self, imgs=None, y=None):
        """Compute the mask corresponding to the data

        Parameters
        ----------
        imgs: list of Niimg-like objects
            See http://nilearn.github.io/manipulating_visualizing/manipulating_images.html#niimg.
            Data on which the mask must be calculated. If this is a list,
            the affine is considered the same for all.
        """

        # Load data (if filenames are given, load them)
        if self.verbose > 0:
            print("[%s.fit] Loading data from %s" % (
                self.__class__.__name__,
                _utils._repr_niimgs(imgs)[:200]))
        # Compute the mask if not given by the user
        if self.mask_img is None:
            if self.verbose > 0:
                print("[%s.fit] Computing mask" % self.__class__.__name__)
            if not isinstance(imgs, collections.abc.Iterable) \
                    or isinstance(imgs, _basestring):
                raise ValueError("[%s.fit] For multiple processing, you should"
                                 " provide a list of data "
                                 "(e.g. Nifti1Image objects or filenames)."
                                 "%r is an invalid input"
                                 % (self.__class__.__name__, imgs))

            mask_args = (self.mask_args if self.mask_args is not None
                         else {})
            if self.mask_strategy == 'background':
                compute_mask = masking.compute_multi_background_mask
            elif self.mask_strategy == 'epi':
                compute_mask = masking.compute_multi_epi_mask
            else:
                raise ValueError("Unknown value of mask_strategy '%s'. "
                                 "Acceptable values are 'background' and 'epi'.")

            self.mask_img_ = self._cache(compute_mask,
                                         ignore=['n_jobs', 'verbose',
                                                 'memory'])(
                imgs,
                target_affine=self.target_affine,
                target_shape=self.target_shape,
                n_jobs=self.n_jobs,
                memory=self.memory,
                verbose=max(0, self.verbose - 1),
                **mask_args)
        else:
            if imgs is not None:
                warnings.warn('[%s.fit] Generation of a mask has been'
                              ' requested (imgs != None) while a mask has'
                              ' been provided at masker creation. Given mask'
                              ' will be used.' % self.__class__.__name__)
            self.mask_img_ = _utils.check_niimg_3d(self.mask_img)

        # If resampling is requested, resample the mask as well.
        # Resampling: allows the user to change the affine, the shape or both.
        if self.verbose > 0:
            print("[%s.transform] Resampling mask" % self.__class__.__name__)
        self.mask_img_ = self._cache(image.resample_img)(
            self.mask_img_,
            target_affine=self.target_affine,
            target_shape=self.target_shape,
            interpolation='nearest', copy=False)
        if self.target_affine is not None:
            self.affine_ = self.target_affine
        else:
            self.affine_ = self.mask_img_.affine
        # Load data in memory
        self.mask_img_.get_data()
        return self
Example #36
def jumeg_plot_stat_map(stat_map_img, t, bg_img=MNI152TEMPLATE, cut_coords=None,
                        output_file=None, display_mode='ortho', colorbar=True,
                        figure=None, axes=None, title=None, threshold=1e-6,
                        annotate=True, draw_cross=True, black_bg='auto',
                        cmap='magma', symmetric_cbar="auto", cbar_range=None,
                        dim='auto', vmax=None, resampling_interpolation='continuous',
                        **kwargs):
    """
    Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial, and
    Lateral)

    This is based on nilearn.plotting.plot_stat_map.

    Parameters
    ----------
    stat_map_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The statistical map image
    t : int
        Plot activity at time point given by time t.
    bg_img : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The background image that the ROI/mask will be plotted on top of.
        If nothing is specified, the MNI152 template will be used.
        To turn off background image, just pass "bg_img=False".
    cut_coords : None, a tuple of floats, or an integer
        The MNI coordinates of the point where the cut is performed
        If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)
        For display_mode == 'x', 'y', or 'z', then these are the
        coordinates of each cut in the corresponding direction.
        If None is given, the cuts are calculated automatically.
        If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,
        in which case it specifies the number of cuts to perform
    output_file : string, or None, optional
        The name of an image file to export the plot to. Valid extensions
        are .png, .pdf, .svg. If output_file is not None, the plot
        is saved to a file, and the display is closed.
    display_mode : {'ortho', 'x', 'y', 'z', 'yx', 'xz', 'yz'}
        Choose the direction of the cuts: 'x' - sagittal, 'y' - coronal,
        'z' - axial, 'ortho' - three cuts are performed in orthogonal
        directions.
    colorbar : boolean, optional
        If True, display a colorbar on the right of the plots.
    figure : integer or matplotlib figure, optional
        Matplotlib figure used or its number. If None is given, a
        new figure is created.
    axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional
        The axes, or the coordinates, in matplotlib figure space,
        of the axes used to display the plot. If None, the complete
        figure is used.
    title : string, optional
        The title displayed on the figure.
    threshold : a number, None, or 'auto'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.
    annotate : boolean, optional
        If annotate is True, positions and left/right annotation
        are added to the plot.
    draw_cross : boolean, optional
        If draw_cross is True, a cross is drawn on the plot to
        indicate the cut position.
    black_bg : boolean, optional
        If True, the background of the image is set to be black. If
        you wish to save figures with a black background, you
        will need to pass "facecolor='k', edgecolor='k'"
        to matplotlib.pyplot.savefig.
    cmap : matplotlib colormap, optional
        The colormap for the specified image. The colormap *must* be
        symmetrical.
    symmetric_cbar : boolean or 'auto', optional, default 'auto'
        Specifies whether the colorbar should range from -vmax to vmax
        or from vmin to vmax. Setting to 'auto' will select the latter if
        the range of the whole image is either positive or negative.
        Note: The colormap will always be set to range from -vmax to vmax.
    cbar_range : None, 2-tuple
        Color range of the plot.
    dim : float, 'auto' (by default), optional
        Dimming factor applied to background image. By default, automatic
        heuristics are applied based upon the background image intensity.
        Accepted float values, where a typical scan is between -2 and 2
        (-2 = increase contrast; 2 = decrease contrast), but larger values
        can be used for a more pronounced effect. 0 means no dimming.
    vmax : float
        Upper bound for plotting, passed to matplotlib.pyplot.imshow
    resampling_interpolation : str
        Interpolation to use when resampling the image to the destination
        space. Can be "continuous" (default) to use 3rd-order spline
        interpolation, or "nearest" to use nearest-neighbor mapping.
        "nearest" is faster but can be noisier in some cases.

    Notes
    -----
    Arrays should be passed in numpy convention: (x, y, z)
    ordered.

    For visualization, non-finite values found in passed 'stat_map_img' or
    'bg_img' are set to zero.

    See Also
    --------

    nilearn.plotting.plot_anat : To simply plot anatomical images
    nilearn.plotting.plot_epi : To simply plot raw EPI images
    nilearn.plotting.plot_glass_brain : To plot maps in a glass brain

    """
    # noqa: E501
    # dim the background
    from nilearn.plotting.img_plotting import (_load_anat, _plot_img_with_bg,
                                               _get_colorbar_and_data_ranges)
    from nilearn._utils import check_niimg_3d, check_niimg_4d
    from nilearn._utils.niimg_conversions import _safe_get_data

    bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim,
                                                    black_bg=black_bg)

    stat_map_img = check_niimg_4d(stat_map_img, dtype='auto')

    cbar_vmin, cbar_vmax, vmin, vmax = _get_colorbar_and_data_ranges(
        _safe_get_data(stat_map_img, ensure_finite=True),
        vmax, symmetric_cbar, kwargs)

    if cbar_range is not None:
        cbar_vmin = cbar_range[0]
        cbar_vmax = cbar_range[1]
        vmin = cbar_range[0]
        vmax = cbar_range[1]

    stat_map_img_at_time_t = index_img(stat_map_img, t)
    stat_map_img_at_time_t = check_niimg_3d(stat_map_img_at_time_t, dtype='auto')

    display = _plot_img_with_bg(
        img=stat_map_img_at_time_t, bg_img=bg_img, cut_coords=cut_coords,
        output_file=output_file, display_mode=display_mode,
        figure=figure, axes=axes, title=title, annotate=annotate,
        draw_cross=draw_cross, black_bg=black_bg, threshold=threshold,
        bg_vmin=bg_vmin, bg_vmax=bg_vmax, cmap=cmap, vmin=vmin, vmax=vmax,
        colorbar=colorbar, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax,
        resampling_interpolation=resampling_interpolation, **kwargs)

    return display
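An illustrative (commented, not runnable as-is) call of jumeg_plot_stat_map; `stc_img` is an assumed 4D source-estimate image:

# stc_img = ...  # 4D Niimg-like object, one volume per time sample
# display = jumeg_plot_stat_map(stc_img, t=0, display_mode='ortho',
#                               cbar_range=(0., 5.), cmap='magma')
# display.savefig('stc_t0.png')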
Example #37
    def compute_spheres_values(self, imgs):
        start_t = time.time()

        # Force to get a list of imgs even if only one is given
        imgs_to_check = [imgs] if not isinstance(imgs, list) else imgs

        # Load Nifti images
        ref_shape = imgs_to_check[0].dataobj.shape
        ref_affine = imgs_to_check[0].affine
        imgs = []
        for img in imgs_to_check:
            # check if image is 4D
            imgs.append(check_niimg_4d(img))

            # Check that all images have the same shape and affine
            if ref_shape != img.dataobj.shape:
                raise ValueError("All fMRI images must have the same shape")
            if not np.array_equal(ref_affine, img.affine):
                warnings.warn("fMRI images do not have the same affine")
        self.ref_img = imgs[0]

        # Compute world coordinates of the seeds
        process_mask_img = check_niimg_3d(self.process_mask_img)
        process_mask_img = image.resample_to_img(
            process_mask_img, imgs[0], interpolation='nearest'
        )
        self.process_mask_img = process_mask_img

        process_mask, process_mask_affine = masking._load_mask_img(
            process_mask_img
        )
        process_mask_coords = np.where(process_mask != 0)
        self.vx_mask_coords = process_mask_coords
        process_mask_coords = coord_transform(
            process_mask_coords[0], process_mask_coords[1],
            process_mask_coords[2], process_mask_affine)
        process_mask_coords = np.asarray(process_mask_coords).T

        if self.verbose:
            print("{} seeds found in the mask".format(len(process_mask_coords)))

        # Compute spheres
        _, A = _apply_mask_and_get_affinity(
            process_mask_coords, imgs[0], self.radius, True,
            mask_img=self.mask_img
        )

        # Number of runs: 1 4D fMRI image / run
        n_runs = len(imgs)

        # Number of spheres (or seed voxels)
        n_spheres = A.shape[0]

        # Number of volumes in each 4D fMRI image
        n_conditions = imgs[0].dataobj.shape[3]

        mask_img = check_niimg_3d(self.mask_img)
        mask_img = image.resample_img(
            mask_img, target_affine=imgs[0].affine,
            target_shape=imgs[0].shape[:3], interpolation='nearest'
        )

        masked_imgs_data = []
        for i_run, img in enumerate(imgs):
            masked_imgs_data.append(masking._apply_mask_fmri(img, mask_img))

        # Extract data of each sphere
        # X will be #spheres x #runs x #conditions x #values
        X = []
        for i_sph in range(n_spheres):
            # Indexes of all voxels included in the current sphere
            sph_indexes = A.rows[i_sph]

            if len(sph_indexes) == 0:
                # Append when no data are available around the process voxel
                X.append(np.full((n_runs, n_conditions, 1), 0))
                print("Empty sphere")
            else:
                # Number of voxels in the current sphere
                n_values = len(sph_indexes)

                sub_X = np.empty((n_runs, n_conditions, n_values), dtype=object)
                for i_run, img in enumerate(imgs):
                    for i_cond in range(n_conditions):
                        sub_X[i_run, i_cond] = masked_imgs_data[i_run][i_cond][
                            sph_indexes]
                X.append(sub_X)

        if self.verbose:
            dt = time.time() - start_t
            print("Elapsed time to extract spheres values: {:.01f}s".format(dt))

        self.spheres_values = X
Example #38
    def fit(self):

        self.mask_img_ = [_utils.check_niimg_3d(img) for img in self.mask_img]

        return self
Example #40
def connected_label_regions(labels_img, min_size=None, connect_diag=True,
                            labels=None):
    """ Extract connected regions from a brain atlas image defined by labels
    (integers)

    For each label in an parcellations, separates out connected
    components and assigns to each separated region a unique label.

    Parameters
    ----------
    labels_img : Nifti-like image
        A 3D image which contains regions denoted as labels. Each region
        is denoted by a unique integer label.

    min_size : float, in mm^3, optional (default None)
        Minimum region size in volume required to keep after extraction.
        Removes small or spurious regions.

    connect_diag : bool (default True)
        If 'connect_diag' is True, two voxels are considered in the same region
        if they are connected along the diagonal (26-connectivity). If it is
        False, two voxels are considered connected only if they are within the
        same x, y, or z direction.

    labels : 1D numpy array or list of str, optional (default None)
        Each string in the list or array denotes the name of a brain atlas
        region given in the labels_img input. If provided, the same names
        will be re-assigned to the corresponding regions extracted by the
        connected-component relabelling. The total number of names should
        match the number of unique labels assigned in the image.
        NOTE: The order of the names given in labels should be appropriately
        matched with the unique labels (integers) assigned to each region
        given in labels_img.

    Returns
    -------
    new_labels_img : Nifti-like image
        A new image comprising of regions extracted on an input labels_img.
    new_labels : list, optional
        If labels are provided, new labels assigned to region extracted will
        be returned. Otherwise, only new labels image will be returned.

    """
    labels_img = check_niimg_3d(labels_img)
    labels_data = _safe_get_data(labels_img)
    affine = labels_img.affine

    check_unique_labels = np.unique(labels_data)

    if min_size is not None and not isinstance(min_size, numbers.Number):
        raise ValueError("Expected 'min_size' to be specified as integer. "
                         "You provided {0}".format(min_size))
    if not isinstance(connect_diag, bool):
        raise ValueError("'connect_diag' must be specified as True or False. "
                         "You provided {0}".format(connect_diag))
    if np.any(check_unique_labels < 0):
        raise ValueError("The 'labels_img' you provided has unknown/negative "
                         "integers as labels {0} assigned to regions. "
                         "All regions in an image should have positive "
                         "integers assigned as labels."
                         .format(check_unique_labels))

    unique_labels = set(check_unique_labels)

    # check for background label indicated as 0
    if np.any(check_unique_labels == 0):
        unique_labels.remove(0)

    if labels is not None:
        if (not isinstance(labels, collections.abc.Iterable) or
                isinstance(labels, _basestring)):
            labels = [labels, ]
        if len(unique_labels) != len(labels):
            raise ValueError("The number of labels: {0} provided as input "
                             "in labels={1} does not match with the number "
                             "of unique labels in labels_img: {2}. "
                             "Please provide appropriate match with unique "
                             "number of labels in labels_img."
                             .format(len(labels), labels, len(unique_labels)))
        new_names = []

    if labels is None:
        this_labels = [None] * len(unique_labels)
    else:
        this_labels = labels

    new_labels_data = np.zeros(labels_data.shape, dtype=int)
    current_max_label = 0
    for label_id, name in zip(unique_labels, this_labels):
        this_label_mask = (labels_data == label_id)
        # Extract regions assigned to each label id
        if connect_diag:
            structure = np.ones((3, 3, 3), dtype=int)
            regions, this_n_labels = ndimage.label(
                this_label_mask.astype(int), structure=structure)
        else:
            regions, this_n_labels = ndimage.label(
                this_label_mask.astype(int))

        if min_size is not None:
            index = np.arange(this_n_labels + 1)
            this_label_mask = this_label_mask.astype(int)
            regions = _remove_small_regions(regions, this_label_mask,
                                            index, affine, min_size=min_size)
            this_n_labels = regions.max()

        cur_regions = regions[regions != 0] + current_max_label
        new_labels_data[regions != 0] = cur_regions
        current_max_label += this_n_labels
        if name is not None:
            new_names.extend([name] * this_n_labels)

    new_labels_img = new_img_like(labels_img, new_labels_data, affine=affine)
    if labels is not None:
        new_labels = new_names
        return new_labels_img, new_labels

    return new_labels_img
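A runnable toy example of the relabelling behaviour above: two disconnected blobs sharing label 1 come out with two distinct labels (assumes the same imports the function itself uses):

import numpy as np
from nibabel import Nifti1Image

data = np.zeros((9, 9, 9), dtype=np.int32)
data[1:3, 1:3, 1:3] = 1     # first connected component of label 1
data[6:8, 6:8, 6:8] = 1     # second, disconnected component of label 1
toy_labels_img = Nifti1Image(data, np.eye(4))

relabelled = connected_label_regions(toy_labels_img)
print(np.unique(relabelled.get_data()))    # -> [0 1 2]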
Example #41
def plot_vstc_grid_slice(vstc, params_plot_img_with_bg, time=None, cut_coords=6, display_mode='z',
                         figure=None, axes=None, colorbar=False, cmap='magma', threshold='min',
                         **kwargs):
    """
    Plot a volume source space estimation for one slice in the grid in
    plot_vstc_sliced_grid.

    Parameters
    ----------
    vstc : VolSourceEstimate
        The volume source estimate.
    time : int | float | None
        None is default for finding the time sample with the voxel with
        global maximal amplitude. If int or float, the given time point is
        selected and plotted.
    cut_coords : list of a single float
        The MNI coordinates of the point where the cut is performed
        For display_mode == 'x', 'y', or 'z' this is the
        coordinate of the cut in the corresponding direction.
    display_mode : 'x', 'y', 'z'
        Direction in which the brain is sliced.
    figure : matplotlib.figure | None
        Specify the figure container to plot in. If None, a new
        matplotlib.figure is created
    axes : matplotlib.figure.axes | None
        Specify the axes of the given figure to plot in. Only necessary if
        a figure is passed.
    colorbar : bool
        Show the colorbar.
    cmap : matplotlib colormap, optional
        The colormap for specified image. The colormap *must* be
        symmetrical.
    threshold : a number, None, 'auto', or 'min'
        If None is given, the image is not thresholded.
        If a number is given, it is used to threshold the image:
        values below the threshold (in absolute value) are plotted
        as transparent. If auto is given, the threshold is determined
        magically by analysis of the image.

    Returns
    -------
    Figure : matplotlib.figure
        VolSourceEstimation plotted for given or 'auto' coordinates at given
        or 'auto' timepoint.
    """

    vstcdata = vstc.data

    if time is None:
        # time sample with the global maximum amplitude
        t = int(np.argmax(np.sum(vstcdata, axis=0)))
        t_in_ms = vstc.times[t] * 1e3

    else:
        t = np.argmin(np.fabs(vstc.times - time))
        t_in_ms = vstc.times[t] * 1e3

    print('    Found time slice: ', t_in_ms, 'ms')

    if threshold == 'min':
        threshold = vstcdata.min()

    # jumeg_plot_stat_map starts here
    output_file = None
    title = None
    annotate = True
    draw_cross = True
    resampling_interpolation = 'continuous'

    bg_img = params_plot_img_with_bg['bg_img']
    black_bg = params_plot_img_with_bg['black_bg']
    bg_vmin = params_plot_img_with_bg['bg_vmin']
    bg_vmax = params_plot_img_with_bg['bg_vmax']
    stat_map_img = params_plot_img_with_bg['stat_map_img']
    cbar_vmin = params_plot_img_with_bg['cbar_vmin']
    cbar_vmax = params_plot_img_with_bg['cbar_vmax']
    vmin = params_plot_img_with_bg['vmin']
    vmax = params_plot_img_with_bg['vmax']

    # noqa: E501
    # dim the background
    from nilearn.plotting.img_plotting import _plot_img_with_bg
    from nilearn._utils import check_niimg_3d

    stat_map_img_at_time_t = index_img(stat_map_img, t)
    stat_map_img_at_time_t = check_niimg_3d(stat_map_img_at_time_t, dtype='auto')

    display = _plot_img_with_bg(
        img=stat_map_img_at_time_t, bg_img=bg_img, cut_coords=cut_coords,
        output_file=output_file, display_mode=display_mode,
        figure=figure, axes=axes, title=title, annotate=annotate,
        draw_cross=draw_cross, black_bg=black_bg, threshold=threshold,
        bg_vmin=bg_vmin, bg_vmax=bg_vmax, cmap=cmap, vmin=vmin, vmax=vmax,
        colorbar=colorbar, cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax,
        resampling_interpolation=resampling_interpolation, **kwargs)

    vstc_plt = display

    return vstc_plt
Example #42
def remove_distance(coexpression, atlas, atlas_info=None, labels=None):
    """
    Corrects for distance-dependent correlation effects in `coexpression`

    Regresses Euclidean distance between regions in `atlas` from correlated
    gene expression array `coexpression`. If `atlas_info` is provided different
    connection types (e.g., cortex-cortex, cortex-subcortex, subcortex-
    subcortex) will be residualized independently.

    Parameters
    ----------
    coexpression : (R x R) array_like
        Correlated gene expression array, where `R` is the number of regions,
        as generated with e.g., `numpy.corrcoef(expression)`.
    atlas : niimg-like object
        A parcellation image in MNI space, where each parcel is identified by a
        unique integer ID
    atlas_info : str or pandas.DataFrame, optional
        Filepath to or pre-loaded dataframe containing information about
        `atlas`. Must have at least columns 'id', 'hemisphere', and 'structure'
        containing information mapping atlas IDs to hemisphere (i.e, "L", "R")
        and broad structural class (i.e., "cortex", "subcortex", "cerebellum").
        Default: None
    labels : (N,) array_like, optional
        If only a subset `N` of the ROIs in `atlas` were used to generate the
        `coexpression` array this array should specify which to consider. Not
        specifying this may cause a ValueError if `atlas` and `atlas_info` do
        not match. Default: None

    Returns
    -------
    residualized : (R, R) numpy.ndarray
        Provided `coexpression` data residualized against spatial distance
        between region pairs.
    """

    # load atlas_info, if provided
    atlas = check_niimg_3d(atlas)
    if atlas_info is not None:
        atlas_info = utils.check_atlas_info(atlas, atlas_info, labels=labels)
        if labels is not None and len(labels) != len(coexpression):
            raise ValueError('Provided labels {} are a different length than '
                             'provided coexpression matrix of size {}. Please '
                             'confirm inputs and try again.'.format(
                                 labels, coexpression.shape))

    # check that provided coexpression array is symmetric
    check_symmetric(coexpression, raise_exception=True)

    # we'll do basic Euclidean distance correction for now
    # TODO: implement gray matter volume / cortical surface path distance
    centroids = utils.get_centroids(atlas, labels=labels)
    dist = cdist(centroids, centroids, metric='euclidean')

    corr_resid = np.zeros_like(coexpression)
    triu_inds = np.triu_indices_from(coexpression, k=1)
    # if no atlas_info, just residualize all correlations against distance
    if atlas_info is None:
        corr_resid[triu_inds] = _resid_dist(coexpression[triu_inds],
                                            dist[triu_inds])
    # otherwise, we can residualize the different connection types separately
    else:
        triu_inds = np.ravel_multi_index(triu_inds, corr_resid.shape)
        coexpression, dist = coexpression.ravel(), dist.ravel()
        types = ['cortex', 'subcortex']
        for src, tar in itertools.combinations_with_replacement(types, 2):
            # get indices of sources and targets
            sources = np.where(atlas_info.structure == src)[0]
            targets = np.where(atlas_info.structure == tar)[0]
            inds = np.ravel_multi_index(np.ix_(sources, targets),
                                        corr_resid.shape)
            if src != tar:  # e.g., cortex + subcortex
                rev = np.ravel_multi_index(np.ix_(targets, sources),
                                           corr_resid.shape)
                inds = np.append(inds.ravel(), rev.ravel())
            # find intersection of source / target indices + upper triangle
            inds = np.intersect1d(triu_inds, inds)
            back = np.unravel_index(inds, corr_resid.shape)
            # residualize
            corr_resid[back] = _resid_dist(coexpression[inds], dist[inds])

    corr_resid = (corr_resid + corr_resid.T + np.eye(len(corr_resid)))

    return corr_resid
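An illustrative call (all names here are assumptions: `expression` would be a regions-by-genes matrix and 'atlas.nii.gz' a parcellation in MNI space):

# expression = ...                        # (R regions x G genes) array
# coexpression = np.corrcoef(expression)  # (R x R) correlated expression
# residualized = remove_distance(coexpression, 'atlas.nii.gz')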
Example #43
    def _fit(self, img, mask):
        LGR.info(
            "Performing dimensionality reduction based on GIFT "
            "(https://trendscenter.org/software/gift/) and Li, Y. O., Adali, T., "
            "& Calhoun, V. D. (2007). Estimating the number of independent components "
            "for functional magnetic resonance imaging data. Human Brain Mapping, 28(11), "
            "1251–1266. https://doi.org/10.1002/hbm.20359")

        img = check_niimg_4d(img)
        mask = check_niimg_3d(mask)
        data = img.get_fdata()
        mask = mask.get_fdata()

        [n_x, n_y, n_z, n_timepoints] = data.shape
        data_nib_V = np.reshape(data, (n_x * n_y * n_z, n_timepoints),
                                order="F")
        mask_vec = np.reshape(mask, n_x * n_y * n_z, order="F")
        X = data_nib_V[mask_vec == 1, :]

        n_samples = np.sum(mask_vec)

        self.scaler_ = StandardScaler(with_mean=True, with_std=True)
        if self.normalize:
            # TODO: determine if tedana is already normalizing before this
            X = self.scaler_.fit_transform(X.T).T  # This was X_sc
            # X = ((X.T - X.T.mean(axis=0)) / X.T.std(axis=0)).T

        LGR.info("Performing SVD on original data...")
        V, eigenvalues = utils._icatb_svd(X, n_timepoints)
        LGR.info("SVD done on original data")

        # Reordering of values
        eigenvalues = eigenvalues[::-1]
        dataN = np.dot(X, V[:, ::-1])
        # Potentially the small differences come from the different signs on V

        # Using 12 gaussian components from middle, top and bottom gaussian
        # components to determine the subsampling depth.
        # Final subsampling depth is determined using median
        kurt = kurtosis(dataN, axis=0, fisher=True)
        kurt[kurt < 0] = 0
        kurt = np.expand_dims(kurt, 1)

        kurt[eigenvalues > np.mean(eigenvalues)] = 1000
        idx_gauss = np.where(
            ((kurt[:, 0] < 0.3) & (kurt[:, 0] > 0)
             & (eigenvalues > np.finfo(float).eps)
             ) == 1)[0]  # NOTE: make sure np.where is giving us just one tuple
        idx = np.array(idx_gauss[:]).T
        dfs = np.sum(eigenvalues > np.finfo(float).eps)  # degrees of freedom
        minTp = 12

        if len(idx) >= minTp:
            middle = int(np.round(len(idx) / 2))
            idx = np.hstack([idx[0:4], idx[middle - 1:middle + 3], idx[-4:]])
        else:
            minTp = np.min([minTp, dfs])
            idx = np.arange(dfs - minTp, dfs)

        idx = np.unique(idx)

        # Estimate the subsampling depth for effectively i.i.d. samples
        LGR.info(
            "Estimating the subsampling depth for effective i.i.d samples...")
        mask_ND = np.reshape(mask_vec, (n_x, n_y, n_z), order="F")
        sub_depth = len(idx)
        sub_iid_sp = np.zeros((sub_depth, ))
        for i in range(sub_depth):
            x_single = np.zeros(n_x * n_y * n_z)
            x_single[mask_vec == 1] = dataN[:, idx[i]]
            x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
            sub_iid_sp[i] = utils._est_indp_sp(x_single)[0] + 1
            if i > 6:
                tmp_sub_sp = sub_iid_sp[0:i]
                tmp_sub_median = np.round(np.median(tmp_sub_sp))
                if np.sum(tmp_sub_sp == tmp_sub_median) > 6:
                    sub_iid_sp = tmp_sub_sp
                    break
            dim_n = x_single.ndim

        sub_iid_sp_median = int(np.round(np.median(sub_iid_sp)))
        if np.floor(np.power(n_samples / n_timepoints,
                             1 / dim_n)) < sub_iid_sp_median:
            sub_iid_sp_median = int(
                np.floor(np.power(n_samples / n_timepoints, 1 / dim_n)))
        N = np.round(n_samples / np.power(sub_iid_sp_median, dim_n))

        if sub_iid_sp_median != 1:
            mask_s = utils._subsampling(mask_ND, sub_iid_sp_median)
            mask_s_1d = np.reshape(mask_s, np.prod(mask_s.shape), order="F")
            dat = np.zeros((int(np.sum(mask_s_1d)), n_timepoints))
            LGR.info("Generating subsampled i.i.d. data...")
            for i_vol in range(n_timepoints):
                x_single = np.zeros(n_x * n_y * n_z)
                x_single[mask_vec == 1] = X[:, i_vol]
                x_single = np.reshape(x_single, (n_x, n_y, n_z), order="F")
                dat0 = utils._subsampling(x_single, sub_iid_sp_median)
                dat0 = np.reshape(dat0, np.prod(dat0.shape), order="F")
                dat[:, i_vol] = dat0[mask_s_1d == 1]

            # Perform Variance Normalization
            temp_scaler = StandardScaler(with_mean=True, with_std=True)
            dat = temp_scaler.fit_transform(dat.T).T

            # (completed)
            LGR.info("Performing SVD on subsampled i.i.d. data...")
            V, eigenvalues = utils._icatb_svd(dat, n_timepoints)
            LGR.info("SVD done on subsampled i.i.d. data")
            eigenvalues = eigenvalues[::-1]

        LGR.info("Effective number of i.i.d. samples %d" % N)

        # Make eigen spectrum adjustment
        LGR.info("Perform eigen spectrum adjustment ...")
        eigenvalues = utils._eigensp_adj(eigenvalues, N, eigenvalues.shape[0])
        # (completed)
        if np.sum(np.imag(eigenvalues)):
            raise ValueError(
                "Invalid eigenvalue found for the subsampled data.")

        # Correction on the ill-conditioned results (when tdim is large,
        # some least significant eigenvalues become small negative numbers)
        if eigenvalues[
                np.real(eigenvalues) <= np.finfo(float).eps].shape[0] > 0:
            eigenvalues[np.real(eigenvalues) <= np.finfo(float).eps] = np.min(
                eigenvalues[np.real(eigenvalues) >= np.finfo(float).eps])

        LGR.info("Estimating the dimensionality ...")
        p = n_timepoints
        aic = np.zeros(p - 1)
        kic = np.zeros(p - 1)
        mdl = np.zeros(p - 1)

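        # For each candidate dimensionality k: LH is the log-ratio of the
        # geometric to the arithmetic mean of the trailing eigenvalues
        # (0 when they are all equal, i.e. isotropic noise), mlh the
        # corresponding log-likelihood term, and df the free-parameter
        # count. AIC, KIC and MDL differ only in how strongly they
        # penalize df.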
        for k_idx, k in enumerate(np.arange(1, p)):
            LH = np.log(
                np.prod(np.power(eigenvalues[k:], 1 / (p - k))) /
                np.mean(eigenvalues[k:]))
            mlh = 0.5 * N * (p - k) * LH
            df = 1 + 0.5 * k * (2 * p - k + 1)
            aic[k_idx] = (-2 * mlh) + (2 * df)
            kic[k_idx] = (-2 * mlh) + (3 * df)
            mdl[k_idx] = -mlh + (0.5 * df * np.log(N))

        itc = np.vstack([aic, kic, mdl])

        dlap = np.diff(itc, axis=1)

        # Calculate optimal number of components with each criterion
        # AIC
        a_aic = np.where(dlap[0, :] > 0)[0] + 1
        if a_aic.size == 0:
            n_aic = itc[0, :].shape[0]
        else:
            n_aic = a_aic[0]

        # KIC
        a_kic = np.where(dlap[1, :] > 0)[0] + 1
        if a_kic.size == 0:
            n_kic = itc[1, :].shape[0]
        else:
            n_kic = a_kic[0]

        # MDL
        a_mdl = np.where(dlap[2, :] > 0)[0] + 1
        if a_mdl.size == 0:
            n_mdl = itc[2, :].shape[0]
        else:
            n_mdl = a_mdl[0]

        if self.criterion == "aic":
            n_components = n_aic
        elif self.criterion == "kic":
            n_components = n_kic
        elif self.criterion == "mdl":
            n_components = n_mdl

        LGR.info("Performing PCA")

        # PCA with all possible components (the estimated selection is made after)
        ppca = PCA(n_components=None,
                   svd_solver="full",
                   copy=False,
                   whiten=False)
        ppca.fit(X)

        # Get cumulative explained variance as components are added
        cumsum_varexp = np.cumsum(ppca.explained_variance_ratio_)

        # Calculate number of components for 90% varexp
        n_comp_varexp_90 = np.where(cumsum_varexp >= 0.9)[0][0] + 1

        # Calculate number of components for 95% varexp
        n_comp_varexp_95 = np.where(cumsum_varexp >= 0.95)[0][0] + 1

        LGR.info("Estimated number of components is %d" % n_components)

        # Save results of each criterion into dictionaries
        self.aic_ = {
            "n_components": n_aic,
            "value": aic,
            "explained_variance_total": cumsum_varexp[n_aic - 1],
        }
        self.kic_ = {
            "n_components": n_kic,
            "value": kic,
            "explained_variance_total": cumsum_varexp[n_kic - 1],
        }
        self.mdl_ = {
            "n_components": n_mdl,
            "value": mdl,
            "explained_variance_total": cumsum_varexp[n_mdl - 1],
        }
        self.varexp_90_ = {
            "n_components": n_comp_varexp_90,
            "explained_variance_total": cumsum_varexp[n_comp_varexp_90 - 1],
        }
        self.varexp_95_ = {
            "n_components": n_comp_varexp_95,
            "explained_variance_total": cumsum_varexp[n_comp_varexp_95 - 1],
        }

        # Assign attributes from model
        self.components_ = ppca.components_[:n_components, :]
        self.explained_variance_ = ppca.explained_variance_[:n_components]
        self.explained_variance_ratio_ = \
            ppca.explained_variance_ratio_[:n_components]
        self.singular_values_ = ppca.singular_values_[:n_components]
        self.mean_ = ppca.mean_
        self.n_components_ = n_components
        self.n_features_ = ppca.n_features_
        self.n_samples_ = ppca.n_samples_
        # Commenting out noise variance as it depends on the covariance of the estimation
        # self.noise_variance_ = ppca.noise_variance_
        component_maps = np.dot(np.dot(X, self.components_.T),
                                np.diag(1.0 / self.explained_variance_))
        component_maps_3d = np.zeros((n_x * n_y * n_z, n_components))
        component_maps_3d[mask_vec == 1, :] = component_maps
        component_maps_3d = np.reshape(component_maps_3d,
                                       (n_x, n_y, n_z, n_components),
                                       order="F")
        self.u_ = component_maps
        self.u_nii_ = nib.Nifti1Image(component_maps_3d, img.affine,
                                      img.header)