Example #1
def test_nifti_maps_masker_with_nans_and_infs_in_data():
    """Apply a NiftiMapsMasker to 4D data containing NaNs and infs.

    The masker should replace those NaNs and infs with zeros,
    while raising a warning.
    """
    length = 3
    n_regions = 8
    fmri_img, mask_img = generate_random_img((13, 11, 12),
                                             affine=np.eye(4),
                                             length=length)
    maps_img, maps_mask_img = data_gen.generate_maps((13, 11, 12),
                                                     n_regions,
                                                     affine=np.eye(4))

    # Add NaNs and infs to data
    fmri_data = get_data(fmri_img)

    fmri_data[:, 9, 9, :] = np.nan
    fmri_data[:, 5, 5, :] = np.inf

    fmri_img = nibabel.Nifti1Image(fmri_data, np.eye(4))

    masker = NiftiMapsMasker(maps_img, mask_img=mask_img)

    with pytest.warns(UserWarning, match="Non-finite values detected."):
        sig = masker.fit_transform(fmri_img)

    assert sig.shape == (length, n_regions)
    assert np.all(np.isfinite(sig))
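
The warning asserted above is triggered by non-finite voxels in the input image. As a standalone sketch (outside the test, with a hypothetical Niimg-like ``img``), the same condition can be checked up front:

import numpy as np
from nilearn.image import get_data

data = get_data(img)  # img: hypothetical Niimg-like input, not defined here
if not np.isfinite(data).all():
    print("Non-finite values detected; "
          "NiftiMapsMasker will replace them with zeros.")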
Example #2
def test_nifti_maps_masker_report_image_in_fit(niftimapsmasker_inputs):
    """"""
    masker = NiftiMapsMasker(**niftimapsmasker_inputs)
    image, _ = generate_random_img((13, 11, 12), affine=np.eye(4), length=3)
    masker.fit(image)
    html = masker.generate_report(2)
    assert masker._report_content['report_id'] == 0
    assert masker._report_content['number_of_maps'] == 9
    assert masker._report_content['warning_message'] is None
    assert html.body.count("<img") == 2
Example #3
def test_standardization():
    rng = np.random.RandomState(42)
    data_shape = (9, 9, 5)
    n_samples = 500

    signals = rng.standard_normal(size=(np.prod(data_shape), n_samples))
    means = rng.standard_normal(size=(np.prod(data_shape), 1)) * 50 + 1000
    signals += means
    img = nibabel.Nifti1Image(signals.reshape(data_shape + (n_samples, )),
                              np.eye(4))

    maps, _ = data_gen.generate_maps((9, 9, 5), 10)

    # Unstandardized
    masker = NiftiMapsMasker(maps, standardize=False)
    unstandardized_label_signals = masker.fit_transform(img)

    # z-score
    masker = NiftiMapsMasker(maps, standardize='zscore')
    trans_signals = masker.fit_transform(img)

    np.testing.assert_almost_equal(trans_signals.mean(0), 0)
    np.testing.assert_almost_equal(trans_signals.std(0), 1)

    # psc
    masker = NiftiMapsMasker(maps, standardize='psc')
    trans_signals = masker.fit_transform(img)

    np.testing.assert_almost_equal(trans_signals.mean(0), 0)
    np.testing.assert_almost_equal(
        trans_signals,
        unstandardized_label_signals / unstandardized_label_signals.mean(0)
        * 100 - 100,
    )
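
The 'psc' assertion above encodes the percent-signal-change formula directly. A minimal standalone NumPy sketch of the same computation (on a hypothetical signals array with time along the first axis) is:

import numpy as np

rng = np.random.RandomState(0)
signals = rng.normal(loc=1000, scale=50, size=(500, 10))  # (n_timepoints, n_regions)
mean = signals.mean(axis=0)
# percent signal change; equivalent to signals / mean * 100 - 100
psc = (signals - mean) / mean * 100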
Example #4
def test_nifti_maps_masker_report_list_and_arrays_maps_number(
        niftimapsmasker_inputs, displayed_maps):
    """Tests report generation for NiftiMapsMasker with displayed_maps
    passed as a list of a Numpy arrays.
    """
    masker = NiftiMapsMasker(**niftimapsmasker_inputs)
    masker.fit()
    html = masker.generate_report(displayed_maps)
    assert masker._report_content['report_id'] == 0
    assert masker._report_content['number_of_maps'] == 9
    assert (masker._report_content['displayed_maps'] == list(displayed_maps))
    msg = ("No image provided to fit in NiftiMapsMasker. "
           "Plotting only spatial maps for reporting.")
    assert masker._report_content['warning_message'] == msg
    assert html.body.count("<img") == len(displayed_maps)
Example #5
def test_3d_images():
    # Test that the NiftiMapsMasker works with 3D images
    affine = np.eye(4)
    n_regions = 3
    shape3 = (16, 17, 18)

    maps33_img, _ = data_gen.generate_maps(shape3, n_regions)
    mask_img = nibabel.Nifti1Image(np.ones(shape3, dtype=np.int8),
                                   affine=affine)
    epi_img1 = nibabel.Nifti1Image(np.ones(shape3), affine=affine)
    epi_img2 = nibabel.Nifti1Image(np.ones(shape3), affine=affine)
    masker = NiftiMapsMasker(maps33_img, mask_img=mask_img)

    epis = masker.fit_transform(epi_img1)
    assert (epis.shape == (1, 3))
    epis = masker.fit_transform([epi_img1, epi_img2])
    assert (epis.shape == (2, 3))
Example #6
def test_nifti_maps_masker_with_nans_and_infs():
    """Apply a NiftiMapsMasker containing NaNs and infs.

    The masker should replace those NaNs and infs with zeros,
    without raising a warning.
    """
    length = 3
    n_regions = 8
    fmri_img, mask_img = generate_random_img((13, 11, 12),
                                             affine=np.eye(4),
                                             length=length)
    maps_img, maps_mask_img = data_gen.generate_maps((13, 11, 12),
                                                     n_regions,
                                                     affine=np.eye(4))

    # Add NaNs and infs to atlas
    maps_data = get_data(maps_img).astype(np.float32)
    mask_data = get_data(mask_img).astype(np.float32)
    maps_data = maps_data * mask_data[..., None]

    # Choose a good voxel from the first label
    vox_idx = np.where(maps_data[..., 0] > 0)
    i1, j1, k1 = vox_idx[0][0], vox_idx[1][0], vox_idx[2][0]
    i2, j2, k2 = vox_idx[0][1], vox_idx[1][1], vox_idx[2][1]

    maps_data[:, :, :, 0] = np.nan
    maps_data[i2, j2, k2, 0] = np.inf
    maps_data[i1, j1, k1, 0] = 1

    maps_img = nibabel.Nifti1Image(maps_data, np.eye(4))

    # No warning, because maps_img is run through clean_img
    # *before* _safe_get_data.
    masker = NiftiMapsMasker(maps_img, mask_img=mask_img)

    sig = masker.fit_transform(fmri_img)

    assert sig.shape == (length, n_regions)
    assert np.all(np.isfinite(sig))
Example #7
def test_nifti_maps_masker_report_displayed_maps_errors(
        niftimapsmasker_inputs, displayed_maps):
    """Tests that a TypeError is raised when the argument `displayed_maps`
    of `generate_report()` is not valid.
    """
    masker = NiftiMapsMasker(**niftimapsmasker_inputs)
    masker.fit()
    with pytest.raises(TypeError, match=("Parameter ``displayed_maps``")):
        masker.generate_report(displayed_maps)
Example #8
def test_nifti_maps_masker_report_maps_number_errors(niftimapsmasker_inputs,
                                                     displayed_maps):
    """Tests that a ValueError is raised when the argument `displayed_maps`
    contains invalid map numbers.
    """
    masker = NiftiMapsMasker(**niftimapsmasker_inputs)
    masker.fit()
    with pytest.raises(ValueError,
                       match="Report cannot display the following maps"):
        masker.generate_report(displayed_maps)
Example #9
def test_nifti_maps_masker_report_integer_and_all_displayed_maps(
        niftimapsmasker_inputs, displayed_maps):
    """Tests NiftiMapsMasker reporting with no image provided to fit
    and displayed_maps provided as an integer or as 'all'.
    """
    masker = NiftiMapsMasker(**niftimapsmasker_inputs)
    masker.fit()
    expected_n_maps = 9 if displayed_maps == 'all' else min(9, displayed_maps)
    if displayed_maps != 'all' and displayed_maps > 9:
        with pytest.warns(UserWarning, match="masker only has 9 maps."):
            html = masker.generate_report(displayed_maps)
    else:
        html = masker.generate_report(displayed_maps)
    assert masker._report_content['report_id'] == 0
    assert masker._report_content['number_of_maps'] == 9
    assert (masker._report_content['displayed_maps'] == list(
        range(expected_n_maps)))
    msg = ("No image provided to fit in NiftiMapsMasker. "
           "Plotting only spatial maps for reporting.")
    assert masker._report_content['warning_message'] == msg
    assert html.body.count("<img") == expected_n_maps
Example #10
# Loading atlas image stored in 'maps'
atlas_filename = atlas['maps']
# Loading atlas data stored in 'labels'
labels = atlas['labels']

# Load the functional datasets
data = datasets.fetch_development_fmri(n_subjects=1)

print('First subject resting-state nifti image (4D) is located at: %s' %
      data.func[0])

############################################################################
# Extract the time series
# ------------------------
from nilearn.maskers import NiftiMapsMasker
masker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True,
                         memory='nilearn_cache', verbose=5)
masker.fit(data.func[0])
time_series = masker.transform(data.func[0],
                               confounds=data.confounds)

############################################################################
# We can generate an HTML report and visualize the components of the
# :class:`~nilearn.maskers.NiftiMapsMasker`.
# You can pass the indices of the spatial maps you want to include in the
# report in the order you want them to appear.
# Here, we only include maps 2, 6, 7, 16, and 21 in the report:
report = masker.generate_report(displayed_maps=[2, 6, 7, 16, 21])
report
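
############################################################################
# The returned ``report`` is an in-memory HTML object. The original example
# stops here; as a sketch, it can also be saved to disk or opened directly
# (the file name below is hypothetical):
report.save_as_html('nifti_maps_masker_report.html')
report.open_in_browser()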

############################################################################
# `time_series` is now a 2D matrix, of shape (number of time points x
# number of regions)
Example #11
# Loading atlas data stored in 'labels'
labels = atlas['labels']

# Loading the functional datasets
data = datasets.fetch_development_fmri(n_subjects=1)

# print basic information on the dataset
print('First subject functional nifti images (4D) are at: %s' %
      data.func[0])  # 4D data

##############################################################################
# Extract time series
# --------------------
from nilearn.maskers import NiftiMapsMasker
masker = NiftiMapsMasker(maps_img=atlas_filename,
                         standardize=True,
                         memory='nilearn_cache',
                         verbose=5)

time_series = masker.fit_transform(data.func[0], confounds=data.confounds)

##############################################################################
# Compute the sparse inverse covariance
# --------------------------------------
try:
    from sklearn.covariance import GraphicalLassoCV
except ImportError:
    # for Scikit-Learn < v0.20.0
    from sklearn.covariance import GraphLassoCV as GraphicalLassoCV

estimator = GraphicalLassoCV()
estimator.fit(time_series)
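
##############################################################################
# Not part of the original snippet: after fitting, ``GraphicalLassoCV``
# exposes the estimated covariance and sparse precision matrices, which are
# what one typically inspects or plots next.
covariance_matrix = estimator.covariance_
precision_matrix = estimator.precision_
print(covariance_matrix.shape, precision_matrix.shape)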
Example #12
##############################################################################
# Extracting region signals
# --------------------------
from nilearn.maskers import NiftiMapsMasker

# A "memory" to avoid recomputation
from joblib import Memory
mem = Memory('nilearn_cache')

masker = NiftiMapsMasker(msdl_atlas_dataset.maps,
                         resampling_target="maps",
                         detrend=True,
                         high_variance_confounds=True,
                         low_pass=None,
                         high_pass=0.01,
                         t_r=2,
                         standardize=True,
                         memory='nilearn_cache',
                         memory_level=1,
                         verbose=2)
masker.fit()

subject_time_series = []
func_filenames = rest_dataset.func
confound_filenames = rest_dataset.confounds
for func_filename, confound_filename in zip(func_filenames,
                                            confound_filenames):
    print("Processing file %s" % func_filename)

    region_ts = masker.transform(func_filename, confounds=confound_filename)
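    # Not shown in the original snippet (it is truncated here), but the
    # extracted signals would typically be collected for later use, e.g.:
    subject_time_series.append(region_ts)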
Example #13
def test_nifti_maps_masker_2():
    # Test resampling in NiftiMapsMasker
    affine = np.eye(4)

    shape1 = (10, 11, 12)  # fmri
    shape2 = (13, 14, 15)  # mask
    shape3 = (16, 17, 18)  # maps

    n_regions = 9
    length = 3

    fmri11_img, _ = generate_random_img(shape1, affine=affine, length=length)
    _, mask22_img = generate_random_img(shape2, affine=affine, length=length)

    maps33_img, _ = \
        data_gen.generate_maps(shape3, n_regions, affine=affine)

    mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),
                                      affine=np.diag((4, 4, 4, 1)))

    # verify that 4D mask arguments are refused
    masker = NiftiMapsMasker(maps33_img, mask_img=mask_img_4d)
    with pytest.raises(DimensionError,
                       match="Input data has incompatible dimensionality: "
                       "Expected dimension is 3D and you provided "
                       "a 4D image."):
        masker.fit()

    # Test error checking
    pytest.raises(ValueError,
                  NiftiMapsMasker,
                  maps33_img,
                  resampling_target="mask")
    pytest.raises(ValueError,
                  NiftiMapsMasker,
                  maps33_img,
                  resampling_target="invalid")

    # Target: mask
    masker = NiftiMapsMasker(maps33_img,
                             mask_img=mask22_img,
                             resampling_target="mask")

    masker.fit()
    np.testing.assert_almost_equal(masker.mask_img_.affine, mask22_img.affine)
    assert masker.mask_img_.shape == mask22_img.shape

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert masker.mask_img_.shape == masker.maps_img_.shape[:3]

    transformed = masker.transform(fmri11_img)
    assert transformed.shape == (length, n_regions)

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert fmri11_img_r.shape == (masker.maps_img_.shape[:3] + (length, ))

    # Target: maps
    masker = NiftiMapsMasker(maps33_img,
                             mask_img=mask22_img,
                             resampling_target="maps")

    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine, maps33_img.affine)
    assert masker.maps_img_.shape == maps33_img.shape

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert masker.mask_img_.shape == masker.maps_img_.shape[:3]

    transformed = masker.transform(fmri11_img)
    assert transformed.shape == (length, n_regions)

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert fmri11_img_r.shape == (masker.maps_img_.shape[:3] + (length, ))

    # Test with clipped maps: mask does not contain all maps.
    # Shapes do matter in that case
    affine1 = np.eye(4)
    shape1 = (10, 11, 12)
    shape2 = (8, 9, 10)  # mask
    affine2 = np.diag((2, 2, 2, 1))  # just for mask
    shape3 = (16, 18, 20)  # maps

    n_regions = 9
    length = 21

    fmri11_img, _ = generate_random_img(shape1, affine=affine1, length=length)
    _, mask22_img = data_gen.generate_fake_fmri(shape2,
                                                length=1,
                                                affine=affine2)
    # Target: maps
    maps33_img, _ = \
        data_gen.generate_maps(shape3, n_regions, affine=affine1)

    masker = NiftiMapsMasker(maps33_img,
                             mask_img=mask22_img,
                             resampling_target="maps")

    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine, maps33_img.affine)
    assert masker.maps_img_.shape == maps33_img.shape

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert masker.mask_img_.shape == masker.maps_img_.shape[:3]

    transformed = masker.transform(fmri11_img)
    assert transformed.shape == (length, n_regions)
    # Some regions have been clipped: their resulting signal is zero,
    # but not all regions should be clipped.
    assert (transformed.var(axis=0) == 0).sum() < n_regions

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert (fmri11_img_r.shape == (masker.maps_img_.shape[:3] + (length, )))
Example #14
def test_nifti_maps_masker():
    # Check working of shape/affine checks
    shape1 = (13, 11, 12)
    affine1 = np.eye(4)

    shape2 = (12, 10, 14)
    affine2 = np.diag((1, 2, 3, 1))

    n_regions = 9
    length = 3

    fmri11_img, mask11_img = generate_random_img(shape1,
                                                 affine=affine1,
                                                 length=length)
    fmri12_img, mask12_img = generate_random_img(shape1,
                                                 affine=affine2,
                                                 length=length)
    fmri21_img, mask21_img = generate_random_img(shape2,
                                                 affine=affine1,
                                                 length=length)

    labels11_img, labels_mask_img = \
        data_gen.generate_maps(shape1, n_regions, affine=affine1)

    # No exception raised here
    for create_files in (True, False):
        with testing.write_tmp_imgs(labels11_img, create_files=create_files) \
                as labels11:
            masker11 = NiftiMapsMasker(labels11, resampling_target=None)
            signals11 = masker11.fit().transform(fmri11_img)
            assert signals11.shape == (length, n_regions)
            # allows deleting "labels11" on Windows
            del masker11

    masker11 = NiftiMapsMasker(labels11_img,
                               mask_img=mask11_img,
                               resampling_target=None)

    with pytest.raises(ValueError, match='has not been fitted. '):
        masker11.transform(fmri11_img)
    signals11 = masker11.fit().transform(fmri11_img)
    assert signals11.shape == (length, n_regions)

    NiftiMapsMasker(labels11_img).fit_transform(fmri11_img)

    # Test all kinds of mismatches between shapes and between affines
    for create_files in (True, False):
        with testing.write_tmp_imgs(labels11_img,
                                    mask12_img,
                                    create_files=create_files) as images:
            labels11, mask12 = images
            masker11 = NiftiMapsMasker(labels11, resampling_target=None)
            masker11.fit()
            pytest.raises(ValueError, masker11.transform, fmri12_img)
            pytest.raises(ValueError, masker11.transform, fmri21_img)

            masker11 = NiftiMapsMasker(labels11,
                                       mask_img=mask12,
                                       resampling_target=None)
            pytest.raises(ValueError, masker11.fit)
            del masker11

    masker11 = NiftiMapsMasker(labels11_img,
                               mask_img=mask21_img,
                               resampling_target=None)
    pytest.raises(ValueError, masker11.fit)

    # Transform, with smoothing (smoke test)
    masker11 = NiftiMapsMasker(labels11_img,
                               smoothing_fwhm=3,
                               resampling_target=None)
    signals11 = masker11.fit().transform(fmri11_img)
    assert signals11.shape == (length, n_regions)

    masker11 = NiftiMapsMasker(labels11_img,
                               smoothing_fwhm=3,
                               resampling_target=None)
    signals11 = masker11.fit_transform(fmri11_img)
    assert signals11.shape == (length, n_regions)

    with pytest.raises(ValueError, match='has not been fitted. '):
        NiftiMapsMasker(labels11_img).inverse_transform(signals11)

    # Call inverse transform (smoke test)
    fmri11_img_r = masker11.inverse_transform(signals11)
    assert fmri11_img_r.shape == fmri11_img.shape
    np.testing.assert_almost_equal(fmri11_img_r.affine, fmri11_img.affine)

    # Now try on a masker that has never seen the call to "transform"
    masker2 = NiftiMapsMasker(labels11_img, resampling_target=None)
    masker2.fit()
    masker2.inverse_transform(signals11)

    # Test with data and atlas of different shape: the atlas should be
    # resampled to the data
    shape22 = (5, 5, 6)
    affine2 = 2 * np.eye(4)
    affine2[-1, -1] = 1

    fmri22_img, _ = generate_random_img(shape22, affine=affine2, length=length)
    masker = NiftiMapsMasker(labels11_img, mask_img=mask21_img)

    masker.fit_transform(fmri22_img)
    np.testing.assert_array_equal(masker._resampled_maps_img_.affine, affine2)
Example #15
    def fit(self, imgs, y=None, confounds=None):
        """Compute the mask and the components across subjects

        Parameters
        ----------
        imgs : list of Niimg-like objects
            See http://nilearn.github.io/manipulating_images/input_output.html
            Data on which the mask is calculated. If this is a list,
            the affine is considered the same for all.

        confounds : list of CSV file paths or numpy.ndarrays or pandas DataFrames, optional
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details. Should match the list of imgs
            given.

        Returns
        -------
        self : object
            Returns the instance itself. Contains attributes listed
            at the object level.

        """
        # Base fit for decomposition estimators : compute the embedded masker

        if isinstance(imgs, str):
            if nilearn.EXPAND_PATH_WILDCARDS and glob.has_magic(imgs):
                imgs = _resolve_globbing(imgs)

        if isinstance(imgs, str) or not hasattr(imgs, '__iter__'):
            # these classes are meant for list of 4D images
            # (multi-subject), we want it to work also on a single
            # subject, so we hack it.
            imgs = [
                imgs,
            ]

        if len(imgs) == 0:
            # Common error that arises from a null glob. Capture
            # it early and raise a helpful message
            raise ValueError('Need one or more Niimg-like objects as input, '
                             'an empty list was given.')
        self.masker_ = _check_embedded_nifti_masker(self)

        # Avoid warning with imgs != None
        # if masker_ has been provided a mask_img
        if self.masker_.mask_img is None:
            self.masker_.fit(imgs)
        else:
            self.masker_.fit()
        self.mask_img_ = self.masker_.mask_img_

        # mask_and_reduce step for decomposition estimators i.e.
        # MultiPCA, CanICA and Dictionary Learning
        if self.verbose:
            print("[{0}] Loading data".format(self.__class__.__name__))
        data = mask_and_reduce(self.masker_,
                               imgs,
                               confounds=confounds,
                               n_components=self.n_components,
                               random_state=self.random_state,
                               memory=self.memory,
                               memory_level=max(0, self.memory_level + 1),
                               n_jobs=self.n_jobs)
        self._raw_fit(data)

        # Create and fit NiftiMapsMasker for transform
        # and inverse_transform
        self.nifti_maps_masker_ = NiftiMapsMasker(self.components_img_,
                                                  self.masker_.mask_img_,
                                                  resampling_target='maps')

        self.nifti_maps_masker_.fit()

        return self
Example #16
n_regions = len(msdl_coords)
print('MSDL has {0} ROIs, part of the following networks :\n{1}.'.format(
    n_regions, msdl_data.networks))

###############################################################################
# Region signals extraction
# -------------------------
# To extract region time series, we instantiate a
# :class:`nilearn.maskers.NiftiMapsMasker` object and pass it the atlas
# file name, as well as the filtering band-width and detrending options.
from nilearn.maskers import NiftiMapsMasker

masker = NiftiMapsMasker(msdl_data.maps,
                         resampling_target="data",
                         t_r=2,
                         detrend=True,
                         low_pass=.1,
                         high_pass=.01,
                         memory='nilearn_cache',
                         memory_level=1).fit()

###############################################################################
# Then we compute region signals and extract useful phenotypic information.
children = []
pooled_subjects = []
groups = []  # child or adult
for func_file, confound_file, phenotypic in zip(
        development_dataset.func, development_dataset.confounds,
        development_dataset.phenotypic):
    time_series = masker.transform(func_file, confounds=confound_file)
    pooled_subjects.append(time_series)
    if phenotypic['Child_Adult'] == 'child':
Example #17
def test_nifti_maps_masker_overlap():
    # Test overlap handling in NiftiMapsMasker
    affine = np.eye(4)
    shape = (5, 5, 5)
    length = 10

    fmri_img, _ = generate_random_img(shape, affine=affine, length=length)
    non_overlapping_maps = np.zeros(shape + (2, ))
    non_overlapping_maps[:2, :, :, 0] = 1.
    non_overlapping_maps[2:, :, :, 1] = 1.
    non_overlapping_maps_img = nibabel.Nifti1Image(non_overlapping_maps,
                                                   affine)

    overlapping_maps = np.zeros(shape + (2, ))
    overlapping_maps[:3, :, :, 0] = 1.
    overlapping_maps[2:, :, :, 1] = 1.
    overlapping_maps_img = nibabel.Nifti1Image(overlapping_maps, affine)

    overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img,
                                         allow_overlap=True)
    overlapping_masker.fit_transform(fmri_img)
    overlapping_masker = NiftiMapsMasker(overlapping_maps_img,
                                         allow_overlap=True)
    overlapping_masker.fit_transform(fmri_img)

    non_overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img,
                                             allow_overlap=False)
    non_overlapping_masker.fit_transform(fmri_img)
    non_overlapping_masker = NiftiMapsMasker(overlapping_maps_img,
                                             allow_overlap=False)
    with pytest.raises(ValueError, match='Overlap detected'):
        non_overlapping_masker.fit_transform(fmri_img)
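
The ValueError above comes from voxels that belong to more than one map. A standalone sketch of such an overlap check (not necessarily the masker's exact internal test, and assuming a hypothetical 4D probabilistic maps image ``maps_img``) is:

import numpy as np
from nilearn.image import get_data

maps_data = get_data(maps_img)  # shape (x, y, z, n_maps)
n_maps_per_voxel = (np.abs(maps_data) > 1e-12).sum(axis=-1)
n_overlapping = int((n_maps_per_voxel > 1).sum())
print("voxels covered by more than one map:", n_overlapping)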
Example #18
##########################################################################
# Load probabilistic atlases - extracting coordinates on brain maps
# -----------------------------------------------------------------

dim = 64
difumo = datasets.fetch_atlas_difumo(dimension=dim, resolution_mm=2)

##########################################################################
# Iterate over fetched atlases to extract coordinates - probabilistic
# -------------------------------------------------------------------
from nilearn.maskers import NiftiMapsMasker

# create masker to extract functional data within atlas parcels
masker = NiftiMapsMasker(maps_img=difumo.maps,
                         standardize=True,
                         memory='nilearn_cache')
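
# ``data`` (functional images and confounds) and ``connectome_measure`` are
# used below but defined earlier in the full example, outside this excerpt.
# A plausible stand-in for the connectivity estimator -- an assumption, not
# part of the original -- is:
from nilearn.connectome import ConnectivityMeasure
connectome_measure = ConnectivityMeasure(kind='correlation')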

# extract time series from all subjects and concatenate them
time_series = []
for func, confounds in zip(data.func, data.confounds):
    time_series.append(masker.fit_transform(func, confounds=confounds))

# calculate correlation matrices across subjects and display
correlation_matrices = connectome_measure.fit_transform(time_series)

# Mean correlation matrix across 10 subjects can be grabbed like this,
# using connectome measure object
mean_correlation_matrix = connectome_measure.mean_

# grab center coordinates for probabilistic atlas
Example #19
class BaseDecomposition(BaseEstimator, CacheMixin, TransformerMixin):
    """Base class for matrix factorization based decomposition estimators.

    Handles mask logic, provides transform and inverse_transform methods

     .. versionadded:: 0.2

    Parameters
    ----------
    n_components : int, optional
        Number of components to extract, for each 4D-Niimage
        Default=20.

    random_state : int or RandomState, optional
        Pseudo number generator state used for random sampling.

    mask : Niimg-like object or MultiNiftiMasker instance, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given, it will be computed
        automatically by a MultiNiftiMasker with default parameters.
    %(smoothing_fwhm)s
    standardize : boolean, optional
        If standardize is True, the time-series are centered and normed:
        their mean is put to 0 and their variance to 1 in the time dimension.
        Default=True.

    standardize_confounds : boolean, optional
        If standardize_confounds is True, the confounds are z-scored:
        their mean is put to 0 and their variance to 1 in the time dimension.
        Default=True.

    detrend : boolean, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details. Default=True.

    low_pass : None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details

    high_pass : None or float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details

    t_r : float, optional
        This parameter is passed to signal.clean. Please see the related
        documentation for details

    target_affine : 3x3 or 4x4 matrix, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    target_shape : 3-tuple of integers, optional
        This parameter is passed to image.resample_img. Please see the
        related documentation for details.

    %(mask_strategy)s

        .. note::
             Depending on this value, the mask will be computed from
             :func:`nilearn.masking.compute_background_mask`,
             :func:`nilearn.masking.compute_epi_mask`, or
             :func:`nilearn.masking.compute_brain_mask`.

        Default='epi'.

    mask_args : dict, optional
        If mask is None, these are additional parameters passed to
        masking.compute_background_mask or masking.compute_epi_mask
        to fine-tune mask computation. Please see the related documentation
        for details.

    memory : instance of joblib.Memory or str, optional
        Used to cache the masking process.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    memory_level : integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching. Default=0.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on. Default=1.

    verbose : integer, optional
        Indicate the level of verbosity. By default, nothing is printed.
        Default=0.

    Attributes
    ----------
    `mask_img_` : Niimg-like object
        See http://nilearn.github.io/manipulating_images/input_output.html
        The mask of the data. If no mask was given at masker creation, contains
        the automatically computed mask.

    """
    def __init__(self,
                 n_components=20,
                 random_state=None,
                 mask=None,
                 smoothing_fwhm=None,
                 standardize=True,
                 standardize_confounds=True,
                 detrend=True,
                 low_pass=None,
                 high_pass=None,
                 t_r=None,
                 target_affine=None,
                 target_shape=None,
                 mask_strategy='epi',
                 mask_args=None,
                 memory=Memory(location=None),
                 memory_level=0,
                 n_jobs=1,
                 verbose=0):
        self.n_components = n_components
        self.random_state = random_state
        self.mask = mask

        self.smoothing_fwhm = smoothing_fwhm
        self.standardize = standardize
        self.standardize_confounds = standardize_confounds
        self.detrend = detrend
        self.low_pass = low_pass
        self.high_pass = high_pass
        self.t_r = t_r
        self.target_affine = target_affine
        self.target_shape = target_shape
        self.mask_strategy = mask_strategy
        self.mask_args = mask_args
        self.memory = memory
        self.memory_level = memory_level
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, imgs, y=None, confounds=None):
        """Compute the mask and the components across subjects

        Parameters
        ----------
        imgs : list of Niimg-like objects
            See http://nilearn.github.io/manipulating_images/input_output.html
            Data on which the mask is calculated. If this is a list,
            the affine is considered the same for all.

        confounds : list of CSV file paths or numpy.ndarrays or pandas DataFrames, optional
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details. Should match the list of imgs
            given.

        Returns
        -------
        self : object
            Returns the instance itself. Contains attributes listed
            at the object level.

        """
        # Base fit for decomposition estimators : compute the embedded masker

        if isinstance(imgs, str):
            if nilearn.EXPAND_PATH_WILDCARDS and glob.has_magic(imgs):
                imgs = _resolve_globbing(imgs)

        if isinstance(imgs, str) or not hasattr(imgs, '__iter__'):
            # these classes are meant for list of 4D images
            # (multi-subject), we want it to work also on a single
            # subject, so we hack it.
            imgs = [
                imgs,
            ]

        if len(imgs) == 0:
            # Common error that arises from a null glob. Capture
            # it early and raise a helpful message
            raise ValueError('Need one or more Niimg-like objects as input, '
                             'an empty list was given.')
        self.masker_ = _check_embedded_nifti_masker(self)

        # Avoid warning with imgs != None
        # if masker_ has been provided a mask_img
        if self.masker_.mask_img is None:
            self.masker_.fit(imgs)
        else:
            self.masker_.fit()
        self.mask_img_ = self.masker_.mask_img_

        # mask_and_reduce step for decomposition estimators i.e.
        # MultiPCA, CanICA and Dictionary Learning
        if self.verbose:
            print("[{0}] Loading data".format(self.__class__.__name__))
        data = mask_and_reduce(self.masker_,
                               imgs,
                               confounds=confounds,
                               n_components=self.n_components,
                               random_state=self.random_state,
                               memory=self.memory,
                               memory_level=max(0, self.memory_level + 1),
                               n_jobs=self.n_jobs)
        self._raw_fit(data)

        # Create and fit NiftiMapsMasker for transform
        # and inverse_transform
        self.nifti_maps_masker_ = NiftiMapsMasker(self.components_img_,
                                                  self.masker_.mask_img_,
                                                  resampling_target='maps')

        self.nifti_maps_masker_.fit()

        return self

    def _check_components_(self):
        if not hasattr(self, 'components_'):
            raise ValueError("Object has no components_ attribute. "
                             "This is probably because fit has not "
                             "been called.")

    def transform(self, imgs, confounds=None):
        """Project the data into a reduced representation

        Parameters
        ----------
        imgs : iterable of Niimg-like objects
            See http://nilearn.github.io/manipulating_images/input_output.html
            Data to be projected

        confounds : CSV file path or numpy.ndarray or pandas DataFrame, optional
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details

        Returns
        -------
        loadings : list of 2D ndarray,
            For each subject, each sample, loadings for each decomposition
            components
            shape: number of subjects * (number of scans, number of regions)

        """

        self._check_components_()
        # XXX: dealing properly with 4D/ list of 4D data?
        if confounds is None:
            confounds = [None] * len(imgs)
        return [
            self.nifti_maps_masker_.transform(img, confounds=confound)
            for img, confound in zip(imgs, confounds)
        ]

    def inverse_transform(self, loadings):
        """Use provided loadings to compute corresponding linear component
        combination in whole-brain voxel space

        Parameters
        ----------
        loadings : list of numpy array (n_samples x n_components)
            Component signals to transform back into voxel signals

        Returns
        -------
        reconstructed_imgs : list of nibabel.Nifti1Image
            For each loading, reconstructed Nifti1Image

        """
        if not hasattr(self, 'components_'):
            raise ValueError('Object has no components_ attribute. This is '
                             'either because fit has not been called '
                             'or because _DecompositionEstimator has '
                             'directly been used')
        self._check_components_()
        # XXX: dealing properly with 2D/ list of 2D data?
        return [
            self.nifti_maps_masker_.inverse_transform(loading)
            for loading in loadings
        ]

    def _sort_by_score(self, data):
        """Sort components on the explained variance over data of estimator
        components_"""
        components_score = self._raw_score(data, per_component=True)
        order = np.argsort(components_score)[::-1]
        self.components_ = self.components_[order]

    def _raw_score(self, data, per_component=True):
        """Return explained variance over data of estimator components_"""
        return self._cache(explained_variance)(data,
                                               self.components_,
                                               per_component=per_component)

    def score(self, imgs, confounds=None, per_component=False):
        """Score function based on explained variance on imgs.

        Should only be used by DecompositionEstimator derived classes

        Parameters
        ----------
        imgs : iterable of Niimg-like objects
            See http://nilearn.github.io/manipulating_images/input_output.html
            Data to be scored

        confounds : CSV file path or numpy.ndarray or pandas DataFrame, optional
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details

        per_component : bool, optional
            Specify whether the explained variance ratio is desired for each
            map or for the global set of components. Default=False.

        Returns
        -------
        score : float
            Holds the score for each subject. Score is two-dimensional
            if per_component is True. The first dimension
            is squeezed if the number of subjects is one.

        """
        self._check_components_()
        data = mask_and_reduce(self.masker_,
                               imgs,
                               confounds,
                               reduction_ratio=1.,
                               random_state=self.random_state)
        return self._raw_score(data, per_component=per_component)
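
BaseDecomposition is not meant to be used directly. A minimal usage sketch with one of its concrete subclasses (CanICA from nilearn.decomposition), assuming ``func_filenames`` is a hypothetical list of 4D functional images, looks like this:

from nilearn.decomposition import CanICA

canica = CanICA(n_components=20, mask_strategy='epi', random_state=0,
                memory='nilearn_cache', memory_level=1)
canica.fit(func_filenames)                    # fits masker_ and components_
loadings = canica.transform(func_filenames)   # list of (n_scans, n_components)
components_img = canica.components_img_       # 4D image of the components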