Code Example #1
File: test_canica.py  Project: ccvanschie/nilearn
def test_component_sign():
    # We should have a heuristic that flips the sign of components in
    # CanICA to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    # make data (SVD)
    rng = np.random.RandomState(0)
    shape = (20, 10, 1)
    affine = np.eye(4)
    components = _make_canica_components(shape)

    # flip some voxels so that negative values dominate each component
    for mp in components:
        mp[rng.randn(*mp.shape) > .8] *= -5.
        assert_less_equal(mp.max(), -mp.min())  # negative dominance reached?

    # synthesize data with given components
    data = _make_data_from_components(components, affine, shape, rng=rng,
                                      n_subjects=2)
    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # run CanICA many times (this is known to produce different results)
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img)
    for _ in range(3):
        canica.fit(data)
        for mp in iter_img(canica.masker_.inverse_transform(
                canica.components_)):
            mp = mp.get_data()
            assert_less_equal(-mp.min(), mp.max())
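The sign-flipping heuristic described at the top of this test can be sketched in a few lines. This is an illustrative version only, not nilearn's actual implementation: it flips any map whose largest negative value exceeds its largest positive value.

import numpy as np

def flip_negative_components(components):
    # Illustrative sketch: make the largest-magnitude value of each map positive.
    flipped = []
    for comp in components:
        comp = np.asarray(comp)
        if -comp.min() > comp.max():
            comp = -comp
        flipped.append(comp)
    return flipped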
Code Example #2
File: test_canica.py  Project: fabianp/nilearn
def test_canica_square_img():
    shape = (20, 20, 1)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Create four component maps with "activated regions"
    component1 = np.zeros(shape)
    component1[:5, :10] = 1
    component1[5:10, :10] = -1

    component2 = np.zeros(shape)
    component2[:5, -10:] = 1
    component2[5:10, -10:] = -1

    component3 = np.zeros(shape)
    component3[-5:, -10:] = 1
    component3[-10:-5, -10:] = -1

    component4 = np.zeros(shape)
    component4[-5:, :10] = 1
    component4[-10:-5, :10] = -1

    components = np.vstack((component1.ravel(), component2.ravel(),
                            component3.ravel(), component4.ravel()))

    # Create a "multi-subject" dataset
    data = []
    for i in range(8):
        this_data = np.dot(rng.normal(size=(40, 4)), components)
        this_data += .01 * rng.normal(size=this_data.shape)
        # Get back into 3D for CanICA
        this_data = np.reshape(this_data, (40, ) + shape)
        this_data = np.rollaxis(this_data, 0, 4)
        data.append(nibabel.Nifti1Image(this_data, affine))

    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4,
                    random_state=rng,
                    mask=mask_img,
                    smoothing_fwhm=0.,
                    n_init=50)
    canica.fit(data)
    maps = canica.masker_.inverse_transform(canica.components_).get_data()
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    # compute the cross-correlation matrix between components
    K = np.corrcoef(components, maps.reshape(4, 400))[4:, :4]
    # K should be a permutation matrix, hence its coefficients
    # should all be close to 0, 1, or -1
    K_abs = np.abs(K)
    assert_true(np.sum(K_abs > .9) == 4)
    K_abs[K_abs > .9] -= 1
    assert_array_almost_equal(K_abs, 0, 1)

    # Smoke test to make sure an error is raised when no data is passed.
    assert_raises(TypeError, canica.fit)
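The FIXME above alludes to the Hungarian algorithm. As an illustrative sketch (not part of the nilearn test suite), the component matching could be done with scipy.optimize.linear_sum_assignment on the absolute cross-correlation matrix:

import numpy as np
from scipy.optimize import linear_sum_assignment

def match_components(reference, estimated):
    # Both inputs are (n_components, n_voxels) arrays. Pair each estimated
    # map with the reference map it correlates with most strongly in
    # absolute value, using the Hungarian algorithm.
    n = reference.shape[0]
    corr = np.corrcoef(reference, estimated)[n:, :n]
    est_idx, ref_idx = linear_sum_assignment(-np.abs(corr))
    return ref_idx, corr[est_idx, ref_idx]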
Code Example #3
File: test_canica.py  Project: bthirion/nilearn
def test_canica_square_img():
    data, mask_img, components, rng = _make_canica_test_data()

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img,
                    smoothing_fwhm=0., n_init=50)
    canica.fit(data)
    maps = canica.components_img_.get_data()
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    # compute the cross-correlation matrix between components
    mask = mask_img.get_data() != 0
    K = np.corrcoef(components[:, mask.ravel()],
                    maps[:, mask])[4:, :4]
    # K should be a permutation matrix, hence its coefficients
    # should all be close to 0, 1, or -1
    K_abs = np.abs(K)
    assert_true(np.sum(K_abs > .9) == 4)
    K_abs[K_abs > .9] -= 1
    assert_array_almost_equal(K_abs, 0, 1)

    # Smoke test to make sure an error is raised when no data is passed.
    assert_raises(TypeError, canica.fit)
Code Example #4
def test_canica_square_img():
    shape = (20, 20, 1)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Create four component maps with "activated regions"
    component1 = np.zeros(shape)
    component1[:5, :10] = 1
    component1[5:10, :10] = -1

    component2 = np.zeros(shape)
    component2[:5, -10:] = 1
    component2[5:10, -10:] = -1

    component3 = np.zeros(shape)
    component3[-5:, -10:] = 1
    component3[-10:-5, -10:] = -1

    component4 = np.zeros(shape)
    component4[-5:, :10] = 1
    component4[-10:-5, :10] = -1

    components = np.vstack((component1.ravel(), component2.ravel(),
                            component3.ravel(), component4.ravel()))

    # Create a "multi-subject" dataset
    data = []
    for i in range(8):
        this_data = np.dot(rng.normal(size=(40, 4)), components)
        this_data += .01 * rng.normal(size=this_data.shape)
        # Get back into 3D for CanICA
        this_data = np.reshape(this_data, (40, ) + shape)
        this_data = np.rollaxis(this_data, 0, 4)
        data.append(nibabel.Nifti1Image(this_data, affine))

    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4,
                    random_state=rng,
                    mask=mask_img,
                    smoothing_fwhm=0.,
                    n_init=50)
    canica.fit(data)
    maps = canica.masker_.inverse_transform(canica.components_).get_data()
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    indices = list(range(4))

    for i in range(4):
        this_map = np.abs(maps[i]) > np.abs(maps[i]).max() * 0.95
        for j in indices:
            ref_map = components[j].ravel() != 0
            if np.all(this_map.ravel() == ref_map):
                indices.remove(j)
                break
        else:
            assert False, "Non matching component"
Code Example #5
File: test_canica.py  Project: saby9996/nilearn
def test_canica_square_img():
    data, mask_img, components, rng = _make_canica_test_data()

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4,
                    random_state=rng,
                    mask=mask_img,
                    smoothing_fwhm=0.,
                    n_init=50)
    canica.fit(data)
    maps = get_data(canica.components_img_)
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    # compute the cross-correlation matrix between components
    mask = get_data(mask_img) != 0
    K = np.corrcoef(components[:, mask.ravel()], maps[:, mask])[4:, :4]
    # K should be a permutation matrix, hence its coefficients
    # should all be close to 0, 1, or -1
    K_abs = np.abs(K)
    assert_true(np.sum(K_abs > .9) == 4)
    K_abs[K_abs > .9] -= 1
    assert_array_almost_equal(K_abs, 0, 1)

    # Smoke test to make sure an error is raised when no data is passed.
    assert_raises(TypeError, canica.fit)
Code Example #6
File: test_canica.py  Project: bthirion/nilearn
def test_masker_attributes_with_fit():
    # Test attributes of the base decomposition class through the CanICA subclass
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)
    # Passing mask_img
    canica = CanICA(n_components=3, mask=mask_img, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == mask_img)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)
    # Passing masker
    masker = MultiNiftiMasker(mask_img=mask_img)
    canica = CanICA(n_components=3, mask=masker, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)
    canica = CanICA(mask=mask_img, n_components=3)
    assert_raises_regex(ValueError,
                        "Object has no components_ attribute. "
                        "This is probably because fit has not been called",
                        canica.transform, data)
    # Test that an error is raised when an empty list is provided.
    assert_raises_regex(ValueError,
                        'Need one or more Niimg-like objects as input, '
                        'an empty list was given.',
                        canica.fit, [])
    # Test passing masker arguments to estimator
    canica = CanICA(n_components=3,
                    target_affine=np.eye(4),
                    target_shape=(6, 8, 10),
                    mask_strategy='background')
    canica.fit(data)
Code Example #7
File: test_canica.py  Project: VincentFrouin/nilearn
def test_canica_square_img():
    shape = (20, 20, 1)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Create four component maps with "activated regions"
    component1 = np.zeros(shape)
    component1[:5, :10] = 1
    component1[5:10, :10] = -1

    component2 = np.zeros(shape)
    component2[:5, -10:] = 1
    component2[5:10, -10:] = -1

    component3 = np.zeros(shape)
    component3[-5:, -10:] = 1
    component3[-10:-5, -10:] = -1

    component4 = np.zeros(shape)
    component4[-5:, :10] = 1
    component4[-10:-5, :10] = -1

    components = np.vstack((component1.ravel(), component2.ravel(),
                            component3.ravel(), component4.ravel()))

    # Create a "multi-subject" dataset
    data = []
    for i in range(8):
        this_data = np.dot(rng.normal(size=(40, 4)), components)
        this_data += .01 * rng.normal(size=this_data.shape)
        # Get back into 3D for CanICA
        this_data = np.reshape(this_data, (40,) + shape)
        this_data = np.rollaxis(this_data, 0, 4)
        data.append(nibabel.Nifti1Image(this_data, affine))

    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img,
                    smoothing_fwhm=0., n_init=50)
    canica.fit(data)
    maps = canica.masker_.inverse_transform(canica.components_).get_data()
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    indices = list(range(4))

    for i in range(4):
        this_map = np.abs(maps[i]) > np.abs(maps[i]).max() * 0.95
        for j in indices:
            ref_map = components[j].ravel() != 0
            if np.all(this_map.ravel() == ref_map):
                indices.remove(j)
                break
        else:
            assert False, "Non matching component"
Code Example #8
File: test_canica.py  Project: bthirion/nilearn
def test_components_img():
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
    n_components = 3
    canica = CanICA(n_components=n_components, mask=mask_img)
    canica.fit(data)
    components_img = canica.components_img_
    assert_true(isinstance(components_img, nibabel.Nifti1Image))
    check_shape = data[0].shape[:3] + (n_components,)
    assert_true(components_img.shape == check_shape)
Code Example #9
File: test_canica.py  Project: saby9996/nilearn
def test_components_img():
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
    n_components = 3
    canica = CanICA(n_components=n_components, mask=mask_img)
    canica.fit(data)
    components_img = canica.components_img_
    assert_true(isinstance(components_img, nibabel.Nifti1Image))
    check_shape = data[0].shape[:3] + (n_components, )
    assert_true(components_img.shape == check_shape)
Code Example #10
File: test_canica.py  Project: agramfort/nilearn
def test_canica_square_img():
    shape = (20, 20, 1)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Create four component maps with "activated regions"
    component1 = np.zeros(shape)
    component1[:5, :10] = 1
    component1[5:10, :10] = -1

    component2 = np.zeros(shape)
    component2[:5, -10:] = 1
    component2[5:10, -10:] = -1

    component3 = np.zeros(shape)
    component3[-5:, -10:] = 1
    component3[-10:-5, -10:] = -1

    component4 = np.zeros(shape)
    component4[-5:, :10] = 1
    component4[-10:-5, :10] = -1

    components = np.vstack((component1.ravel(), component2.ravel(),
                            component3.ravel(), component4.ravel()))

    # Create a "multi-subject" dataset
    data = []
    for i in range(8):
        this_data = np.dot(rng.normal(size=(40, 4)), components)
        this_data += .01 * rng.normal(size=this_data.shape)
        # Get back into 3D for CanICA
        this_data = np.reshape(this_data, (40,) + shape)
        this_data = np.rollaxis(this_data, 0, 4)
        data.append(nibabel.Nifti1Image(this_data, affine))

    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # We do a large number of inits to be sure to find a good match
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img,
                    smoothing_fwhm=0., n_init=50)
    canica.fit(data)
    maps = canica.masker_.inverse_transform(canica.components_).get_data()
    maps = np.rollaxis(maps, 3, 0)

    # FIXME: This could be done more efficiently, e.g. thanks to hungarian
    # Find pairs of matching components
    # compute the cross-correlation matrix between components
    K = np.corrcoef(components, maps.reshape(4, 400))[4:, :4]
    # K should be a permutation matrix, hence its coefficients
    # should all be close to 0, 1, or -1
    K_abs = np.abs(K)
    assert_true(np.sum(K_abs > .9) == 4)
    K_abs[K_abs > .9] -= 1
    assert_array_almost_equal(K_abs, 0, 1)

    # Smoke test to make sure an error is raised when no data is passed.
    assert_raises(TypeError, canica.fit)
Code Example #11
File: test_canica.py  Project: bthirion/nilearn
def test_canica_single_subject():
    # Check that canica runs on a single-subject dataset
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=1)

    # A single init is enough here; this is only a smoke test
    canica = CanICA(n_components=4, random_state=rng,
                    smoothing_fwhm=0., n_init=1)
    # This is a smoke test: we just check that things run
    canica.fit(data[0])
Code Example #12
File: test_canica.py  Project: saby9996/nilearn
def test_canica_single_subject():
    # Check that canica runs on a single-subject dataset
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=1)

    # A single init is enough here; this is only a smoke test
    canica = CanICA(n_components=4,
                    random_state=rng,
                    smoothing_fwhm=0.,
                    n_init=1)
    # This is a smoke test: we just check that things run
    canica.fit(data[0])
Code Example #13
File: test_canica.py  Project: zhiye9/nilearn
def test_percentile_range():
    # Smoke test for the warning raised when a threshold is ignored
    rng = np.random.RandomState(0)
    edge_case = rng.randint(low=1, high=10)
    data, *_ = _make_canica_test_data()

    # stress thresholding via an edge case
    canica = CanICA(n_components=edge_case, threshold=float(edge_case))
    with warnings.catch_warnings(record=True) as warning:
        canica.fit(data)
        assert len(warning) == 1  # ensure single warning
        assert "critical threshold" in str(warning[-1].message)
Code Example #14
File: test_canica.py  Project: saby9996/nilearn
def test_with_globbing_patterns_with_single_subject():
    # single subject
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=1)
    n_components = 3
    canica = CanICA(n_components=n_components, mask=mask_img)
    with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img:
        input_image = _tmp_dir() + img
        canica.fit(input_image)
        components_img = canica.components_img_
        assert_true(isinstance(components_img, nibabel.Nifti1Image))
        # n_components = 3
        check_shape = data[0].shape[:3] + (3, )
        assert_true(components_img.shape == check_shape)
Code Example #15
File: test_canica.py  Project: bthirion/nilearn
def test_with_globbing_patterns_with_single_subject():
    # single subject
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=1)
    n_components = 3
    canica = CanICA(n_components=n_components, mask=mask_img)
    with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img:
        input_image = _tmp_dir() + img
        canica.fit(input_image)
        components_img = canica.components_img_
        assert_true(isinstance(components_img, nibabel.Nifti1Image))
        # n_components = 3
        check_shape = data[0].shape[:3] + (3,)
        assert_true(components_img.shape == check_shape)
Code Example #16
File: run_canica.py  Project: ajrichardson/nilearn_ui
def get_fitted_canica(func_files, **params):
    input_folder = params.pop('input_folder')
    func_files = sorted(os.path.join(root, filename)
                        for root, dirs, filenames in os.walk(input_folder)
                        for filename in filenames
                        if filename.endswith('.nii.gz'))

    canica = CanICA(memory='nilearn_cache', memory_level=5, random_state=0,
                    n_jobs=-1, **params)

    if not func_files:
        raise ValueError('Could not find any files in the input folder')
    canica.fit(func_files)
    return canica
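Note that the helper rebuilds func_files by walking input_folder, so the first argument is effectively ignored. A hypothetical call might look like the following; the paths and CanICA parameters here are assumptions for illustration only.

canica = get_fitted_canica(None, input_folder='/data/preprocessed',
                           n_components=20, smoothing_fwhm=6.)
canica.masker_.inverse_transform(canica.components_).to_filename(
    'canica_components.nii.gz')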
Code Example #17
File: test_canica.py  Project: bthirion/nilearn
def test_component_sign():
    # We should have a heuristic that flips the sign of components in
    # CanICA to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    data, mask_img, components, rng = _make_canica_test_data(n_subjects=2,
                                                             noisy=True)

    # run CanICA many times (this is known to produce different results)
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img)
    for _ in range(3):
        canica.fit(data)
        for mp in iter_img(canica.components_img_):
            mp = mp.get_data()
            assert_less_equal(-mp.min(), mp.max())
Code Example #18
File: test_canica.py  Project: saby9996/nilearn
def test_component_sign():
    # We should have a heuristic that flips the sign of components in
    # CanICA to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    data, mask_img, components, rng = _make_canica_test_data(n_subjects=2,
                                                             noisy=True)

    # run CanICA many times (this is known to produce different results)
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img)
    for _ in range(3):
        canica.fit(data)
        for mp in iter_img(canica.components_img_):
            mp = get_data(mp)
            assert_less_equal(-mp.min(), mp.max())
Code Example #19
File: test_canica.py  Project: yogeshmj/nilearn
def test_percentile_range():
    # Smoke test for the warning raised when a threshold is ignored
    rng = np.random.RandomState(0)
    edge_case = rng.randint(low=1, high=10)
    data, *_ = _make_canica_test_data()

    # stress thresholding via an edge case
    canica = CanICA(n_components=edge_case, threshold=float(edge_case))
    with warnings.catch_warnings(record=True) as warning:
        canica.fit(data)
        # Filter out deprecation warnings
        not_deprecation_warning = [
            not issubclass(w.category, DeprecationWarning) for w in warning
        ]
        assert sum(not_deprecation_warning) == 1  # ensure single warning
        idx_critical_warning = not_deprecation_warning.index(True)
        assert "critical threshold" in str(
            warning[idx_critical_warning].message)
Code Example #20
def run_CanICA_NILEAN(output_dir, working_dir, population, n_components=20):

    # create the group ICA output directory
    mkdir_path(os.path.join(working_dir, 'CANICA_GROUP_ICA'))
    canica_dir = os.path.join(working_dir, 'CANICA_GROUP_ICA')

    # grab subjects
    preprocessed_all = []
    for subject in population:
        preprocessed_subject = os.path.join(output_dir, subject, 'xxxx.nii.gz')
        preprocessed_all.append(preprocessed_subject)

    canica = CanICA(n_components=n_components,
                    smoothing_fwhm=0.,
                    memory='nilearn_cache',
                    memory_level=5,
                    threshold=3.,
                    verbose=10,
                    random_state=10)
    canica.fit(preprocessed_all)

    # save data
    components_img = canica.masker_.inverse_transform(canica.components_)
    components_img.to_filename(os.path.join(canica_dir, 'canica_IC.nii.gz'))
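A hypothetical call to the helper above; the directory layout and subject IDs are assumptions for illustration, and the 'xxxx.nii.gz' file name must match the preprocessed output used inside the function.

run_CanICA_NILEAN(output_dir='/data/derivatives',
                  working_dir='/data/work',
                  population=['subject_01', 'subject_02', 'subject_03'],
                  n_components=20)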
Code Example #21
def run_CanICA_NILEAN(output_dir, working_dir, population, n_components=20):

    # create the group ICA output directory
    mkdir_path(os.path.join(working_dir, 'CANICA_GROUP_ICA'))
    canica_dir = os.path.join(working_dir, 'CANICA_GROUP_ICA')

    # grab subjects
    preprocessed_all = []
    for subject in population:
        preprocessed_subject = os.path.join(output_dir, subject, 'xxxx.nii.gz')
        preprocessed_all.append(preprocessed_subject)

    canica = CanICA(n_components=n_components,
                    smoothing_fwhm=0.,
                    memory='nilearn_cache',
                    memory_level=5,
                    threshold=3.,
                    verbose=10,
                    random_state=10)
    canica.fit(preprocessed_all)

    # save data
    components_img = canica.masker_.inverse_transform(canica.components_)
    components_img.to_filename(os.path.join(canica_dir, 'canica_IC.nii.gz'))
Code Example #22
File: test_canica.py  Project: yogeshmj/nilearn
def test_canica_score():
    # Multi subjects
    imgs, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
    n_components = 10
    canica = CanICA(n_components=10, mask=mask_img, random_state=0)
    canica.fit(imgs)

    # One score for all components
    scores = canica.score(imgs, per_component=False)
    assert scores <= 1
    assert 0 <= scores

    # Per component score
    scores = canica.score(imgs, per_component=True)
    assert scores.shape == (n_components, )
    assert np.all(scores <= 1)
    assert np.all(0 <= scores)
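As a hedged follow-up (not part of the test), the per-component scores can be used to rank components by how much variance of the input images they explain:

import numpy as np

# 'canica' and 'imgs' are the fitted estimator and images from the test above.
scores = canica.score(imgs, per_component=True)
ranking = np.argsort(scores)[::-1]  # best-explaining components first
print("components ordered by score:", ranking)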
Code Example #23
File: ICA_HCP.py  Project: jishanling/broca
# create artificial mask:
mask = np.ones(29696)
mask = np.expand_dims(mask, axis=1)
mask = np.expand_dims(mask, axis=1)
img = nib.Nifti1Image(mask, np.eye(4))
img.to_filename(
    '/scr/murg2/MachineLearning/partialcorr/ICA/ICA_HCP/mask.nii.gz')

# run ICA on group level:
n_components = 20
n_jobs = 20  # number of CPUs used
canica = CanICA(
    mask='/scr/murg2/MachineLearning/partialcorr/ICA/ICA_HCP/mask.nii.gz',
    n_components=n_components,
    smoothing_fwhm=0.,
    threshold=None,
    verbose=10,
    random_state=0,
    n_jobs=n_jobs)

canica.fit(filenames)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)

A = np.zeros((32492, n_components))
A[cort, :] = components_img.get_data().squeeze()

#np.save('/scr/murg2/MachineLearning/partialcorr/ICA/ICA_HCP/ica_HCP101_output_%s.npy' % str(n_components), A)
savemat(
    '/scr/murg2/MachineLearning/partialcorr/ICA/ICA_HCP/ica_HCP101_output_%s.mat'
Code Example #24
# MultiNiftiMasker, rather than the NiftiMasker
# We specify the target_affine to downsample to 3mm isotropic
# resolution

target_affine = np.diag((3, 3, 3))
epi_img = nibabel.load(func_files[0])
mean_epi = epi_img.get_data().mean(axis=-1)
mean_epi_img = nibabel.Nifti1Image(mean_epi, epi_img.get_affine())
mean_epi = resample_img(mean_epi_img, target_affine=target_affine).get_data()

### Apply CanICA ##############################################################
from nilearn.decomposition.canica import CanICA

n_components = 20
canica = CanICA(n_components=n_components,
                smoothing_fwhm=6., target_affine=target_affine,
                memory="nilearn_cache", memory_level=5,
                threshold=3., verbose=10)
canica.fit(func_files)

components = canica.masker_.inverse_transform(canica.components_).get_data()

### Visualize the results #####################################################
# Show some interesting components
import pylab as pl
from scipy import ndimage

# Using a masked array is important to have transparency in the figures
components = np.ma.masked_equal(components, 0, copy=False)

for i in range(n_components):
    pl.figure()
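An assumed, self-contained sketch of the plotting loop (not the original script's code): each component's middle axial slice is drawn over the mean EPI with pylab, relying on the masked array above for transparency.

# Assumed illustration; 'components', 'mean_epi' and 'pl' come from the code above.
z = components.shape[2] // 2              # middle axial slice
for i in range(n_components):
    pl.figure()
    pl.imshow(mean_epi[..., z].T, cmap=pl.cm.gray, interpolation='nearest')
    pl.imshow(components[..., z, i].T, cmap=pl.cm.hot, interpolation='nearest')
    pl.axis('off')
    pl.title('IC %d' % i)
pl.show()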
Code Example #25
File: test_canica.py  Project: saby9996/nilearn
def test_masker_attributes_with_fit():
    # Test attributes of the base decomposition class through the CanICA subclass
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)
    # Passing mask_img
    canica = CanICA(n_components=3, mask=mask_img, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == mask_img)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)
    # Passing masker
    masker = MultiNiftiMasker(mask_img=mask_img)
    canica = CanICA(n_components=3, mask=masker, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)
    canica = CanICA(mask=mask_img, n_components=3)
    assert_raises_regex(
        ValueError, "Object has no components_ attribute. "
        "This is probably because fit has not been called", canica.transform,
        data)
    # Test that an error is raised when an empty list is provided.
    assert_raises_regex(
        ValueError, 'Need one or more Niimg-like objects as input, '
        'an empty list was given.', canica.fit, [])
    # Test passing masker arguments to estimator
    canica = CanICA(n_components=3,
                    target_affine=np.eye(4),
                    target_shape=(6, 8, 10),
                    mask_strategy='background')
    canica.fit(data)
Code Example #26
        pet_files.append(pet_file[0])
        img = nib.load(pet_file[0])
        if idx == 0:
            data4d = np.expand_dims(img.get_data(), axis=3)
        else:
            data4d = np.concatenate((data4d,
                                     np.expand_dims(img.get_data(), axis=3)),
                                    axis=3)

pet_files = np.array(pet_files)
img4d = nib.Nifti1Image(data4d, img.get_affine())

groups = ['AD', 'LMCI', 'MCI', 'Normal']
    
n_components = 20
canica = CanICA(n_components=n_components,
                memory="nilearn_cache", memory_level=5,
                threshold='auto', verbose=10, random_state=0)
canica.fit(img4d)

print 'inverse'

components_img = canica.masker_.inverse_transform(canica.components_)
components_img.to_filename('figures/canica_'+str(n_components)+'.nii.gz')

for i in range(n_components):
    plot_stat_map(nib.Nifti1Image(components_img.get_data()[..., i],
                                      components_img.get_affine()),
                  display_mode="z", title="IC %d"%i, cut_coords=1,
                  colorbar=False)
Code Example #27
n_sample = 140
idx = np.random.randint(len(func_files), size=n_sample)
func_files_sample = np.array(func_files)[idx]

multi_masker = MultiNiftiMasker(mask_strategy='epi',
                                memory=CACHE_DIR,
                                n_jobs=1,
                                memory_level=2)
multi_masker.fit(func_files_sample)
plot_img(multi_masker.mask_img_)

n_components = 40
canica = CanICA(mask=multi_masker,
                n_components=n_components,
                smoothing_fwhm=6.,
                memory=CACHE_DIR,
                memory_level=5,
                threshold=3.,
                verbose=10,
                random_state=0)
canica.fit(func_files_sample)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename(
    os.path.join(CACHE_DIR, 'canica_resting_state_140.nii.gz'))

### Visualize the results #####################################################
# Show some interesting components
Code Example #28
# Clean signals
X_ = []
for x in X:
    X_.append(signal.clean(x, standardize=True, detrend=False))
X = X_

### CanICA ####################################################################

if not exists(join(path, 'canica.nii.gz')):
    try:
        from nilearn.decomposition.canica import CanICA
        t0 = time.time()
        canica = CanICA(n_components=n_components, mask=mask_img,
                        smoothing_fwhm=6.,
                        memory="nilearn_cache", memory_level=1,
                        threshold=None,
                        random_state=1, n_jobs=-1)
        canica.fit(dataset.func)
        print('Canica: %f' % (time.time() - t0))
        canica_components = masking.unmask(canica.components_, mask_img)
        nibabel.save(nibabel.Nifti1Image(canica_components,
            mask_img.get_affine()), join(path, 'canica.nii.gz'))
    except ImportError:
        import warnings
        warnings.warn('nilearn must be installed to run CanICA')


canica_dmn = nibabel.load(join(path, 'canica.nii.gz')).get_data()[..., 4]

Code Example #29
n_subjects = 40  # ?

### Load ADHD rest dataset ####################################################
from nilearn import datasets

adhd_dataset = datasets.fetch_adhd(n_subjects)
func_filenames = adhd_dataset.func  # list of 4D nifti files for each subject

### Apply CanICA ##############################################################
import nibabel
from nilearn.decomposition.canica import CanICA

canica = CanICA(n_components=n_components,
                smoothing_fwhm=6.,
                memory="nilearn_cache",
                memory_level=5,
                threshold=3.,
                verbose=10,
                random_state=0,
                max_iter=200)
print("Loading data...")
img_list = func_filenames  #[nibabel.load(fn) for fn in func_filenames]
print("Fitting data...")
canica.fit(img_list)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename('canica_resting_state.nii.gz')

### Visualize the results #####################################################
Code Example #30
"""

### Load ADHD rest dataset ####################################################
from nilearn import datasets

dataset = datasets.fetch_adhd()
func_files = dataset.func  # The list of 4D nifti files for each subject

### Apply CanICA ##############################################################
from nilearn.decomposition.canica import CanICA

n_components = 20
canica = CanICA(n_components=n_components,
                smoothing_fwhm=6.,
                memory="nilearn_cache",
                memory_level=5,
                threshold=3.,
                verbose=10,
                random_state=0)
canica.fit(func_files)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename('canica_resting_state.nii.gz')

### Visualize the results #####################################################
# Show some interesting components
import nibabel
import matplotlib.pyplot as plt
Code Example #31
    idx = np.linspace(0, n_timesteps, n_timecourses + 1, dtype=int)
    print "%d time slices, %d seconds per time slice" % (n_timecourses,
                                                         idx[1] - idx[0])
    subj_img_list = [
        index_img(subj_img, range(idx[ii], idx[ii + 1]))
        for ii in range(n_timecourses)
    ]

    all_images += subj_img_list

### Apply CanICA across subjects ##############################################################
print("Fitting data over all subjects (%d images)..." % len(all_images))
canica = CanICA(n_components=n_components,
                smoothing_fwhm=6.,
                memory="nilearn_cache",
                memory_level=5,
                max_iter=max_iter,
                threshold=3.,
                verbose=10,
                random_state=0)
canica.fit(all_images)
components_img = canica.masker_.inverse_transform(canica.components_)
nibabel.save(components_img, 'components_img_cuts.nii')

### Visualize the results #####################################################
# Show some interesting components
import matplotlib.pyplot as plt
from nilearn.plotting import plot_roi, plot_stat_map, plot_glass_brain, find_xyz_cut_coords

fh = plt.figure(facecolor='w', figsize=(18, 10))
nrows = int(np.floor(np.sqrt(0.75 * n_components)))  # 4:3 aspect
ncols = int(np.ceil(n_components / float(nrows)))
Code Example #32
Pre-prints for both papers are available on hal
(http://hal.archives-ouvertes.fr)
"""

### Load ADHD rest dataset ####################################################
from nilearn import datasets

adhd_dataset = datasets.fetch_adhd()
func_filenames = adhd_dataset.func  # list of 4D nifti files for each subject

### Apply CanICA ##############################################################
from nilearn.decomposition.canica import CanICA

n_components = 20
canica = CanICA(n_components=n_components, smoothing_fwhm=6.,
                memory="nilearn_cache", memory_level=5,
                threshold=3., verbose=10, random_state=0)
canica.fit(func_filenames)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename('canica_resting_state.nii.gz')

### Visualize the results #####################################################
# Show some interesting components
import nibabel
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
Code Example #33
# Clean signals
X_ = []
for x in X:
    X_.append(signal.clean(x, standardize=True, detrend=False))
X = X_

### CanICA ####################################################################

if not exists(join(path, 'canica.nii.gz')):
    try:
        from nilearn.decomposition.canica import CanICA
        t0 = time.time()
        canica = CanICA(n_components=n_components,
                        mask=mask_img,
                        smoothing_fwhm=6.,
                        memory="nilearn_cache",
                        memory_level=1,
                        threshold=None,
                        random_state=1,
                        n_jobs=-1)
        canica.fit(dataset.func)
        print('Canica: %f' % (time.time() - t0))
        canica_components = masking.unmask(canica.components_, mask_img)
        nibabel.save(
            nibabel.Nifti1Image(canica_components, mask_img.get_affine()),
            join(path, 'canica.nii.gz'))
    except ImportError:
        import warnings
        warnings.warn('nilearn must be installed to run CanICA')

canica_dmn = nibabel.load(join(path, 'canica.nii.gz')).get_data()[..., 4]
Code Example #34
    subj_img = nibabel.load(fn)
    n_timesteps = subj_img.shape[-1]
    n_timecourses = np.floor(n_timesteps / float(n_components)).astype(int)
    idx = np.linspace(0, n_timesteps, n_timecourses, dtype=int)
    print "%d time slices, %d seconds per time slice" % (n_timecourses,
                                                         idx[1] - idx[0])
    subj_img_list = [
        index_img(subj_img, range(idx[ii - 1], idx[ii]))
        for ii in range(1, n_timecourses)
    ]

    print("Fitting data for subject %d..." % si)
    subj_canica = CanICA(n_components=n_components,
                         smoothing_fwhm=6.,
                         memory="nilearn_cache",
                         memory_level=5,
                         max_iter=max_iter,
                         threshold=3.,
                         verbose=10,
                         random_state=0)
    subj_canica.fit(subj_img_list)

    print("Projecting subject data into computed components...")
    subj_loadings = subj_canica.transform(subj_img)
    subj_component_imgs = subj_canica.masker_.inverse_transform(
        subj_canica.components_)
    subj_loadings_imgs = []
    for time_series, component_volume in zip(subj_loadings.T,
                                             subj_component_imgs.get_data()):
        tiled_img_data = np.tile(component_volume[..., np.newaxis],
                                 len(time_series))
        loaded_time_series = tiled_img_data * time_series