def test_masking_first_level_model():
    """Passing a NiftiMasker as mask_img to FirstLevelModel must not
    break generate_report().
    """
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = \
            write_fake_fmri_data_and_design(shapes, rk)
        masker = NiftiMasker(mask_img=mask)
        masker.fit(fmri_data)
        model = FirstLevelModel(mask_img=masker).fit(
            fmri_data, design_matrices=design_matrices)
        contrast = np.eye(3)[1]
        report = model.generate_report(
            contrast, plot_type='glass', height_control=None,
            min_distance=15, alpha=0.001, threshold=2.78)
        report_iframe = report.get_iframe()
        # Reference the variable so flake8 does not flag it as unused (F841).
        report_iframe
        del mask, model, fmri_data, masker
def get_single_trial_volume(subject, session, mask=None, bids_folder='/data',
                            smoothed=False, pca_confounds=False):
    """Load single-trial stimulus estimates for one subject/session and
    return the masked voxel data as a DataFrame (rows = trials).
    """
    # Derivative directory name encodes the preprocessing variant.
    parts = ['glm_stim1']
    if smoothed:
        parts.append('smoothed')
    if pca_confounds:
        parts.append('pca_confounds')
    key = '.'.join(parts)

    fn = op.join(
        bids_folder, 'derivatives', key,
        f'sub-{subject}', f'ses-{session}', 'func',
        f'sub-{subject}_ses-{session}_task-task_space-T1w_desc-stims1_pe.nii.gz')
    im = image.load_img(fn)

    mask = get_volume_mask(subject, session, mask, bids_folder)
    # paradigm = get_task_behavior(subject, session, bids_folder)
    masker = NiftiMasker(mask_img=mask)
    return pd.DataFrame(masker.fit_transform(im))
def test_z_score_opposite_contrast():
    """Z-maps of a contrast and its negation must be exact opposites."""
    fmri, mask = generate_fake_fmri(shape=(50, 20, 50), length=96,
                                    rand_gen=np.random.RandomState(42))

    nifti_masker = NiftiMasker(mask_img=mask)
    data = nifti_masker.fit_transform(fmri)
    frametimes = np.linspace(0, (96 - 1) * 2, 96)

    for i in [0, 20]:
        # Use one voxel's time course as an extra regressor so the two
        # leading columns carry real signal.
        design_matrix = make_first_level_design_matrix(
            frametimes, hrf_model='spm',
            add_regs=np.array(data[:, i]).reshape(-1, 1))
        c1 = np.array([1] + [0] * (design_matrix.shape[1] - 1))
        c2 = np.array([0] + [1] + [0] * (design_matrix.shape[1] - 2))
        contrasts = {'seed1 - seed2': c1 - c2, 'seed2 - seed1': c2 - c1}

        fmri_glm = FirstLevelModel(t_r=2., noise_model='ar1',
                                   standardize=False, hrf_model='spm',
                                   drift_model='cosine')
        fmri_glm.fit(fmri, design_matrices=design_matrix)
        z_map_seed1_vs_seed2 = fmri_glm.compute_contrast(
            contrasts['seed1 - seed2'], output_type='z_score')
        z_map_seed2_vs_seed1 = fmri_glm.compute_contrast(
            contrasts['seed2 - seed1'], output_type='z_score')

        # BUG FIX: Nifti1Image.get_data() was deprecated and removed in
        # nibabel 5.0 -- use get_fdata() instead.
        assert_almost_equal(z_map_seed1_vs_seed2.get_fdata().min(),
                            -z_map_seed2_vs_seed1.get_fdata().max(),
                            decimal=10)
        assert_almost_equal(z_map_seed1_vs_seed2.get_fdata().max(),
                            -z_map_seed2_vs_seed1.get_fdata().min(),
                            decimal=10)
def test_decoder_split_cv():
    """Validation of the ``cv`` and ``groups`` arguments of Decoder/FREM."""
    X, y = make_classification(n_samples=200, n_features=125, scale=3.0,
                               n_informative=5, n_classes=4, random_state=42)
    X, mask = to_niimgs(X, [5, 5, 5])
    rand_local = np.random.RandomState(42)
    groups = rand_local.binomial(2, 0.3, size=len(y))

    # Invalid cv values must raise ValueError.
    for bad_cv in ['abc', LinearSVC()]:
        with pytest.raises(ValueError):
            Decoder(mask=NiftiMasker(), cv=bad_cv).fit(X, y)

    # Passing groups without a custom CV splitter triggers a warning.
    expected_warning = ('groups parameter is specified but '
                        'cv parameter is not set to custom CV splitter. '
                        'Using default object LeaveOneGroupOut().')
    with pytest.warns(UserWarning, match=expected_warning):
        Decoder(mask=NiftiMasker()).fit(X, y, groups=groups)

    # Warn when fewer than 50 features survive screening + clustering.
    with pytest.warns(UserWarning, match=".*screening_percentile parameters"):
        FREMClassifier(clustering_percentile=10, screening_percentile=10,
                       mask=NiftiMasker(), cv=1).fit(X, y)
def test_decoder_multiclass_classification():
    """Multiclass Decoder and FREMClassifier accuracy and API checks."""
    X, y = make_classification(n_samples=200, n_features=125, scale=3.0,
                               n_informative=5, n_classes=4, random_state=42)
    X, mask = to_niimgs(X, [5, 5, 5])

    # check classification with masker object
    model = Decoder(mask=NiftiMasker())
    model.fit(X, y)
    y_pred = model.predict(X)
    assert accuracy_score(y, y_pred) > 0.95

    # check classification with masker object and dummy classifier
    model = Decoder(estimator='dummy_classifier', mask=NiftiMasker(),
                    scoring="accuracy")
    model.fit(X, y)
    y_pred = model.predict(X)
    assert model.scoring == "accuracy"
    # 4-class classification: dummy only needs to beat a low bar
    assert accuracy_score(y, y_pred) > 0.2
    assert model.score(X, y) == accuracy_score(y, y_pred)

    # check different screening_percentile value
    for screening_percentile in [100, 20, None]:
        model = Decoder(mask=mask, screening_percentile=screening_percentile)
        model.fit(X, y)
        y_pred = model.predict(X)
        assert accuracy_score(y, y_pred) > 0.95

    # check FREM with clustering or not
    for clustering_percentile in [100, 99]:
        for estimator in ['svc_l2', 'svc_l1']:
            model = FREMClassifier(estimator=estimator, mask=mask,
                                   clustering_percentile=clustering_percentile,
                                   screening_percentile=90, cv=5)
            model.fit(X, y)
            y_pred = model.predict(X)
            assert model.scoring == "roc_auc"
            assert accuracy_score(y, y_pred) > 0.9

    # check cross-validation scheme and fit attribute with groups enabled
    rand_local = np.random.RandomState(42)
    for cv in [KFold(n_splits=5), LeaveOneGroupOut()]:
        model = Decoder(estimator='svc', mask=mask, standardize=True, cv=cv)
        if isinstance(cv, LeaveOneGroupOut):
            groups = rand_local.binomial(2, 0.3, size=len(y))
        else:
            groups = None
        model.fit(X, y, groups=groups)
        # BUG FIX: predict with the model fitted in THIS iteration.
        # Previously the assertion reused the stale ``y_pred`` left over
        # from the FREM loop above, so this check never tested the CV model.
        y_pred = model.predict(X)
        assert accuracy_score(y, y_pred) > 0.9
def test_detrend():
    """Check that detrending doesn't do something stupid with 3D images."""
    data = np.zeros((9, 9, 9))
    data[3:-3, 3:-3, 3:-3] = 10
    img = Nifti1Image(data, np.eye(4))

    # BUG FIX: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``int`` is the documented equivalent.
    mask = data.astype(int)
    mask_img = Nifti1Image(mask, np.eye(4))

    masker = NiftiMasker(mask_img=mask_img, detrend=True)
    # Smoke test the fit
    X = masker.fit_transform(img)
    assert np.any(X != 0)
def _regression(confounds, tmp_path):
    """Simple regression with NiftiMasker."""
    # Simulated image and the matching mask.
    img, mask_conf, _, _, _ = _simu_img(tmp_path, demean=False)
    confounds = _handle_non_steady(confounds)

    # Regress the confounds out while standardizing.
    masker = NiftiMasker(mask_img=mask_conf, standardize=True)
    cleaned = masker.fit_transform(img, confounds=confounds, sample_mask=None)

    # One cleaned time point per confound row.
    assert cleaned.shape[0] == confounds.shape[0]
def test_resample():
    """Check that target_affine triggers the right resampling."""
    data = np.zeros((9, 9, 9))
    data[3:-3, 3:-3, 3:-3] = 10
    img = Nifti1Image(data, np.eye(4))

    # BUG FIX: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``int`` is the documented equivalent.
    mask = data.astype(int)
    mask_img = Nifti1Image(mask, np.eye(4))

    masker = NiftiMasker(mask_img=mask_img, target_affine=2 * np.eye(3))
    # Smoke test the fit
    X = masker.fit_transform(img)
    assert np.any(X != 0)
def _confounds_regression(standardize_signal=True, standardize_confounds=True):
    """Return the summed absolute covariance between masked time series
    and the confounds, under the given standardization options.

    A value near zero means the confounds were fully regressed out.
    """
    img, mask, conf = _simu_img()
    # FIX: removed unused local ``rng = np.random.RandomState(42)`` -- it
    # was never referenced and suggested randomness this helper doesn't use.
    masker = NiftiMasker(standardize=standardize_signal,
                         standardize_confounds=standardize_confounds,
                         detrend=False,
                         mask_img=mask).fit()
    tseries = masker.transform(img, confounds=conf)
    if standardize_confounds:
        # Mirror the masker's internal demeaning of confounds so the
        # covariance comparison is apples-to-apples.
        conf = StandardScaler(with_std=False).fit_transform(conf)
    cov_mat = _cov_conf(tseries, conf)
    return np.sum(np.abs(cov_mat))
def _denoise(img, mask_img, confounds, sample_mask,
             standardize_signal=False, standardize_confounds=True,
             detrend=False):
    """Extract time series with and without confounds."""
    masker = NiftiMasker(mask_img=mask_img,
                         standardize=standardize_signal,
                         standardize_confounds=standardize_confounds,
                         detrend=detrend)
    # Same masker, two extractions: raw, then confound-regressed.
    raw = masker.fit_transform(img, sample_mask=sample_mask)
    clean = masker.fit_transform(img, confounds=confounds,
                                 sample_mask=sample_mask)
    return raw, clean
def _tseries_std(img, mask_img, confounds, sample_mask,
                 standardize_signal=False, standardize_confounds=True,
                 detrend=False):
    """Get the std of time series in a mask."""
    masker = NiftiMasker(mask_img=mask_img,
                         standardize=standardize_signal,
                         standardize_confounds=standardize_confounds,
                         detrend=detrend)
    series = masker.fit_transform(img, confounds=confounds,
                                  sample_mask=sample_mask)
    # Per-voxel standard deviation across time points.
    return series.std(axis=0)
def test_dict_learning():
    # DictLearning should recover the generating components, whether it is
    # seeded with them (dict_init) or initialised automatically.
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=8)
    masker = NiftiMasker(mask_img=mask_img).fit()
    mask = get_data(mask_img) != 0
    flat_mask = mask.ravel()
    # Seed the dictionary with the ground-truth components inside the mask.
    dict_init = masker.inverse_transform(components[:, flat_mask])
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init, mask=mask_img,
                                 smoothing_fwhm=0., alpha=1)
    dict_learning_auto_init = DictLearning(n_components=4, random_state=0,
                                           mask=mask_img, smoothing_fwhm=0.,
                                           n_epochs=10, alpha=1)
    maps = {}
    for estimator in [dict_learning, dict_learning_auto_init]:
        estimator.fit(data)
        maps[estimator] = get_data(estimator.components_img_)
        # Flatten the 4D component image to (n_components, n_voxels_in_mask).
        maps[estimator] = np.reshape(
            np.rollaxis(maps[estimator], 3, 0)[:, mask],
            (4, flat_mask.sum()))

    masked_components = components[:, flat_mask]
    for this_dict_learning in [dict_learning]:
        these_maps = maps[this_dict_learning]
        # L2-normalise both the true and the recovered maps before
        # comparing them (zero-norm rows left untouched).
        S = np.sqrt(np.sum(masked_components**2, axis=1))
        S[S == 0] = 1
        masked_components /= S[:, np.newaxis]
        S = np.sqrt(np.sum(these_maps**2, axis=1))
        S[S == 0] = 1
        these_maps /= S[:, np.newaxis]
        # Cosine similarity matrix; count components recovered with
        # similarity above 0.9.
        K = np.abs(masked_components.dot(these_maps.T))
        recovered_maps = np.sum(K > 0.9)
        assert (recovered_maps >= 2)

    # Smoke test n_epochs > 1
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init, mask=mask_img,
                                 smoothing_fwhm=0., n_epochs=2, alpha=1)
    dict_learning.fit(data)
def get_prf_parameters_volume(subject, session, bids_folder, run=None,
                              smoothed=False, pca_confounds=False,
                              cross_validated=True, hemi=None, mask=None,
                              space='fsnative'):
    """Load volumetric pRF parameter maps (mu, sd, amplitude, baseline)
    for one subject/session and return them as one DataFrame with a
    column per parameter.

    Raises
    ------
    Exception
        If ``cross_validated`` is True but no ``run`` is given.
    """
    # FIX: renamed local ``dir`` -> ``model_dir`` to stop shadowing the
    # ``dir`` builtin.
    model_dir = 'encoding_model'
    if cross_validated:
        if run is None:
            raise Exception('Give run')
        model_dir += '.cv'
    if smoothed:
        model_dir += '.smoothed'
    if pca_confounds:
        model_dir += '.pca_confounds'

    parameters = []
    keys = ['mu', 'sd', 'amplitude', 'baseline']

    mask = get_volume_mask(subject, session, mask, bids_folder)
    masker = NiftiMasker(mask)

    for parameter_key in keys:
        if cross_validated:
            fn = op.join(
                bids_folder, 'derivatives', model_dir, f'sub-{subject}',
                f'ses-{session}', 'func',
                f'sub-{subject}_ses-{session}_run-{run}_desc-{parameter_key}.optim_space-T1w_pars.nii.gz')
        else:
            fn = op.join(
                bids_folder, 'derivatives', model_dir, f'sub-{subject}',
                f'ses-{session}', 'func',
                f'sub-{subject}_ses-{session}_desc-{parameter_key}.optim_space-T1w_pars.nii.gz')
        pars = pd.Series(masker.fit_transform(fn).ravel())
        parameters.append(pars)

    return pd.concat(parameters, axis=1, keys=keys, names=['parameter'])
def test_masker_attributes_with_fit():
    """Masker-related attributes of DictLearning after fit, plus the
    error paths for unfitted transform and empty input.
    """
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)

    # mask passed directly as an image
    dict_learning = DictLearning(n_components=3, mask=mask_img,
                                 random_state=0)
    dict_learning.fit(data)
    assert dict_learning.mask_img_ == mask_img
    assert dict_learning.mask_img_ == dict_learning.masker_.mask_img_

    # mask passed as a NiftiMasker instance
    masker = NiftiMasker(mask_img=mask_img)
    dict_learning = DictLearning(n_components=3, mask=masker, random_state=0)
    dict_learning.fit(data)
    assert dict_learning.mask_img_ == dict_learning.masker_.mask_img_

    # transform() before fit() must fail loudly
    dict_learning = DictLearning(mask=mask_img, n_components=3)
    with pytest.raises(ValueError,
                       match="Object has no components_ attribute. "
                             "This is probably because "
                             "fit has not been called"):
        dict_learning.transform(data)

    # fitting on an empty list must fail loudly
    with pytest.raises(ValueError,
                       match='Need one or more Niimg-like objects '
                             'as input, an empty list was given.'):
        dict_learning.fit([])

    # masker construction arguments can be passed via the estimator
    dict_learning = DictLearning(n_components=3, target_affine=np.eye(4),
                                 target_shape=(6, 8, 10),
                                 mask_strategy='background')
    dict_learning.fit(data)
def test_multi_pca_score():
    """MultiPCA.score stays within [0, 1] and supports per-component mode."""
    shape = (6, 8, 10, 5)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Build a "multi-subject" dataset of 8 random images.
    imgs = [nibabel.Nifti1Image(rng.normal(size=shape), affine)
            for _ in range(8)]
    mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)

    # Score lies between zero and one.
    multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0,
                         n_components=3)
    multi_pca.fit(imgs)
    s = multi_pca.score(imgs)
    assert np.all(s <= 1)
    assert np.all(0 <= s)

    # Score also works with single-subject data.
    multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0,
                         n_components=3)
    multi_pca.fit(imgs[0])
    s = multi_pca.score(imgs[0])
    assert isinstance(s, float)
    assert 0. <= s <= 1.

    # Score is one when n_components == n_sample (single subject).
    multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0,
                         n_components=5)
    multi_pca.fit(imgs[0])
    s = multi_pca.score(imgs[0])
    assert_almost_equal(s, 1., 1)

    # Per-component score.
    multi_pca = MultiPCA(mask=mask_img, random_state=0, memory_level=0,
                         n_components=5)
    multi_pca.fit(imgs[0])
    masker = NiftiMasker(mask_img).fit()
    s = multi_pca._raw_score(masker.transform(imgs[0]), per_component=True)
    assert s.shape == (5,)
    assert np.all(s <= 1)
    assert np.all(0 <= s)
def test_joblib_cache():
    """The masker's mask image hashes stably, and memmapped joblib caching
    does not break saving images returned by inverse_transform.
    """
    from joblib import hash, Memory
    mask = np.zeros((40, 40, 40))
    mask[20, 20, 20] = 1
    mask_img = Nifti1Image(mask, np.eye(4))

    with testing.write_tmp_imgs(mask_img, create_files=True) as filename:
        masker = NiftiMasker(mask_img=filename)
        masker.fit()
        mask_hash = hash(masker.mask_img_)
        get_data(masker.mask_img_)
        # Reading the data must not alter the image's hash.
        assert mask_hash == hash(masker.mask_img_)

        # Tricky issue: memmapped joblib.memory used to make imgs returned
        # by inverse_transform impossible to save.
        cachedir = mkdtemp()
        try:
            masker.memory = Memory(location=cachedir, mmap_mode='r',
                                   verbose=0)
            X = masker.transform(mask_img)
            # Call inverse_transform twice so the second call hits the cache.
            out_img = masker.inverse_transform(X)
            out_img = masker.inverse_transform(X)
            out_img.to_filename(os.path.join(cachedir, 'test.nii'))
        finally:
            # Dropping the masker enables deleting "filename" on Windows.
            del masker
            shutil.rmtree(cachedir, ignore_errors=True)
def test_nifti_labels_masker_with_mask():
    """Masking the labels image up front gives the same signals as passing
    mask_img to NiftiLabelsMasker directly.
    """
    shape = (13, 11, 12)
    affine = np.eye(4)
    fmri_img, mask_img = generate_random_img(shape, affine=affine, length=3)
    labels_img = data_gen.generate_labeled_regions(shape, affine=affine,
                                                   n_regions=7)

    # Route 1: restrict extraction with mask_img.
    masker = NiftiLabelsMasker(labels_img, resampling_target=None,
                               mask_img=mask_img)
    signals = masker.fit().transform(fmri_img)

    # Route 2: apply the mask to the labels image first.
    bg_masker = NiftiMasker(mask_img).fit()
    masked_labels = bg_masker.inverse_transform(
        bg_masker.transform(labels_img))
    masked_masker = NiftiLabelsMasker(masked_labels, resampling_target=None,
                                      mask_img=mask_img)
    masked_signals = masked_masker.fit().transform(fmri_img)

    assert np.allclose(signals, masked_signals)
def test_mask_3d():
    """Fitting with a 4D image as mask_img must raise a TypeError."""
    # Dummy mask -- note the 4th dimension makes it invalid as a mask.
    data = np.zeros((40, 40, 40, 2))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        masker = NiftiMasker(mask_img=filename)
        with pytest.raises(TypeError):
            masker.fit()
def test_check_embedded_nifti_masker():
    # With no mask given, the embedded masker defaults to MultiNiftiMasker.
    owner = OwningClass()
    masker = _check_embedded_nifti_masker(owner)
    assert type(masker) is MultiNiftiMasker

    # The embedded masker mirrors the given masker's type and parameters,
    # except for the parameters the owner supplies itself.
    for mask, multi_subject in ((MultiNiftiMasker(), True),
                                (NiftiMasker(), False)):
        owner = OwningClass(mask=mask)
        masker = _check_embedded_nifti_masker(owner,
                                              multi_subject=multi_subject)
        assert type(masker) == type(mask)
        for param_key in masker.get_params():
            if param_key not in [
                'memory', 'memory_level', 'n_jobs', 'verbose'
            ]:
                assert (getattr(masker, param_key) ==
                        getattr(mask, param_key))
            else:
                # These come from the owner, not the provided masker.
                assert (getattr(masker, param_key) ==
                        getattr(owner, param_key))

    # Check use of mask as mask_img
    shape = (6, 8, 10, 5)
    affine = np.eye(4)
    mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)
    owner = OwningClass(mask=mask)
    masker = _check_embedded_nifti_masker(owner)
    assert masker.mask_img is mask

    # Check attribute forwarding: a fitted masker's mask_img_ becomes the
    # embedded masker's mask_img.
    data = np.zeros((9, 9, 9))
    data[2:-2, 2:-2, 2:-2] = 10
    imgs = nibabel.Nifti1Image(data, np.eye(4))
    mask = MultiNiftiMasker()
    mask.fit([[imgs]])
    owner = OwningClass(mask=mask)
    masker = _check_embedded_nifti_masker(owner)
    assert masker.mask_img is mask.mask_img_

    # Check conflict warning when masker and owner parameters disagree.
    mask = NiftiMasker(mask_strategy='epi')
    owner = OwningClass(mask=mask)
    with pytest.warns(UserWarning):
        _check_embedded_nifti_masker(owner)
def test_mask_strategy_errors():
    """Invalid and deprecated values of mask_strategy."""
    # An unknown strategy raises at fit time.
    masker = NiftiMasker(mask_strategy='oops')
    with pytest.raises(ValueError,
                       match="Unknown value of mask_strategy 'oops'"):
        masker.fit()

    # The deprecated 'template' strategy still works but warns.
    img = Nifti1Image(np.random.RandomState(42).uniform(size=(9, 9, 5)),
                      np.eye(4))
    masker = NiftiMasker(mask_strategy='template')
    with pytest.warns(UserWarning,
                      match="Masking strategy 'template' is deprecated."):
        masker.fit(img)
def test_4d_single_scan():
    """4D images with a trailing singleton dimension transform like their
    squeezed 3D counterparts.
    """
    mask = np.zeros((10, 10, 10))
    mask[3:7, 3:7, 3:7] = 1
    mask_img = Nifti1Image(mask, np.eye(4))

    rng = np.random.RandomState(42)
    # Five single-scan 4D arrays and the same data squeezed to 3D.
    arrays_4d = [rng.random_sample((10, 10, 10, 1)) for _ in range(5)]
    arrays_3d = [a[..., 0] for a in arrays_4d]
    imgs_4d = [nibabel.Nifti1Image(a, np.eye(4)) for a in arrays_4d]
    imgs_3d = [nibabel.Nifti1Image(a, np.eye(4)) for a in arrays_3d]

    masker = NiftiMasker(mask_img=mask_img)
    masker.fit()
    trans_4d = masker.transform(imgs_4d)
    trans_3d = masker.transform(imgs_3d)
    assert_array_equal(trans_3d, trans_4d)
def test_nan():
    """NaN faces of the volume must be excluded from the computed mask."""
    data = np.ones((9, 9, 9))
    # NaN out every outer face of the cube.
    for edge in (0, -1):
        data[edge] = np.nan
        data[:, edge] = np.nan
        data[:, :, edge] = np.nan
    data[3:-3, 3:-3, 3:-3] = 10
    img = Nifti1Image(data, np.eye(4))

    masker = NiftiMasker(mask_args=dict(opening=0))
    masker.fit(img)
    mask = get_data(masker.mask_img_)

    # The interior survives; every NaN face is dropped.
    assert mask[1:-1, 1:-1, 1:-1].all()
    for edge in (0, -1):
        assert not mask[edge].any()
        assert not mask[:, edge].any()
        assert not mask[:, :, edge].any()
def test_4d_reports(mask):
    """generate_report works after both .fit and .fit_transform on 4D data."""
    # Dummy 4D data: three constant volumes.
    data = np.zeros((10, 10, 10, 3), dtype=int)
    data[..., 0] = 1
    data[..., 1] = 2
    data[..., 2] = 3
    data_img_4d = Nifti1Image(data, np.eye(4))

    # .fit path
    masker = NiftiMasker(mask_strategy='epi')
    masker.fit(data_img_4d)
    assert masker._report_content['warning_message'] is None
    _check_html(masker.generate_report())

    # .fit_transform path
    masker = NiftiMasker(mask_img=mask, standardize=True)
    masker.fit_transform(data_img_4d)
    assert masker._report_content['warning_message'] is None
    _check_html(masker.generate_report())
def test_standardization():
    """'zscore' and 'psc' standardization of masked signals."""
    rng = np.random.RandomState(42)
    data_shape = (9, 9, 5)
    n_samples = 500

    # Signals with large per-voxel offsets so standardization matters.
    signals = rng.standard_normal(size=(np.prod(data_shape), n_samples))
    means = rng.standard_normal(size=(np.prod(data_shape), 1)) * 50 + 1000
    signals += means
    img = Nifti1Image(signals.reshape(data_shape + (n_samples, )), np.eye(4))
    mask = Nifti1Image(np.ones(data_shape), np.eye(4))

    # z-score: zero mean and unit variance per voxel.
    trans_signals = NiftiMasker(mask, standardize='zscore').fit_transform(img)
    np.testing.assert_almost_equal(trans_signals.mean(0), 0)
    np.testing.assert_almost_equal(trans_signals.std(0), 1)

    # psc: zero mean, values are percent signal change from the voxel mean.
    trans_signals = NiftiMasker(mask, standardize='psc').fit_transform(img)
    np.testing.assert_almost_equal(trans_signals.mean(0), 0)
    np.testing.assert_almost_equal(
        trans_signals,
        (signals / signals.mean(1)[:, np.newaxis] * 100 - 100).T)
def test_rena_clustering():
    """ReNA: round-trip shapes, parameter validation, and cluster counts."""
    data_img, mask_img = generate_fake_fmri(shape=(10, 11, 12), length=5)
    data = get_data(data_img)
    mask = get_data(mask_img)

    # Data matrix with one row per scan and one column per in-mask voxel.
    X = np.empty((data.shape[3], int(mask.sum())))
    for scan in range(data.shape[3]):
        X[scan, :] = np.copy(data[:, :, :, scan])[get_data(mask_img) != 0]

    masker = NiftiMasker(mask_img=mask_img).fit()
    n_voxels = masker.transform(data_img).shape[1]

    rena = ReNA(mask_img, n_clusters=10)
    X_reduced = rena.fit_transform(X)
    X_restored = rena.inverse_transform(X_reduced)
    assert 10 == rena.n_clusters_
    assert X.shape == X_restored.shape

    mem = Memory(location=None)
    # Negative n_clusters is rejected.
    rena = ReNA(mask_img, n_clusters=-2, memory=mem)
    pytest.raises(ValueError, rena.fit, X)

    # Smoke test with scaling enabled.
    rena = ReNA(mask_img, n_clusters=10, scaling=True)
    X_reduced = rena.fit_transform(X)
    X_restored = rena.inverse_transform(X_reduced)

    # Non-positive n_iter is rejected.
    for bad_n_iter in [-2, 0]:
        rena = ReNA(mask_img, n_iter=bad_n_iter, memory=mem)
        pytest.raises(ValueError, rena.fit, X)

    # With a single iteration the requested n_clusters is not reached.
    for target in [1, 2, 4, 8]:
        rena = ReNA(mask_img, n_clusters=target, n_iter=1, memory=mem).fit(X)
        assert target != rena.n_clusters_

    del n_voxels, X_reduced, X_restored
def test_filter_and_mask():
    """_filter_and_mask flattens a 4D image to (n_scans, n_voxels)."""
    data_img = nibabel.Nifti1Image(np.zeros([20, 30, 40, 5]), np.eye(4))
    mask_img = nibabel.Nifti1Image(np.ones([20, 30, 40]), np.eye(4))

    params = get_params(NiftiMasker, NiftiMasker())
    # Test return_affine = False
    out = _filter_and_mask(data_img, mask_img, params)
    # 5 scans x (20 * 30 * 40) voxels
    assert out.shape == (5, 24000)
def test_plot_img_comparison():
    """Tests for plot_img_comparison."""
    fig, axes = plt.subplots(2, 1)
    axes = axes.ravel()
    kwargs = {"shape": (3, 2, 4), "length": 5}
    query_images, mask_img = generate_fake_fmri(
        rand_gen=np.random.RandomState(0), **kwargs)
    # plot_img_comparison doesn't handle 4d images ATM
    query_images = list(iter_img(query_images))
    target_images, _ = generate_fake_fmri(rand_gen=np.random.RandomState(1),
                                          **kwargs)
    target_images = list(iter_img(target_images))
    target_images[0] = query_images[0]
    masker = NiftiMasker(mask_img).fit()
    correlations = plot_img_comparison(target_images, query_images, masker,
                                       axes=axes, src_label="query")
    assert len(correlations) == len(query_images)
    # Identical first images -> perfect correlation.
    assert correlations[0] == pytest.approx(1.)
    ax_0, ax_1 = axes
    # 5 scatterplots
    assert len(ax_0.collections) == 5
    # BUG FIX: the closing parenthesis of len() used to wrap the whole
    # comparison, so the assertion checked len(<boolean array>) (always
    # truthy) instead of comparing the point count to the voxel count.
    assert len(ax_0.collections[0].get_edgecolors()) == masker.transform(
        target_images[0]).ravel().shape[0]
    assert ax_0.get_ylabel() == "query"
    assert ax_0.get_xlabel() == "image set 1"
    # 5 regression lines
    assert len(ax_0.lines) == 5
    assert ax_0.lines[0].get_linestyle() == "--"
    assert ax_1.get_title() == "Histogram of imgs values"
    assert len(ax_1.patches) == 5 * 2 * 128
    correlations_1 = plot_img_comparison(target_images, query_images, masker,
                                         plot_hist=False)
    assert np.allclose(correlations, correlations_1)
def test_sessions():
    """Test the sessions (runs) vector: a length mismatch with the data
    must raise a ValueError.
    """
    data = np.ones((40, 40, 40, 4))
    # Create a border, so that the masking works well
    data[0] = 0
    data[-1] = 0
    data[:, -1] = 0
    data[:, 0] = 0
    data[..., -1] = 0
    data[..., 0] = 0
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    # BUG FIX: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``int`` is the documented equivalent.
    # 3 run labels for 4 scans -> fit_transform must fail.
    masker = NiftiMasker(runs=np.ones(3, dtype=int))
    pytest.raises(ValueError, masker.fit_transform, data_img)
def test_overlaid_report(data_img_3d):
    """The overlay div only appears in the report once the masker is fitted."""
    pytest.importorskip('matplotlib')

    masker = NiftiMasker(target_affine=np.eye(3) * 8)
    # Before fit: the report asks the user to fit the object.
    assert "Please `fit` the object" in str(masker.generate_report())

    masker.fit(data_img_3d)
    # After fit: the overlay is rendered.
    assert '<div class="overlay">' in str(masker.generate_report())
def test_high_level_glm_one_session():
    # Single-session fake dataset: one 4D run and its design matrix.
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = \
        generate_fake_fmri_data_and_design(shapes, rk)

    # Give an unfitted NiftiMasker as mask_img and check that we get an error
    masker = NiftiMasker(mask)
    with pytest.raises(ValueError,
                       match="It seems that NiftiMasker has not been fitted."):
        FirstLevelModel(mask_img=masker).fit(
            fmri_data[0], design_matrices=design_matrices[0])

    # Give a fitted NiftiMasker with a None mask_img_ attribute
    # and check that the masker parameters are overridden by the
    # FirstLevelModel parameters
    masker.fit()
    masker.mask_img_ = None
    with pytest.warns(UserWarning,
                      match="Parameter memory of the masker overridden"):
        FirstLevelModel(mask_img=masker).fit(
            fmri_data[0], design_matrices=design_matrices[0])

    # Give a fitted NiftiMasker; it should be used as-is.
    masker = NiftiMasker(mask)
    masker.fit()
    single_session_model = FirstLevelModel(mask_img=masker).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert single_session_model.masker_ == masker

    # Call with verbose (improve coverage)
    single_session_model = FirstLevelModel(mask_img=None, verbose=1).fit(
        fmri_data[0], design_matrices=design_matrices[0])

    # With mask_img=None a mask is computed automatically at fit time.
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert isinstance(single_session_model.masker_.mask_img_,
                      Nifti1Image)

    # A plain mask image also works; contrast computation yields a z-map.
    single_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = single_session_model.compute_contrast(np.eye(rk)[:1])
    assert isinstance(z1, Nifti1Image)