def test_plot_img_comparison():
    """Check plotting.plot_img_comparison: returned correlations and axes."""
    fig, axes = plt.subplots(2, 1)
    axes = axes.ravel()
    kwargs = {"shape": (3, 2, 4), "length": 5}
    query_images, mask_img = data_gen.generate_fake_fmri(
        rand_gen=np.random.RandomState(0), **kwargs)
    # plot_img_comparison doesn't handle 4d images ATM
    query_images = list(image.iter_img(query_images))
    target_images, _ = data_gen.generate_fake_fmri(
        rand_gen=np.random.RandomState(1), **kwargs)
    target_images = list(image.iter_img(target_images))
    # Make the first pair identical so its correlation is exactly 1.
    target_images[0] = query_images[0]
    masker = NiftiMasker(mask_img).fit()
    correlations = plotting.plot_img_comparison(
        target_images, query_images, masker, axes=axes, src_label="query")
    assert len(correlations) == len(query_images)
    assert correlations[0] == pytest.approx(1.)
    ax_0, ax_1 = axes
    # 5 scatterplots
    assert len(ax_0.collections) == 5
    # BUG FIX: the original wrote ``len(x.get_edgecolors() == n)`` — the
    # closing parenthesis was misplaced, so it asserted the truthiness of
    # the length of a broadcast comparison array (always true when
    # non-empty) instead of comparing the number of scatter points to the
    # number of masked voxels.
    assert (len(ax_0.collections[0].get_edgecolors())
            == masker.transform(target_images[0]).ravel().shape[0])
    assert ax_0.get_ylabel() == "query"
    assert ax_0.get_xlabel() == "image set 1"
    # 5 regression lines
    assert len(ax_0.lines) == 5
    assert ax_0.lines[0].get_linestyle() == "--"
    assert ax_1.get_title() == "Histogram of imgs values"
    assert len(ax_1.patches) == 5 * 2 * 128
    # plot_hist=False must not change the computed correlations.
    correlations_1 = plotting.plot_img_comparison(
        target_images, query_images, masker, plot_hist=False)
    assert np.allclose(correlations, correlations_1)
def test_generate_fake_fmri():
    """Exercise generate_fake_fmri over a full grid of parameter combos."""
    all_shapes = [(6, 6, 7), (10, 11, 12)]
    all_lengths = [16, 20]
    all_kinds = ['noise', 'step']
    all_n_blocks = [None, 1, 4]
    all_block_sizes = [None, 4]
    all_block_types = ['classification', 'regression']
    rng = np.random.RandomState(3)
    param_grid = itertools.product(all_shapes, all_lengths, all_kinds,
                                   all_n_blocks, all_block_sizes,
                                   all_block_types)
    for shape, length, kind, n_block, bsize, btype in param_grid:
        if n_block is None:
            # No block design requested: only the image and mask come back.
            fmri, mask = generate_fake_fmri(
                shape=shape, length=length, kind=kind, n_blocks=n_block,
                block_size=bsize, block_type=btype, rand_gen=rng)
        else:
            # A block design additionally yields a target vector.
            fmri, mask, target = generate_fake_fmri(
                shape=shape, length=length, kind=kind, n_blocks=n_block,
                block_size=bsize, block_type=btype, rand_gen=rng)
        assert_equal(fmri.shape[:-1], shape)
        assert_equal(fmri.shape[-1], length)
        if n_block is not None:
            assert_equal(target.size, length)
    # More blocks than time points cannot fit and must raise.
    assert_raises(ValueError, generate_fake_fmri, length=10, n_blocks=10,
                  block_size=None, rand_gen=rng)
def test_mask_roi():
    """ Test mask_roi functionality """
    mask_file = pkg_resources.resource_filename(
        "pynets", "templates/standard/MNI152_T1_brain_mask_2mm.nii.gz")
    dir_path = str(tempfile.TemporaryDirectory().name)
    os.makedirs(dir_path, exist_ok=True)
    # Write a fake functional image and a fake ROI to temporary files.
    func_file = tempfile.NamedTemporaryFile(mode="w+",
                                            suffix=".nii.gz").name
    data_gen.generate_fake_fmri()[1].to_filename(func_file)
    roi_file = tempfile.NamedTemporaryFile(mode="w+",
                                           suffix=".nii.gz").name
    data_gen.generate_mni_space_img()[1].to_filename(roi_file)
    masked = nodemaker.mask_roi(dir_path, roi_file, mask_file, func_file)
    assert masked is not None
def test_index_img():
    """index_img must reject 3D input, accept many index kinds, and
    bounds-check out-of-range indices."""
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    testing.assert_raises_regex(TypeError,
                                "Input data has incompatible dimensionality: "
                                "Expected dimension is 4D and you provided "
                                "a 3D image.",
                                image.index_img, img_3d, 0)
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)
    n_volumes = img_4d.shape[3]
    # Integers, slices, integer lists (with repeats), empty list, boolean
    # masks: all are valid along the fourth axis.
    valid_indices = (list(range(n_volumes))
                     + [slice(2, 8, 2), [1, 2, 3, 2], [],
                        (np.arange(n_volumes) % 3) == 1])
    for index in valid_indices:
        sliced = image.index_img(img_4d, index)
        assert_array_equal(sliced.get_data(),
                           img_4d.get_data()[..., index])
        assert_array_equal(sliced.affine, img_4d.affine)
    out_of_range = [n_volumes, -n_volumes - 1, [0, n_volumes],
                    np.repeat(True, n_volumes + 1)]
    for index in out_of_range:
        testing.assert_raises_regex(
            IndexError,
            'out of bounds|invalid index|out of range|boolean index',
            image.index_img, img_4d, index)
def test_sender_img(request_mocker, tmp_path):
    """Download a mocked fake fMRI image and load it back from disk."""
    request_mocker.url_mapping["*"] = generate_fake_fmri()[0]
    resp = requests.get("ftp:example.org/download")
    out_file = tmp_path / "img.nii.gz"
    out_file.write_bytes(resp.content)
    loaded = image.load_img(str(out_file))
    assert loaded.shape == (10, 11, 12, 17)
def test_z_score_opposite_contrast():
    """Opposite contrasts must yield z-maps that are exact negatives."""
    fmri, mask = generate_fake_fmri(shape=(50, 20, 50), length=96,
                                    rand_gen=np.random.RandomState(42))
    nifti_masker = NiftiMasker(mask_img=mask)
    data = nifti_masker.fit_transform(fmri)
    frametimes = np.linspace(0, (96 - 1) * 2, 96)
    for seed_idx in [0, 20]:
        # Use one voxel's own time course as an extra regressor.
        dmat = make_first_level_design_matrix(
            frametimes, hrf_model='spm',
            add_regs=np.array(data[:, seed_idx]).reshape(-1, 1))
        c1 = np.array([1] + [0] * (dmat.shape[1] - 1))
        c2 = np.array([0] + [1] + [0] * (dmat.shape[1] - 2))
        contrasts = {'seed1 - seed2': c1 - c2, 'seed2 - seed1': c2 - c1}
        fmri_glm = FirstLevelModel(t_r=2., noise_model='ar1',
                                   standardize=False, hrf_model='spm',
                                   drift_model='cosine')
        fmri_glm.fit(fmri, design_matrices=dmat)
        z_a = fmri_glm.compute_contrast(contrasts['seed1 - seed2'],
                                        output_type='z_score')
        z_b = fmri_glm.compute_contrast(contrasts['seed2 - seed1'],
                                        output_type='z_score')
        # min of one map mirrors max of the opposite map, and vice versa.
        assert_almost_equal(z_a.get_data().min(), -z_b.get_data().max(),
                            decimal=10)
        assert_almost_equal(z_a.get_data().max(), -z_b.get_data().min(),
                            decimal=10)
def test_hierarchical_k_means_clustering():
    """HierarchicalKMeans: input validation, round-trip, and scaling."""
    data_img, mask_img = generate_fake_fmri(shape=(10, 11, 12), length=15)
    masker = NiftiMasker(mask_img=mask_img).fit()
    X = masker.transform(data_img).T
    # A non-positive cluster count must be rejected.
    with pytest.raises(ValueError,
                       match="n_clusters should be an integer greater than 0."
                             " -2 was provided."):
        HierarchicalKMeans(n_clusters=-2).fit(X)
    hkmeans = HierarchicalKMeans(n_clusters=8)
    reduced = hkmeans.fit_transform(X)
    restored = hkmeans.inverse_transform(reduced)
    # inverse_transform must restore the original dimensionality.
    assert_array_almost_equal(X.shape, restored.shape)
    hkmeans_scaled = HierarchicalKMeans(n_clusters=8, scaling=True)
    reduced_scaled = hkmeans_scaled.fit_transform(X)
    cluster_sizes = hkmeans_scaled.sizes_
    restored_scaled = hkmeans_scaled.inverse_transform(reduced_scaled)
    # Scaling multiplies each cluster signal by sqrt(cluster size) ...
    assert_array_almost_equal(
        np.asarray([np.sqrt(sz) * sig
                    for sz, sig in zip(cluster_sizes, reduced)]),
        reduced_scaled)
    # ... and inverse_transform undoes it exactly.
    assert_array_almost_equal(restored, restored_scaled)
    del reduced, restored, reduced_scaled, restored_scaled
def test_index_img():
    """index_img must reject 3D input, accept many index kinds, and
    bounds-check out-of-range indices (pytest idioms)."""
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    expected_error_msg = ("Input data has incompatible dimensionality: "
                          "Expected dimension is 4D and you provided "
                          "a 3D image.")
    with pytest.raises(TypeError, match=expected_error_msg):
        image.index_img(img_3d, 0)
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)
    n_volumes = img_4d.shape[3]
    # Integers, slices, integer lists (with repeats), empty list, boolean
    # masks: all are valid along the fourth axis.
    valid_indices = (list(range(n_volumes))
                     + [slice(2, 8, 2), [1, 2, 3, 2], [],
                        (np.arange(n_volumes) % 3) == 1])
    for index in valid_indices:
        sliced = image.index_img(img_4d, index)
        assert_array_equal(get_data(sliced), get_data(img_4d)[..., index])
        assert_array_equal(sliced.affine, img_4d.affine)
    out_of_range = [n_volumes, -n_volumes - 1, [0, n_volumes],
                    np.repeat(True, n_volumes + 1)]
    for index in out_of_range:
        with pytest.raises(IndexError,
                           match='out of bounds|invalid index|out of range|'
                                 'boolean index'):
            image.index_img(img_4d, index)
def test_smooth_img():
    # This function only checks added functionalities compared
    # to _smooth_array()
    shapes = ((10, 11, 12), (13, 14, 15))
    lengths = (17, 18)
    fwhm = (1., 2., 3.)
    img1, mask1 = data_gen.generate_fake_fmri(shape=shapes[0],
                                              length=lengths[0])
    img2, mask2 = data_gen.generate_fake_fmri(shape=shapes[1],
                                              length=lengths[1])
    for create_files in (False, True):
        with testing.write_tmp_imgs(img1, img2,
                                    create_files=create_files) as imgs:
            # A list of images yields a list of smoothed images.
            out = image.smooth_img(imgs, fwhm)
            assert isinstance(out, list)
            assert len(out) == 2
            for smoothed, shape, length in zip(out, shapes, lengths):
                assert smoothed.shape == (shape + (length, ))
            # A single image yields a single image.
            out = image.smooth_img(imgs[0], fwhm)
            assert isinstance(out, nibabel.Nifti1Image)
            assert out.shape == (shapes[0] + (lengths[0], ))
    # Check corner case situations when fwhm=0, See issue #1537
    # fwhm=0 must emit a warning ...
    with pytest.warns(UserWarning):
        image.smooth_img(img1, fwhm=0.)
    # ... and behave exactly like fwhm=None.
    out_fwhm_none = image.smooth_img(img1, fwhm=None)
    out_fwhm_zero = image.smooth_img(img1, fwhm=0.)
    assert_array_equal(get_data(out_fwhm_none), get_data(out_fwhm_zero))
    # Nifti2 images are accepted as well (smoke test).
    data1 = np.zeros((10, 11, 12))
    data1[2:4, 1:5, 3:6] = 1
    data2 = np.zeros((13, 14, 15))
    data2[2:4, 1:5, 3:6] = 9
    img1_nifti2 = nibabel.Nifti2Image(data1, affine=np.eye(4))
    img2_nifti2 = nibabel.Nifti2Image(data2, affine=np.eye(4))
    out = image.smooth_img([img1_nifti2, img2_nifti2], fwhm=1.)
def test_smooth_img():
    # This function only checks added functionalities compared
    # to _smooth_array()
    shapes = ((10, 11, 12), (13, 14, 15))
    lengths = (17, 18)
    fwhm = (1., 2., 3.)
    img1, mask1 = data_gen.generate_fake_fmri(shape=shapes[0],
                                              length=lengths[0])
    img2, mask2 = data_gen.generate_fake_fmri(shape=shapes[1],
                                              length=lengths[1])
    for create_files in (False, True):
        with testing.write_tmp_imgs(img1, img2,
                                    create_files=create_files) as imgs:
            # A list of images yields a list of smoothed images.
            out = image.smooth_img(imgs, fwhm)
            assert_true(isinstance(out, list))
            assert_true(len(out) == 2)
            for smoothed, shape, length in zip(out, shapes, lengths):
                assert_true(smoothed.shape == (shape + (length,)))
            # A single image yields a single image.
            out = image.smooth_img(imgs[0], fwhm)
            assert_true(isinstance(out, nibabel.Nifti1Image))
            assert_true(out.shape == (shapes[0] + (lengths[0],)))
    # Check corner case situations when fwhm=0, See issue #1537
    # fwhm=0 must emit a warning ...
    assert_warns(UserWarning, image.smooth_img, img1, fwhm=0.)
    # ... and behave exactly like fwhm=None.
    out_fwhm_none = image.smooth_img(img1, fwhm=None)
    out_fwhm_zero = image.smooth_img(img1, fwhm=0.)
    assert_array_equal(out_fwhm_none.get_data(), out_fwhm_zero.get_data())
    # Nifti2 images are accepted as well (smoke test).
    data1 = np.zeros((10, 11, 12))
    data1[2:4, 1:5, 3:6] = 1
    data2 = np.zeros((13, 14, 15))
    data2[2:4, 1:5, 3:6] = 9
    img1_nifti2 = nibabel.Nifti2Image(data1, affine=np.eye(4))
    img2_nifti2 = nibabel.Nifti2Image(data2, affine=np.eye(4))
    out = image.smooth_img([img1_nifti2, img2_nifti2], fwhm=1.)
def test_clean_img():
    """image.clean_img: parameter validation, equivalence with signal.clean,
    NaN/inf handling, Nifti2 support, and mask_img behaviour."""
    rng = np.random.RandomState(0)
    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))
    # low_pass without a t_r is invalid.
    assert_raises(ValueError, image.clean_img, data_img, t_r=None,
                  low_pass=0.1)
    data_img_ = image.clean_img(data_img, detrend=True, standardize=False,
                                low_pass=0.1, t_r=1.0)
    data_flat_ = signal.clean(data_flat, detrend=True, standardize=False,
                              low_pass=0.1, t_r=1.0)
    # Image-level cleaning must match signal-level cleaning exactly.
    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    # BUG FIX: the original passed a spurious second argument
    # ``assert_true(cond, True)`` — that positional argument is the
    # failure *message*, not part of the condition.
    # NOTE(review): np.any() is a weak check here (np.all() would assert
    # every value is finite) — kept as-is to preserve test semantics.
    assert_true(np.any(np.isfinite(clean_im.get_data())))
    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))
    data_img_nifti2_ = image.clean_img(data_img_nifti2, detrend=True,
                                       standardize=False, low_pass=0.1,
                                       t_r=1.0)
    # if mask_img
    img, mask_img = data_gen.generate_fake_fmri(shape=(10, 10, 10),
                                                length=10)
    data_img_mask_ = image.clean_img(img, mask_img=mask_img)
    # Checks that output with full mask and without is equal
    data_img_ = image.clean_img(img)
    np.testing.assert_almost_equal(data_img_.get_data(),
                                   data_img_mask_.get_data())
def test_dict_to_archive(tmp_path):
    """Round-trip dict_to_archive / list_to_archive through real archives."""
    subdir = tmp_path / "tmp"
    subdir.mkdir()
    (subdir / "labels.csv").touch()
    (subdir / "img.nii.gz").touch()
    archive_spec = {
        "empty_data": subdir,
        "empty_data_path.txt": str(subdir),
        Path("data", "labels.csv"): "a,b,c",
        Path("data", "img.nii.gz"): generate_fake_fmri()[0],
        Path("a", "b", "c"): (100).to_bytes(length=1, byteorder="big",
                                            signed=False),
    }
    targz = _testing.dict_to_archive(archive_spec)
    extract_dir = tmp_path / "extract"
    extract_dir.mkdir()
    archive_path = tmp_path / "archive"
    archive_path.write_bytes(targz)
    with tarfile.open(str(archive_path)) as tarf:
        tarf.extractall(str(extract_dir))
    # Nifti payload survives the round trip.
    img = image.load_img(str(extract_dir / "data" / "img.nii.gz"))
    assert img.shape == (10, 11, 12, 17)
    # Raw-bytes payload survives.
    raw = (extract_dir / "a" / "b" / "c").read_bytes()
    assert int.from_bytes(raw, byteorder="big", signed=False) == 100
    # A directory value copies its (empty) files.
    with open(str(extract_dir / "empty_data" / "labels.csv")) as f:
        assert f.read() == ""
    # Zip flavour: the nested tar.gz is stored verbatim.
    zip_archive = _testing.dict_to_archive(
        {"readme.txt": "hello", "archive": targz}, "zip")
    archive_path.write_bytes(zip_archive)
    with zipfile.ZipFile(str(archive_path)) as zipf:
        with zipf.open("archive", "r") as f:
            assert f.read() == targz
    # list_to_archive creates the same member paths (plus directories).
    from_list = _testing.list_to_archive(archive_spec.keys())
    archive_path.write_bytes(from_list)
    with tarfile.open(str(archive_path)) as tarf:
        expected_names = sorted(
            list(map(Path, archive_spec.keys()))
            + [Path("."), Path("a"), Path("a", "b"), Path("data")])
        assert sorted(map(Path, tarf.getnames())) == expected_names
def test_get_data():
    """get_data: caching, dtype preservation, and filename/glob input."""
    img, *_ = data_gen.generate_fake_fmri(shape=(10, 11, 12))
    data = get_data(img)
    assert data.shape == img.shape
    # The returned array is the image's own cache, not a copy.
    assert data is img._data_cache
    mask_img = new_img_like(img, data > 0)
    data = get_data(mask_img)
    assert data.dtype == np.dtype('int8')
    img_3d = index_img(img, 0)
    with tempfile.TemporaryDirectory() as tempdir:
        filename = os.path.join(tempdir, 'img_{}.nii.gz')
        img_3d.to_filename(filename.format('a'))
        img_3d.to_filename(filename.format('b'))
        # A single path loads one 3D image ...
        data = get_data(filename.format('a'))
        assert len(data.shape) == 3
        # ... while a glob matching both files stacks them into 4D.
        data = get_data(filename.format('*'))
        assert len(data.shape) == 4
def test_pd_index_img():
    # confirm indices from pandas dataframes are handled correctly
    if 'pandas' not in sys.modules:
        # BUG FIX: the original used ``raise pytest.skip(msg=...)``. The
        # ``msg`` keyword is deprecated (removed in pytest 8; use
        # ``reason``/positional), and the ``raise`` is redundant because
        # pytest.skip() raises internally and never returns.
        pytest.skip('Pandas not available')
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)
    fourth_dim_size = img_4d.shape[3]
    rng = np.random.RandomState(42)
    # Boolean selection of volumes, once as an ndarray and once wrapped
    # in a DataFrame: both must index identically.
    arr = rng.uniform(size=fourth_dim_size) > 0.5
    df = pd.DataFrame({"arr": arr})
    np_index_img = image.index_img(img_4d, arr)
    pd_index_img = image.index_img(img_4d, df)
    assert_array_equal(get_data(np_index_img), get_data(pd_index_img))
def test_iter_img():
    """iter_img must reject 3D input and yield matching 3D volumes from
    4D images, filenames, and lists thereof."""
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    testing.assert_raises_regex(TypeError,
                                "Input data has incompatible dimensionality: "
                                "Expected dimension is 4D and you provided "
                                "a 3D image.",
                                image.iter_img, img_3d)
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)

    def _check_volumes(volumes):
        # Each yielded volume must match the corresponding 4D slice and
        # carry the same affine. Keeping the loop variable local also
        # releases file handles, so temp files can be deleted on Windows.
        for i, vol in enumerate(volumes):
            assert_array_equal(vol.get_data(), img_4d.get_data()[..., i])
            assert_array_equal(vol.affine, img_4d.affine)

    # In-memory 4D image.
    _check_volumes(image.iter_img(img_4d))
    # 4D image on disk.
    with testing.write_tmp_imgs(img_4d) as img_4d_filename:
        _check_volumes(image.iter_img(img_4d_filename))
    # List of in-memory 3D images.
    img_3d_list = list(image.iter_img(img_4d))
    _check_volumes(image.iter_img(img_3d_list))
    # List of 3D filenames.
    with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames:
        _check_volumes(image.iter_img(img_3d_filenames))
def test_iter_img():
    """iter_img must reject 3D input and yield matching 3D volumes from
    4D images, filenames, and lists thereof (pytest idioms)."""
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    expected_error_msg = ("Input data has incompatible dimensionality: "
                          "Expected dimension is 4D and you provided "
                          "a 3D image.")
    with pytest.raises(TypeError, match=expected_error_msg):
        image.iter_img(img_3d)
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)

    def _check_volumes(volumes):
        # Each yielded volume must match the corresponding 4D slice and
        # carry the same affine. Keeping the loop variable local also
        # releases file handles, so temp files can be deleted on Windows.
        for i, vol in enumerate(volumes):
            assert_array_equal(get_data(vol), get_data(img_4d)[..., i])
            assert_array_equal(vol.affine, img_4d.affine)

    # In-memory 4D image.
    _check_volumes(image.iter_img(img_4d))
    # 4D image on disk.
    with testing.write_tmp_imgs(img_4d) as img_4d_filename:
        _check_volumes(image.iter_img(img_4d_filename))
    # List of in-memory 3D images.
    img_3d_list = list(image.iter_img(img_4d))
    _check_volumes(image.iter_img(img_3d_list))
    # List of 3D filenames.
    with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames:
        _check_volumes(image.iter_img(img_3d_filenames))
def test_high_variance_confounds():
    # See also test_signals.test_high_variance_confounds()
    # There is only tests on what is added by image.high_variance_confounds()
    # compared to signal.high_variance_confounds()
    shape = (40, 41, 42)
    length = 17
    n_confounds = 10
    img, mask_img = data_gen.generate_fake_fmri(shape=shape, length=length)
    # With an explicit mask.
    confounds1 = image.high_variance_confounds(img, mask_img=mask_img,
                                               percentile=10.,
                                               n_confounds=n_confounds)
    assert confounds1.shape == (length, n_confounds)
    # No mask.
    confounds2 = image.high_variance_confounds(img, percentile=10.,
                                               n_confounds=n_confounds)
    assert confounds2.shape == (length, n_confounds)
def test_high_variance_confounds():
    # See also test_signals.test_high_variance_confounds()
    # There is only tests on what is added by image.high_variance_confounds()
    # compared to signal.high_variance_confounds()
    shape = (40, 41, 42)
    length = 17
    n_confounds = 10
    img, mask_img = data_gen.generate_fake_fmri(shape=shape, length=length)
    # With an explicit mask.
    confounds1 = image.high_variance_confounds(img, mask_img=mask_img,
                                               percentile=10.,
                                               n_confounds=n_confounds)
    assert_true(confounds1.shape == (length, n_confounds))
    # No mask.
    confounds2 = image.high_variance_confounds(img, percentile=10.,
                                               n_confounds=n_confounds)
    assert_true(confounds2.shape == (length, n_confounds))
def test_matrix_orientation():
    """Test if processing is performed along the correct axis."""
    # the "step" kind generate heavyside-like signals for each voxel.
    # all signals being identical, standardizing along the wrong axis
    # would leave a null signal. Along the correct axis, the step remains.
    fmri, mask = data_gen.generate_fake_fmri(shape=(40, 41, 42),
                                             kind="step")
    masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True)
    timeseries = masker.fit_transform(fmri)
    assert timeseries.shape[0] == fmri.shape[3]
    assert timeseries.shape[1] == mask.get_data().sum()
    std = timeseries.std(axis=0)
    assert std.shape[0] == timeseries.shape[1]  # paranoid
    assert not np.any(std < 0.1)
    # Test inverse transform
    masker = NiftiMasker(mask_img=mask, standardize=False, detrend=False)
    masker.fit()
    timeseries = masker.transform(fmri)
    recovered = masker.inverse_transform(timeseries)
    np.testing.assert_array_almost_equal(recovered.get_data(),
                                         fmri.get_data())
def test_matrix_orientation():
    """Test if processing is performed along the correct axis."""
    # the "step" kind generate heavyside-like signals for each voxel.
    # all signals being identical, standardizing along the wrong axis
    # would leave a null signal. Along the correct axis, the step remains.
    fmri, mask = data_gen.generate_fake_fmri(shape=(40, 41, 42),
                                             kind="step")
    masker = NiftiMasker(mask_img=mask, standardize=True, detrend=True)
    timeseries = masker.fit_transform(fmri)
    assert timeseries.shape[0] == fmri.shape[3]
    assert timeseries.shape[1] == mask.get_data().sum()
    std = timeseries.std(axis=0)
    assert std.shape[0] == timeseries.shape[1]  # paranoid
    assert not np.any(std < 0.1)
    # Test inverse transform
    masker = NiftiMasker(mask_img=mask, standardize=False, detrend=False)
    masker.fit()
    timeseries = masker.transform(fmri)
    recovered = masker.inverse_transform(timeseries)
    np.testing.assert_array_almost_equal(recovered.get_data(),
                                         fmri.get_data())
def test_clean_img():
    """image.clean_img: parameter validation, equivalence with signal.clean,
    NaN/inf handling, Nifti2 support, and mask_img behaviour."""
    rng = np.random.RandomState(0)
    data = rng.randn(10, 10, 10, 100) + .5
    data_flat = data.T.reshape(100, -1)
    data_img = nibabel.Nifti1Image(data, np.eye(4))
    # low_pass without a t_r is invalid.
    assert_raises(
        ValueError, image.clean_img, data_img, t_r=None, low_pass=0.1)
    data_img_ = image.clean_img(
        data_img, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)
    data_flat_ = signal.clean(
        data_flat, detrend=True, standardize=False, low_pass=0.1, t_r=1.0)
    # Image-level cleaning must match signal-level cleaning exactly.
    np.testing.assert_almost_equal(data_img_.get_data().T.reshape(100, -1),
                                   data_flat_)
    # if NANs
    data[:, 9, 9] = np.nan
    # if infinity
    data[:, 5, 5] = np.inf
    nan_img = nibabel.Nifti1Image(data, np.eye(4))
    clean_im = image.clean_img(nan_img, ensure_finite=True)
    # BUG FIX: the original passed a spurious second argument
    # ``assert_true(cond, True)`` — that positional argument is the
    # failure *message*, not part of the condition.
    # NOTE(review): np.any() is a weak check here (np.all() would assert
    # every value is finite) — kept as-is to preserve test semantics.
    assert_true(np.any(np.isfinite(clean_im.get_data())))
    # test_clean_img_passing_nifti2image
    data_img_nifti2 = nibabel.Nifti2Image(data, np.eye(4))
    data_img_nifti2_ = image.clean_img(
        data_img_nifti2, detrend=True, standardize=False, low_pass=0.1,
        t_r=1.0)
    # if mask_img
    img, mask_img = data_gen.generate_fake_fmri(shape=(10, 10, 10),
                                                length=10)
    data_img_mask_ = image.clean_img(img, mask_img=mask_img)
    # Checks that output with full mask and without is equal
    data_img_ = image.clean_img(img)
    np.testing.assert_almost_equal(data_img_.get_data(),
                                   data_img_mask_.get_data())
def test_pd_index_img():
    # confirm indices from pandas dataframes are handled correctly
    if 'pandas' not in sys.modules:
        raise SkipTest
    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)
    fourth_dim_size = img_4d.shape[3]
    rng = np.random.RandomState(0)
    # Boolean selection of volumes, once as an ndarray and once wrapped
    # in a DataFrame: both must index identically.
    arr = rng.rand(fourth_dim_size) > 0.5
    df = pd.DataFrame({"arr": arr})
    np_index_img = image.index_img(img_4d, arr)
    pd_index_img = image.index_img(img_4d, df)
    assert_array_equal(np_index_img.get_data(), pd_index_img.get_data())
def test_rena_clustering():
    """ReNA: round-trip, scaling, and parameter validation."""
    data_img, mask_img = generate_fake_fmri(shape=(10, 11, 12), length=5)
    data = get_data(data_img)
    mask = get_data(mask_img)
    # Build the (n_samples, n_voxels) matrix by masking each volume.
    X = np.empty((data.shape[3], int(mask.sum())))
    for t in range(data.shape[3]):
        X[t, :] = np.copy(data[:, :, :, t])[get_data(mask_img) != 0]
    nifti_masker = NiftiMasker(mask_img=mask_img).fit()
    n_voxels = nifti_masker.transform(data_img).shape[1]
    rena = ReNA(mask_img, n_clusters=10)
    reduced = rena.fit_transform(X)
    restored = rena.inverse_transform(reduced)
    assert 10 == rena.n_clusters_
    assert X.shape == restored.shape
    memory = Memory(cachedir=None)
    # Invalid cluster count must raise.
    rena = ReNA(mask_img, n_clusters=-2, memory=memory)
    pytest.raises(ValueError, rena.fit, X)
    # Scaling path (smoke test).
    rena = ReNA(mask_img, n_clusters=10, scaling=True)
    reduced = rena.fit_transform(X)
    restored = rena.inverse_transform(reduced)
    # Invalid iteration counts must raise.
    for n_iter in [-2, 0]:
        rena = ReNA(mask_img, n_iter=n_iter, memory=memory)
        pytest.raises(ValueError, rena.fit, X)
    # With a single iteration the requested cluster count is not reached.
    for n_clusters in [1, 2, 4, 8]:
        rena = ReNA(mask_img, n_clusters=n_clusters, n_iter=1,
                    memory=memory).fit(X)
        assert n_clusters != rena.n_clusters_
    del n_voxels, reduced, restored
def test_largest_cc_img():
    """ Check the extraction of the largest connected component, for niftis
    Similar to smooth_img tests for largest connected_component_img, here
    also only the added features for largest_connected_component are tested.
    """
    # Test whether dimension of 3Dimg and list of 3Dimgs are kept.
    shapes = ((10, 11, 12), (13, 14, 15))
    regions = [1, 3]
    img1 = data_gen.generate_labeled_regions(shape=shapes[0],
                                             n_regions=regions[0])
    img2 = data_gen.generate_labeled_regions(shape=shapes[1],
                                             n_regions=regions[1])

    def _check_outputs(imgs):
        # List input -> list output, shapes preserved.
        out = largest_connected_component_img(imgs)
        assert isinstance(out, list)
        assert len(out) == 2
        for o, s in zip(out, shapes):
            assert o.shape == (s)
        # Single image input -> single image output.
        out = largest_connected_component_img(imgs[0])
        assert isinstance(out, Nifti1Image)
        assert out.shape == (shapes[0])

    for create_files in (False, True):
        with testing.write_tmp_imgs(img1, img2,
                                    create_files=create_files) as imgs:
            _check_outputs(imgs)
    # Test whether 4D Nifti throws the right error.
    img_4D = data_gen.generate_fake_fmri(shapes[0], length=17)
    pytest.raises(DimensionError, largest_connected_component_img, img_4D)
    # tests adapted to non-native endian data dtype
    img1_change_dtype = nibabel.Nifti1Image(get_data(img1).astype('>f8'),
                                            affine=img1.affine)
    img2_change_dtype = nibabel.Nifti1Image(get_data(img2).astype('>f8'),
                                            affine=img2.affine)
    for create_files in (False, True):
        with testing.write_tmp_imgs(img1_change_dtype, img2_change_dtype,
                                    create_files=create_files) as imgs:
            _check_outputs(imgs)
    # Test the output with native and without native
    out_native = largest_connected_component_img(img1)
    out_non_native = largest_connected_component_img(img1_change_dtype)
    np.testing.assert_equal(get_data(out_native), get_data(out_non_native))
def test_signal_extraction_with_maps_and_labels():
    """Signals extracted via maps and via labels must agree, and NaNs in
    masked regions must propagate to the extracted signals."""
    shape = (4, 5, 6)
    n_regions = 7
    length = 8
    # Generate labels
    labels = list(range(n_regions + 1))  # 0 is background
    labels_img = generate_labeled_regions(shape, n_regions, labels=labels)
    labels_data = labels_img.get_data()
    # Convert to maps: one binary map per non-background label.
    maps_data = np.zeros(shape + (n_regions,))
    for n, l in enumerate(labels):
        if n == 0:
            continue
        maps_data[labels_data == l, n - 1] = 1
    maps_img = nibabel.Nifti1Image(maps_data, labels_img.affine)
    # Generate fake data
    fmri_img, _ = generate_fake_fmri(shape=shape, length=length,
                                     affine=labels_img.affine)
    # Extract signals from maps and labels: results must be identical.
    maps_signals, maps_labels = signal_extraction.img_to_signals_maps(
        fmri_img, maps_img)
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img)
    np.testing.assert_almost_equal(maps_signals, labels_signals)
    # Same thing with a mask, containing only 3 regions.
    mask_data = (labels_data == 1) + (labels_data == 2) + (labels_data == 5)
    mask_img = nibabel.Nifti1Image(mask_data.astype(np.int8),
                                   labels_img.affine)
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img, mask_img=mask_img)
    maps_signals, maps_labels = signal_extraction.img_to_signals_maps(
        fmri_img, maps_img, mask_img=mask_img)
    np.testing.assert_almost_equal(maps_signals, labels_signals)
    assert_true(maps_signals.shape[1] == n_regions)
    assert_true(maps_labels == list(range(len(maps_labels))))
    assert_true(labels_signals.shape == (length, n_regions))
    assert_true(labels_labels == labels[1:])
    # Inverse operation (mostly smoke test)
    labels_img_r = signal_extraction.signals_to_img_labels(
        labels_signals, labels_img, mask_img=mask_img)
    assert_true(labels_img_r.shape == shape + (length,))
    maps_img_r = signal_extraction.signals_to_img_maps(
        maps_signals, maps_img, mask_img=mask_img)
    assert_true(maps_img_r.shape == shape + (length,))
    # Check that NaNs in regions inside mask are preserved
    region1 = labels_data == 2
    indices = [ind[:1] for ind in np.where(region1)]
    # BUG FIX: the original indexed with ``indices + [slice(None)]`` — a
    # *list* of index arrays. Multidimensional fancy indexing with a list
    # is deprecated in numpy and a hard error in modern releases; the
    # index must be a tuple.
    fmri_img.get_data()[tuple(indices) + (slice(None),)] = float('nan')
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img, mask_img=mask_img)
    assert_true(np.all(np.isnan(labels_signals[:, labels_labels.index(2)])))
def test_nifti_maps_masker_2():
    # Test resampling in NiftiMapsMasker
    affine = np.eye(4)
    shape1 = (10, 11, 12)  # fmri
    shape2 = (13, 14, 15)  # mask
    shape3 = (16, 17, 18)  # maps
    n_regions = 9
    length = 3
    fmri11_img, _ = generate_random_img(shape1, affine=affine,
                                        length=length)
    _, mask22_img = generate_random_img(shape2, affine=affine,
                                        length=length)
    maps33_img, _ = data_gen.generate_maps(shape3, n_regions, affine=affine)
    mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),
                                      affine=np.diag((4, 4, 4, 1)))
    # verify that 4D mask arguments are refused
    masker = NiftiMapsMasker(maps33_img, mask_img=mask_img_4d)
    testing.assert_raises_regex(DimensionError,
                                "Input data has incompatible dimensionality: "
                                "Expected dimension is 3D and you provided "
                                "a 4D image.",
                                masker.fit)
    # Test error checking
    assert_raises(ValueError, NiftiMapsMasker, maps33_img,
                  resampling_target="mask")
    assert_raises(ValueError, NiftiMapsMasker, maps33_img,
                  resampling_target="invalid")
    # Target: mask — maps are resampled onto the mask grid.
    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="mask")
    masker.fit()
    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   mask22_img.affine)
    assert_equal(masker.mask_img_.shape, mask22_img.shape)
    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))
    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length,)))
    # Target: maps — mask is resampled onto the maps grid.
    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="maps")
    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine,
                                   maps33_img.affine)
    assert_equal(masker.maps_img_.shape, maps33_img.shape)
    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))
    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length,)))
    # Test with clipped maps: mask does not contain all maps.
    # Shapes do matter in that case
    affine1 = np.eye(4)
    shape1 = (10, 11, 12)
    shape2 = (8, 9, 10)  # mask
    affine2 = np.diag((2, 2, 2, 1))  # just for mask
    shape3 = (16, 18, 20)  # maps
    n_regions = 9
    length = 21
    fmri11_img, _ = generate_random_img(shape1, affine=affine1,
                                        length=length)
    _, mask22_img = data_gen.generate_fake_fmri(shape2, length=1,
                                                affine=affine2)
    # Target: maps
    maps33_img, _ = data_gen.generate_maps(shape3, n_regions,
                                           affine=affine1)
    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="maps")
    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine,
                                   maps33_img.affine)
    assert_equal(masker.maps_img_.shape, maps33_img.shape)
    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])
    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))
    # Some regions have been clipped. Resulting signal must be zero
    assert_less((transformed.var(axis=0) == 0).sum(), n_regions)
    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length,)))
def test_signal_extraction_with_maps_and_labels():
    """Signals extracted via maps and via labels must agree on equal regions."""
    vol_shape = (4, 5, 6)
    n_regions = 7
    length = 8

    # Labels image: 0 is background, 1..n_regions are actual regions.
    labels = list(range(n_regions + 1))
    labels_img = generate_labeled_regions(vol_shape, n_regions, labels=labels)
    labels_data = get_data(labels_img)

    # Build one binary map per non-background label.
    maps_data = np.zeros(vol_shape + (n_regions, ))
    for map_idx, label_value in enumerate(labels[1:]):
        maps_data[labels_data == label_value, map_idx] = 1
    maps_img = nibabel.Nifti1Image(maps_data, labels_img.affine)

    # Fake 4D data on the same grid as the labels.
    fmri_img, _ = generate_fake_fmri(shape=vol_shape, length=length,
                                     affine=labels_img.affine)

    # Without a mask, both extraction routes must produce identical signals.
    maps_signals, maps_labels = signal_extraction.img_to_signals_maps(
        fmri_img, maps_img)
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img)
    np.testing.assert_almost_equal(maps_signals, labels_signals)

    # Same comparison under a mask covering only regions 1, 2 and 5.
    mask_data = (labels_data == 1) + (labels_data == 2) + (labels_data == 5)
    mask_img = nibabel.Nifti1Image(mask_data.astype(np.int8),
                                   labels_img.affine)
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img, mask_img=mask_img)
    maps_signals, maps_labels = signal_extraction.img_to_signals_maps(
        fmri_img, maps_img, mask_img=mask_img)
    np.testing.assert_almost_equal(maps_signals, labels_signals)
    assert_true(maps_signals.shape[1] == n_regions)
    assert_true(maps_labels == list(range(len(maps_labels))))
    assert_true(labels_signals.shape == (length, n_regions))
    assert_true(labels_labels == labels[1:])

    # Round-trip back to image space (mostly a smoke test on shapes).
    labels_img_r = signal_extraction.signals_to_img_labels(
        labels_signals, labels_img, mask_img=mask_img)
    assert_true(labels_img_r.shape == vol_shape + (length, ))
    maps_img_r = signal_extraction.signals_to_img_maps(
        maps_signals, maps_img, mask_img=mask_img)
    assert_true(maps_img_r.shape == vol_shape + (length, ))

    # NaNs injected inside a region kept by the mask must survive extraction.
    in_region_2 = labels_data == 2
    indices = [axis_coords[:1] for axis_coords in np.where(in_region_2)]
    get_data(fmri_img)[indices + [slice(None)]] = float('nan')
    labels_signals, labels_labels = signal_extraction.img_to_signals_labels(
        fmri_img, labels_img, mask_img=mask_img)
    assert_true(np.all(np.isnan(labels_signals[:, labels_labels.index(2)])))
def test_nifti_maps_masker_2():
    # Test resampling in NiftiMapsMasker
    # NOTE(review): this duplicates an earlier definition of the same name in
    # this file (differing only in whitespace); this later copy shadows the
    # earlier one, so only one of them ever runs. Consider removing one copy.
    affine = np.eye(4)

    shape1 = (10, 11, 12)  # fmri
    shape2 = (13, 14, 15)  # mask
    shape3 = (16, 17, 18)  # maps

    n_regions = 9
    length = 3

    fmri11_img, _ = generate_random_img(shape1, affine=affine, length=length)
    _, mask22_img = generate_random_img(shape2, affine=affine, length=length)

    maps33_img, _ = \
        data_gen.generate_maps(shape3, n_regions, affine=affine)

    mask_img_4d = nibabel.Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),
                                      affine=np.diag((4, 4, 4, 1)))

    # verify that 4D mask arguments are refused
    masker = NiftiMapsMasker(maps33_img, mask_img=mask_img_4d)
    testing.assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 3D and you provided "
        "a 4D image.",
        masker.fit)

    # Test error checking
    assert_raises(ValueError, NiftiMapsMasker, maps33_img,
                  resampling_target="mask")
    assert_raises(ValueError, NiftiMapsMasker, maps33_img,
                  resampling_target="invalid")

    # Target: mask — after fit, mask keeps its own grid and the maps are
    # brought onto it (same affine, same 3D shape).
    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="mask")
    masker.fit()
    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   mask22_img.affine)
    assert_equal(masker.mask_img_.shape, mask22_img.shape)

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])

    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length, )))

    # Target: maps — symmetric case: maps keep their grid, the mask is
    # resampled onto it.
    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="maps")
    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine,
                                   maps33_img.affine)
    assert_equal(masker.maps_img_.shape, maps33_img.shape)

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])

    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length, )))

    # Test with clipped maps: mask does not contain all maps.
    # Shapes do matter in that case
    affine1 = np.eye(4)
    shape1 = (10, 11, 12)
    shape2 = (8, 9, 10)  # mask
    affine2 = np.diag((2, 2, 2, 1))  # just for mask
    shape3 = (16, 18, 20)  # maps

    n_regions = 9
    length = 21

    fmri11_img, _ = generate_random_img(shape1, affine=affine1, length=length)
    _, mask22_img = data_gen.generate_fake_fmri(shape2, length=1,
                                                affine=affine2)
    # Target: maps
    maps33_img, _ = \
        data_gen.generate_maps(shape3, n_regions, affine=affine1)

    masker = NiftiMapsMasker(maps33_img, mask_img=mask22_img,
                             resampling_target="maps")
    masker.fit()
    np.testing.assert_almost_equal(masker.maps_img_.affine, maps33_img.affine)
    assert_equal(masker.maps_img_.shape, maps33_img.shape)

    np.testing.assert_almost_equal(masker.mask_img_.affine,
                                   masker.maps_img_.affine)
    assert_equal(masker.mask_img_.shape, masker.maps_img_.shape[:3])

    transformed = masker.transform(fmri11_img)
    assert_equal(transformed.shape, (length, n_regions))
    # Some regions have been clipped. Resulting signal must be zero
    # NOTE(review): the assertion only checks that FEWER than n_regions
    # signals have zero variance (i.e. not all regions were clipped), which
    # is weaker than the comment above claims — confirm intent.
    assert_less((transformed.var(axis=0) == 0).sum(), n_regions)

    fmri11_img_r = masker.inverse_transform(transformed)
    np.testing.assert_almost_equal(fmri11_img_r.affine,
                                   masker.maps_img_.affine)
    assert_equal(fmri11_img_r.shape,
                 (masker.maps_img_.shape[:3] + (length, )))
def test_largest_cc_img():
    """Check extraction of the largest connected component, for niftis.

    Similar to the smooth_img tests for largest_connected_component_img;
    here only the features added on top of largest_connected_component
    are tested.
    """
    # Dimension of a single 3D img and of a list of 3D imgs must be kept.
    shapes = ((10, 11, 12), (13, 14, 15))
    regions = [1, 3]

    img1 = data_gen.generate_labeled_regions(shape=shapes[0],
                                             n_regions=regions[0])
    img2 = data_gen.generate_labeled_regions(shape=shapes[1],
                                             n_regions=regions[1])

    for create_files in (False, True):
        with testing.write_tmp_imgs(img1, img2,
                                    create_files=create_files) as imgs:
            # List of images as input
            out = largest_connected_component_img(imgs)
            assert_true(isinstance(out, list))
            assert_true(len(out) == 2)
            for o, s in zip(out, shapes):
                assert_true(o.shape == s)

            # Single image as input
            out = largest_connected_component_img(imgs[0])
            assert_true(isinstance(out, Nifti1Image))
            assert_true(out.shape == shapes[0])

    # A 4D Nifti must raise the right error.
    # BUG FIX: generate_fake_fmri returns an (img, mask) tuple; the whole
    # tuple used to be passed here, so DimensionError was only raised
    # incidentally while the tuple was iterated. Unpack the 4D image so the
    # error is asserted on the image itself.
    img_4D, _ = data_gen.generate_fake_fmri(shapes[0], length=17)
    assert_raises(DimensionError, largest_connected_component_img, img_4D)

    # Same checks with non-native endian data dtype.
    img1_change_dtype = nibabel.Nifti1Image(img1.get_data().astype('>f8'),
                                            affine=img1.affine)
    img2_change_dtype = nibabel.Nifti1Image(img2.get_data().astype('>f8'),
                                            affine=img2.affine)

    for create_files in (False, True):
        with testing.write_tmp_imgs(img1_change_dtype, img2_change_dtype,
                                    create_files=create_files) as imgs:
            # List of images as input
            out = largest_connected_component_img(imgs)
            assert_true(isinstance(out, list))
            assert_true(len(out) == 2)
            for o, s in zip(out, shapes):
                assert_true(o.shape == s)

            # Single image as input
            out = largest_connected_component_img(imgs[0])
            assert_true(isinstance(out, Nifti1Image))
            assert_true(out.shape == shapes[0])

    # The result must not depend on the input byte order.
    out_native = largest_connected_component_img(img1)
    out_non_native = largest_connected_component_img(img1_change_dtype)
    np.testing.assert_equal(out_native.get_data(),
                            out_non_native.get_data())
def _neurovault_file(parts, query): """Mocks the Neurovault API behind the `/media/images/` path.""" return generate_fake_fmri(length=1)[0]