def test_auto_mask():
    # This is mostly a smoke test
    data = np.zeros((9, 9, 9))
    data[2:-2, 2:-2, 2:-2] = 10
    img = Nifti1Image(data, np.eye(4))
    masker = MultiNiftiMasker(mask_args=dict(opening=0))
    # Check that if we have not fit the masker we get an intelligible
    # error
    assert_raises(ValueError, masker.transform, [[img, ]])
    # Check error return due to bad data format
    assert_raises(ValueError, masker.fit, img)
    # Smoke test the fit
    masker.fit([[img]])

    # Test mask intersection
    data2 = np.zeros((9, 9, 9))
    data2[1:-3, 1:-3, 1:-3] = 10
    img2 = Nifti1Image(data2, np.eye(4))
    masker.fit([[img, img2]])
    assert_array_equal(masker.mask_img_.get_data(),
                       np.logical_or(data, data2))
    # Smoke test the transform
    masker.transform([[img, ]])
    # It should also work with a 3D image
    masker.transform(img)

    # check exception when transform() called without prior fit()
    masker2 = MultiNiftiMasker(mask_img=img)
    assert_raises_regex(
        ValueError, 'has not been fitted. ',
        masker2.transform, img2)
def test_check_threshold():
    adjacency_matrix = np.array([[1., 2.],
                                 [2., 1.]])
    name = 'edge_threshold'
    calculate = 'fast_abs_percentile'
    # a few incorrectly formatted strings for 'edge_threshold'
    wrong_edge_thresholds = ['0.1', '10', '10.2.3%', 'asdf%']
    for wrong_edge_threshold in wrong_edge_thresholds:
        assert_raises_regex(ValueError,
                            '{0}.+should be a number followed by '
                            'the percent sign'.format(name),
                            check_threshold, wrong_edge_threshold,
                            adjacency_matrix, calculate, name)

    threshold = object()
    assert_raises_regex(TypeError,
                        '{0}.+should be either a number '
                        'or a string'.format(name),
                        check_threshold, threshold,
                        adjacency_matrix, calculate, name)

    # Check that the expected score is returned
    assert_true(1. < check_threshold("50%", adjacency_matrix,
                                     percentile_calculate=fast_abs_percentile,
                                     name='threshold') <= 2.)
def test_plot_surf_error():
    mesh = _generate_surf()
    rng = np.random.RandomState(0)

    # Wrong inputs for view or hemi
    assert_raises_regex(ValueError, 'view must be one of',
                        plot_surf, mesh, view='middle')
    assert_raises_regex(ValueError, 'hemi must be one of',
                        plot_surf, mesh, hemi='lft')

    # Wrong size of background image
    assert_raises_regex(ValueError,
                        'bg_map does not have the same number of vertices',
                        plot_surf, mesh,
                        bg_map=rng.randn(mesh[0].shape[0] - 1, ))

    # Wrong size of surface data
    assert_raises_regex(ValueError,
                        'surf_map does not have the same number of vertices',
                        plot_surf, mesh,
                        surf_map=rng.randn(mesh[0].shape[0] + 1, ))
    assert_raises_regex(ValueError,
                        'surf_map can only have one dimension',
                        plot_surf, mesh,
                        surf_map=rng.randn(mesh[0].shape[0], 2))
def test_masker_attributes_with_fit():
    # Test base module at sub-class
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)

    # Passing mask_img
    canica = CanICA(n_components=3, mask=mask_img, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == mask_img)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)

    # Passing masker
    masker = MultiNiftiMasker(mask_img=mask_img)
    canica = CanICA(n_components=3, mask=masker, random_state=0)
    canica.fit(data)
    assert_true(canica.mask_img_ == canica.masker_.mask_img_)

    canica = CanICA(mask=mask_img, n_components=3)
    assert_raises_regex(ValueError,
                        "Object has no components_ attribute. "
                        "This is probably because fit has not been called",
                        canica.transform, data)

    # Test that an error is raised when an empty list is provided
    assert_raises_regex(ValueError,
                        'Need one or more Niimg-like objects as input, '
                        'an empty list was given.',
                        canica.fit, [])

    # Test passing masker arguments to estimator
    canica = CanICA(n_components=3,
                    target_affine=np.eye(4),
                    target_shape=(6, 8, 10),
                    mask_strategy='background')
    canica.fit(data)
def test_load_surf_data_file_nii_gii():
    # test loading of fake data from gifti file
    filename_gii = tempfile.mktemp(suffix='.gii')
    if LooseVersion(nb.__version__) > LooseVersion('2.0.2'):
        darray = gifti.GiftiDataArray(data=np.zeros((20, )))
    else:
        # Avoid a bug in nibabel 1.2.0 where GiftiDataArray were not
        # initialized properly:
        darray = gifti.GiftiDataArray.from_array(np.zeros((20, )),
                                                 intent='t test')
    gii = gifti.GiftiImage(darrays=[darray])
    gifti.write(gii, filename_gii)
    assert_array_equal(load_surf_data(filename_gii), np.zeros((20, )))
    os.remove(filename_gii)

    # test loading of data from empty gifti file
    filename_gii_empty = tempfile.mktemp(suffix='.gii')
    gii_empty = gifti.GiftiImage()
    gifti.write(gii_empty, filename_gii_empty)
    assert_raises_regex(ValueError,
                        'must contain at least one data array',
                        load_surf_data, filename_gii_empty)
    os.remove(filename_gii_empty)

    # test loading of fake data from nifti file
    filename_nii = tempfile.mktemp(suffix='.nii')
    filename_niigz = tempfile.mktemp(suffix='.nii.gz')
    nii = nb.Nifti1Image(np.zeros((20, )), affine=None)
    nb.save(nii, filename_nii)
    nb.save(nii, filename_niigz)
    assert_array_equal(load_surf_data(filename_nii), np.zeros((20, )))
    assert_array_equal(load_surf_data(filename_niigz), np.zeros((20, )))
    os.remove(filename_nii)
    os.remove(filename_niigz)
def test_load_surf_data_file_glob():
    data2D = np.ones((20, 3))
    fnames = []
    for f in range(3):
        fnames.append(tempfile.mktemp(prefix='glob_%s_' % f, suffix='.gii'))
        data2D[:, f] *= f
        if LooseVersion(nb.__version__) > LooseVersion('2.0.2'):
            darray = gifti.GiftiDataArray(data=data2D[:, f])
        else:
            # Avoid a bug in nibabel 1.2.0 where GiftiDataArray were not
            # initialized properly:
            darray = gifti.GiftiDataArray.from_array(data2D[:, f],
                                                     intent='t test')
        gii = gifti.GiftiImage(darrays=[darray])
        gifti.write(gii, fnames[f])

    assert_array_equal(load_surf_data(os.path.join(os.path.dirname(fnames[0]),
                                                   "glob*.gii")),
                       data2D)

    # make one more gii file that has more than one dimension
    fnames.append(tempfile.mktemp(prefix='glob_3_', suffix='.gii'))
    if LooseVersion(nb.__version__) > LooseVersion('2.0.2'):
        darray1 = gifti.GiftiDataArray(data=np.ones((20, )))
        darray2 = gifti.GiftiDataArray(data=np.ones((20, )))
        darray3 = gifti.GiftiDataArray(data=np.ones((20, )))
    else:
        # Avoid a bug in nibabel 1.2.0 where GiftiDataArray were not
        # initialized properly:
        darray1 = gifti.GiftiDataArray.from_array(np.ones((20, )),
                                                  intent='t test')
        darray2 = gifti.GiftiDataArray.from_array(np.ones((20, )),
                                                  intent='t test')
        darray3 = gifti.GiftiDataArray.from_array(np.ones((20, )),
                                                  intent='t test')
    gii = gifti.GiftiImage(darrays=[darray1, darray2, darray3])
    gifti.write(gii, fnames[-1])

    data2D = np.concatenate((data2D, np.ones((20, 3))), axis=1)
    assert_array_equal(load_surf_data(os.path.join(os.path.dirname(fnames[0]),
                                                   "glob*.gii")),
                       data2D)

    # make one more gii file that has a different shape in axis=0
    fnames.append(tempfile.mktemp(prefix='glob_4_', suffix='.gii'))
    if LooseVersion(nb.__version__) > LooseVersion('2.0.2'):
        darray = gifti.GiftiDataArray(data=np.ones((15, 1)))
    else:
        # Avoid a bug in nibabel 1.2.0 where GiftiDataArray were not
        # initialized properly (note: the shape must be passed as a tuple):
        darray = gifti.GiftiDataArray.from_array(np.ones((15, 1)),
                                                 intent='t test')
    gii = gifti.GiftiImage(darrays=[darray])
    gifti.write(gii, fnames[-1])

    assert_raises_regex(ValueError,
                        'files must contain data with the same shape',
                        load_surf_data,
                        os.path.join(os.path.dirname(fnames[0]), "*.gii"))
    for f in fnames:
        os.remove(f)
def test_design_matrix0c():
    # test design matrix creation when regressors are provided manually
    tr = 1.0
    frame_times = np.linspace(0, 127 * tr, 128)
    ax = np.random.randn(128, 4)
    _, X, names = check_design_matrix(make_design_matrix(
        frame_times, drift_model="polynomial", drift_order=3, add_regs=ax))
    assert_almost_equal(X[:, 0], ax[:, 0])

    ax = np.random.randn(127, 4)
    assert_raises_regex(
        AssertionError,
        "Incorrect specification of additional regressors:.",
        make_design_matrix, frame_times, add_regs=ax)

    ax = np.random.randn(128, 4)
    assert_raises_regex(
        ValueError,
        "Incorrect number of additional regressor names.",
        make_design_matrix, frame_times, add_regs=ax, add_reg_names="")
def test_index_img():
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    testing.assert_raises_regex(TypeError,
                                "Input data has incompatible dimensionality: "
                                "Expected dimension is 4D and you provided "
                                "a 3D image.",
                                image.index_img, img_3d, 0)

    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = data_gen.generate_fake_fmri(affine=affine)

    fourth_dim_size = img_4d.shape[3]
    tested_indices = (list(range(fourth_dim_size)) +
                      [slice(2, 8, 2), [1, 2, 3, 2], [],
                       (np.arange(fourth_dim_size) % 3) == 1])
    for i in tested_indices:
        this_img = image.index_img(img_4d, i)
        expected_data_3d = img_4d.get_data()[..., i]
        assert_array_equal(this_img.get_data(), expected_data_3d)
        assert_array_equal(this_img.affine, img_4d.affine)

    for i in [fourth_dim_size, -fourth_dim_size - 1,
              [0, fourth_dim_size],
              np.repeat(True, fourth_dim_size + 1)]:
        testing.assert_raises_regex(
            IndexError,
            'out of bounds|invalid index|out of range|boolean index',
            image.index_img, img_4d, i)
def test_index_img():
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    testing.assert_raises_regex(TypeError, '4D Niimg-like',
                                image.index_img, img_3d, 0)

    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = testing.generate_fake_fmri(affine=affine)

    fourth_dim_size = img_4d.shape[3]
    tested_indices = (list(range(fourth_dim_size)) +
                      [slice(2, 8, 2), [1, 2, 3, 2], [],
                       (np.arange(fourth_dim_size) % 3) == 1])
    for i in tested_indices:
        this_img = image.index_img(img_4d, i)
        expected_data_3d = img_4d.get_data()[..., i]
        assert_array_equal(this_img.get_data(), expected_data_3d)
        assert_array_equal(this_img.get_affine(), img_4d.get_affine())

    for i in [fourth_dim_size, -fourth_dim_size - 1,
              [0, fourth_dim_size],
              np.repeat(True, fourth_dim_size + 1)]:
        testing.assert_raises_regex(
            IndexError,
            'out of bounds|invalid index|out of range',
            image.index_img, img_4d, i)
def test_base_decomposition():
    shape = (6, 8, 10, 5)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    data = []
    for i in range(8):
        this_data = rng.normal(size=shape)
        # Create fake activation to get non empty mask
        this_data[2:4, 2:4, 2:4, :] += 10
        data.append(nibabel.Nifti1Image(this_data, affine))
    mask = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)
    masker = MultiNiftiMasker(mask_img=mask)
    base_decomposition = BaseDecomposition(mask=masker, n_components=3)
    base_decomposition.fit(data)
    assert_true(base_decomposition.mask_img_ == mask)
    assert_true(base_decomposition.mask_img_ ==
                base_decomposition.masker_.mask_img_)

    # Testing fit on data
    masker = MultiNiftiMasker()
    base_decomposition = BaseDecomposition(mask=masker, n_components=3)
    base_decomposition.fit(data)
    assert_true(base_decomposition.mask_img_ ==
                base_decomposition.masker_.mask_img_)

    assert_raises_regex(ValueError,
                        "Object has no components_ attribute. "
                        "This may be because "
                        "BaseDecomposition is directly "
                        "being used.",
                        base_decomposition.transform, data)
    assert_raises_regex(ValueError,
                        'Need one or more Niimg-like objects as input, '
                        'an empty list was given.',
                        base_decomposition.fit, [])
def test_iter_check_niimgs():
    no_file_matching = "No files matching path: %s"
    affine = np.eye(4)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)
    img_2_4d = [[img_4d, img_4d]]

    for empty in ((), [], (i for i in ()), [i for i in ()]):
        assert_raises_regex(ValueError,
                            "Input niimgs list is empty.",
                            list, _iter_check_niimg(empty))

    nofile_path = "/tmp/nofile"
    assert_raises_regex(ValueError,
                        no_file_matching % nofile_path,
                        list, _iter_check_niimg(nofile_path))

    # Create a test file
    filename = tempfile.mktemp(prefix="nilearn_test",
                               suffix=".nii",
                               dir=None)
    img_4d.to_filename(filename)
    niimgs = list(_iter_check_niimg([filename]))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_4d).get_data())
    del img_4d
    del niimgs
    os.remove(filename)

    # Regular case
    niimgs = list(_iter_check_niimg(img_2_4d))
    assert_array_equal(niimgs[0].get_data(),
                       _utils.check_niimg(img_2_4d).get_data())
def test_nifti_maps_masker_overlap():
    # Test overlap handling in NiftiMapsMasker
    affine = np.eye(4)
    shape = (5, 5, 5)
    length = 10

    fmri_img, _ = generate_random_img(shape, affine=affine, length=length)
    non_overlapping_maps = np.zeros(shape + (2,))
    non_overlapping_maps[:2, :, :, 0] = 1.
    non_overlapping_maps[2:, :, :, 1] = 1.
    non_overlapping_maps_img = nibabel.Nifti1Image(non_overlapping_maps,
                                                   affine)

    overlapping_maps = np.zeros(shape + (2,))
    overlapping_maps[:3, :, :, 0] = 1.
    overlapping_maps[2:, :, :, 1] = 1.
    overlapping_maps_img = nibabel.Nifti1Image(overlapping_maps, affine)

    overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img,
                                         allow_overlap=True)
    overlapping_masker.fit_transform(fmri_img)
    overlapping_masker = NiftiMapsMasker(overlapping_maps_img,
                                         allow_overlap=True)
    overlapping_masker.fit_transform(fmri_img)

    non_overlapping_masker = NiftiMapsMasker(non_overlapping_maps_img,
                                             allow_overlap=False)
    non_overlapping_masker.fit_transform(fmri_img)
    non_overlapping_masker = NiftiMapsMasker(overlapping_maps_img,
                                             allow_overlap=False)
    assert_raises_regex(ValueError, 'Overlap detected',
                        non_overlapping_masker.fit_transform, fmri_img)
def test_check_parameters_transform():
    rng = np.random.RandomState(0)
    data = np.ones((10, 11, 12, 10))
    data[6, 7, 8] = 2
    data[9, 10, 11] = 3

    # single image
    fmri_img = nibabel.Nifti1Image(data, affine=np.eye(4))
    # single confound
    confounds = rng.randn(*(10, 3))
    # Tests to check whether imgs, confounds returned are
    # list or not. Pre-check in parameters to work for list
    # of multi images and multi confounds
    imgs, confounds, single_subject = _check_parameters_transform(
        fmri_img, confounds)
    assert_true(isinstance(imgs, (list, tuple)))
    assert_true(isinstance(confounds, (list, tuple)))
    assert_true(single_subject)

    # multi images
    fmri_imgs = [fmri_img, fmri_img, fmri_img]
    confounds_list = [confounds, confounds, confounds]
    imgs, confounds, _ = _check_parameters_transform(fmri_imgs,
                                                     confounds_list)
    assert_equal(imgs, fmri_imgs)
    assert_equal(confounds_list, confounds)

    # Test the error when the lengths of images and confounds do not match
    msg = ("Number of confounds given does not match with the "
           "given number of images")
    not_match_confounds_list = [confounds, confounds]
    assert_raises_regex(ValueError, msg, _check_parameters_transform,
                        fmri_imgs, not_match_confounds_list)
def test_fail_fetch_atlas_harvard_oxford():
    # specify non-existing atlas item
    assert_raises_regex(ValueError, "Invalid atlas name",
                        atlas.fetch_atlas_harvard_oxford, "not_inside")

    # specify existing atlas item
    target_atlas = "cort-maxprob-thr0-1mm"
    target_atlas_fname = "HarvardOxford-" + target_atlas + ".nii.gz"

    ho_dir = os.path.join(tmpdir, "fsl", "data", "atlases")
    os.makedirs(ho_dir)
    nifti_dir = os.path.join(ho_dir, "HarvardOxford")
    os.makedirs(nifti_dir)

    target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
    struct.load_mni152_template().to_filename(target_atlas_nii)

    dummy = open(os.path.join(ho_dir, "HarvardOxford-Cortical.xml"), "w")
    dummy.write("<?xml version='1.0' encoding='us-ascii'?> "
                "<metadata>"
                "</metadata>")
    dummy.close()

    ho = atlas.fetch_atlas_harvard_oxford(target_atlas, data_dir=tmpdir)

    assert_true(isinstance(nibabel.load(ho.maps), nibabel.Nifti1Image))
    assert_true(isinstance(ho.labels, np.ndarray))
    assert_true(len(ho.labels) > 0)
def test_vec_to_sym_matrix():
    # Check error if unsuitable size
    vec = np.ones(31)
    assert_raises_regex(ValueError, 'Vector of unsuitable shape',
                        vec_to_sym_matrix, vec)

    # Check error if given diagonal shape incompatible with vec
    vec = np.ones(3)
    diagonal = np.zeros(4)
    assert_raises_regex(ValueError, 'incompatible with vector',
                        vec_to_sym_matrix, vec, diagonal)

    # Check output value is correct
    vec = np.ones(6, )
    sym = np.array([[sqrt(2), 1., 1.],
                    [1., sqrt(2), 1.],
                    [1., 1., sqrt(2)]])
    assert_array_almost_equal(vec_to_sym_matrix(vec), sym)

    # Check output value is correct with separate diagonal
    vec = np.ones(3, )
    diagonal = np.ones(3)
    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)

    # Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec
    # when diagonal is included
    assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym)

    # when diagonal is discarded
    vec = sym_matrix_to_vec(sym, discard_diagonal=True)
    diagonal = np.diagonal(sym) / sqrt(2)
    assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
def test_get_dataset_dir():
    # testing folder creation under different environments, enforcing
    # a custom clean install
    os.environ.pop('NILEARN_DATA', None)
    os.environ.pop('NILEARN_SHARED_DATA', None)

    expected_base_dir = os.path.expanduser('~/nilearn_data')
    data_dir = datasets._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'test_nilearn_data')
    os.environ['NILEARN_DATA'] = expected_base_dir
    data_dir = datasets._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'env_data')
    os.environ['MY_DATA'] = expected_base_dir
    data_dir = datasets._get_dataset_dir('test', env_vars=['MY_DATA'],
                                         verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    no_write = os.path.join(tmpdir, 'no_write')
    os.makedirs(no_write)
    os.chmod(no_write, 0o400)

    # Verify that default is used if non writeable dir
    os.environ['MY_DATA'] = no_write
    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets._get_dataset_dir('test', env_vars=['MY_DATA'],
                                         verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    # Verify exception is raised on read-only directories
    assert_raises_regex(OSError, 'Permission denied',
                        datasets._get_dataset_dir, 'test', no_write,
                        verbose=0)

    # Verify exception for a path which exists and is a file
    test_file = os.path.join(tmpdir, 'some_file')
    with open(test_file, 'w') as out:
        out.write('abcfeg')
    assert_raises_regex(OSError, 'Not a directory',
                        datasets._get_dataset_dir, 'test', test_file,
                        verbose=0)
def test_fail_fetch_harvard_oxford():
    # specify non-existing atlas item
    assert_raises_regex(ValueError, 'Invalid atlas name',
                        datasets.fetch_harvard_oxford, 'not_inside')

    # specify existing atlas item
    target_atlas = 'cort-maxprob-thr0-1mm'
    target_atlas_fname = 'HarvardOxford-' + target_atlas + '.nii.gz'

    HO_dir = os.path.join(tmpdir, 'harvard_oxford')
    os.mkdir(HO_dir)
    nifti_dir = os.path.join(HO_dir, 'HarvardOxford')
    os.mkdir(nifti_dir)

    target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
    datasets.load_mni152_template().to_filename(target_atlas_nii)

    dummy = open(os.path.join(HO_dir, 'HarvardOxford-Cortical.xml'), 'w')
    dummy.write("<?xml version='1.0' encoding='us-ascii'?> "
                "<metadata>"
                "</metadata>")
    dummy.close()

    out_nii, arr = datasets.fetch_harvard_oxford(target_atlas,
                                                 data_dir=tmpdir)

    assert_true(isinstance(nibabel.load(out_nii), nibabel.Nifti1Image))
    assert_true(isinstance(arr, np.ndarray))
    assert_true(len(arr) > 0)
def test_plot_surf_error():
    # Axes3DSubplot has no attribute 'plot_trisurf' for older versions of
    # matplotlib
    if LooseVersion(matplotlib.__version__) <= LooseVersion('1.3.1'):
        raise SkipTest

    mesh = _generate_surf()
    rng = np.random.RandomState(0)

    # Wrong inputs for view or hemi
    assert_raises_regex(ValueError, 'view must be one of',
                        plot_surf, mesh, view='middle')
    assert_raises_regex(ValueError, 'hemi must be one of',
                        plot_surf, mesh, hemi='lft')

    # Wrong size of background image
    assert_raises_regex(ValueError,
                        'bg_map does not have the same number of vertices',
                        plot_surf, mesh,
                        bg_map=rng.randn(mesh[0].shape[0] - 1, ))

    # Wrong size of surface data
    assert_raises_regex(ValueError,
                        'surf_map does not have the same number of vertices',
                        plot_surf, mesh,
                        surf_map=rng.randn(mesh[0].shape[0] + 1, ))
    assert_raises_regex(ValueError,
                        'surf_map can only have one dimension',
                        plot_surf, mesh,
                        surf_map=rng.randn(mesh[0].shape[0], 2))
def test_small_radius():
    affine = np.eye(4)
    shape = (3, 3, 3)

    data = np.random.random(shape)
    mask = np.zeros(shape)
    mask[1, 1, 1] = 1
    mask[2, 2, 2] = 1
    affine = np.eye(4) * 1.2
    seed = (1.4, 1.4, 1.4)

    masker = NiftiSpheresMasker([seed], radius=0.1,
                                mask_img=nibabel.Nifti1Image(mask, affine))
    masker.fit_transform(nibabel.Nifti1Image(data, affine))

    # Test if masking is taken into account
    mask[1, 1, 1] = 0
    mask[1, 1, 0] = 1

    masker = NiftiSpheresMasker([seed], radius=0.1,
                                mask_img=nibabel.Nifti1Image(mask, affine))
    assert_raises_regex(ValueError, 'Sphere around seed #0 is empty',
                        masker.fit_transform,
                        nibabel.Nifti1Image(data, affine))

    masker = NiftiSpheresMasker([seed], radius=1.6,
                                mask_img=nibabel.Nifti1Image(mask, affine))
    masker.fit_transform(nibabel.Nifti1Image(data, affine))
def test_fail_fetch_atlas_harvard_oxford():
    # specify non-existing atlas item
    assert_raises_regex(ValueError, 'Invalid atlas name',
                        atlas.fetch_atlas_harvard_oxford, 'not_inside')

    # specify existing atlas item
    target_atlas = 'cort-maxprob-thr0-1mm'
    target_atlas_fname = 'HarvardOxford-' + target_atlas + '.nii.gz'

    ho_dir = os.path.join(tst.tmpdir, 'fsl', 'data', 'atlases')
    os.makedirs(ho_dir)
    nifti_dir = os.path.join(ho_dir, 'HarvardOxford')
    os.makedirs(nifti_dir)

    target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)
    struct.load_mni152_template().to_filename(target_atlas_nii)

    dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w')
    dummy.write("<?xml version='1.0' encoding='us-ascii'?> "
                "<metadata>"
                "</metadata>")
    dummy.close()

    ho = atlas.fetch_atlas_harvard_oxford(target_atlas, data_dir=tst.tmpdir)

    assert_true(isinstance(nibabel.load(ho.maps), nibabel.Nifti1Image))
    assert_true(isinstance(ho.labels, np.ndarray))
    assert_true(len(ho.labels) > 0)
def test_check_niimg():
    affine = np.eye(4)
    img_3d = Nifti1Image(np.ones((10, 10, 10)), affine)
    img_4d = Nifti1Image(np.ones((10, 10, 10, 4)), affine)

    img_3_3d = [[[img_3d, img_3d]]]
    img_2_4d = [[img_4d, img_4d]]

    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 2D and you provided "
        "a list of list of list of 3D images \(6D\)",
        _utils.check_niimg, img_3_3d, ensure_ndim=2)

    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 4D and you provided "
        "a list of list of 4D images \(6D\)",
        _utils.check_niimg, img_2_4d, ensure_ndim=4)

    # check that the data dtype is preserved with dtype='auto'
    img_3d_check = _utils.check_niimg(img_3d, dtype='auto')
    assert_equal(img_3d.get_data().dtype.kind,
                 img_3d_check.get_data().dtype.kind)

    img_4d_check = _utils.check_niimg(img_4d, dtype='auto')
    assert_equal(img_4d.get_data().dtype.kind,
                 img_4d_check.get_data().dtype.kind)
def test_get_dataset_dir():
    # testing folder creation under different environments, enforcing
    # a custom clean install
    os.environ.pop('NILEARN_DATA', None)
    os.environ.pop('NILEARN_SHARED_DATA', None)

    expected_base_dir = os.path.expanduser('~/nilearn_data')
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tst.tmpdir, 'test_nilearn_data')
    os.environ['NILEARN_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tst.tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tst.tmpdir, 'env_data')
    expected_dataset_dir = os.path.join(expected_base_dir, 'test')
    data_dir = utils._get_dataset_dir(
        'test', default_paths=[expected_dataset_dir], verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    no_write = os.path.join(tst.tmpdir, 'no_write')
    os.makedirs(no_write)
    os.chmod(no_write, 0o400)

    expected_base_dir = os.path.join(tst.tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', default_paths=[no_write],
                                      verbose=0)
    # Non writeable dir is returned because dataset may be in there.
    assert_equal(data_dir, no_write)
    assert os.path.exists(data_dir)
    # Set back write permissions in order to be able to remove the file
    os.chmod(no_write, 0o600)
    shutil.rmtree(data_dir)

    # Verify exception for a path which exists and is a file
    test_file = os.path.join(tst.tmpdir, 'some_file')
    with open(test_file, 'w') as out:
        out.write('abcfeg')
    assert_raises_regex(OSError,
                        'Nilearn tried to store the dataset '
                        'in the following directories, but',
                        utils._get_dataset_dir, 'test', test_file,
                        verbose=0)
def test_invalid_threshold_strategies():
    maps, _ = generate_maps((6, 8, 10), n_regions=1)

    extract_strategy_check = RegionExtractor(maps, thresholding_strategy='n_')
    # The valid strategies ('ratio_n_voxels', 'img_value', 'percentile')
    # appear in the error message, but are not interpolated into the
    # pattern: the list brackets would be interpreted as regex syntax.
    assert_raises_regex(ValueError,
                        "'thresholding_strategy' should be either of "
                        "these",
                        extract_strategy_check.fit)
def test_concat_niimgs():
    # create images different in affine and 3D/4D shape
    shape = (10, 11, 12)
    affine = np.eye(4)
    img1 = Nifti1Image(np.ones(shape), affine)
    img2 = Nifti1Image(np.ones(shape), 2 * affine)
    img3 = Nifti1Image(np.zeros(shape), affine)
    img4d = Nifti1Image(np.ones(shape + (2, )), affine)

    shape2 = (12, 11, 10)
    img1b = Nifti1Image(np.ones(shape2), affine)

    shape3 = (11, 22, 33)
    img1c = Nifti1Image(np.ones(shape3), affine)

    # check basic concatenation with equal shape/affine
    concatenated = _utils.concat_niimgs((img1, img3, img1),
                                        accept_4d=False)
    concatenate_true = np.ones(shape + (3,))

    # Smoke-test the accept_4d
    assert_raises_regex(ValueError, 'image',
                        _utils.concat_niimgs, [img1, img4d])
    concatenated = _utils.concat_niimgs([img1, img4d], accept_4d=True)
    np.testing.assert_equal(concatenated.get_data(), concatenate_true,
                            verbose=0)

    # smoke-test auto_resample
    concatenated = _utils.concat_niimgs((img1, img1b, img1c),
                                        accept_4d=False, auto_resample=True)
    assert_true(concatenated.shape == img1.shape + (3, ))

    # check error for non-forced but necessary resampling
    assert_raises_regex(ValueError, 'different from reference affine',
                        _utils.concat_niimgs, [img1, img2],
                        accept_4d=False)

    # Smoke-test the 4d parsing
    concatenated = _utils.concat_niimgs([img1, img4d], accept_4d=True)
    assert_equal(concatenated.shape[3], 3)

    # test list of 4D niimgs as input
    _, tmpimg1 = tempfile.mkstemp(suffix='.nii')
    _, tmpimg2 = tempfile.mkstemp(suffix='.nii')
    try:
        nibabel.save(img1, tmpimg1)
        nibabel.save(img3, tmpimg2)
        concatenated = _utils.concat_niimgs([tmpimg1, tmpimg2],
                                            accept_4d=False)
        assert_array_equal(
            concatenated.get_data()[..., 0], img1.get_data())
        assert_array_equal(
            concatenated.get_data()[..., 1], img3.get_data())
    finally:
        _remove_if_exists(tmpimg1)
        _remove_if_exists(tmpimg2)
def test_invalid_in_display_mode_cut_coords_all_plots():
    img = _generate_img()

    for plot_func in [plot_img, plot_anat, plot_roi, plot_epi,
                      plot_stat_map, plot_prob_atlas, plot_glass_brain]:
        assert_raises_regex(ValueError,
                            "The input given for display_mode='ortho' needs "
                            "to be a list of 3d world coordinates.",
                            plot_func, img, display_mode='ortho',
                            cut_coords=2)
def test_get_colorbar_and_data_ranges_with_vmin():
    data = np.array([[-.5, 1., np.nan],
                     [0., np.nan, -.2],
                     [1.5, 2.5, 3.]])

    assert_raises_regex(ValueError,
                        'does not accept a "vmin" argument',
                        _get_colorbar_and_data_ranges,
                        data, vmax=None,
                        symmetric_cbar=True,
                        kwargs={'vmin': 1.})
def test_space_net_no_crash_not_fitted():
    """Regression test."""
    iris = load_iris()
    X, y = iris.data, iris.target
    X, mask = to_niimgs(X, [2, 2, 2])
    for model in [SpaceNetRegressor, SpaceNetClassifier]:
        assert_raises_regex(RuntimeError,
                            "This %s instance is not fitted yet" % (
                                model.__name__),
                            model().predict, X)
        model(mask=mask, alphas=1.).fit(X, y).predict(X)
def test_filter_and_mask():
    data = np.zeros([20, 30, 40, 5])
    mask = np.zeros([20, 30, 40, 2])
    mask[10, 15, 20, :] = 1

    data_img = nibabel.Nifti1Image(data, np.eye(4))
    mask_img = nibabel.Nifti1Image(mask, np.eye(4))

    assert_raises_regex(DimensionError, "Data must be a 3D",
                        filter_and_mask, data_img, mask_img, {})
def test_filter_and_mask():
    data = np.zeros([20, 30, 40, 5])
    mask = np.zeros([20, 30, 40, 2])
    mask[10, 15, 20, :] = 1

    data_img = nibabel.Nifti1Image(data, np.eye(4))
    mask_img = nibabel.Nifti1Image(mask, np.eye(4))

    assert_raises_regex(TypeError, "A 3D image is expected",
                        filter_and_mask, data_img, mask_img, {})
def test_invalid_in_display_mode_tiled_cut_coords_single_all_plots():
    img = _generate_img()

    for plot_func in [plot_img, plot_anat, plot_roi, plot_epi,
                      plot_stat_map, plot_prob_atlas]:
        assert_raises_regex(ValueError,
                            "The input given for display_mode='tiled' needs "
                            "to be a list of 3d world coordinates.",
                            plot_func, img, display_mode='tiled',
                            cut_coords=2)
def test_invalid_in_display_mode_tiled_cut_coords_all_plots():
    img = _generate_img()

    for plot_func in [plot_img, plot_anat, plot_roi, plot_epi,
                      plot_stat_map, plot_prob_atlas]:
        assert_raises_regex(ValueError,
                            "The number cut_coords passed does not "
                            "match the display_mode",
                            plot_func, img, display_mode='tiled',
                            cut_coords=(2, 2))
def test_find_parcellation_cut_coords():
    data = np.zeros((100, 100, 100))
    x_map_a, y_map_a, z_map_a = (10, 10, 10)
    x_map_b, y_map_b, z_map_b = (30, 30, 30)
    x_map_c, y_map_c, z_map_c = (50, 50, 50)
    # Defining 3 parcellations
    data[x_map_a - 10:x_map_a + 10, y_map_a - 10:y_map_a + 10,
         z_map_a - 10:z_map_a + 10] = 1
    data[x_map_b - 10:x_map_b + 10, y_map_b - 10:y_map_b + 10,
         z_map_b - 10:z_map_b + 10] = 2
    data[x_map_c - 10:x_map_c + 10, y_map_c - 10:y_map_c + 10,
         z_map_c - 10:z_map_c + 10] = 3

    # Number of labels
    labels = np.unique(data)
    labels = labels[labels != 0]
    n_labels = len(labels)

    # identity affine
    affine = np.eye(4)
    img = nibabel.Nifti1Image(data, affine)

    # find coordinates with return_label_names=True
    coords, labels_list = find_parcellation_cut_coords(
        img, return_label_names=True)

    # Check outputs
    assert_equal((n_labels, 3), coords.shape)
    # number of labels in data should equal number of labels list returned
    assert_equal(n_labels, len(labels_list))
    # Labels numbered should match the numbers in returned labels list
    assert_equal(list(labels), labels_list)

    # Match with the number of non-overlapping labels
    np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
                               (x_map_a, y_map_a, z_map_a), rtol=6e-2)
    np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
                               (x_map_b, y_map_b, z_map_b), rtol=6e-2)
    np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
                               (x_map_c, y_map_c, z_map_c), rtol=6e-2)

    # non-trivial affine
    affine = np.diag([1 / 2., 1 / 3., 1 / 4., 1.])
    img = nibabel.Nifti1Image(data, affine)
    coords = find_parcellation_cut_coords(img)
    assert_equal((n_labels, 3), coords.shape)
    np.testing.assert_allclose((coords[0][0], coords[0][1], coords[0][2]),
                               (x_map_a / 2., y_map_a / 3., z_map_a / 4.),
                               rtol=6e-2)
    np.testing.assert_allclose((coords[1][0], coords[1][1], coords[1][2]),
                               (x_map_b / 2., y_map_b / 3., z_map_b / 4.),
                               rtol=6e-2)
    np.testing.assert_allclose((coords[2][0], coords[2][1], coords[2][2]),
                               (x_map_c / 2., y_map_c / 3., z_map_c / 4.),
                               rtol=6e-2)

    # test that an error is raised with the wrong label_hemisphere name 'lft'
    error_msg = ("Invalid label_hemisphere name:lft. Should be one of "
                 "these 'left' or 'right'.")
    assert_raises_regex(ValueError, error_msg, find_parcellation_cut_coords,
                        labels_img=img, label_hemisphere='lft')
def test_get_dataset_dir():
    # testing folder creation under different environments, enforcing
    # a custom clean install
    os.environ.pop('NILEARN_DATA', None)
    os.environ.pop('NILEARN_SHARED_DATA', None)

    expected_base_dir = os.path.expanduser('~/nilearn_data')
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'test_nilearn_data')
    os.environ['NILEARN_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    expected_base_dir = os.path.join(tmpdir, 'env_data')
    expected_dataset_dir = os.path.join(expected_base_dir, 'test')
    data_dir = utils._get_dataset_dir(
        'test', default_paths=[expected_dataset_dir], verbose=0)
    assert_equal(data_dir, os.path.join(expected_base_dir, 'test'))
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    no_write = os.path.join(tmpdir, 'no_write')
    os.makedirs(no_write)
    os.chmod(no_write, 0o400)

    expected_base_dir = os.path.join(tmpdir, 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = utils._get_dataset_dir('test', default_paths=[no_write],
                                      verbose=0)
    # Non writeable dir is returned because dataset may be in there.
    assert_equal(data_dir, no_write)
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)

    # Verify exception for a path which exists and is a file
    test_file = os.path.join(tmpdir, 'some_file')
    with open(test_file, 'w') as out:
        out.write('abcfeg')
    assert_raises_regex(OSError, 'Not a directory',
                        utils._get_dataset_dir, 'test', test_file,
                        verbose=0)
def test_load_surf_data_file_error():
    # test if files with unexpected suffixes raise errors
    data = np.zeros((20, ))
    wrong_suff = ['.vtk', '.obj', '.mnc', '.txt']
    for suff in wrong_suff:
        filename_wrong = tempfile.mktemp(suffix=suff)
        np.savetxt(filename_wrong, data)
        assert_raises_regex(ValueError,
                            'input type is not recognized',
                            load_surf_data, filename_wrong)
        os.remove(filename_wrong)
def test_fail_fetch_atlas_harvard_oxford():
    # specify non-existing atlas item
    assert_raises_regex(ValueError, 'Invalid atlas name',
                        atlas.fetch_atlas_harvard_oxford, 'not_inside')

    # specify existing atlas item
    target_atlas = 'cort-maxprob-thr0-1mm'
    target_atlas_fname = 'HarvardOxford-' + target_atlas + '.nii.gz'

    ho_dir = os.path.join(tst.tmpdir, 'fsl', 'data', 'atlases')
    os.makedirs(ho_dir)
    nifti_dir = os.path.join(ho_dir, 'HarvardOxford')
    os.makedirs(nifti_dir)

    target_atlas_nii = os.path.join(nifti_dir, target_atlas_fname)

    # Create false atlas
    atlas_data = np.zeros((10, 10, 10), dtype=int)

    # Create an interhemispheric map
    atlas_data[:, :2, :] = 1

    # Create a left map
    atlas_data[:5, 3:5, :] = 2

    # Create a right map, with one voxel on the left side
    atlas_data[5:, 7:9, :] = 3
    atlas_data[4, 7, 0] = 3

    nibabel.Nifti1Image(atlas_data, np.eye(4) * 3).to_filename(
        target_atlas_nii)

    dummy = open(os.path.join(ho_dir, 'HarvardOxford-Cortical.xml'), 'w')
    dummy.write("<?xml version='1.0' encoding='us-ascii'?>\n"
                "<data>\n"
                '<label index="0" x="48" y="94" z="35">R1</label>\n'
                '<label index="1" x="25" y="70" z="32">R2</label>\n'
                '<label index="2" x="33" y="73" z="63">R3</label>\n'
                "</data>")
    dummy.close()

    ho = atlas.fetch_atlas_harvard_oxford(target_atlas,
                                          data_dir=tst.tmpdir,
                                          symmetric_split=True)

    assert_true(isinstance(ho.maps, nibabel.Nifti1Image))
    assert_true(isinstance(ho.labels, list))
    assert_equal(len(ho.labels), 5)
    assert_equal(ho.labels[0], "Background")
    assert_equal(ho.labels[1], "R1, left part")
    assert_equal(ho.labels[2], "R1, right part")
    assert_equal(ho.labels[3], "R2")
    assert_equal(ho.labels[4], "R3")
def test_raises_upon_3x3_affine_and_no_shape():
    img = Nifti1Image(np.zeros([8, 9, 10]),
                      affine=np.eye(4))
    exception = ValueError
    message = ("Given target shape without anchor "
               "vector: Affine shape should be \(4, 4\) and "
               "not \(3, 3\)")
    testing.assert_raises_regex(
        exception, message,
        resample_img, img, target_affine=np.eye(3) * 2,
        target_shape=(10, 10, 10))
def test_multi_pca():
    # Smoke test the MultiPCA
    # XXX: this is mostly a smoke test
    shape = (6, 8, 10, 5)
    affine = np.eye(4)
    rng = np.random.RandomState(0)

    # Create a "multi-subject" dataset
    data = []
    for i in range(8):
        this_data = rng.normal(size=shape)
        # Create fake activation to get non empty mask
        this_data[2:4, 2:4, 2:4, :] += 10
        data.append(nibabel.Nifti1Image(this_data, affine))

    mask_img = nibabel.Nifti1Image(np.ones(shape[:3], dtype=np.int8), affine)

    multi_pca = MultiPCA(mask=mask_img, n_components=3, random_state=0)

    # Test that the components are the same if we put twice the same data,
    # and that fit output is deterministic
    components1 = multi_pca.fit(data).components_
    components2 = multi_pca.fit(data).components_
    components3 = multi_pca.fit(2 * data).components_
    np.testing.assert_array_equal(components1, components2)
    np.testing.assert_array_almost_equal(components1, components3)

    # Smoke test fit with 'confounds' argument
    confounds = [np.arange(10).reshape(5, 2)] * 8
    multi_pca.fit(data, confounds=confounds)

    # Smoke test that multi_pca also works with single subject data
    multi_pca.fit(data[0])

    # Check that asking for too few components raises a ValueError
    multi_pca = MultiPCA()
    assert_raises(ValueError, multi_pca.fit, data[:2])

    # Smoke test the use of a masker and without CCA
    multi_pca = MultiPCA(mask=MultiNiftiMasker(mask_args=dict(opening=0)),
                         do_cca=False, n_components=3)
    multi_pca.fit(data[:2])

    # Smoke test the transform and inverse_transform
    multi_pca.inverse_transform(multi_pca.transform(data[-2:]))

    # Smoke test to fit with no img
    assert_raises(TypeError, multi_pca.fit)

    multi_pca = MultiPCA(mask=mask_img, n_components=3)
    assert_raises_regex(
        ValueError,
        "Object has no components_ attribute. "
        "This is probably because fit has not been called",
        multi_pca.transform, data)
def test_plot_surf_roi_error():
    mesh = _generate_surf()
    rng = np.random.RandomState(0)
    roi_idx = rng.randint(0, mesh[0].shape[0], size=5)

    # Wrong input
    assert_raises_regex(ValueError,
                        'roi_map does not have the same number of vertices',
                        plot_surf_roi, mesh, roi_map=roi_idx)
def test_invalid_thresholds_in_threshold_maps_ratio():
    maps, _ = generate_maps((10, 11, 12), n_regions=2)

    for invalid_threshold in ['80%', 'auto', -1.0]:
        assert_raises_regex(ValueError,
                            "threshold given as ratio to the number of "
                            "voxels must be Real number and should be "
                            "positive and between 0 and total number of "
                            "maps i.e. n_maps={0}. "
                            "You provided {1}".format(maps.shape[-1],
                                                      invalid_threshold),
                            _threshold_maps_ratio,
                            maps, threshold=invalid_threshold)
def test_threshold_as_none_and_string_cases():
    maps, _ = generate_maps((6, 8, 10), n_regions=1)

    extract_thr_none_check = RegionExtractor(maps, threshold=None)
    assert_raises_regex(ValueError,
                        "The given input to threshold is not valid.",
                        extract_thr_none_check.fit)
    extract_thr_string_check = RegionExtractor(maps, threshold='30%')
    assert_raises_regex(ValueError,
                        "The given input to threshold is not valid.",
                        extract_thr_string_check.fit)
def test_get_colorbar_and_data_ranges_with_vmin():
    affine = np.eye(4)
    data = np.array([[-.5, 1., np.nan],
                     [0., np.nan, -.2],
                     [1.5, 2.5, 3.]])
    img = nibabel.Nifti1Image(data, affine)

    assert_raises_regex(ValueError,
                        'does not accept a "vmin" argument',
                        _get_colorbar_and_data_ranges,
                        img, vmax=None,
                        symmetric_cbar=True,
                        kwargs={'vmin': 1.})
def test_targets_in_y_space_net_regressor():
    # This tests that an error is raised when y contains only a single
    # unique target value.
    iris = load_iris()
    X, _ = iris.data, iris.target
    y = np.ones((iris.target.shape))

    imgs, mask = to_niimgs(X, (2, 2, 2))
    regressor = SpaceNetRegressor(mask=mask)
    assert_raises_regex(ValueError,
                        "The given input y must have atleast 2 targets",
                        regressor.fit, imgs, y)
def test_invalids_extract_types_in_connected_regions():
    maps, _ = generate_maps((10, 11, 12), n_regions=2)
    valid_names = ['connected_components', 'local_regions']

    # test that the expected error is raised when invalid inputs are
    # given to extract_type in the connected_regions function
    message = ("'extract_type' should be {0}")
    for invalid_extract_type in ['connect_region', 'local_regios']:
        assert_raises_regex(ValueError,
                            message.format(valid_names),
                            connected_regions,
                            maps, extract_type=invalid_extract_type)
def test_load_surf_mesh_file_error():
    if LooseVersion(nb.__version__) <= LooseVersion('1.2.0'):
        raise SkipTest

    # test if files with unexpected suffixes raise errors
    mesh = _generate_surf()
    wrong_suff = ['.vtk', '.obj', '.mnc', '.txt']
    for suff in wrong_suff:
        filename_wrong = tempfile.mktemp(suffix=suff)
        nb.freesurfer.write_geometry(filename_wrong, mesh[0], mesh[1])
        assert_raises_regex(ValueError,
                            'input type is not recognized',
                            load_surf_data, filename_wrong)
        os.remove(filename_wrong)
def test_filter_and_mask():
    data = np.zeros([20, 30, 40, 5])
    mask = np.zeros([20, 30, 40, 2])
    mask[10, 15, 20, :] = 1

    data_img = nibabel.Nifti1Image(data, np.eye(4))
    mask_img = nibabel.Nifti1Image(mask, np.eye(4))

    masker = NiftiMasker()
    params = get_params(NiftiMasker, masker)

    assert_raises_regex(DimensionError, "Data must be a 3D",
                        filter_and_mask, data_img, mask_img, params)
def test_coregister():
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    func_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'func.nii.gz')
    mean_func_file = os.path.join(tst.tmpdir, 'mean_func.nii.gz')
    mean_img(func_file).to_filename(mean_func_file)

    bunch = func.coregister(anat_file, mean_func_file, tst.tmpdir,
                            slice_timing=False, verbose=False,
                            reorient_only=True)
    assert_true(_check_same_fov(nibabel.load(bunch.coreg_func_),
                                nibabel.load(bunch.coreg_anat_)))
    assert_true(_check_same_obliquity(bunch.coreg_anat_,
                                      bunch.coreg_func_))
    assert_true(os.path.isfile(bunch.coreg_transform_))
    assert_less(0, len(bunch.coreg_warps_))
    # Last slice in functional is without signal
    assert_true(bunch.coreg_warps_[-1] is None)
    for warp_file in bunch.coreg_warps_[:-1]:
        assert_true(os.path.isfile(warp_file))

    # Check environment variables setting
    assert_raises_regex(RuntimeError, "3dcopy",
                        func.coregister, anat_file, mean_func_file,
                        tst.tmpdir, slice_timing=False, verbose=False,
                        reorient_only=True, AFNI_DECONFLICT='NO')

    # Check caching does not change the paths
    bunch2 = func.coregister(anat_file, mean_func_file, tst.tmpdir,
                             slice_timing=False, verbose=False,
                             caching=True, reorient_only=True,
                             AFNI_DECONFLICT='OVERWRITE')
    assert_equal(bunch.coreg_func_, bunch2.coreg_func_)
    assert_equal(bunch.coreg_anat_, bunch2.coreg_anat_)
    assert_equal(bunch.coreg_transform_, bunch2.coreg_transform_)
    for warp_file, warp_file2 in zip(bunch.coreg_warps_,
                                     bunch2.coreg_warps_):
        assert_equal(warp_file, warp_file2)
def test_resampling_error_checks():
    shape = (3, 2, 5, 2)
    target_shape = (5, 3, 2)
    affine = np.eye(4)
    data = np.random.randint(0, 10, shape)
    img = Nifti1Image(data, affine)

    # Correct parameters: no exception
    resample_img(img, target_shape=target_shape, target_affine=affine)
    resample_img(img, target_affine=affine)

    with testing.write_tmp_imgs(img) as filename:
        resample_img(filename, target_shape=target_shape,
                     target_affine=affine)

    # Missing parameter
    assert_raises(ValueError, resample_img, img,
                  target_shape=target_shape)

    # Invalid shape
    assert_raises(ValueError, resample_img, img,
                  target_shape=(2, 3), target_affine=affine)

    # Invalid interpolation
    interpolation = 'an_invalid_interpolation'
    pattern = "interpolation must be either.+{0}".format(interpolation)
    testing.assert_raises_regex(ValueError, pattern,
                                resample_img, img,
                                target_shape=target_shape,
                                target_affine=affine,
                                interpolation="an_invalid_interpolation")

    # Noop
    target_shape = shape[:3]

    img_r = resample_img(img, copy=False)
    assert_equal(img_r, img)

    img_r = resample_img(img, copy=True)
    assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))
    np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
    np.testing.assert_almost_equal(img_r.affine, img.affine)

    img_r = resample_img(img, target_affine=affine,
                         target_shape=target_shape, copy=False)
    assert_equal(img_r, img)

    img_r = resample_img(img, target_affine=affine,
                         target_shape=target_shape, copy=True)
    assert_false(np.may_share_memory(img_r.get_data(), img.get_data()))
    np.testing.assert_almost_equal(img_r.get_data(), img.get_data())
    np.testing.assert_almost_equal(img_r.affine, img.affine)
def test_error_messages_connected_label_regions():
    shape = (13, 11, 12)
    affine = np.eye(4)
    n_regions = 2
    labels_img = testing.generate_labeled_regions(shape, affine=affine,
                                                  n_regions=n_regions)
    assert_raises_regex(ValueError,
                        "Expected 'min_size' to be specified as integer.",
                        connected_label_regions,
                        labels_img=labels_img, min_size='a')
    assert_raises_regex(ValueError,
                        "'connect_diag' must be specified as True or False.",
                        connected_label_regions,
                        labels_img=labels_img, connect_diag=None)
def test_check_niimg_3d():
    # check error when a non-image object is passed
    assert_raises_regex(TypeError, 'nibabel format',
                        _utils.check_niimg, 0)

    # check error when an empty object is passed
    assert_raises_regex(TypeError, 'empty object',
                        _utils.check_niimg, [])

    # Check that a filename does not raise an error
    data = np.zeros((40, 40, 40, 1))
    data[20, 20, 20] = 1
    data_img = Nifti1Image(data, np.eye(4))

    with testing.write_tmp_imgs(data_img, create_files=True) as filename:
        _utils.check_niimg_3d(filename)
def test_concat_niimgs():
    # create images different in affine and 3D/4D shape
    shape = (10, 11, 12)
    affine = np.eye(4)
    img1 = Nifti1Image(np.ones(shape), affine)
    img2 = Nifti1Image(np.ones(shape), 2 * affine)
    img3 = Nifti1Image(np.zeros(shape), affine)
    img4d = Nifti1Image(np.ones(shape + (2, )), affine)

    shape2 = (12, 11, 10)
    img1b = Nifti1Image(np.ones(shape2), affine)

    shape3 = (11, 22, 33)
    img1c = Nifti1Image(np.ones(shape3), affine)

    # Regression test for #601. Dimensionality of first image was not checked
    # properly.
    _dimension_error_msg = ("Input data has incompatible dimensionality: "
                            "Expected dimension is 4D and you provided "
                            "a list of 4D images \(5D\)")
    assert_raises_regex(DimensionError, _dimension_error_msg,
                        _utils.concat_niimgs, [img4d], ensure_ndim=4)

    # check basic concatenation with equal shape/affine
    concatenated = _utils.concat_niimgs((img1, img3, img1))

    assert_raises_regex(DimensionError, _dimension_error_msg,
                        _utils.concat_niimgs, [img1, img4d])

    # smoke-test auto_resample
    concatenated = _utils.concat_niimgs((img1, img1b, img1c),
                                        auto_resample=True)
    assert_true(concatenated.shape == img1.shape + (3, ))

    # check error for non-forced but necessary resampling
    assert_raises_regex(ValueError, 'Field of view of image',
                        _utils.concat_niimgs, [img1, img2],
                        auto_resample=False)

    # test list of 4D niimgs as input
    tmpimg1 = tempfile.mktemp(suffix='.nii')
    tmpimg2 = tempfile.mktemp(suffix='.nii')
    try:
        nibabel.save(img1, tmpimg1)
        nibabel.save(img3, tmpimg2)
        concatenated = _utils.concat_niimgs([tmpimg1, tmpimg2])
        assert_array_equal(concatenated.get_data()[..., 0], img1.get_data())
        assert_array_equal(concatenated.get_data()[..., 1], img3.get_data())
    finally:
        _remove_if_exists(tmpimg1)
        _remove_if_exists(tmpimg2)

    img5d = Nifti1Image(np.ones((2, 2, 2, 2, 2)), affine)
    assert_raises_regex(TypeError,
                        'Concatenated images must be 3D or 4D. '
                        'You gave a list of 5D images',
                        _utils.concat_niimgs, [img5d, img5d])
def test_validity_of_ncuts_error_in_find_cut_slices():
    data = np.zeros((50, 50, 50))
    affine = np.eye(4)
    x_map, y_map, z_map = 25, 5, 20
    data[x_map - 15:x_map + 15, y_map - 3:y_map + 3,
         z_map - 10:z_map + 10] = 1
    img = nibabel.Nifti1Image(data, affine)
    direction = 'z'
    for n_cuts in (0, -2, -10.00034, 0.999999, 0.4, 0.11111111):
        message = ("Image has %d slices in direction %s. Therefore, the "
                   "number of cuts must be between 1 and %d. You provided "
                   "n_cuts=%s " % (data.shape[0], direction,
                                   data.shape[0], n_cuts))
        assert_raises_regex(ValueError, message,
                            find_cut_slices, img, n_cuts=n_cuts)
def test_plot_surf_roi_error():
    mesh = _generate_surf()
    rng = np.random.RandomState(0)
    roi1 = rng.randint(0, mesh[0].shape[0], size=5)
    roi2 = rng.randint(0, mesh[0].shape[0], size=10)

    # Wrong input
    assert_raises_regex(ValueError,
                        'Invalid input for roi_map',
                        plot_surf_roi, mesh,
                        roi_map={'roi1': roi1, 'roi2': roi2})
def test_filter_and_mask_error():
    data = np.zeros([20, 30, 40, 5])
    mask = np.zeros([20, 30, 40, 2])
    mask[10, 15, 20, :] = 1

    data_img = nibabel.Nifti1Image(data, np.eye(4))
    mask_img = nibabel.Nifti1Image(mask, np.eye(4))

    masker = NiftiMasker()
    params = get_params(NiftiMasker, masker)

    assert_raises_regex(
        DimensionError,
        "Input data has incompatible dimensionality: "
        "Expected dimension is 3D and you provided "
        "a 4D image.",
        filter_and_mask, data_img, mask_img, params)
def test_iter_img():
    img_3d = nibabel.Nifti1Image(np.ones((3, 4, 5)), np.eye(4))
    testing.assert_raises_regex(TypeError,
                                "Input data has incompatible dimensionality: "
                                "Expected dimension is 4D and you provided "
                                "a 3D image.",
                                image.iter_img, img_3d)

    affine = np.array([[1., 2., 3., 4.],
                       [5., 6., 7., 8.],
                       [9., 10., 11., 12.],
                       [0., 0., 0., 1.]])
    img_4d, _ = testing.generate_fake_fmri(affine=affine)

    for i, img in enumerate(image.iter_img(img_4d)):
        expected_data_3d = img_4d.get_data()[..., i]
        assert_array_equal(img.get_data(), expected_data_3d)
        assert_array_equal(compat.get_affine(img),
                           compat.get_affine(img_4d))

    with testing.write_tmp_imgs(img_4d) as img_4d_filename:
        for i, img in enumerate(image.iter_img(img_4d_filename)):
            expected_data_3d = img_4d.get_data()[..., i]
            assert_array_equal(img.get_data(), expected_data_3d)
            assert_array_equal(compat.get_affine(img),
                               compat.get_affine(img_4d))
        # enables us to delete "img_4d_filename" on windows
        del img

    img_3d_list = list(image.iter_img(img_4d))
    for i, img in enumerate(image.iter_img(img_3d_list)):
        expected_data_3d = img_4d.get_data()[..., i]
        assert_array_equal(img.get_data(), expected_data_3d)
        assert_array_equal(compat.get_affine(img),
                           compat.get_affine(img_4d))

    with testing.write_tmp_imgs(*img_3d_list) as img_3d_filenames:
        for i, img in enumerate(image.iter_img(img_3d_filenames)):
            expected_data_3d = img_4d.get_data()[..., i]
            assert_array_equal(img.get_data(), expected_data_3d)
            assert_array_equal(compat.get_affine(img),
                               compat.get_affine(img_4d))
        # enables us to delete "img_3d_filenames" on windows
        del img
def test_validity_threshold_value_in_threshold_img():
    shape = (6, 8, 10)
    maps, _ = testing.generate_maps(shape, n_regions=2)

    # check that an error is raised when threshold is None
    testing.assert_raises_regex(ValueError,
                                "The input parameter 'threshold' is empty. ",
                                threshold_img, maps, threshold=None)

    invalid_threshold_values = ['90t%', 's%', 't', '0.1']
    name = 'threshold'
    for thr in invalid_threshold_values:
        testing.assert_raises_regex(ValueError,
                                    '{0}.+should be a number followed by '
                                    'the percent sign'.format(name),
                                    threshold_img, maps, threshold=thr)
def test_5d():
    mask = np.zeros((10, 10, 10))
    mask[3:7, 3:7, 3:7] = 1
    mask_img = Nifti1Image(mask, np.eye(4))

    # Test that a list of 4D images (i.e. 5D data) is rejected by
    # transform, which expects a single 4D Niimg-like object
    data_5d = [np.random.random((10, 10, 10, 3)) for i in range(5)]
    data_5d = [nibabel.Nifti1Image(d, np.eye(4)) for d in data_5d]
    masker = NiftiMasker(mask_img=mask_img)
    masker.fit()
    testing.assert_raises_regex(
        DimensionError,
        'Data must be a 4D Niimg-like object but you provided'
        ' a list of 4D images.',
        masker.transform, data_5d)
def test_3d_images():
    # Test that the MultiNiftiMasker works with 3D images
    mask_img = Nifti1Image(np.ones((2, 2, 2), dtype=np.int8),
                           affine=np.diag((4, 4, 4, 1)))
    epi_img1 = Nifti1Image(np.ones((2, 2, 2)),
                           affine=np.diag((4, 4, 4, 1)))
    epi_img2 = Nifti1Image(np.ones((2, 2, 2)),
                           affine=np.diag((2, 2, 2, 1)))
    masker = MultiNiftiMasker(mask_img=mask_img)
    epis = masker.fit_transform([epi_img1, epi_img2])
    # This is mostly a smoke test
    assert_equal(len(epis), 2)

    # verify that 4D mask arguments are refused
    mask_img_4d = Nifti1Image(np.ones((2, 2, 2, 2), dtype=np.int8),
                              affine=np.diag((4, 4, 4, 1)))
    masker2 = MultiNiftiMasker(mask_img=mask_img_4d)
    assert_raises_regex(DimensionError, "Data must be a 3D",
                        masker2.fit)