from pathlib import Path

import pytest

from nilearn.datasets import atlas, utils


def test_downloader(tmp_path, request_mocker):
    # `tmp_path` is pytest's built-in temporary-directory fixture;
    # `request_mocker` is nilearn's conftest fixture that redirects
    # matching URLs to local files.
    #
    # Sandboxing test
    # ===============
    # When nilearn downloads a file, everything is first downloaded in a
    # temporary directory (sandbox) and moved to the "real" data directory
    # only if all files are present. In case of error, the sandbox is
    # deleted. To test this feature, we do as follows:
    # - create the data dir with a file that has specific content
    # - try to download the dataset but make it fail on purpose (by
    #   requesting a file that is not in the archive)
    # - check that the previously created file is untouched:
    #   - if sandboxing is faulty, the file is replaced by the file from
    #     the archive
    #   - if sandboxing works, the file is untouched
    local_archive = Path(
        __file__).parent / "data" / "craddock_2011_parcellations.tar.gz"
    url = "http://example.com/craddock_atlas"
    request_mocker.url_mapping["*craddock*"] = local_archive

    datasetdir = tmp_path / 'craddock_2012'
    datasetdir.mkdir()

    # Create a dummy file. If sandboxing is successful, it won't be
    # overwritten.
    dummy_file = datasetdir / "random_all.nii.gz"
    with dummy_file.open("w") as f:
        f.write('stuff')

    opts = {'uncompress': True}
    files = [
        ('random_all.nii.gz', url, opts),
        # The following file does not exist. It will cause the fetching
        # procedure to abort.
        ('bald.nii.gz', url, opts),
    ]
    with pytest.raises(IOError):
        utils._fetch_files(str(datasetdir), files, verbose=0)

    with dummy_file.open("r") as f:
        stuff = f.read(5)
    assert stuff == 'stuff'

    # Downloading test
    # ================
    # Now, we use the regular downloading feature. This will overwrite the
    # dummy file created before.
    atlas.fetch_atlas_craddock_2012(data_dir=str(tmp_path))
    with dummy_file.open() as f:
        stuff = f.read()
    # The random_all.nii.gz shipped in the test archive is empty, so the
    # dummy content must be gone after a successful fetch.
    assert stuff == ''
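# A minimal sketch of the sandboxing idea exercised above, assuming plain
# urllib downloads. The helper name `fetch_with_sandbox` is illustrative
# only, not nilearn API; the real implementation is
# nilearn.datasets.utils._fetch_files, which also handles extras such as
# uncompressing archives.
import shutil
import tempfile
import urllib.request


def fetch_with_sandbox(urls_and_names, data_dir):
    """Download every (url, filename) pair into a temporary sandbox and
    move the results into ``data_dir`` only once all downloads succeed."""
    data_dir = Path(data_dir)
    sandbox = Path(tempfile.mkdtemp())
    try:
        for url, filename in urls_and_names:
            urllib.request.urlretrieve(url, str(sandbox / filename))
        data_dir.mkdir(parents=True, exist_ok=True)
        for path in sandbox.iterdir():
            # Only now is data_dir touched: pre-existing files are
            # replaced solely after a fully successful fetch.
            shutil.move(str(path), str(data_dir / path.name))
    finally:
        # Any failure above leaves data_dir untouched; the sandbox and
        # its partial downloads are always removed.
        shutil.rmtree(sandbox, ignore_errors=True)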
def test_fetch_atlas_craddock_2012(tmp_path, request_mocker):
    local_archive = Path(
        __file__).parent / "data" / "craddock_2011_parcellations.tar.gz"
    request_mocker.url_mapping["*craddock*"] = local_archive
    bunch = atlas.fetch_atlas_craddock_2012(data_dir=str(tmp_path),
                                            verbose=0)

    keys = ("scorr_mean", "tcorr_mean",
            "scorr_2level", "tcorr_2level",
            "random")
    filenames = [
        "scorr05_mean_all.nii.gz",
        "tcorr05_mean_all.nii.gz",
        "scorr05_2level_all.nii.gz",
        "tcorr05_2level_all.nii.gz",
        "random_all.nii.gz",
    ]
    # A single archive download should cover all five parcellation files.
    assert request_mocker.url_count == 1
    for key, fn in zip(keys, filenames):
        assert bunch[key] == str(tmp_path / 'craddock_2012' / fn)
    assert bunch.description != ''
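# Hypothetical usage outside the test suite (not part of the tests):
# fetch the atlas for real and load one parcellation image. Assumes
# nibabel is installed; the Craddock 2012 files are distributed as 4D
# images, roughly one 3D volume per clustering granularity.
if __name__ == "__main__":
    import nibabel as nib

    from nilearn.datasets import fetch_atlas_craddock_2012

    craddock = fetch_atlas_craddock_2012()   # downloads on first call
    img = nib.load(craddock.scorr_mean)      # path stored in the Bunch
    print(img.shape)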