def benchmark():
    """Benchmark 4D image resampling.

    Generates a random 4D volume, resamples it with the new
    ``nilearn.resampling`` implementation (and, when ``check`` is set,
    also with the original ``nilearn.resampling_orig`` implementation),
    and verifies both produce nearly identical results.

    NOTE(review): another function named ``benchmark`` is defined later
    in this file and shadows this one at import time — confirm the
    benchmark harness collects both, or rename one of them.
    """
    check = True  # also run the original implementation and compare outputs
    shape = (40, 41, 42, 150)
    affine = np.eye(4)
    # np.empty is the supported way to get an uninitialized array; the
    # np.ndarray constructor is a low-level API not meant for direct use.
    data = np.empty(shape, order="F", dtype=np.float32)
    with profile.timestamp("Data_generation"):
        data[...] = np.random.standard_normal(data.shape)
    # Upsample the three spatial axes by 26%. A shape must contain
    # integers: a float-valued shape is rejected when the output array is
    # allocated inside resample_img, so truncate explicitly.
    target_shape = tuple(int(s * 1.26) for s in shape[:3])
    target_affine = affine
    img = nibabel.Nifti1Image(data, affine)

    # Resample one 4D image
    if check:
        print("Resampling (original)...")
        data_orig = utils.timeit(profile(nilearn.resampling_orig.resample_img)
                            )(img, target_shape=target_shape,
                              target_affine=target_affine,
                              interpolation="continuous")

    print("Resampling (new)...")
    data = utils.timeit(profile(nilearn.resampling.resample_img)
                        )(img, target_shape=target_shape,
                          target_affine=target_affine,
                          interpolation="continuous")
    # Short pauses around each deletion so a sampling memory profiler can
    # observe the release of the freed arrays.
    time.sleep(0.5)
    del img
    time.sleep(0.5)
    if check:
        # Both implementations must agree up to floating-point rounding.
        np.testing.assert_almost_equal(data_orig.get_data(), data.get_data())
    del data
    time.sleep(0.5)
def benchmark():
    """Benchmark concatenation of individual images into one 4D image.

    When the cache-dropping tools are available, the concatenation is
    timed twice: once immediately after invalidating the OS file cache
    (cold-cache run) and once more with a warm cache. Otherwise only the
    warm-cache run is performed.

    NOTE(review): this redefines ``benchmark`` from earlier in this file
    and shadows it — confirm this is intended or rename one of them.
    """
    _, _, images = get_filenames()

    def _run_concat():
        # One timed, profiled concatenation, followed by a sanity check:
        # the 4th axis must hold exactly one volume per input image.
        print("Concatenating images...")
        data = utils.timeit(profile(nilearn.utils.concat_niimgs))(images)
        assert data.shape[3] == len(images)
        del data

    if utils.cache_tools_available:
        print("Invalidating cache...")
        utils.dontneed(images)
        _run_concat()

    _run_concat()