Example #1
import numpy as np
from numpy.testing import assert_array_equal
from scipy.ndimage import gaussian_filter, gaussian_filter1d

from nipy.core.api import Image, vox2scanner
from nipy.algorithms.registration import SpaceTimeRealign

# Note: ``assert_false`` (from ``nose.tools`` in the original test suite) and
# ``check_stc`` (defined alongside this test) are nipy test helpers.


def test_slice_time_correction():
    # Make smooth time course at slice resolution
    TR = 2.
    n_vols = 25
    n_slices = 10
    # Create single volume
    shape_3d = (20, 30, n_slices)
    spatial_sigma = 4
    time_sigma = n_slices * 5  # time sigma in TRs
    one_vol = np.random.normal(100, 25, size=shape_3d)
    gaussian_filter(one_vol, spatial_sigma, output=one_vol)
    # Add smoothed time courses.  Time courses are at time resolution of one
    # slice time.  So, there are n_slices time points per TR.
    n_vol_slices = n_slices * n_vols
    time_courses = np.random.normal(0, 15, size=shape_3d + (n_vol_slices, ))
    gaussian_filter1d(time_courses, time_sigma, output=time_courses)
    big_data = one_vol[..., None] + time_courses
    # Can the first time point be approximated from the later ones?
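    # (``first_signal`` takes every n_slices-th fine-grained time point, i.e.
    # the signal at the start of each TR for every slice: the reference series
    # that the slice-time correction should approximate.)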
    first_signal = big_data[..., 0:n_vol_slices:n_slices]
    for name, time_to_slice in (('ascending', list(range(n_slices))),
                                ('descending', list(range(n_slices)[::-1])),
                                ('asc_alt_2', (list(range(0, n_slices, 2)) +
                                               list(range(1, n_slices, 2)))),
                                ('desc_alt_2',
                                 (list(range(0, n_slices, 2)) +
                                  list(range(1, n_slices, 2)))[::-1])):
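        # ``time_to_slice`` maps acquisition slot -> spatial slice;
        # ``np.argsort`` inverts it to spatial slice -> acquisition slot.
        # E.g. for 'asc_alt_2' with 6 slices, time_to_slice is
        # [0, 2, 4, 1, 3, 5] and argsort gives [0, 3, 1, 4, 2, 5].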
        slice_to_time = np.argsort(time_to_slice)
        acquired_signal = np.zeros_like(first_signal)
        for space_sno, time_sno in enumerate(slice_to_time):
            acquired_signal[..., space_sno, :] = \
                big_data[..., space_sno, time_sno:n_vol_slices:n_slices]
        # do STC - minimizer will fail
        acquired_image = Image(acquired_signal, vox2scanner(np.eye(5)))
        stc = SpaceTimeRealign(acquired_image, TR, name, 2)
        stc.estimate(refscan=None,
                     loops=1,
                     between_loops=1,
                     optimizer='steepest')
        # Check no motion estimated
        assert_array_equal([t.param for t in stc._transforms[0]], 0)
        corrected = stc.resample()[0].get_data()
        # check we approximate first time slice with correction
        assert_false(
            np.allclose(acquired_signal, corrected, rtol=1e-3, atol=0.1))
        check_stc(first_signal,
                  corrected,
                  ref_slice=slice_to_time[0],
                  rtol=5e-4,
                  atol=1e-6)
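The test above exercises the slice-timing path of nipy's SpaceTimeRealign. Stripped of the test scaffolding, a minimal sketch of the same pattern might look like this; the random 4D array and the identity voxel-to-scanner affine are placeholders, and the estimate() keywords simply mirror the call in the test rather than the estimator's defaults.

import numpy as np
from nipy.core.api import Image, vox2scanner
from nipy.algorithms.registration import SpaceTimeRealign

# Placeholder 4D data: 20 x 30 x 10 voxels, 25 volumes, TR of 2 s.
data = np.random.normal(100, 10, size=(20, 30, 10, 25))
img = Image(data, vox2scanner(np.eye(5)))   # identity 4D voxel-to-scanner affine

# Slice order name and slice axis (2), as in the test above.
stc = SpaceTimeRealign(img, 2.0, 'ascending', 2)
stc.estimate(refscan=None, loops=1, between_loops=1, optimizer='steepest')
corrected = stc.resample()[0].get_data()    # slice-time corrected array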
Example #2
import numpy as np

from nipy import save_image, load_image
from nipy.core.api import Image, vox2scanner

# This gets the filename for a tiny example file
from nipy.testing import anatfile

# Load an image to get an array and an affine.
#
# Use one of nipy's test files to get an array and an affine (as a numpy array).
img = load_image(anatfile)
arr = img.get_data()
affine_array = img.coordmap.affine.copy()

# 1) Create a CoordinateMap from the affine transform, which specifies the
# mapping from input (voxel) to output coordinates.  The ``vox2scanner``
# function makes a coordinate map from voxels to scanner coordinates.  Other
# options are ``vox2mni`` and ``vox2talairach``.
affine_coordmap = vox2scanner(affine_array)

# 2) Create a nipy image from the array and CoordinateMap
newimg = Image(arr, affine_coordmap)

# Save the nipy image to the specified filename
save_image(newimg, 'an_image.nii.gz')

# Reload and verify the data and affine were saved correctly.
img_back = load_image('an_image.nii.gz')
assert np.allclose(img_back.get_data(), img.get_data())
assert np.allclose(img_back.coordmap.affine, img.coordmap.affine)
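The comment above mentions ``vox2mni`` and ``vox2talairach`` as alternatives to ``vox2scanner``. As a short follow-up sketch, reusing ``arr`` and ``affine_array`` from the example above, the same pattern with the MNI output space would be:

from nipy.core.api import vox2mni

# Same array and affine as above, but the output space is labelled as MNI.
mni_coordmap = vox2mni(affine_array)
mni_img = Image(arr, mni_coordmap)
save_image(mni_img, 'an_mni_image.nii.gz')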