Example #1
def test_rotate2d():
    # Rotate an image in 2d on a square grid, should result in transposed image
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.7, 0.5, 1]))
    g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.7, 1]))
    i = Image(np.ones((100, 100)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55, 40:55] = 3.
    a = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]], np.float)
    ir = resample(i, g2, a, (100, 100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
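The homogeneous matrix `a` above simply swaps the two world axes, which is why the resampled array equals the transpose of the original. A minimal standalone numpy sketch of that action (not part of the nipy test):

import numpy as np

a = np.array([[0, 1, 0],
              [1, 0, 0],
              [0, 0, 1]], float)   # same permutation matrix as in the test
xy = np.array([3.0, 7.0, 1.0])     # a world point (x=3, y=7) in homogeneous form
print(a @ xy)                      # [7. 3. 1.] -- x and y exchanged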
Example #2
def test_rotate3d():
    # Rotate / transpose a 3d image on a non-square grid
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.6, 0.7, 1]))
    g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.7, 0.6, 1]))
    shape = (100, 90, 80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    a = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1.]])
    ir = resample(i, g2, a, (100, 80, 90))
    assert_array_almost_equal(np.transpose(ir.get_data(), (0, 2, 1)),
                              i.get_data())
Example #3
def test_rotate2d():
    # Rotate an image in 2d on a square grid, should result in transposed image
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.7,0.5,1]))
    g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.7,1]))
    i = Image(np.ones((100,100)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55,40:55] = 3.
    a = np.array([[0,1,0],
                  [1,0,0],
                  [0,0,1]], np.float)
    ir = resample(i, g2, a, (100, 100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
Example #4
def test_rollaxis():
    data = np.random.standard_normal((3,4,7,5))
    im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))

    # for the inverse we must specify an integer
    yield assert_raises, ValueError, image.rollaxis, im, 'i', True

    # Check that rollaxis preserves diagonal affines, as claimed

    yield assert_almost_equal, image.rollaxis(im, 1).affine, np.diag([2,1,3,4,1])
    yield assert_almost_equal, image.rollaxis(im, 2).affine, np.diag([3,1,2,4,1])
    yield assert_almost_equal, image.rollaxis(im, 3).affine, np.diag([4,1,2,3,1])

    # Check that ambiguous axes raise an exception
    # 'l' appears both as an axis and a reference coord name
    # and in different places

    im_amb = Image(data, AffineTransform.from_params('ijkl', 'xylt', np.diag([1,2,3,4,1])))
    yield assert_raises, ValueError, image.rollaxis, im_amb, 'l'

    # But if it's unambiguous, then
    # 'l' can appear both as an axis and a reference coord name

    im_unamb = Image(data, AffineTransform.from_params('ijkl', 'xyzl', np.diag([1,2,3,4,1])))
    im_rolled = image.rollaxis(im_unamb, 'l')
    yield assert_almost_equal, im_rolled.get_data(), \
        im_unamb.get_data().transpose([3,0,1,2])

    for i, o, n in zip('ijkl', 'xyzt', range(4)):
        im_i = image.rollaxis(im, i)
        im_o = image.rollaxis(im, o)
        im_n = image.rollaxis(im, n)

        yield assert_almost_equal, im_i.get_data(), \
                                  im_o.get_data()

        yield assert_almost_equal, im_i.affine, \
            im_o.affine

        yield assert_almost_equal, im_n.get_data(), \
            im_o.get_data()

        for _im in [im_n, im_o, im_i]:
            im_n_inv = image.rollaxis(_im, n, inverse=True)

            yield assert_almost_equal, im_n_inv.affine, \
                im.affine

            yield assert_almost_equal, im_n_inv.get_data(), \
                im.get_data()
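The affine assertions above encode a simple bookkeeping rule for diagonal affines: moving a data axis to the front moves its scale factor to the first diagonal slot. A standalone numpy illustration of that rule (not nipy code):

import numpy as np

scales = [1, 2, 3, 4]                                 # diagonal scalings for axes 'ijkl'
order = [1, 0, 2, 3]                                  # axis 1 rolled to the front
rolled = np.diag([scales[k] for k in order] + [1])    # permuted homogeneous affine
print(np.allclose(rolled, np.diag([2, 1, 3, 4, 1])))  # True, matching the first assertion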
Example #5
def test_rotate3d():
    # Rotate / transpose a 3d image on a non-square grid
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.6,0.7,1]))
    g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.7,0.6,1]))
    shape = (100,90,80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55,40:55,30:33] = 3.
    a = np.array([[1,0,0,0],
                  [0,0,1,0],
                  [0,1,0,0],
                  [0,0,0,1.]])
    ir = resample(i, g2, a, (100,80,90))
    assert_array_almost_equal(np.transpose(ir.get_data(), (0,2,1)),
                              i.get_data())
Example #6
def test_nonaffine():
    # resamples an image along a curve through the image.
    #
    # FIXME: use the reference.evaluate.Grid to perform this nicer
    # FIXME: Remove pylab references
    def curve(x): # function accepts N by 1, returns N by 2
        return (np.vstack([5*np.sin(x.T),5*np.cos(x.T)]).T + [52,47])
    for names in (('xy', 'ij', 't', 'u'),('ij', 'xy', 't', 's')):
        in_names, out_names, tin_names, tout_names = names
        g = AffineTransform.from_params(in_names, out_names, np.identity(3))
        img = Image(np.ones((100,90)), g)
        img.get_data()[50:55,40:55] = 3.
        tcoordmap = AffineTransform.from_start_step(
            tin_names,
            tout_names,
            [0],
            [np.pi*1.8/100])
        ir = resample(img, tcoordmap, curve, (100,))
    if gui_review:
        import pylab
        pylab.figure(num=3)
        pylab.imshow(img, interpolation='nearest')
        d = curve(np.linspace(0,1.8*np.pi,100))
        pylab.plot(d[0], d[1])
        pylab.gca().set_ylim([0,99])
        pylab.gca().set_xlim([0,89])
        pylab.figure(num=4)
        pylab.plot(ir.get_data())
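The non-affine mapping here is the `curve` function: each scalar parameter is sent to a point on a radius-5 circle centred at (52, 47), and `resample` samples the image along that path. A standalone sketch of what the mapping produces (plain numpy, not part of the test):

import numpy as np

def curve(x):  # accepts N by 1, returns N by 2
    return np.vstack([5 * np.sin(x.T), 5 * np.cos(x.T)]).T + [52, 47]

t = np.linspace(0, 1.8 * np.pi, 100).reshape(-1, 1)
pts = curve(t)
print(pts.shape)                # (100, 2): one (i, j) position per output sample
print(pts.min(0), pts.max(0))   # the path stays inside [47, 57] x [42, 52]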
Example #7
def test_slice_from_3d():
    # Resample a 3d image, returning a zslice, yslice and xslice
    #
    # This example creates a coordmap that coincides with
    # a given z, y, or x slice of an image, and checks that
    # resampling agrees with the data in the given slice.
    shape = (100,90,80)
    g = AffineTransform.from_params('ijk',
                                    'xyz',
                                    np.diag([0.5,0.5,0.5,1]))
    img = Image(np.ones(shape), g)
    img.get_data()[50:55,40:55,30:33] = 3
    I = np.identity(4)
    zsl = slices.zslice(26,
                        ((0,49.5), 100),
                        ((0,44.5), 90),
                        img.reference)
    ir = resample(img, zsl, I, (100, 90))
    assert_array_almost_equal(ir.get_data(), img[:,:,53].get_data())
    ysl = slices.yslice(22,
                        ((0,49.5), 100),
                        ((0,39.5), 80),
                        img.reference)
    ir = resample(img, ysl, I, (100, 80))
    assert_array_almost_equal(ir.get_data(), img[:,45,:].get_data())
    xsl = slices.xslice(15.5,
                        ((0,44.5), 90),
                        ((0,39.5), 80),
                        img.reference)
    ir = resample(img, xsl, I, (90, 80))
    assert_array_almost_equal(ir.get_data(), img[32,:,:].get_data())
Example #8
def test_synchronized_order():

    data = np.random.standard_normal((3,4,7,5))
    im = Image(data, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))

    im_scrambled = im.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled = image.synchronized_order(im_scrambled, im)
    
    yield assert_equal, im_unscrambled.coordmap, im.coordmap
    yield assert_almost_equal, im_unscrambled.get_data(), im.get_data()
    yield assert_equal, im_unscrambled, im
    yield assert_true, im_unscrambled == im
    yield assert_false, im_unscrambled != im

    # the images don't have to be the same shape

    data2 = np.random.standard_normal((3,11,9,4))
    im2 = Image(data2, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,2,3,4,1])))

    im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled2 = image.synchronized_order(im_scrambled2, im)

    yield assert_equal, im_unscrambled2.coordmap, im.coordmap

    # or the same coordmap

    data3 = np.random.standard_normal((3,11,9,4))
    im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt', np.diag([1,9,3,-2,1])))

    im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled3 = image.synchronized_order(im_scrambled3, im)

    yield assert_equal, im_unscrambled3.axes, im.axes
    yield assert_equal, im_unscrambled3.reference, im.reference
Example #9
def test_nonaffine():
    # resamples an image along a curve through the image.
    #
    # FIXME: use the reference.evaluate.Grid to perform this nicer
    # FIXME: Remove pylab references
    def curve(x):  # function accepts N by 1, returns N by 2
        return (np.vstack([5 * np.sin(x.T), 5 * np.cos(x.T)]).T + [52, 47])

    for names in (('xy', 'ij', 't', 'u'), ('ij', 'xy', 't', 's')):
        in_names, out_names, tin_names, tout_names = names
        g = AffineTransform.from_params(in_names, out_names, np.identity(3))
        img = Image(np.ones((100, 90)), g)
        img.get_data()[50:55, 40:55] = 3.
        tcoordmap = AffineTransform.from_start_step(tin_names, tout_names, [0],
                                                    [np.pi * 1.8 / 100])
        ir = resample(img, tcoordmap, curve, (100, ))
    if gui_review:
        import pylab
        pylab.figure(num=3)
        pylab.imshow(img, interpolation='nearest')
        d = curve(np.linspace(0, 1.8 * np.pi, 100))
        pylab.plot(d[0], d[1])
        pylab.gca().set_ylim([0, 99])
        pylab.gca().set_xlim([0, 89])
        pylab.figure(num=4)
        pylab.plot(ir.get_data())
Example #10
File: files.py, Project: cournape/nipy
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------

    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = formats.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()

    # Get info from NIFTI header, if present, to tell which axes are
    # which.  This is a NIFTI-specific kludge, that might be abstracted
    # out into the image backend in a general way.  Similarly for
    # getting zooms

    # axis_renames is a dictionary: dict([(int, str)])
    # that has keys in range(3)
    # the axes of the Image are renamed from 'ijk'
    # using these names

    try:
        axis_renames = hdr.get_axis_renames()
    except (TypeError, AttributeError):
        axis_renames = {}

    try:
        zooms = hdr.get_zooms()
    except AttributeError:
        zooms = np.ones(len(shape))

    # affine_transform is a 3-d transform

    affine_transform3d, affine_transform = \
        affine_transform_from_array(aff, 'ijk', pixdim=zooms[3:])
    img = Image(img.get_data(), affine_transform.renamed_domain(axis_renames))
    img.header = hdr
    return img
Example #11
File: test_fmri.py, Project: Lx37/nipy
def test_labels1():
    img = load_image(funcfile)
    data = img.get_data()
    parcelmap = Image(img[0].get_data(), AfT("kji", "zyx", np.eye(4)))
    parcelmap = (parcelmap.get_data() * 100).astype(np.int32)
    v = 0
    for i, d in axis0_generator(data, parcels(parcelmap)):
        v += d.shape[1]
    assert_equal(v, parcelmap.size)
Example #12
def test_labels1():
    img = load_image(funcfile)
    data = img.get_data()
    parcelmap = Image(img[0].get_data(), AfT('kji', 'zyx', np.eye(4)))
    parcelmap = (parcelmap.get_data() * 100).astype(np.int32)
    v = 0
    for i, d in axis0_generator(data, parcels(parcelmap)):
        v += d.shape[1]
    assert_equal(v, parcelmap.size)
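The assertion rests on a simple counting fact: iterating over parcels (groups of voxels sharing a label) and summing the group sizes recovers the total number of voxels. A standalone sketch of that bookkeeping with hypothetical labels (not the funcfile data):

import numpy as np

labels = np.random.default_rng(0).integers(0, 5, size=(4, 6))
total = sum(int((labels == lab).sum()) for lab in np.unique(labels))
assert total == labels.size   # every voxel is counted exactly once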
Example #13
def test_resample2d2():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
    i = Image(np.ones((100,90)), g)
    i.get_data()[50:55,40:55] = 3.
    a = np.identity(3)
    a[:2,-1] = 4.
    A = np.identity(2)
    b = np.ones(2)*4
    ir = resample(i, i.coordmap, (A, b), (100,90))
    assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
Example #14
def test_resample2d2():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    A = np.identity(2)
    b = np.ones(2) * 4
    ir = resample(i, i.coordmap, (A, b), (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
Example #15
def test_resample2d3():
    # Same as test_resample2d, only a different way of specifying
    # the transform: here it is an (A,b) pair
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    ir = resample(i, i.coordmap, a, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
Example #16
def test_resample2d3():
    # Same as test_resample2d, only a different way of specifying
    # the transform: here it is an (A,b) pair
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
    i = Image(np.ones((100,90)), g)
    i.get_data()[50:55,40:55] = 3.
    a = np.identity(3)
    a[:2,-1] = 4.
    ir = resample(i, i.coordmap, a, (100,90))
    assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
Example #17
File: files.py, Project: bergtholdt/nipy
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------

    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = nib.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()
    # If the header implements it, get a list of names, one per axis,
    # and put this into the coordinate map.  In fact, no image format
    # implements this at the moment, so in practice, the following code
    # is not currently called. 
    axis_renames = {}
    try:
        axis_names = hdr.axis_names
    except AttributeError:
        pass
    else:
        # axis_renames is a dictionary: dict([(int, str)]) that has keys
        # in range(3). The axes of the Image are renamed from 'ijk' using
        # these names
        for i in range(min([len(axis_names), 3])):
            name = axis_names[i]
            if not (name is None or name == ''):
                axis_renames[i] = name
    zooms = hdr.get_zooms()
    # affine_transform is a 3-d transform
    affine_transform3d, affine_transform = \
        affine_transform_from_array(aff, 'ijk', pixdim=zooms[3:])
    img = Image(img.get_data(), affine_transform.renamed_domain(axis_renames))
    img.header = hdr
    return img
Example #18
def test_rotate2d3():
    # Another way to rotate/transpose the image, similar to
    # test_rotate2d2 and test_rotate2d, except the world of the
    # output coordmap is the same as the world of the
    # original image. That is, the data is transposed on disk, but the
    # output coordinates are still 'x', 'y' order, not 'y', 'x' order as
    # above

    # This functionality may or may not be used a lot: if data is to
    # be transposed but one wants to keep the NIFTI order of output
    # coords, this would do the trick
    g = AffineTransform.from_params('xy', 'ij', np.diag([0.5, 0.7, 1]))
    i = Image(np.ones((100, 80)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    g2 = AffineTransform.from_params(
        'xy', 'ij', np.array([[0, 0.5, 0], [0.7, 0, 0], [0, 0, 1]]))
    ir = resample(i, g2, a, (80, 100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
Example #19
def test_rotate2d3():
    # Another way to rotate/transpose the image, similar to
    # test_rotate2d2 and test_rotate2d, except the world of the
    # output coordmap is the same as the world of the
    # original image. That is, the data is transposed on disk, but the
    # output coordinates are still 'x', 'y' order, not 'y', 'x' order as
    # above

    # This functionality may or may not be used a lot: if data is to
    # be transposed but one wants to keep the NIFTI order of output
    # coords, this would do the trick
    g = AffineTransform.from_params('xy', 'ij', np.diag([0.5,0.7,1]))
    i = Image(np.ones((100,80)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55,40:55] = 3.
    a = np.identity(3)
    g2 = AffineTransform.from_params('xy', 'ij', np.array([[0,0.5,0],
                                                  [0.7,0,0],
                                                  [0,0,1]]))
    ir = resample(i, g2, a, (80,100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
Example #20
def test_2d_from_3d():
    # Resample a 3d image on a 2d affine grid
    # This example creates a coordmap that coincides with
    # the 10th slice of an image, and checks that
    # resampling agrees with the data in the 10th slice.
    shape = (100, 90, 80)
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    a = np.identity(4)
    g2 = ArrayCoordMap.from_shape(g, shape)[10]
    ir = resample(i, g2.coordmap, a, g2.shape)
    assert_array_almost_equal(ir.get_data(), i[10].get_data())
Example #21
def test_2d_from_3d():
    # Resample a 3d image on a 2d affine grid
    # This example creates a coordmap that coincides with
    # the 10th slice of an image, and checks that
    # resampling agrees with the data in the 10th slice.
    shape = (100,90,80)
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1]))
    i = Image(np.ones(shape), g)
    i.get_data()[50:55,40:55,30:33] = 3.
    a = np.identity(4)
    g2 = ArrayCoordMap.from_shape(g, shape)[10]
    ir = resample(i, g2.coordmap, a, g2.shape)
    assert_array_almost_equal(ir.get_data(), i[10].get_data())
Example #22
def test_resample2d1():
    # Tests the same as test_resample2d, only using a callable instead of
    # an AffineTransform instance
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
    i = Image(np.ones((100,90)), g)
    i.get_data()[50:55,40:55] = 3.
    a = np.identity(3)
    a[:2,-1] = 4.
    A = np.identity(2)
    b = np.ones(2)*4
    def mapper(x):
        return np.dot(x, A.T) + b
    ir = resample(i, i.coordmap, mapper, (100,90))
    assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
Example #23
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------

    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = formats.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()
    # Get info from NIFTI header, if present, to tell which axes are
    # which.  This is a NIFTI-specific kludge, that might be abstracted
    # out into the image backend in a general way.  Similarly for
    # getting zooms
    try:
        fps = hdr.get_dim_info()
    except (TypeError, AttributeError):
        fps = (None, None, None)
    ijk = ijk_from_fps(fps)
    try:
        zooms = hdr.get_zooms()
    except AttributeError:
        zooms = np.ones(len(shape))
    aff = _match_affine(aff, len(shape), zooms)
    coordmap = coordmap_from_affine(aff, ijk)
    img = Image(img.get_data(), coordmap)
    img.header = hdr
    return img
Example #24
def test_resample2d():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 3x3 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of -4 in each
    # coordinate.  Or, to find the "image" physical coordinates, given
    # the "target" physical coordinates, we add 4 to each "target
    # coordinate".  The resulting resampled image should show the
    # overall image shifted -8,-8 voxels towards the origin
    a = np.identity(3)
    a[:2, -1] = 4.
    ir = resample(i, i.coordmap, a, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
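A quick check of the arithmetic behind the "-8,-8 voxels" comment above (standalone, not part of the test): the transform shifts world coordinates by +4, and `g` maps one voxel to 0.5 world units, so the shift is 8 voxels.

world_shift = 4.0        # translation applied in 'xy' world coordinates
voxel_size = 0.5         # world units per voxel, from np.diag([0.5, 0.5, 1])
voxel_shift = world_shift / voxel_size
assert voxel_shift == 8.0   # so the block at [50:55, 40:55] is checked at [42:47, 32:47]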
Example #25
def test_resample2d1():
    # Tests the same as test_resample2d, only using a callable instead of
    # an AffineTransform instance
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    A = np.identity(2)
    b = np.ones(2) * 4

    def mapper(x):
        return np.dot(x, A.T) + b

    ir = resample(i, i.coordmap, mapper, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
Example #26
def test_resample2d():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5,0.5,1]))
    i = Image(np.ones((100,90)), g)
    i.get_data()[50:55,40:55] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 3x3 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of -4 in each
    # coordinate.  Or, to find the "image" physical coordinates, given
    # the "target" physical coordinates, we add 4 to each "target
    # coordinate".  The resulting resampled image should show the
    # overall image shifted -8,-8 voxels towards the origin
    a = np.identity(3)
    a[:2,-1] = 4.
    ir = resample(i, i.coordmap, a, (100,90))
    assert_array_almost_equal(ir.get_data()[42:47,32:47], 3.)
Example #27
def test_resample3d():
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5,0.5,0.5,1]))
    shape = (100,90,80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55,40:55,30:33] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 4x4 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of -4 in each
    # coordinate.  Or, to find the "image" physical coordinates, given
    # the "target" physical coordinates, we add 4 to each "target
    # coordinate".  The resulting resampled image should show the
    # overall image shifted [-6,-8,-10] voxels towards the origin
    a = np.identity(4)
    a[:3,-1] = [3,4,5]
    ir = resample(i, i.coordmap, a, (100,90,80))
    assert_array_almost_equal(ir.get_data()[44:49,32:47,20:23], 3.)
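The same voxel-size bookkeeping as in the 2d case, sketched standalone: world shifts of [3, 4, 5] over 0.5-unit voxels are [6, 8, 10] voxel shifts, which is why [50:55,40:55,30:33] is checked at [44:49,32:47,20:23].

import numpy as np

print(np.array([3., 4., 5.]) / 0.5)   # [ 6.  8. 10.] voxels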
Example #28
def test_resample3d():
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    shape = (100, 90, 80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 4x4 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of -4 in each
    # coordinate.  Or, to find the "image" physical coordinates, given
    # the "target" physical coordinates, we add 4 to each "target
    # coordinate".  The resulting resampled image should show the
    # overall image shifted [-6,-8,-10] voxels towards the origin
    a = np.identity(4)
    a[:3, -1] = [3, 4, 5]
    ir = resample(i, i.coordmap, a, (100, 90, 80))
    assert_array_almost_equal(ir.get_data()[44:49, 32:47, 20:23], 3.)
Example #29
def test_slice_from_3d():
    # Resample a 3d image, returning a zslice, yslice and xslice
    #
    # This example creates a coordmap that coincides with
    # a given z, y, or x slice of an image, and checks that
    # resampling agrees with the data in the given slice.
    shape = (100, 90, 80)
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    img = Image(np.ones(shape), g)
    img.get_data()[50:55, 40:55, 30:33] = 3
    I = np.identity(4)
    zsl = slices.zslice(26, ((0, 49.5), 100), ((0, 44.5), 90), img.reference)
    ir = resample(img, zsl, I, (100, 90))
    assert_array_almost_equal(ir.get_data(), img[:, :, 53].get_data())
    ysl = slices.yslice(22, ((0, 49.5), 100), ((0, 39.5), 80), img.reference)
    ir = resample(img, ysl, I, (100, 80))
    assert_array_almost_equal(ir.get_data(), img[:, 45, :].get_data())
    xsl = slices.xslice(15.5, ((0, 44.5), 90), ((0, 39.5), 80), img.reference)
    ir = resample(img, xsl, I, (90, 80))
    assert_array_almost_equal(ir.get_data(), img[32, :, :].get_data())
Example #30
File: fiac_example.py, Project: FNNDSC/nipy
def run_model(subj, run):
    """
    Single subject fitting of FIAC model
    """
    #----------------------------------------------------------------------
    # Set initial parameters of the FIAC dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 191
    # The TR of the experiment
    TR = 2.5 
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each 
    # volume in the fMRI data
    volume_times = np.arange(nvol)*TR + Tstart
    # This recarray of times has one column named 't'
    # It is used in the function design.event_design
    # to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data
    # relevant to this subject/run
    path_info = futil.path_info(subj,run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in futil
    # that reformat the original FIAC-supplied format into something where the
    # factorial structure of the design is more explicit.  This has already
    # been run once, and get_experiment_initial() will simply load the
    # newly-formatted design description files (.csv) into record arrays.
    experiment, initial = futil.get_experiment_initial(path_info)

    # Create design matrices for the "initial" and "experiment" factors,
    # saving the default contrasts. 

    # The function event_design will create
    # design matrices, which in the case of "experiment"
    # will have num_columns =
    # (# levels of speaker) * (# levels of sentence) * len(delay.spectral) =
    #      2 * 2 * 2 = 8
    # For "initial", there will be
    # (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that 
    # are described in 
    # 
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    #    Evans, A.C. (2002). \'Estimating the delay of the response in fMRI
    #    data.\' NeuroImage, 16:593-606.

    # The contrasts, cons_exper,
    # is a dictionary with keys: ['constant_0', 'constant_1', 'speaker_0', 
    # 'speaker_1',
    # 'sentence_0', 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1']
    # representing the four default contrasts: constant, main effects + 
    # interactions,
    # each convolved with 2 HRFs in delay.spectral. Its values
    # are matrices with 8 columns.

    # XXX use the hrf __repr__ for naming contrasts

    X_exper, cons_exper = design.event_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral)

    # The contrasts for 'initial' are ignored 
    # as they are "uninteresting" and are included
    # in the model as confounds.

    X_initial, _ = design.event_design(initial, volume_times_rec,
                                       hrfs=[hrf.glover])

    # In addition to factors, there is typically a "drift" term
    # In this case, the drift is a natural cubic spline with
    # a knot at the midpoint (volume_times.mean())

    vt = volume_times # shorthand
    drift = np.array( [vt**i for i in range(4)] +
                      [(vt-vt.mean())**3 * (np.greater(vt, vt.mean()))] )
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol,5) so that it will have
    # the same number of rows as X_initial and X_exper.
    drift = drift.T

    # There are helper functions to create these drifts: design.fourier_basis,
    # design.natural_spline.  Therefore, the above is equivalent (except for
    # the normalization by max for numerical stability) to
    #
    # >>> drift = design.natural_spline(t, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts which has the same keys
    # as cons_exper, but its values are arrays with 15 columns, with the
    # non-zero entries matching the columns of X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (X_initial, {}),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    # XXX - this seems to be broken right now, it's producing bogus warnings.
    ## for k in cons.keys():
    ##     if not isestimable(X, cons[k]):
    ##         del(cons[k])
    ##         warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the
    # two coefficients, one for each HRF in delay.spectral

    cons['speaker'] = np.vstack([cons['speaker_0'], cons['speaker_1']])
    cons['sentence'] = np.vstack([cons['sentence_0'], cons['sentence_1']])
    cons['sentence:speaker'] = np.vstack([cons['sentence:speaker_0'],
                                          cons['sentence:speaker_1']])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array
    # It is transposed to have time as the first dimension,
    # i.e. fmri[t] gives the t-th volume.

    fmri_lpi = futil.get_fmri(path_info) # an LPIImage
    fmri_im = Image(fmri_lpi._data, fmri_lpi.coordmap)
    fmri_im = image_rollaxis(fmri_im, 't')

    fmri = fmri_im.get_data() # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nslice, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS (ordinary
    # least squares) fit, whose residuals are used to estimate an AR(1)
    # parameter for each voxel.

    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nslice):
        d = np.array(fmri[:,s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to nearest one-hundredth
    # and group voxels by their rounded ar1 value,
    # fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)

    ar1 *= 100
    ar1 = ar1.astype(np.int) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this

    fcons = {}; tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances

    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii' \
                                    % (n,v)), dtype=np.float,
                                    shape=volshape, mode='w+')
        output[n] = tempdict

    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix='%s.nii' % n),
                              dtype=np.float, shape=volshape, mode='w+')

    # Loop over the unique values of ar1

    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:,armask]
        results = m.fit(d)

        # Output the results for each contrast

        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect

        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info,tcons,fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, '%s.nii' % v))

    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))
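The first-stage loop above estimates the AR(1) parameter per voxel as the lag-1 autocorrelation of the OLS residuals, then truncates it to a 0.01 grid so voxels can be pooled by value in the second stage. A standalone illustration on synthetic residuals (not FIAC data):

import numpy as np

rng = np.random.default_rng(0)
resid = rng.standard_normal((191, 10))             # fake residuals: time x voxels
ar1 = (resid[1:] * resid[:-1]).sum(0) / (resid ** 2).sum(0)
ar1 = (ar1 * 100).astype(int) / 100.               # same truncation as in run_model
print(ar1.shape, float(ar1.min()), float(ar1.max()))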
Example #31
class AR1(object):
    """
    Second pass through fmri_image.

    Parameters
    ----------
    fmri_image : `FmriImageList`
       object returning 4D array from ``np.asarray``, having attribute
       ``volume_start_times`` (if `volume_start_times` is None), and
       such that ``object[0]`` returns something with attributes ``shape``
    formula :  :class:`nipy.algorithms.statistics.formula.Formula`
    rho : ``Image``
       image of AR(1) coefficients.  Returning data from
       ``rho.get_data()``, and having attribute ``coordmap``
    outputs :
    volume_start_times : 
    """
    def __init__(self,
                 fmri_image,
                 formula,
                 rho,
                 outputs=[],
                 volume_start_times=None):
        self.fmri_image = fmri_image
        try:
            self.data = fmri_image.get_data()
        except AttributeError:
            self.data = fmri_image.get_list_data(axis=0)
        self.formula = formula
        self.outputs = outputs
        # Cleanup rho values, truncate them to a scale of 0.01
        g = copy.copy(rho.coordmap)
        rho = rho.get_data()
        m = np.isnan(rho)
        r = (np.clip(rho, -1, 1) * 100).astype(np.int) / 100.
        r[m] = np.inf
        self.rho = Image(r, g)
        if volume_start_times is None:
            self.volume_start_times = self.fmri_image.volume_start_times
        else:
            self.volume_start_times = volume_start_times

    def execute(self):

        iterable = parcels(self.rho, exclude=[np.inf])

        def model_params(i):
            return (self.rho.get_data()[i].mean(), )

        # Generates indexer, data, model
        m = model_generator(self.formula,
                            self.data,
                            self.volume_start_times,
                            iterable=iterable,
                            model_type=ARModel,
                            model_params=model_params)
        # Generates indexer, data, 2D results
        r = results_generator(m)

        def reshape(i, x):
            """
            To write output, arrays have to be reshaped --
            this function does the appropriate reshaping for the two
            passes of fMRIstat.

            These passes are:
              i) 'slices through the z-axis'
              ii) 'parcels of approximately constant AR1 coefficient'
            """
            if len(x.shape) == 2:  # 2D input matrix
                if type(i) is type(1):  # integer indexing
                    # reshape to ND (where N is probably 4)
                    x.shape = (x.shape[0], ) + self.fmri_image[0].shape[1:]
                # Convert lists to tuples, put anything else into a tuple
                if type(i) not in [type([]), type(())]:
                    i = (i, )
                else:
                    i = tuple(i)
                # Add : to indexing
                i = (slice(None, None, None), ) + tuple(i)
            else:  # not 2D
                if type(i) is type(1):  # integer indexing
                    x.shape = self.fmri_image[0].shape[1:]
            return i, x

        # Put results pulled from results generator r, into outputs
        o = generate_output(self.outputs, r, reshape=reshape)
Example #32
File: model.py, Project: alexsavio/nipy
class AR1(object):
    """
    Second pass through fmri_image.

    Parameters
    ----------
    fmri_image : `FmriImageList`
       object returning 4D array from ``np.asarray``, having attribute
       ``volume_start_times`` (if `volume_start_times` is None), and
       such that ``object[0]`` returns something with attributes ``shape``
    formula :  :class:`nipy.algorithms.statistics.formula.Formula`
    rho : ``Image``
       image of AR(1) coefficients.  Returning data from
       ``rho.get_data()``, and having attribute ``coordmap``
    outputs :
    volume_start_times : 
    """

    def __init__(self, fmri_image, formula, rho, outputs=[],
                 volume_start_times=None):
        self.fmri_image = fmri_image
        try:
            self.data = fmri_image.get_data()
        except AttributeError:
            self.data = fmri_image.get_list_data(axis=0)
        self.formula = formula
        self.outputs = outputs
        # Cleanup rho values, truncate them to a scale of 0.01
        g = copy.copy(rho.coordmap)
        rho = rho.get_data()
        m = np.isnan(rho)
        r = (np.clip(rho,-1,1) * 100).astype(np.int) / 100.
        r[m] = np.inf
        self.rho = Image(r, g)
        if volume_start_times is None:
            self.volume_start_times = self.fmri_image.volume_start_times
        else:
            self.volume_start_times = volume_start_times

    def execute(self):

        iterable = parcels(self.rho, exclude=[np.inf])
        def model_params(i):
            return (self.rho.get_data()[i].mean(),)
        # Generates indexer, data, model
        m = model_generator(self.formula, self.data,
                            self.volume_start_times,
                            iterable=iterable,
                            model_type=ARModel,
                            model_params=model_params)
        # Generates indexer, data, 2D results
        r = results_generator(m)

        def reshape(i, x):
            """
            To write output, arrays have to be reshaped --
            this function does the appropriate reshaping for the two
            passes of fMRIstat.

            These passes are:
              i) 'slices through the z-axis'
              ii) 'parcels of approximately constant AR1 coefficient'
            """
            if len(x.shape) == 2: # 2D input matrix
                if type(i) is type(1): # integer indexing
                    # reshape to ND (where N is probably 4)
                    x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:]
                # Convert lists to tuples, put anything else into a tuple
                if type(i) not in [type([]), type(())]:
                    i = (i,)
                else:
                    i = tuple(i)
                # Add : to indexing
                i = (slice(None,None,None),) + tuple(i)
            else: # not 2D
                if type(i) is type(1): # integer indexing
                    x.shape = self.fmri_image[0].shape[1:]
            return i, x

        # Put results pulled from results generator r, into outputs
        o = generate_output(self.outputs, r, reshape=reshape)