def __init__(self, data, affine, coord_sys, metadata=None):
    """ Creates a new nipy image with an affine mapping.

    Parameters
    ----------
    data : ndarray
        ndarray representing the data.
    affine : 4x4 ndarray
        affine transformation to the reference coordinate system
    coord_sys : string
        name of the reference coordinate system.
    """
    function_domain = CoordinateSystem(['axis%d' % i for i in range(3)],
                                       name=coord_sys)
    function_range = CoordinateSystem(['x', 'y', 'z'], name='world')
    spatial_coordmap = AffineTransform(function_domain, function_range,
                                       affine)
    nonspatial_names = ['axis%d' % i for i in range(3, data.ndim)]
    if nonspatial_names:
        nonspatial_coordmap = AffineTransform.from_start_step(
            nonspatial_names, nonspatial_names,
            [0] * (data.ndim - 3), [1] * (data.ndim - 3))
        full_coordmap = cmap_product(spatial_coordmap, nonspatial_coordmap)
    else:
        full_coordmap = spatial_coordmap
    self._spatial_coordmap = spatial_coordmap
    self.coord_sys = coord_sys
    Image.__init__(self, data, full_coordmap)
    if metadata is not None:
        self.metadata = metadata
def __init__(self, data, affine, axis_names, metadata={}, lps=True):
    """ Creates a new nipy image with an affine mapping.

    Parameters
    ----------
    data : ndarray
        ndarray representing the data.
    affine : 4x4 ndarray
        affine transformation to the reference coordinate system
    axis_names : [string]
        names of the axes in the coordinate system.
    """
    if len(axis_names) < 3:
        raise ValueError('XYZImage must have a minimum of 3 axes')
    # The first three axes are assumed to be the
    # spatial ones
    xyz_transform = XYZTransform(affine, axis_names[:3], lps)
    nonspatial_names = axis_names[3:]
    if nonspatial_names:
        nonspatial_affine_transform = AffineTransform.from_start_step(
            nonspatial_names, nonspatial_names,
            [0] * (data.ndim - 3), [1] * (data.ndim - 3))
        full_dimensional_affine_transform = cmap_product(
            xyz_transform, nonspatial_affine_transform)
    else:
        full_dimensional_affine_transform = xyz_transform
    self._xyz_transform = xyz_transform
    Image.__init__(self, data, full_dimensional_affine_transform,
                   metadata=metadata)
def __init__(self, resels=None, fwhm=None, **keywords):
    """ Initialize resel image

    Parameters
    ----------
    resels : `core.api.Image`
        Image of resel per voxel values.
    fwhm : `core.api.Image`
        Image of FWHM values.
    keywords : ``dict``
        Passed as keywords arguments to `core.api.Image`
    """
    if not resels and not fwhm:
        raise ValueError('need either a resels image or an FWHM image')
    if fwhm is not None:
        fwhm = Image(fwhm, **keywords)
        Resels.__init__(self, fwhm, resels=resels, fwhm=fwhm)
    if resels is not None:
        resels = Image(resels, **keywords)
        Resels.__init__(self, resels, resels=resels, fwhm=fwhm)
    if not self.fwhm:
        self.fwhm = Image(self.resel2fwhm(self.resels[:]),
                          coordmap=self.coordmap, **keywords)
    if not self.resels:
        self.resels = Image(self.fwhm2resel(self.fwhm[:]),
                            coordmap=self.coordmap, **keywords)
def get_nifti(self, topo_view, base_nifti=None, **kwargs):
    """ Process the nifti

    Parameters
    ----------
    topo_view: array-like
        Topological view to create nifti. 3D.

    Returns
    -------
    image: nipy image
        Nifti image from topological view.
    """
    if base_nifti is None:
        assert self.base_nifti is not None, ("`base.nii` not in dataset "
                                             "directory. You may need to "
                                             "reprocess.")
        base_nifti = self.base_nifti
        image = Image.from_image(base_nifti, data=topo_view)
    else:
        if isinstance(base_nifti, str):
            base_nifti = load_image(base_nifti)
        base2new_affine = np.linalg.inv(base_nifti.affine).dot(
            self.base_nifti.affine)
        cmap = AffineTransform("kji", "zxy", base2new_affine)
        image = Image.from_image(base_nifti, data=topo_view, coordmap=cmap)
    return image
def test_nonaffine():
    # resamples an image along a curve through the image.
    #
    # FIXME: use the reference.evaluate.Grid to perform this nicer
    # FIXME: Remove pylab references
    def curve(x):  # function accept N by 1, returns N by 2
        return (np.vstack([5 * np.sin(x.T), 5 * np.cos(x.T)]).T + [52, 47])

    for names in (('xy', 'ij', 't', 'u'), ('ij', 'xy', 't', 's')):
        in_names, out_names, tin_names, tout_names = names
        g = AffineTransform.from_params(in_names, out_names, np.identity(3))
        img = Image(np.ones((100, 90)), g)
        img.get_data()[50:55, 40:55] = 3.
        tcoordmap = AffineTransform.from_start_step(tin_names, tout_names,
                                                    [0], [np.pi * 1.8 / 100])
        ir = resample(img, tcoordmap, curve, (100,))
    if gui_review:
        import pylab
        pylab.figure(num=3)
        pylab.imshow(img, interpolation='nearest')
        d = curve(np.linspace(0, 1.8 * np.pi, 100))
        pylab.plot(d[0], d[1])
        pylab.gca().set_ylim([0, 99])
        pylab.gca().set_xlim([0, 89])
        pylab.figure(num=4)
        pylab.plot(ir.get_data())
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------
    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = formats.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()
    # Get info from NIFTI header, if present, to tell which axes are
    # which.  This is a NIFTI-specific kludge, that might be abstracted
    # out into the image backend in a general way.  Similarly for
    # getting zooms
    # axis_renames is a dictionary: dict([(int, str)])
    # that has keys in range(3)
    # the axes of the Image are renamed from 'ijk'
    # using these names
    try:
        axis_renames = hdr.get_axis_renames()
    except (TypeError, AttributeError):
        axis_renames = {}
    try:
        zooms = hdr.get_zooms()
    except AttributeError:
        zooms = np.ones(len(shape))
    # affine_transform is a 3-d transform
    affine_transform3d, affine_transform = \
        affine_transform_from_array(aff, 'ijk', pixdim=zooms[3:])
    img = Image(img.get_data(),
                affine_transform.renamed_domain(axis_renames))
    img.header = hdr
    return img
def test_labels1():
    img = load_image(funcfile)
    data = img.get_data()
    parcelmap = Image(img[0].get_data(), AfT("kji", "zyx", np.eye(4)))
    parcelmap = (parcelmap.get_data() * 100).astype(np.int32)
    v = 0
    for i, d in axis0_generator(data, parcels(parcelmap)):
        v += d.shape[1]
    assert_equal(v, parcelmap.size)
def test_output_dtypes():
    shape = (4, 2, 3)
    rng = np.random.RandomState(19441217)  # IN-S BD
    data = rng.normal(4, 20, size=shape)
    aff = np.diag([2.2, 3.3, 4.1, 1])
    cmap = vox2mni(aff)
    img = Image(data, cmap)
    fname_root = 'my_file'
    with InTemporaryDirectory():
        for ext in 'img', 'nii':
            out_fname = fname_root + '.' + ext
            # Default is for data to come from data dtype
            save_image(img, out_fname)
            img_back = load_image(out_fname)
            hdr = img_back.metadata['header']
            assert_dt_no_end_equal(hdr.get_data_dtype(), np.float)
            del img_back  # lets window re-use the file
            # All these types are OK for both output formats
            for out_dt in 'i2', 'i4', np.int16, '<f4', '>f8':
                # Specified output dtype
                save_image(img, out_fname, out_dt)
                img_back = load_image(out_fname)
                hdr = img_back.metadata['header']
                assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt)
                del img_back  # windows file re-use
                # Output comes from data by default
                data_typed = data.astype(out_dt)
                img_again = Image(data_typed, cmap)
                save_image(img_again, out_fname)
                img_back = load_image(out_fname)
                hdr = img_back.metadata['header']
                assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt)
                del img_back
                # Even if header specifies otherwise
                in_hdr = Nifti1Header()
                in_hdr.set_data_dtype(np.dtype('c8'))
                img_more = Image(data_typed, cmap, metadata={'header': in_hdr})
                save_image(img_more, out_fname)
                img_back = load_image(out_fname)
                hdr = img_back.metadata['header']
                assert_dt_no_end_equal(hdr.get_data_dtype(), out_dt)
                del img_back
                # But can come from header if specified
                save_image(img_more, out_fname, dtype_from='header')
                img_back = load_image(out_fname)
                hdr = img_back.metadata['header']
                assert_dt_no_end_equal(hdr.get_data_dtype(), 'c8')
                del img_back
        # u2 only OK for nifti
        save_image(img, 'my_file.nii', 'u2')
        img_back = load_image('my_file.nii')
        hdr = img_back.metadata['header']
        assert_dt_no_end_equal(hdr.get_data_dtype(), 'u2')
        # Check analyze can't save u2 datatype
        assert_raises(HeaderDataError, save_image, img, 'my_file.img', 'u2')
        del img_back
def test_resample2d2():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    A = np.identity(2)
    b = np.ones(2) * 4
    ir = resample(i, i.coordmap, (A, b), (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
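# Note (added for illustration, not part of the original test module): the
# homogeneous 3x3 matrix built in the resample tests and the (A, b) pair
# passed above encode the same physical-coordinate mapping.  A minimal,
# standalone numpy check of that equivalence:
import numpy as np

a = np.identity(3)
a[:2, -1] = 4.              # homogeneous form: linear part plus translation
A = np.identity(2)          # linear part
b = np.ones(2) * 4          # translation part

x = np.array([10., 20.])                        # an arbitrary physical point
assert np.allclose(a.dot(np.append(x, 1))[:2],  # homogeneous matrix applied
                   A.dot(x) + b)                # (A, b) pair applied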
def test_rotate2d():
    # Rotate an image in 2d on a square grid, should result in transposed image
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.7, 0.5, 1]))
    g2 = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.7, 1]))
    i = Image(np.ones((100, 100)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55, 40:55] = 3.
    a = np.array([[0, 1, 0],
                  [1, 0, 0],
                  [0, 0, 1]], np.float)
    ir = resample(i, g2, a, (100, 100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
def test_resample2d3():
    # Same as test_resample2d, only a different way of specifying
    # the transform: here it is an (A,b) pair
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    ir = resample(i, i.coordmap, a, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------
    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = nib.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()
    # If the header implements it, get a list of names, one per axis,
    # and put this into the coordinate map.  In fact, no image format
    # implements this at the moment, so in practice, the following code
    # is not currently called.
    axis_renames = {}
    try:
        axis_names = hdr.axis_names
    except AttributeError:
        pass
    else:
        # axis_renames is a dictionary: dict([(int, str)]) that has keys
        # in range(3).  The axes of the Image are renamed from 'ijk' using
        # these names
        for i in range(min([len(axis_names), 3])):
            name = axis_names[i]
            if not (name is None or name == ''):
                axis_renames[i] = name
    zooms = hdr.get_zooms()
    # affine_transform is a 3-d transform
    affine_transform3d, affine_transform = \
        affine_transform_from_array(aff, 'ijk', pixdim=zooms[3:])
    img = Image(img.get_data(),
                affine_transform.renamed_domain(axis_renames))
    img.header = hdr
    return img
def test_rotate3d():
    # Rotate / transpose a 3d image on a non-square grid
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.6, 0.7, 1]))
    g2 = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.7, 0.6, 1]))
    shape = (100, 90, 80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    a = np.array([[1, 0, 0, 0],
                  [0, 0, 1, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 1.]])
    ir = resample(i, g2, a, (100, 80, 90))
    assert_array_almost_equal(np.transpose(ir.get_data(), (0, 2, 1)),
                              i.get_data())
def test_resample_outvalue():
    # Test resampling with different modes, constant values, datatypes, orders
    def func(xyz):
        return xyz + np.asarray([1, 0, 0])

    coordmap = vox2mni(np.eye(4))
    arr = np.arange(3 * 3 * 3).reshape(3, 3, 3)
    aff = np.eye(4)
    aff[0, 3] = 1.  # x translation
    for mapping, dt, order in product(
            [aff, func],
            [np.int8, np.intp, np.int32, np.int64, np.float32, np.float64],
            [0, 1, 3]):
        img = Image(arr.astype(dt), coordmap)
        # Test constant value of 0
        img2 = resample(img, coordmap, mapping, img.shape, order=order,
                        mode='constant', cval=0.)
        exp_arr = np.zeros(arr.shape)
        exp_arr[:-1, :, :] = arr[1:, :, :]
        assert_array_almost_equal(img2.get_data(), exp_arr)
        # Test constant value of 1
        img2 = resample(img, coordmap, mapping, img.shape, order=order,
                        mode='constant', cval=1.)
        exp_arr[-1, :, :] = 1
        assert_array_almost_equal(img2.get_data(), exp_arr)
        # Test nearest neighbor
        img2 = resample(img, coordmap, mapping, img.shape, order=order,
                        mode='nearest')
        exp_arr[-1, :, :] = arr[-1, :, :]
        assert_array_almost_equal(img2.get_data(), exp_arr)
    # Test img2img
    target_coordmap = vox2mni(aff)
    target = Image(arr, target_coordmap)
    img2 = resample_img2img(img, target, 3, 'nearest')
    assert_array_almost_equal(img2.get_data(), exp_arr)
    img2 = resample_img2img(img, target, 3, 'constant', cval=1.)
    exp_arr[-1, :, :] = 1
    assert_array_almost_equal(img2.get_data(), exp_arr)
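# Note (added for clarity, not part of the original test module): both the
# affine `aff` and the callable `func` above add 1 to the first coordinate,
# so output voxel i samples input voxel i + 1, and the final output slice
# has no source voxel and is filled according to `mode`/`cval`.  The
# expected arrays in the test follow exactly that rule:
import numpy as np

arr = np.arange(3 * 3 * 3).reshape(3, 3, 3)
expected_constant0 = np.zeros(arr.shape)
expected_constant0[:-1] = arr[1:]        # shifted data; last slice stays at cval=0
expected_nearest = expected_constant0.copy()
expected_nearest[-1] = arr[-1]           # 'nearest' repeats the edge slice instead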
def test_2d_from_3d():
    # Resample a 3d image on a 2d affine grid
    # This example creates a coordmap that coincides with
    # the 10th slice of an image, and checks that
    # resampling agrees with the data in the 10th slice.
    shape = (100, 90, 80)
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    a = np.identity(4)
    g2 = ArrayCoordMap.from_shape(g, shape)[10]
    ir = resample(i, g2.coordmap, a, g2.shape)
    assert_array_almost_equal(ir.get_data(), i[10].get_data())
def test_rollaxis():
    data = np.random.standard_normal((3, 4, 7, 5))
    im = Image(data, AffineTransform.from_params('ijkl', 'xyzt',
                                                 np.diag([1, 2, 3, 4, 1])))
    # for the inverse we must specify an integer
    yield assert_raises, ValueError, image.rollaxis, im, 'i', True
    # Check that rollaxis preserves diagonal affines, as claimed
    yield assert_almost_equal, image.rollaxis(im, 1).affine, \
        np.diag([2, 1, 3, 4, 1])
    yield assert_almost_equal, image.rollaxis(im, 2).affine, \
        np.diag([3, 1, 2, 4, 1])
    yield assert_almost_equal, image.rollaxis(im, 3).affine, \
        np.diag([4, 1, 2, 3, 1])
    # Check that ambiguous axes raise an exception
    # 'l' appears both as an axis and a reference coord name
    # and in different places
    im_amb = Image(data, AffineTransform.from_params('ijkl', 'xylt',
                                                     np.diag([1, 2, 3, 4, 1])))
    yield assert_raises, ValueError, image.rollaxis, im_amb, 'l'
    # But if it's unambiguous, then
    # 'l' can appear both as an axis and a reference coord name
    im_unamb = Image(data, AffineTransform.from_params('ijkl', 'xyzl',
                                                       np.diag([1, 2, 3, 4, 1])))
    im_rolled = image.rollaxis(im_unamb, 'l')
    yield assert_almost_equal, im_rolled.get_data(), \
        im_unamb.get_data().transpose([3, 0, 1, 2])
    for i, o, n in zip('ijkl', 'xyzt', range(4)):
        im_i = image.rollaxis(im, i)
        im_o = image.rollaxis(im, o)
        im_n = image.rollaxis(im, n)
        yield assert_almost_equal, im_i.get_data(), im_o.get_data()
        yield assert_almost_equal, im_i.affine, im_o.affine
        yield assert_almost_equal, im_n.get_data(), im_o.get_data()
        for _im in [im_n, im_o, im_i]:
            im_n_inv = image.rollaxis(_im, n, inverse=True)
            yield assert_almost_equal, im_n_inv.affine, im.affine
            yield assert_almost_equal, im_n_inv.get_data(), im.get_data()
def load(filename):
    """Load an image from the given filename.

    Parameters
    ----------
    filename : string
        Should resolve to a complete filename path.

    Returns
    -------
    image : An `Image` object
        If successful, a new `Image` object is returned.

    See Also
    --------
    save_image : function for saving images
    fromarray : function for creating images from numpy arrays

    Examples
    --------
    >>> from nipy.io.api import load_image
    >>> from nipy.testing import anatfile
    >>> img = load_image(anatfile)
    >>> img.shape
    (33, 41, 25)
    """
    img = formats.load(filename)
    aff = img.get_affine()
    shape = img.get_shape()
    hdr = img.get_header()
    # Get info from NIFTI header, if present, to tell which axes are
    # which.  This is a NIFTI-specific kludge, that might be abstracted
    # out into the image backend in a general way.  Similarly for
    # getting zooms
    try:
        fps = hdr.get_dim_info()
    except (TypeError, AttributeError):
        fps = (None, None, None)
    ijk = ijk_from_fps(fps)
    try:
        zooms = hdr.get_zooms()
    except AttributeError:
        zooms = np.ones(len(shape))
    aff = _match_affine(aff, len(shape), zooms)
    coordmap = coordmap_from_affine(aff, ijk)
    img = Image(img.get_data(), coordmap)
    img.header = hdr
    return img
def sources_to_nifti(CHECKPOINT, MASKMAT, BASENIFTI, ONAME, savepath, voxels, win):
    bnifti = load_image(BASENIFTI)
    mask = loadmat(MASKMAT)['mask']
    model = np.load(CHECKPOINT)  # Numpy array of sources from Infomax ICA

    for i in range(len(model)):  # Goes component by component
        W = model[i, :].reshape([voxels, win])
        f = zeros(len(mask))
        idx = where(mask == 1)
        data = zeros((bnifti.shape[0], bnifti.shape[1], bnifti.shape[2],
                      W.shape[1]))
        f[idx[0].tolist()] = detrend(W) / std(W)

        for j in range(0, W.shape[1]):
            data[:, :, :, j] = reshape(f, (bnifti.shape[0], bnifti.shape[1],
                                           bnifti.shape[2]), order='F')

        img = Image.from_image(bnifti, data=data)
        os.chdir(savepath)
        # Where result should be saved and under what name
        fn = ONAME + "%s.nii" % (str(i))
        save_image(img, fn)
def test_resample2d1():
    # Tests the same as test_resample2d, only using a callable instead of
    # an AffineTransform instance
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    a[:2, -1] = 4.
    A = np.identity(2)
    b = np.ones(2) * 4

    def mapper(x):
        return np.dot(x, A.T) + b
    ir = resample(i, i.coordmap, mapper, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
def expandFrames(imgFn, saveDir):
    """
    Expand a timeseries image into a set of individual frames in the
    specified directory

    Inputs:
    - imgFn: the timeseries image's filename
    - saveDir: the directory in which the frames will be stored

    Returns:
    - frameFns: the list of filenames
    """
    # Load the image
    img = load_image(imgFn)
    coord = img.coordmap
    frameFns = []

    # Make the save directory
    framesDir = saveDir + '/frames/'  # need to check for //
    # check for duplicate //
    framesDir = framesDir.replace("//", '/')
    if not os.path.exists(framesDir):
        os.mkdir(framesDir)

    for i in xrange(img.get_data().shape[3]):
        frame = img[:, :, :, i].get_data()[:, :, :, None]
        frameImg = Image(frame, coord)
        outFn = framesDir + str(i).zfill(3) + ".nii.gz"
        save_image(frameImg, outFn)
        frameFns.append(outFn)

    return frameFns
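# Hypothetical usage of expandFrames, added for illustration: the 4D input
# file and the output directory below are made-up names, and the output
# directory must already exist.
frameFns = expandFrames('subject01_bold.nii.gz', '/tmp/subject01')
print(frameFns[:2])   # e.g. ['/tmp/subject01/frames/000.nii.gz',
                      #       '/tmp/subject01/frames/001.nii.gz']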
def __init__(self, filename, coordmap, shape, clobber=False):
    self.filename = filename
    self._im_data = np.zeros(shape)
    self._im = Image(self._im_data, coordmap)
    # Using a dangerous undocumented API here
    self.clobber = clobber
    self._flushed = False
def test_resample2d():
    g = AffineTransform.from_params('ij', 'xy', np.diag([0.5, 0.5, 1]))
    i = Image(np.ones((100, 90)), g)
    i.get_data()[50:55, 40:55] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 3x3 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of -4 in each
    # coordinate.  Or, to find the "image" physical coordinates, given
    # the "target" physical coordinates, we add 4 to each "target
    # coordinate".  The resulting resampled image should show the
    # overall image shifted -8,-8 voxels towards the origin
    a = np.identity(3)
    a[:2, -1] = 4.
    ir = resample(i, i.coordmap, a, (100, 90))
    assert_array_almost_equal(ir.get_data()[42:47, 32:47], 3.)
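# Note (added for clarity, not part of the original test module): the
# "-8,-8 voxels" figure in the comment above follows from the 0.5 voxel
# size set in the coordmap `g`.  A minimal standalone numpy check of that
# arithmetic:
import numpy as np

voxel_size = 0.5            # from np.diag([0.5, 0.5, 1]) in the coordmap
world_shift = 4.            # the translation written into a[:2, -1]
voxel_shift = world_shift / voxel_size
print(voxel_shift)          # 8.0 -- the block set at voxels [50:55, 40:55]
                            # is expected at [42:47, 32:47] after resampling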
def randimg_in2out(rng, in_dtype, out_dtype, name):
    in_dtype = np.dtype(in_dtype)
    out_dtype = np.dtype(out_dtype)
    shape = (2, 3, 4)
    if in_dtype.kind in 'iu':
        info = np.iinfo(in_dtype)
        dmin, dmax = info.min, info.max
        # Numpy bug for np < 1.6.0 allows overflow for range that does not fit
        # into C long int (int32 on 32-bit, int64 on 64-bit)
        try:
            data = rng.randint(dmin, dmax, size=shape)
        except ValueError:
            from random import randint
            vals = [randint(dmin, dmax) for v in range(np.prod(shape))]
            data = np.array(vals).astype(in_dtype).reshape(shape)
    elif in_dtype.kind == 'f':
        info = np.finfo(in_dtype)
        dmin, dmax = info.min, info.max
        # set some value for scaling our data
        scale = np.iinfo(np.uint16).max * 2.0
        data = rng.normal(size=shape, scale=scale)
    data[0, 0, 0] = dmin
    data[1, 0, 0] = dmax
    data = data.astype(in_dtype)
    img = Image(data, vox2mni(np.eye(4)))
    # The dtype_from dtype won't be visible until the image is loaded
    newimg = save_image(img, name, dtype_from=out_dtype)
    return newimg.get_data(), data
def save_to_image(data, template_file=DEFAULT_template,
                  output_file=DEFAULT_output):
    template = load_image(template_file)
    newimg = Image(data, vox2mni(template.affine))
    save_image(newimg, output_file)
    return output_file
def save_nii(data, coord, save_file):
    """ Saves a numpy array (data) as a nifti file
    The coordinate space must match the array dimensions
    """
    arr_img = Image(data, coord)
    save_image(arr_img, save_file)
    return 0
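# Hypothetical usage of save_nii, added for illustration: the coordmap is
# typically borrowed from an existing image so that it matches the array's
# dimensions.  The filenames are made-up.
import numpy as np
from nipy import load_image

img = load_image('subject01_T1.nii.gz')
mask = (img.get_data() > img.get_data().mean()).astype(np.int16)
save_nii(mask, img.coordmap, 'subject01_mask.nii.gz')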
def test_resample3d():
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    shape = (100, 90, 80)
    i = Image(np.ones(shape), g)
    i.get_data()[50:55, 40:55, 30:33] = 3.
    # This mapping describes a mapping from the "target" physical
    # coordinates to the "image" physical coordinates.  The 4x4 matrix
    # below indicates that the "target" physical coordinates are related
    # to the "image" physical coordinates by a shift of [-3,-4,-5].  Or,
    # to find the "image" physical coordinates, given the "target"
    # physical coordinates, we add [3,4,5] to the "target" coordinates.
    # The resulting resampled image should show the overall image shifted
    # [-6,-8,-10] voxels towards the origin
    a = np.identity(4)
    a[:3, -1] = [3, 4, 5]
    ir = resample(i, i.coordmap, a, (100, 90, 80))
    assert_array_almost_equal(ir.get_data()[44:49, 32:47, 20:23], 3.)
def convertArrayToImage(seq, coords):
    # Condense the replicated sequence
    seqStack = np.stack(seq, axis=-1)
    # Convert the image sequence into an Image
    seqImg = Image(seqStack, coords)
    return seqImg
def test_synchronized_order():
    data = np.random.standard_normal((3, 4, 7, 5))
    im = Image(data, AffineTransform.from_params('ijkl', 'xyzt',
                                                 np.diag([1, 2, 3, 4, 1])))
    im_scrambled = im.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled = image.synchronized_order(im_scrambled, im)
    yield assert_equal, im_unscrambled.coordmap, im.coordmap
    yield assert_almost_equal, im_unscrambled.get_data(), im.get_data()
    yield assert_equal, im_unscrambled, im
    yield assert_true, im_unscrambled == im
    yield assert_false, im_unscrambled != im
    # the images don't have to be the same shape
    data2 = np.random.standard_normal((3, 11, 9, 4))
    im2 = Image(data2, AffineTransform.from_params('ijkl', 'xyzt',
                                                   np.diag([1, 2, 3, 4, 1])))
    im_scrambled2 = im2.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled2 = image.synchronized_order(im_scrambled2, im)
    yield assert_equal, im_unscrambled2.coordmap, im.coordmap
    # or the same coordmap
    data3 = np.random.standard_normal((3, 11, 9, 4))
    im3 = Image(data3, AffineTransform.from_params('ijkl', 'xyzt',
                                                   np.diag([1, 9, 3, -2, 1])))
    im_scrambled3 = im3.reordered_axes('iljk').reordered_reference('xtyz')
    im_unscrambled3 = image.synchronized_order(im_scrambled3, im)
    yield assert_equal, im_unscrambled3.axes, im.axes
    yield assert_equal, im_unscrambled3.reference, im.reference
def generateTestingPair(betaGT):
    betaGTRads = np.array(betaGT, dtype=np.float64)
    betaGTRads[0:3] = np.copy(np.pi * betaGTRads[0:3] / 180.0)
    ns = 181
    nr = 217
    nc = 181
    left = np.fromfile('data/t2/t2_icbm_normal_1mm_pn0_rf0.rawb',
                       dtype=np.ubyte).reshape(ns, nr, nc)
    left = left.astype(np.float64)
    right = np.fromfile('data/t1/t1_icbm_normal_1mm_pn0_rf0.rawb',
                        dtype=np.ubyte).reshape(ns, nr, nc)
    right = right.astype(np.float64)
    right = rcommon.applyRigidTransformation3D(right, betaGTRads)
    affine_transform = AffineTransform(
        'ijk', ['aligned-z=I->S', 'aligned-y=P->A', 'aligned-x=L->R'],
        np.eye(4))
    left = Image(left, affine_transform)
    right = Image(right, affine_transform)
    nipy.save_image(left, 'moving.nii')
    nipy.save_image(right, 'fixed.nii')
def test_rotate2d3():
    # Another way to rotate/transpose the image, similar to
    # test_rotate2d2 and test_rotate2d, except the world of the
    # output coordmap is the same as the world of the
    # original image.  That is, the data is transposed on disk, but the
    # output coordinates are still 'x', 'y' order, not 'y', 'x' order as
    # above.
    # This functionality may or may not be used a lot: if data is to
    # be transposed but one wanted to keep the NIFTI order of output
    # coords this would do the trick.
    g = AffineTransform.from_params('xy', 'ij', np.diag([0.5, 0.7, 1]))
    i = Image(np.ones((100, 80)), g)
    # This sets the image data by writing into the array
    i.get_data()[50:55, 40:55] = 3.
    a = np.identity(3)
    g2 = AffineTransform.from_params(
        'xy', 'ij', np.array([[0, 0.5, 0],
                              [0.7, 0, 0],
                              [0, 0, 1]]))
    ir = resample(i, g2, a, (80, 100))
    assert_array_almost_equal(ir.get_data().T, i.get_data())
def test_interpolator():
    arr = np.arange(24).reshape((2, 3, 4))
    coordmap = vox2mni(np.eye(4))
    img = Image(arr, coordmap)
    # Interpolate off top right corner with different modes
    interp = ImageInterpolator(img, mode='nearest')
    assert_almost_equal(interp.evaluate([0, 0, 4]), arr[0, 0, -1])
    interp = ImageInterpolator(img, mode='constant', cval=0)
    assert_array_equal(interp.evaluate([0, 0, 4]), 0)
    interp = ImageInterpolator(img, mode='constant', cval=1)
    assert_array_equal(interp.evaluate([0, 0, 4]), 1)
def Fmask(Fimg, dfnum, dfdenom, pvalue=1.0e-04):
    """ Create mask for use in estimating pooled covariance based on
    an F contrast.
    """
    ## TODO check nipy.algorithms.statistics.models.contrast to see if rank is
    ## correctly set -- I don't think it is right now.
    print dfnum, dfdenom
    thresh = FDbn.ppf(pvalue, dfnum, dfdenom)
    return Image(np.greater(np.asarray(Fimg), thresh), Fimg.grid.copy())
def test_slice_from_3d():
    # Resample a 3d image, returning a zslice, yslice and xslice
    #
    # This example creates a coordmap that coincides with
    # a given z, y, or x slice of an image, and checks that
    # resampling agrees with the data in the given slice.
    shape = (100, 90, 80)
    g = AffineTransform.from_params('ijk', 'xyz', np.diag([0.5, 0.5, 0.5, 1]))
    img = Image(np.ones(shape), g)
    img.get_data()[50:55, 40:55, 30:33] = 3
    I = np.identity(4)
    zsl = slices.zslice(26, ((0, 49.5), 100), ((0, 44.5), 90), img.reference)
    ir = resample(img, zsl, I, (100, 90))
    assert_array_almost_equal(ir.get_data(), img[:, :, 53].get_data())
    ysl = slices.yslice(22, ((0, 49.5), 100), ((0, 39.5), 80), img.reference)
    ir = resample(img, ysl, I, (100, 80))
    assert_array_almost_equal(ir.get_data(), img[:, 45, :].get_data())
    xsl = slices.xslice(15.5, ((0, 44.5), 90), ((0, 39.5), 80), img.reference)
    ir = resample(img, xsl, I, (90, 80))
    assert_array_almost_equal(ir.get_data(), img[32, :, :].get_data())
def load_npz(filename):
    """ Load an .npz Image, this .npz file must have at least two arrays

    * data: the data array
    * dimnames: the dimension names of the corresponding grid
    * affine: the affine transformation of grid

    The remaining arrays of .npz file are stored as the 'extra' attribute
    of the Image.
    """
    npzobj = np.load(filename)
    im = Image(npzobj['data'],
               CoordinateMap.from_affine(Affine(npzobj['affine']),
                                         list(npzobj['dimnames']),
                                         npzobj['data'].shape))
    im.extra = {}
    for f in npzobj.files:
        if f not in ['affine', 'dimnames', 'data']:
            im.extra[f] = npzobj[f]
    return im
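# For reference (added; not part of the loader above): a file in the layout
# the docstring describes can be written with plain numpy.savez.  The array
# values and the extra 'mask' key below are illustrative only.
import numpy as np

data = np.zeros((4, 5, 6))
affine = np.eye(4)
dimnames = np.array(['i', 'j', 'k'])
mask = np.ones((4, 5, 6), dtype=bool)    # would end up in im.extra['mask']

# 'data', 'dimnames' and 'affine' are required; any other key becomes 'extra'
np.savez('example_image.npz', data=data, affine=affine,
         dimnames=dimnames, mask=mask)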
def expandTimepoints(imgFn, baseDir):
    """
    Expand an image sequence stored as a .nii.gz file into a collection of
    .nii.gz images (where each frame is its own .nii.gz file)

    Inputs:
    - imgFn: the time series image's filename
    - baseDir: the directory in which a new directory will be created to
      hold the collection of files

    Returns:
    - filenames: list of filenames
    """
    # load the image
    img = load_image(imgFn)
    coord = img.coordmap

    if not os.path.exists(baseDir + 'timepoints/'):
        os.mkdir(baseDir + 'timepoints/')
    outDir = baseDir + 'timepoints/'

    # pull out the first image from the sequence (timepoint 0)
    first = img[:, :, :, 0].get_data()[:, :, :, None]
    first_img = Image(first, coord)
    # save the first image as 000
    save_image(first_img, outDir + str(0).zfill(3) + '.nii.gz')

    # build the list of filenames
    filenames = [outDir + '000.nii.gz']

    # for the remaining images
    for i in xrange(1, img.get_data().shape[3], 1):
        # pull out the image and save it
        tmp = img[:, :, :, i].get_data()[:, :, :, None]
        tmp_img = Image(tmp, coord)
        outFn = str(i).zfill(3) + '.nii.gz'
        save_image(tmp_img, outDir + outFn)
        # add the name of the image to the list of filenames
        filenames.append(outDir + outFn)

    return filenames
def peelTemplateBrain():
    ns = 181
    nr = 217
    nc = 181
    gt_template = np.fromfile('data/phantom_1.0mm_normal_crisp.rawb',
                              dtype=np.ubyte).reshape((ns, nr, nc))
    t1_template = np.fromfile('data/t1/t1_icbm_normal_1mm_pn0_rf0.rawb',
                              dtype=np.ubyte).reshape((ns, nr, nc))
    t2_template = np.fromfile('data/t2/t2_icbm_normal_1mm_pn0_rf0.rawb',
                              dtype=np.ubyte).reshape((ns, nr, nc))
    #t1_template*=((1<=gt_template)*(gt_template<=3)+(gt_template==8))
    t1_template *= ((1 <= gt_template) * (gt_template <= 3))
    t2_template *= ((1 <= gt_template) * (gt_template <= 3))
    affine_transform = AffineTransform(
        'ijk', ['aligned-z=I->S', 'aligned-y=P->A', 'aligned-x=L->R'],
        np.eye(4))
    t1_template = Image(t1_template, affine_transform)
    t2_template = Image(t2_template, affine_transform)
    nipy.save_image(t1_template,
                    'data/t1/t1_icbm_normal_1mm_pn0_rf0_peeled.nii.gz')
    nipy.save_image(t2_template,
                    'data/t2/t2_icbm_normal_1mm_pn0_rf0_peeled.nii.gz')
def make_image(self, X, base_nifti):
    '''Create a nifti image from array.

    Args:
        X (numpy.array): array from which to make nifti image.
        base_nifti (nipy.core.api.Image): nifti image template.

    Returns:
        nipy.core.api.Image
    '''
    image = Image.from_image(base_nifti, data=X)
    return image
def __iter__(self):
    """ Return iterator

    Returns
    -------
    itor : iterator
        self
    """
    if not self.fwhm:
        im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap)
    else:
        im = Image(self.fwhm, clobber=self.clobber, mode='w',
                   coordmap=self.coordmap)
    self.fwhm = im

    if not self.resels:
        im = Image(np.zeros(self.resid.shape), coordmap=self.coordmap)
    else:
        im = Image(self.resels, clobber=self.clobber, mode='w',
                   coordmap=self.coordmap)
    self.resels = im

    return self
def get_nifti(self, W, base_nifti=None):
    """ Function to make a nifti file from weights.

    Parameters
    ----------
    W: array-like
        Weights.
    """
    m, r, c, d = W.shape

    if base_nifti is None:
        base_nifti = self.base_nifti
    else:
        base2new_affine = np.linalg.inv(
            base_nifti.get_affine()).dot(self.base_nifti.get_affine())

    data = np.zeros([r, c, d, m], dtype=W.dtype)

    for i in range(m):
        data[:, :, :, i] = W[i]

    image = Image.from_image(base_nifti, data=data)
    return image
class AR1(object):
    """
    Second pass through fmri_image.

    Parameters
    ----------
    fmri_image : `FmriImageList`
        object returning 4D array from ``np.asarray``, having attribute
        ``volume_start_times`` (if `volume_start_times` is None), and
        such that ``object[0]`` returns something with attributes ``shape``
    formula : :class:`nipy.algorithms.statistics.formula.Formula`
    rho : ``Image``
        image of AR(1) coefficients.  Returning data from
        ``rho.get_data()``, and having attribute ``coordmap``
    outputs :
    volume_start_times :
    """

    def __init__(self, fmri_image, formula, rho, outputs=[],
                 volume_start_times=None):
        self.fmri_image = fmri_image
        try:
            self.data = fmri_image.get_data()
        except AttributeError:
            self.data = fmri_image.get_list_data(axis=0)
        self.formula = formula
        self.outputs = outputs
        # Cleanup rho values, truncate them to a scale of 0.01
        g = copy.copy(rho.coordmap)
        rho = rho.get_data()
        m = np.isnan(rho)
        r = (np.clip(rho, -1, 1) * 100).astype(np.int) / 100.
        r[m] = np.inf
        self.rho = Image(r, g)
        if volume_start_times is None:
            self.volume_start_times = self.fmri_image.volume_start_times
        else:
            self.volume_start_times = volume_start_times

    def execute(self):
        iterable = parcels(self.rho, exclude=[np.inf])

        def model_params(i):
            return (self.rho.get_data()[i].mean(),)
        # Generates indexer, data, model
        m = model_generator(self.formula, self.data,
                            self.volume_start_times,
                            iterable=iterable,
                            model_type=ARModel,
                            model_params=model_params)
        # Generates indexer, data, 2D results
        r = results_generator(m)

        def reshape(i, x):
            """
            To write output, arrays have to be reshaped -- this function
            does the appropriate reshaping for the two passes of fMRIstat.

            These passes are:
              i) 'slices through the z-axis'
              ii) 'parcels of approximately constant AR1 coefficient'
            """
            if len(x.shape) == 2:
                # 2D input matrix
                if type(i) is type(1):
                    # integer indexing
                    # reshape to ND (where N is probably 4)
                    x.shape = (x.shape[0],) + self.fmri_image[0].shape[1:]
                # Convert lists to tuples, put anything else into a tuple
                if type(i) not in [type([]), type(())]:
                    i = (i,)
                else:
                    i = tuple(i)
                # Add : to indexing
                i = (slice(None, None, None),) + tuple(i)
            else:
                # not 2D
                if type(i) is type(1):
                    # integer indexing
                    x.shape = self.fmri_image[0].shape[1:]
            return i, x

        # Put results pulled from results generator r, into outputs
        o = generate_output(self.outputs, r, reshape=reshape)
def run_model(subj, run):
    """
    Single subject fitting of FIAC model
    """
    #----------------------------------------------------------------------
    # Set initial parameters of the FIAC dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 191
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each
    # volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'
    # It is used in the function design.event_design
    # to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data
    # relevant to this subject/run
    path_info = futil.path_info(subj, run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in
    # futil that reformat the original FIAC-supplied format into something
    # where the factorial structure of the design is more explicit.  This
    # has already been run once, and get_experiment_initial() will simply
    # load the newly-formatted design description files (.csv) into record
    # arrays.
    experiment, initial = futil.get_experiment_initial(path_info)

    # Create design matrices for the "initial" and "experiment" factors,
    # saving the default contrasts.

    # The function event_design will create design matrices, which in the
    # case of "experiment" will have num_columns =
    # (# levels of speaker) * (# levels of sentence) * len(delay.spectral) =
    # 2 * 2 * 2 = 8
    # For "initial", there will be
    # (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that
    # are described in
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    #    Evans, A.C. (2002). 'Estimating the delay of the response in fMRI
    #    data.' NeuroImage, 16:593-606.

    # The contrasts, cons_exper, is a dictionary with keys:
    # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1',
    #  'sentence_0', 'sentence_1', 'sentence:speaker_0',
    #  'sentence:speaker_1']
    # representing the four default contrasts: constant, main effects +
    # interactions, each convolved with 2 HRFs in delay.spectral.  Its
    # values are matrices with 8 columns.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.event_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral)

    # The contrasts for 'initial' are ignored as they are "uninteresting"
    # and are included in the model as confounds.
    X_initial, _ = design.event_design(initial, volume_times_rec,
                                       hrfs=[hrf.glover])

    # In addition to factors, there is typically a "drift" term.
    # In this case, the drift is a natural cubic spline with
    # a knot at the midpoint (volume_times.mean())
    vt = volume_times  # shorthand
    drift = np.array([vt**i for i in range(4)] +
                     [(vt - vt.mean())**3 * (np.greater(vt, vt.mean()))])
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol,5) so that it will
    # have the same number of rows as X_initial and X_exper.
    drift = drift.T

    # There are helper functions to create these drifts:
    # design.fourier_basis, design.natural_spline.  Therefore, the above is
    # equivalent (except for the normalization by max for numerical
    # stability) to
    #
    # >>> drift = design.natural_spline(t, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts which have the same
    # keys as cons_exper, but whose values are arrays with 15 columns, with
    # the non-zero entries matching the columns of X corresponding to
    # X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (X_initial, {}),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    # XXX - this seems to be broken right now, it's producing bogus warnings.
    ## for k in cons.keys():
    ##     if not isestimable(X, cons[k]):
    ##         del(cons[k])
    ##         warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on
    # the two coefficients, one for each HRF in delay.spectral
    cons['speaker'] = np.vstack([cons['speaker_0'], cons['speaker_1']])
    cons['sentence'] = np.vstack([cons['sentence_0'], cons['sentence_1']])
    cons['sentence:speaker'] = np.vstack([cons['sentence:speaker_0'],
                                          cons['sentence:speaker_1']])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to
    # have time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri_lpi = futil.get_fmri(path_info)  # an LPIImage
    fmri_im = Image(fmri_lpi._data, fmri_lpi.coordmap)
    fmri_im = image_rollaxis(fmri_im, 't')

    fmri = fmri_im.get_data()  # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nslice, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS
    # (ordinary least squares) fit, whose residuals are used to estimate
    # an AR(1) parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nslice):
        d = np.array(fmri[:, s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to nearest one-hundredth and group voxels by their
    # rounded ar1 value, fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)

    ar1 *= 100
    ar1 = ar1.astype(np.int) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}
    tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii'
                                                       % (n, v)),
                                    dtype=np.float,
                                    shape=volshape, mode='w+')
        output[n] = tempdict
    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii'
                                                 % (n, v)),
                              dtype=np.float,
                              shape=volshape, mode='w+')

    # Loop over the unique values of ar1
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:, armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info, tcons, fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, '%s.nii' % v))
    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))
def make_image(self, X, base_nifti, do_pca=True):
    if self.pca is not None and do_pca and self.pca_components:
        X = self.pca.inverse_transform(X)
    image = Image.from_image(base_nifti, data=X)
    return image