Example #1
def test_save2b():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file.  This example has
    # a non-diagonal affine matrix for the spatial part, but is
    # 'diagonal' for the space part.  This should raise a warning
    # about the 'non-diagonal' affine matrix.

    # make a 5x5 transformation
    step = np.array([3.45,2.3,4.5,6.9])
    A = np.random.standard_normal((4,4))
    B = np.diag(list(step)+[1])
    B[:4,:4] = A

    shape = (13,5,7,3)
    cmap = api.AffineTransform.from_params('ijkt', 'xyzt', B)

    data = np.random.standard_normal(shape)

    img = api.Image(data, cmap)

    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    yield assert_false, np.allclose(img.affine, img2.affine)
    yield assert_true, np.allclose(img.affine[:3,:3], img2.affine[:3,:3])
    yield assert_equal, img.shape, img2.shape
    yield assert_true, np.allclose(np.asarray(img2), np.asarray(img))
Example #2
def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array  # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError('No subjects for %s, %s' % (design, contrast))

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data())
        Ys.append(effect_img.get_data())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed and
    # saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_ds105("fiac_00", "wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd)
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, "%s.nii" % n))
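A minimal usage sketch for the function above; the design and contrast names here are hypothetical and would normally come from the futil path helpers:

# hypothetical call: run the group analysis for one (design, contrast) pair
group_analysis('block', 'speaker_0')
# writes effect.nii, sd.nii and t.nii under DATADIR/group/block/speaker_0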
Example #3
def test_save3():
    # A test to ensure that when a file is saved, the affine
    # and the data agree. In this case, things don't agree:
    # i) the pixdim is off, and
    # ii) this makes the affine off
    step = np.array([3.45,2.3,4.5,6.9])
    shape = (13,5,7,3)
    mni_xyz = mni_csm(3).coord_names
    cmap = AT(CS('jkli'),
              CS(('t',) + mni_xyz[::-1]),
              from_matvec(np.diag([0,3,5,1]), step))
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        tmp = load_image(TMP_FNAME)
        # Detach image from file so we can delete it
        data = tmp.get_data().copy()
        img2 = api.Image(data, tmp.coordmap, tmp.metadata)
        del tmp
    assert_equal(tuple([img.shape[l] for l in [3,2,1,0]]), img2.shape)
    a = np.transpose(np.asarray(img), [3,2,1,0])
    assert_false(np.allclose(img.affine, img2.affine))
    assert_true(np.allclose(a, img2.get_data()))
Example #4
def group_analysis(design, contrast):
    """ Compute group analysis effect, t, sd for `design` and `contrast`

    Saves to disk in 'group' analysis directory

    Parameters
    ----------
    design : {'block', 'event'}
    contrast : str
        contrast name
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subj_con_dirs = futil.subj_des_con_dirs(design, contrast)
    if len(subj_con_dirs) == 0:
        raise ValueError('No subjects for %s, %s' % (design, contrast))

    # Assemble effects and sds into 4D arrays
    sds = []
    Ys = []
    for s in subj_con_dirs:
        sd_img = load_image(pjoin(s, "sd.nii"))
        effect_img = load_image(pjoin(s, "effect.nii"))
        sds.append(sd_img.get_data())
        Ys.append(effect_img.get_data())
    sd = array(sds)
    Y = array(Ys)

    # This function estimates the ratio of the fixed effects variance
    # (sum(1/sd**2, 0)) to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where rvar is the random effects variance.

    # The EM algorithm used is described in:
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15
    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and possibly smoothed it),
    # the corresponding estimate of the effect and its variance is computed and
    # saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00","wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd) 
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], copy(coordmap))
        save_image(im, pjoin(odir, "%s.nii" % n))
Example #5
def save(self):
    """
    Save current Image data to disk
    """
    if not self.clobber and path.exists(self.filename):
        raise ValueError('trying to clobber existing file')
    save_image(self._im, self.filename)
    self._flushed = True
    del self._im
Example #6
def save(self):
    """
    Save current Image data to disk
    """
    if not self.clobber and path.exists(self.filename):
        raise ValueError('trying to clobber existing file')
    save_image(self._im, self.filename)
    self._flushed = True
    del self._im
Example #7
def test_save1():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file

    img = load_image(funcfile)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    yield assert_true, np.allclose(img.affine, img2.affine)
    yield assert_equal, img.shape, img2.shape
    yield assert_true, np.allclose(np.asarray(img2), np.asarray(img))
Example #8
def test_save1():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file
    img = load_image(funcfile)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #9
def test_save1():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file
    img = load_image(funcfile)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #10
def test_write():
    fp, fname = mkstemp('.nii')
    img = load_image(funcfile)
    save_image(img, fname)
    test = FmriImageList.from_image(load_image(fname))
    yield nose.tools.assert_equal, test[0].affine.shape, (4,4)
    yield nose.tools.assert_equal, img[0].affine.shape, (5,4)
    yield nose.tools.assert_true, np.allclose(test[0].affine, img[0].affine[1:])
    # Under windows, if you don't close before delete, you get a
    # locking error.
    os.close(fp)
    os.remove(fname)
Example #11
def fixed_effects(subj, design):
    """ Fixed effects (within subject) for FIAC model

    Finds run by run estimated model results, creates fixed effects results
    image per subject.

    Parameters
    ----------
    subj : int
        subject number 1..6 inclusive
    design : {'standard'}
        design type
    """
    # First, find all the effect and standard deviation images
    # for the subject and this design type
    path_dict = futil.path_info_design(subj, design)
    rootdir = path_dict['rootdir']
    # The output directory
    fixdir = pjoin(rootdir, "fixed")
    # Fetch results images from run estimations
    results = futil.results_table(path_dict)
    # Get our hands on the relevant coordmap to save our results
    coordmap = futil.load_image_fiac("_%02d" % subj,
                                     "wanatomical.nii").coordmap
    # Compute the "fixed" effects for each type of contrast
    for con in results:
        fixed_effect = 0
        fixed_var = 0
        for effect, sd in results[con]:
            effect = load_image(effect).get_data()
            sd = load_image(sd).get_data()
            var = sd ** 2

            # The optimal, in terms of minimum variance, combination of the
            # effects has weights 1 / var
            #
            # XXX regions with 0 variance are set to 0
            # XXX do we want this or np.nan?
            ivar = np.nan_to_num(1. / var)
            fixed_effect += effect * ivar
            fixed_var += ivar

        # Now, compute the fixed effects variance and t statistic
        fixed_sd = np.sqrt(fixed_var)
        isd = np.nan_to_num(1. / fixed_sd)
        fixed_t = fixed_effect * isd

        # Save the results
        odir = futil.ensure_dir(fixdir, con)
        for a, n in zip([fixed_effect, fixed_sd, fixed_t],
                        ['effect', 'sd', 't']):
            im = api.Image(a, copy(coordmap))
            save_image(im, pjoin(odir, '%s.nii' % n))
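The inverse-variance weighting inside the loop above can be checked on toy numbers; this standalone sketch is not part of the source and only mirrors the accumulation done per run:

import numpy as np

# two runs measuring the same effect with different standard deviations
effects = np.array([2.0, 4.0])
sds = np.array([1.0, 2.0])
ivar = 1.0 / sds ** 2                   # weights 1 / var, as in the loop above
fixed_effect = (effects * ivar).sum()   # 2/1 + 4/4 = 3.0
fixed_var = ivar.sum()                  # 1 + 0.25 = 1.25
fixed_sd = np.sqrt(fixed_var)
fixed_t = fixed_effect / fixed_sd       # ~2.68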
Example #12
def fixed_effects(subj, design):
    """ Fixed effects (within subject) for OpenfMRI ds105 model

    Finds run by run estimated model results, creates fixed effects results
    image per subject.

    Parameters
    ----------
    subj : int
        subject number 1..6 inclusive
    design : {'standard'}
        design type
    """
    # First, find all the effect and standard deviation images
    # for the subject and this design type
    path_dict = futil.path_info_design(subj, design)
    rootdir = path_dict['rootdir']
    # The output directory
    fixdir = pjoin(rootdir, "fixed")
    # Fetch results images from run estimations
    results = futil.results_table(path_dict)
    # Get our hands on the relevant coordmap to save our results
    coordmap = futil.load_image_ds105("_%02d" % subj,
                                      "wanatomical.nii").coordmap
    # Compute the "fixed" effects for each type of contrast
    for con in results:
        fixed_effect = 0
        fixed_var = 0
        for effect, sd in results[con]:
            effect = load_image(effect).get_data()
            sd = load_image(sd).get_data()
            var = sd**2

            # The optimal, in terms of minimum variance, combination of the
            # effects has weights 1 / var
            #
            # XXX regions with 0 variance are set to 0
            # XXX do we want this or np.nan?
            ivar = np.nan_to_num(1. / var)
            fixed_effect += effect * ivar
            fixed_var += ivar

        # Now, compute the fixed effects variance and t statistic
        fixed_sd = np.sqrt(fixed_var)
        isd = np.nan_to_num(1. / fixed_sd)
        fixed_t = fixed_effect * isd

        # Save the results
        odir = futil.ensure_dir(fixdir, con)
        for a, n in zip([fixed_effect, fixed_sd, fixed_t],
                        ['effect', 'sd', 't']):
            im = api.Image(a, copy(coordmap))
            save_image(im, pjoin(odir, '%s.nii' % n))
Example #13
def test_save4():
    # Same as test_save3 except we have reordered the 'ijk' input axes.
    shape = (13,5,7,3)
    step = np.array([3.45,2.3,4.5,6.9])
    # When the input coords are in the 'ljki' order, the affines get
    # rearranged.  Note that the 'start' below, must be 0 for
    # non-spatial dimensions, because we have no way to store them in
    # most cases.  For example, a 'start' of [1,5,3,1] would be lost on
    # reload
    cmap = api.Affine.from_start_step('lkji', 'tzyx', [2,5,3,1], step)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    P = np.array([[0,0,0,1,0],
                  [0,0,1,0,0],
                  [0,1,0,0,0],
                  [1,0,0,0,0],
                  [0,0,0,0,1]])
    res = np.dot(P, np.dot(img.affine, P.T))

    # the step part of the affine should be set correctly
    yield assert_array_almost_equal, res[:4,:4], img2.affine[:4,:4]

    # start in the spatial dimensions should be set correctly
    yield assert_array_almost_equal, res[:3,-1], img2.affine[:3,-1]

    # start in the time dimension should not be 2 as in img, but 0
    # because NIFTI doesn't have a time start

    yield assert_false, (res[3,-1] == img2.affine[3,-1])
    yield assert_true, (res[3,-1] == 2)
    yield assert_true, (img2.affine[3,-1] == 0)

    # shapes should be reversed because img has coordinates reversed


    yield assert_equal, img.shape[::-1], img2.shape

    # data should be transposed because coordinates are reversed

    yield (assert_array_almost_equal, 
           np.transpose(np.asarray(img2),[3,2,1,0]),
           np.asarray(img))

    # coordinate names should be reversed as well

    yield assert_equal, img2.coordmap.input_coords.coord_names, \
        img.coordmap.input_coords.coord_names[::-1]
    yield assert_equal, img2.coordmap.input_coords.coord_names, \
        ['i', 'j', 'k', 'l']
Example #14
def test_write():
    fname = "myfile.nii"
    img = load_image(funcfile)
    with InTemporaryDirectory():
        save_image(img, fname)
        test = FmriImageList.from_image(load_image(fname))
        assert_equal(test[0].affine.shape, (4, 4))
        assert_equal(img[0].affine.shape, (5, 4))
        # Check the affine...
        A = np.identity(4)
        A[:3, :3] = img[:, :, :, 0].affine[:3, :3]
        A[:3, -1] = img[:, :, :, 0].affine[:3, -1]
        assert_true(np.allclose(test[0].affine, A))
        del test
Example #15
def test_write():
    fname = 'myfile.nii'
    img = load_image(funcfile)
    with InTemporaryDirectory():
        save_image(img, fname)
        test = FmriImageList.from_image(load_image(fname))
        assert_equal(test[0].affine.shape, (4,4))
        assert_equal(img[0].affine.shape, (5,4))
        # Check the affine...
        A = np.identity(4)
        A[:3,:3] = img[:,:,:,0].affine[:3,:3]
        A[:3,-1] = img[:,:,:,0].affine[:3,-1]
        assert_true(np.allclose(test[0].affine, A))
        del test
Example #16
def test_space_time_realign():
    path, fname = psplit(funcfile)
    original_affine = load_image(funcfile).affine
    froot, _ = fname.split('.', 1)
    with InTemporaryDirectory():
        # Make another image with .nii extension and extra dot in filename
        save_image(load_image(funcfile), 'my.test.nii')
        for in_fname, out_fname in ((funcfile, froot + '_mc.nii.gz'),
                                    ('my.test.nii', 'my.test_mc.nii.gz')):
            xforms = reg.space_time_realign(in_fname, 2.0, out_name='.')
            assert_true(np.allclose(xforms[0].as_affine(), np.eye(4), atol=1e-7))
            assert_false(np.allclose(xforms[-1].as_affine(), np.eye(4), atol=1e-3))
            img = load_image(out_fname)
            npt.assert_almost_equal(original_affine, img.affine)
Example #17
def test_save2():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file 
    shape = (13,5,7,3)
    step = np.array([3.45,2.3,4.5,6.93])
    cmap = api.AffineTransform.from_start_step('ijkt', 'xyzt', [1,3,5,0], step)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #18
def group_analysis(design, contrast):
    """
    Compute group analysis effect, sd and t
    for a given contrast and design type
    """
    array = np.array # shorthand
    # Directory where output will be written
    odir = futil.ensure_dir(futil.DATADIR, 'group', design, contrast)

    # Which subjects have this (contrast, design) pair?
    subjects = futil.subject_dirs(design, contrast)

    sd = array([array(load_image(pjoin(s, "sd.nii"))) for s in subjects])
    Y = array([array(load_image(pjoin(s, "effect.nii"))) for s in subjects])

    # This function estimates the ratio of the
    # fixed effects variance (sum(1/sd**2, 0))
    # to the estimated random effects variance
    # (sum(1/(sd+rvar)**2, 0)) where
    # rvar is the random effects variance.

    # The EM algorithm used is described in 
    #
    # Worsley, K.J., Liao, C., Aston, J., Petre, V., Duncan, G.H.,
    #    Morales, F., Evans, A.C. (2002). 'A general statistical
    #    analysis for fMRI data'. NeuroImage, 15:1-15

    varest = onesample.estimate_varatio(Y, sd)
    random_var = varest['random']

    # XXX - if we have a smoother, use
    # random_var = varest['fixed'] * smooth(varest['ratio'])

    # Having estimated the random effects variance (and
    # possibly smoothed it), the corresponding
    # estimate of the effect and its variance is
    # computed and saved.

    # This is the coordmap we will use
    coordmap = futil.load_image_fiac("fiac_00","wanatomical.nii").coordmap

    adjusted_var = sd**2 + random_var
    adjusted_sd = np.sqrt(adjusted_var)

    results = onesample.estimate_mean(Y, adjusted_sd) 
    for n in ['effect', 'sd', 't']:
        im = api.Image(results[n], coordmap.copy())
        save_image(im, pjoin(odir, "%s.nii" % n))
Example #19
def fixed_effects(subj, design):
    """
    Fixed effects (within subject) for FIAC model
    """

    # First, find all the effect and standard deviation images
    # for the subject and this design type

    path_dict = futil.path_info2(subj, design)
    rootdir = path_dict['rootdir']
    # The output directory
    fixdir = pjoin(rootdir, "fixed")

    results = futil.results_table(path_dict)

    # Get our hands on the relevant coordmap to
    # save our results
    coordmap = futil.load_image_fiac("fiac_%02d" % subj,
                                     "wanatomical.nii").coordmap

    # Compute the "fixed" effects for each type of contrast
    for con in results:
        fixed_effect = 0
        fixed_var = 0
        for effect, sd in results[con]:
            effect = load_image(effect); sd = load_image(sd)
            var = np.array(sd)**2

            # The optimal, in terms of minimum variance, combination of the
            # effects has weights 1 / var
            #
            # XXX regions with 0 variance are set to 0
            # XXX do we want this or np.nan?
            ivar = np.nan_to_num(1. / var)
            fixed_effect += effect * ivar
            fixed_var += ivar

        # Now, compute the fixed effects variance and t statistic
        fixed_sd = np.sqrt(fixed_var)
        isd = np.nan_to_num(1. / fixed_sd)
        fixed_t = fixed_effect * isd

        # Save the results
        odir = futil.ensure_dir(fixdir, con)
        for a, n in zip([fixed_effect, fixed_sd, fixed_t],
                        ['effect', 'sd', 't']):
            im = api.Image(a, coordmap.copy())
            save_image(im, pjoin(odir, '%s.nii' % n))
Example #20
def test_save2():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file 

    shape = (13,5,7,3)
    step = np.array([3.45,2.3,4.5,6.93])

    cmap = api.AffineTransform.from_start_step('ijkt', 'xyzt', [1,3,5,0], step)

    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    yield assert_true, np.allclose(img.affine, img2.affine)
    yield assert_equal, img.shape, img2.shape
    yield assert_true, np.allclose(np.asarray(img2), np.asarray(img))
Example #21
def test_save2():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file
    shape = (13, 5, 7, 3)
    step = np.array([3.45, 2.3, 4.5, 6.93])
    cmap = api.AffineTransform.from_start_step('ijkt', 'xyzt', [1, 3, 5, 0],
                                               step)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #22
def test_save4():
    # Same as test_save3 except we have reordered the 'ijk' input axes.
    shape = (13,5,7,3)
    step = np.array([3.45,2.3,4.5,6.9])
    # When the input coords are in the 'ljki' order, the affines get
    # rearranged.  Note that the 'start' below, must be 0 for
    # non-spatial dimensions, because we have no way to store them in
    # most cases.  For example, a 'start' of [1,5,3,1] would be lost on
    # reload
    mni_xyz = mni_csm(3).coord_names
    cmap = AT(CS('tkji'),
              CS((('t',) + mni_xyz[::-1])),
              from_matvec(np.diag([2., 3, 5, 1]), step))
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        tmp = load_image(TMP_FNAME)
        data = tmp.get_data().copy()
        # Detach image from file so we can delete it
        img2 = api.Image(data, tmp.coordmap, tmp.metadata)
        del tmp
    P = np.array([[0,0,0,1,0],
                  [0,0,1,0,0],
                  [0,1,0,0,0],
                  [1,0,0,0,0],
                  [0,0,0,0,1]])
    res = np.dot(P, np.dot(img.affine, P.T))
    # the step part of the affine should be set correctly
    assert_array_almost_equal(res[:4,:4], img2.affine[:4,:4])
    # start in the spatial dimensions should be set correctly
    assert_array_almost_equal(res[:3,-1], img2.affine[:3,-1])
    # start in the time dimension should be 3.45 as in img, because NIFTI stores
    # the time offset in hdr[``toffset``]
    assert_not_equal(res[3,-1], img2.affine[3,-1])
    assert_equal(res[3,-1], 3.45)
    # shapes should be reversed because img has coordinates reversed
    assert_equal(img.shape[::-1], img2.shape)
    # data should be transposed because coordinates are reversed
    assert_array_almost_equal(
           np.transpose(np.asarray(img2),[3,2,1,0]),
           np.asarray(img))
    # coordinate names should be reversed as well
    assert_equal(img2.coordmap.function_domain.coord_names,
                 img.coordmap.function_domain.coord_names[::-1])
    assert_equal(img2.coordmap.function_domain.coord_names,
                 ('i', 'j', 'k', 't'))
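The 5x5 matrix P above reverses the order of the first four rows and columns of the homogeneous affine (the homogeneous row/column stays last). A standalone sketch of the same reordering on a toy diagonal affine; the values are illustrative only:

import numpy as np

aff = np.diag([2., 3., 5., 7., 1.])   # toy 4D homogeneous affine with reversed input axes
P = np.eye(5)[[3, 2, 1, 0, 4]]        # same permutation matrix as P above
reordered = P.dot(aff).dot(P.T)       # the affine expressed with the axes back in 'ijkt' order
assert np.allclose(np.diag(reordered), [7., 5., 3., 2., 1.])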
Example #23
def test_space_time_realign():
    path, fname = psplit(funcfile)
    original_affine = load_image(funcfile).affine
    froot, _ = fname.split('.', 1)
    with InTemporaryDirectory():
        # Make another image with .nii extension and extra dot in filename
        save_image(load_image(funcfile), 'my.test.nii')
        for in_fname, out_fname in ((funcfile, froot + '_mc.nii.gz'),
                                    ('my.test.nii', 'my.test_mc.nii.gz')):
            xforms = reg.space_time_realign(in_fname, 2.0, out_name='.')
            assert_true(
                np.allclose(xforms[0].as_affine(), np.eye(4), atol=1e-7))
            assert_false(
                np.allclose(xforms[-1].as_affine(), np.eye(4), atol=1e-3))
            img = load_image(out_fname)
            npt.assert_almost_equal(original_affine, img.affine)
Example #24
def seg_recovery(y_pred, filename):
    label = load_image(PRE_LABEL_PATH + filename)
    shape = label.get_data().shape
    segment = np.zeros(shape)

    h1 = y_pred[0].around()
    h2 = y_pred[1].around()

    area = crop_setting['3d' + str(shape[2])]

    segment[area['x'][0]:area['x'][1], area['y'][0]:area['y'][1],
            area['z1'][0]:area['z1'][1], 0] = h1[0]
    segment[area['x'][0]:area['x'][1], area['y'][0]:area['y'][1],
            area['z2'][0]:area['z2'][1], 1] = h2[0]

    img = Image(segment, label.coordmap)
    save_image(img, OUTPUT + filename)
Example #25
def test_write():
    fp, fname = mkstemp('.nii')
    img = load_image(funcfile)
    save_image(img, fname)
    test = FmriImageList.from_image(load_image(fname))
    yield assert_equal, test[0].affine.shape, (4,4)
    yield assert_equal, img[0].affine.shape, (5,4)

    # Check the affine...
    A = np.identity(4)
    A[:3,:3] = img[:,:,:,0].affine[:3,:3]
    A[:3,-1] = img[:,:,:,0].affine[:3,-1]
    yield assert_true, np.allclose(test[0].affine, A)

    # Under windows, if you don't close before delete, you get a
    # locking error.
    os.close(fp)
    os.remove(fname)
Example #26
def test_save3():
    # A test to ensure that when a file is saved, the affine
    # and the data agree. In this case, things don't agree:
    # i) the pixdim is off, and
    # ii) this makes the affine off

    step = np.array([3.45,2.3,4.5,6.9])
    shape = (13,5,7,3)
    cmap = api.Affine.from_start_step('jkli', 'tzyx', [0,3,5,1], step)

    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)

    yield assert_equal, tuple([img.shape[l] for l in [3,0,1,2]]), img2.shape
    a = np.transpose(np.asarray(img), [3,0,1,2])
    yield assert_false, np.allclose(img.affine, img2.affine)
    yield assert_true, np.allclose(a, np.asarray(img2))
Example #27
def test_save4():
    # Same as test_save3 except we have reordered the 'ijk' input axes.
    shape = (13, 5, 7, 3)
    step = np.array([3.45, 2.3, 4.5, 6.9])
    # When the input coords are in the 'ljki' order, the affines get
    # rearranged.  Note that the 'start' below, must be 0 for
    # non-spatial dimensions, because we have no way to store them in
    # most cases.  For example, a 'start' of [1,5,3,1] would be lost on
    # reload
    mni_xyz = mni_csm(3).coord_names
    cmap = AT(CS('tkji'), CS((('t', ) + mni_xyz[::-1])),
              from_matvec(np.diag([2., 3, 5, 1]), step))
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        tmp = load_image(TMP_FNAME)
        data = tmp.get_data().copy()
        # Detach image from file so we can delete it
        img2 = api.Image(data, tmp.coordmap, tmp.metadata)
        del tmp
    P = np.array([[0, 0, 0, 1, 0], [0, 0, 1, 0, 0], [0, 1, 0, 0, 0],
                  [1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
    res = np.dot(P, np.dot(img.affine, P.T))
    # the step part of the affine should be set correctly
    assert_array_almost_equal(res[:4, :4], img2.affine[:4, :4])
    # start in the spatial dimensions should be set correctly
    assert_array_almost_equal(res[:3, -1], img2.affine[:3, -1])
    # start in the time dimension should be 3.45 as in img, because NIFTI stores
    # the time offset in hdr[``toffset``]
    assert_not_equal(res[3, -1], img2.affine[3, -1])
    assert_equal(res[3, -1], 3.45)
    # shapes should be reversed because img has coordinates reversed
    assert_equal(img.shape[::-1], img2.shape)
    # data should be transposed because coordinates are reversed
    assert_array_almost_equal(np.transpose(img2.get_data(), [3, 2, 1, 0]),
                              img.get_data())
    # coordinate names should be reversed as well
    assert_equal(img2.coordmap.function_domain.coord_names,
                 img.coordmap.function_domain.coord_names[::-1])
    assert_equal(img2.coordmap.function_domain.coord_names,
                 ('i', 'j', 'k', 't'))
Example #28
def test_roundtrip_fromarray():
    data = np.random.rand(10,20,30)
    img = fromarray(data, 'kji', 'xyz')
    fd, name = mkstemp(suffix='.nii.gz')
    tmpfile = open(name)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    data2 = np.asarray(img2)
    tmpfile.close()
    os.unlink(name)
    # verify data
    yield assert_true, np.allclose(data2, data)
    yield assert_true, np.allclose(data2.mean(), data.mean())
    yield assert_true, np.allclose(data2.min(), data.min())
    yield assert_true, np.allclose(data2.max(), data.max())
    # verify shape and ndims
    yield assert_equal, img2.shape, img.shape
    yield assert_equal, img2.ndim, img.ndim
    # verify affine
    yield assert_true, np.allclose(img2.affine, img.affine)
Example #29
def test_file_roundtrip():
    img = load_template_img()
    fd, name = mkstemp(suffix='.nii.gz')
    tmpfile = open(name)
    save_image(img, tmpfile.name)
    img2 = load_image(tmpfile.name)
    data = np.asarray(img)
    data2 = np.asarray(img2)
    tmpfile.close()
    os.unlink(name)
    # verify data
    yield assert_true, np.allclose(data2, data)
    yield assert_true, np.allclose(data2.mean(), data.mean())
    yield assert_true, np.allclose(data2.min(), data.min())
    yield assert_true, np.allclose(data2.max(), data.max())
    # verify shape and ndims
    yield assert_equal, img2.shape, img.shape
    yield assert_equal, img2.ndim, img.ndim
    # verify affine
    yield assert_true, np.allclose(img2.affine, img.affine)
Example #30
def uint8_to_dtype(dtype, name):
    dtype = dtype
    shape = (2,3,4)
    dmax = np.iinfo(np.uint8).max
    data = np.random.randint(0, dmax, size=shape)
    data[0,0,0] = 0
    data[1,0,0] = dmax
    data = data.astype(np.uint8) # randint returns np.int32
    img = fromarray(data, 'kji', 'zxy')
    newimg = save_image(img, name, dtype=dtype)
    newdata = np.asarray(newimg)
    return newdata, data
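A hedged usage sketch for the helper above; the output filename is hypothetical and would normally be a temporary file:

import numpy as np

# save the uint8 test data to disk as float32 and compare the round-tripped values;
# 0..255 are exactly representable in float32, so the values should match
newdata, data = uint8_to_dtype(np.float32, 'uint8_to_float32.nii')
assert np.allclose(newdata, data)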
Example #31
def test_save2b():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file.  This example has a
    # non-diagonal affine matrix for the spatial part, but is 'diagonal' for the
    # space part.
    #
    # make a 5x5 transformation (for 4d image)
    step = np.array([3.45, 2.3, 4.5, 6.9])
    A = np.random.standard_normal((3, 3))
    B = np.diag(list(step) + [1])
    B[:3, :3] = A
    shape = (13, 5, 7, 3)
    cmap = api.vox2mni(B)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #32
def test_save2b():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file.  This example has a
    # non-diagonal affine matrix for the spatial part, but is 'diagonal' for the
    # space part.
    #
    # make a 5x5 transformation (for 4d image)
    step = np.array([3.45, 2.3, 4.5, 6.9])
    A = np.random.standard_normal((3,3))
    B = np.diag(list(step)+[1])
    B[:3, :3] = A
    shape = (13,5,7,3)
    cmap = api.vox2mni(B)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_array_almost_equal(img.affine, img2.affine)
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #33
def test_header_roundtrip():
    img = load_template_img()
    fd, name = mkstemp(suffix='.nii.gz')
    tmpfile = open(name)
    hdr = img.header
    # Update some header values and make sure they're saved
    hdr['slice_duration'] = 0.200
    hdr['intent_p1'] = 2.0
    hdr['descrip'] = 'descrip for TestImage:test_header_roundtrip'
    hdr['slice_end'] = 12
    img.header = hdr
    save_image(img, tmpfile.name)
    newimg = load_image(tmpfile.name)
    newhdr = newimg.header
    tmpfile.close()
    os.unlink(name)
    yield (assert_array_almost_equal,
           newhdr['slice_duration'],
           hdr['slice_duration'])
    yield assert_equal, newhdr['intent_p1'], hdr['intent_p1']
    yield assert_equal, newhdr['descrip'], hdr['descrip']
    yield assert_equal, newhdr['slice_end'], hdr['slice_end']
Example #34
def test_save2b():
    # A test to ensure that when a file is saved, the affine and the
    # data agree. This image comes from a NIFTI file.  This example has
    # a non-diagonal affine matrix for the spatial part, but is
    # 'diagonal' for the space part.  This should raise a warning
    # about the 'non-diagonal' affine matrix.

    # make a 5x5 transformation
    step = np.array([3.45,2.3,4.5,6.9])
    A = np.random.standard_normal((4,4))
    B = np.diag(list(step)+[1])
    B[:4,:4] = A
    shape = (13,5,7,3)
    cmap = api.AffineTransform.from_params('ijkt', 'xyzt', B)
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        img2 = load_image(TMP_FNAME)
        assert_false(np.allclose(img.affine, img2.affine))
        assert_array_almost_equal(img.affine[:3,:3], img2.affine[:3,:3])
        assert_equal(img.shape, img2.shape)
        assert_array_almost_equal(img2.get_data(), img.get_data())
        del img2
Example #35
def float32_to_dtype(dtype):
    # Utility function for the scaling_float32 function
    dtype = dtype
    shape = (2,3,4)
    # set some value for scaling our data
    scale = np.iinfo(np.uint16).max * 2.0
    data = np.random.normal(size=(2,3,4), scale=scale)
    data[0,0,0] = np.finfo(np.float32).max
    data[1,0,0] = np.finfo(np.float32).min
    # random.normal will return data as native machine type
    data = data.astype(np.float32)
    img = fromarray(data, 'kji', 'zyx')
    newimg = save_image(img, gtmpfile.name, dtype=dtype)
    newdata = np.asarray(newimg)
    return newdata, data
Example #36
def test_save3():
    # A test to ensure that when a file is saved, the affine
    # and the data agree. In this case, things don't agree:
    # i) the pixdim is off, and
    # ii) this makes the affine off
    step = np.array([3.45, 2.3, 4.5, 6.9])
    shape = (13, 5, 7, 3)
    mni_xyz = mni_csm(3).coord_names
    cmap = AT(CS('jkli'), CS(('t', ) + mni_xyz[::-1]),
              from_matvec(np.diag([0, 3, 5, 1]), step))
    data = np.random.standard_normal(shape)
    img = api.Image(data, cmap)
    with InTemporaryDirectory():
        save_image(img, TMP_FNAME)
        tmp = load_image(TMP_FNAME)
        # Detach image from file so we can delete it
        data = tmp.get_data().copy()
        img2 = api.Image(data, tmp.coordmap, tmp.metadata)
        del tmp
    assert_equal(tuple([img.shape[l] for l in [3, 2, 1, 0]]), img2.shape)
    a = np.transpose(img.get_data(), [3, 2, 1, 0])
    assert_false(np.allclose(img.affine, img2.affine))
    assert_true(np.allclose(a, img2.get_data()))
Example #37
def run_model(subj, run):
    """
    Single subject fitting of OpenfMRI ds105 model
    """
    #----------------------------------------------------------------------
    # Set initial parameters of the OpenfMRI ds105 dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 121
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'.  It is used in the
    # function design.event_design to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data relevant
    # to this subject/run
    path_info = futil.path_info_run(subj, run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in futil
    # that reformat the original OpenfMRI ds105-supplied format into something
    # where the factorial structure of the design is more explicit.  This has
    # already been run once, and get_experiment_initial() will simply load the
    # newly-formatted design description files (.csv) into record arrays.
    experiment = futil.get_experiment(path_info)

    # Create design matrices for the "initial" and "experiment" factors, saving
    # the default contrasts.

    # The function event_design will create design matrices, which in the case
    # of "experiment" will have num_columns = (# levels of speaker) * (# levels
    # of sentence) * len(delay.spectral) = 2 * 2 * 2 = 8. For "initial", there
    # will be (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1.

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that are described
    # in:
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    #    Evans, A.C. (2002). 'Estimating the delay of the response in fMRI
    #    data.' NeuroImage, 16:593-606.

    # The contrast definitions in ``cons_exper`` are a dictionary with keys
    # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1', 'sentence_0',
    # 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1'] representing the
    # four default contrasts: constant, main effects + interactions, each
    # convolved with 2 HRFs in delay.spectral. For example, sentence:speaker_0
    # is the interaction of sentence and speaker convolved with the first (=0)
    # of the two HRF basis functions, and sentence:speaker_1 is the interaction
    # convolved with the second (=1) of the basis functions.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.block_design(experiment,
                                              volume_times_rec,
                                              hrfs=delay.spectral,
                                              level_contrasts=True)

    # In addition to factors, there is typically a "drift" term. In this case,
    # the drift is a natural cubic spline with a knot at the midpoint
    # (volume_times.mean())
    vt = volume_times  # shorthand
    drift = np.array([vt**i for i in range(4)] + [(vt - vt.mean())**3 *
                                                  (np.greater(vt, vt.mean()))])
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol,5) so that it will have
    # the same number of rows as X_exper.
    drift = drift.T

    # There are helper functions to create these drifts: design.fourier_basis,
    # design.natural_spline.  Therefore, the above is equivalent (except for
    # the normalization by max for numerical stability) to
    #
    # >>> drift = design.natural_spline(t, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts, which have the same keys
    # as cons_exper but whose values are arrays with 15 columns, with the
    # non-zero entries matching the columns of X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper), (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    for k in cons.keys():
        if not isestimable(cons[k], X):
            del (cons[k])
            warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the
    # two coefficients, one for each HRF in delay.spectral

    # We reproduce the same contrasts as in the database
    # outputting an F using both HRFs, as well as the
    # t using only the first HRF

    for obj1, obj2 in [('face', 'scrambled'), ('house', 'scrambled'),
                       ('chair', 'scrambled'), ('face', 'house')]:
        cons['%s_vs_%s_F' % (obj1, obj2)] = \
            np.vstack([cons['object_%s_0' % obj1] -
                       cons['object_%s_0' % obj2],
                       cons['object_%s_1' % obj1] -
                       cons['object_%s_1' % obj2]])

        cons['%s_vs_%s_t' % (obj1, obj2)] = (cons['object_%s_0' % obj1] -
                                             cons['object_%s_0' % obj2])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to have
    # time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri_im = futil.get_fmri(path_info)  # an Image
    fmri_im = rollimg(fmri_im, 't')
    fmri = fmri_im.get_data()  # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nx, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS (ordinary
    # least squares) fit, whose residuals are used to estimate an AR(1)
    # parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nx):
        d = np.array(fmri[:, s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to nearest one-hundredth and group voxels by their rounded
    # ar1 value, fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)
    ar1 *= 100
    ar1 = ar1.astype(np.int) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}
    tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii' %
                                                       (n, v)),
                                    dtype=np.float,
                                    shape=volshape,
                                    mode='w+')
        output[n] = tempdict

    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii' % (n, v)),
                              dtype=np.float,
                              shape=volshape,
                              mode='w+')

    # Loop over the unique values of ar1
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:, armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info, tcons, fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, '%s.nii' % v))
    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))
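A standalone sketch (toy data, not from the source) of the AR(1) estimate used in the first-stage loop above: the lag-1 autocorrelation of the OLS residuals, then truncated to two decimals so voxels can be batched by their AR(1) value:

import numpy as np

resid = np.random.standard_normal((121, 50))                 # (time, voxels) toy residuals
rho = (resid[1:] * resid[:-1]).sum(0) / (resid ** 2).sum(0)  # lag-1 autocorrelation per voxel
rho = (rho * 100).astype(int) / 100.                         # truncate toward zero, as in run_model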
Example #38
mappedTrkFile = '/chb/tmp/kihotest_mapped.trk'


# volume
testArr = np.zeros( ( 10, 10, 10 ) )

r = 0
for i in range( testArr.shape[0] ):
  for j in range( testArr.shape[1] ):
    for k in range( testArr.shape[2] ):
      r += 1
      testArr[i, j, k] = r

img = image.fromarray( testArr, 'ijk', 'xyz' )

save_image( img, volFile )


# trk file
fibers = []

# 2,5,6
# 3,5,7
# 2,6,7
# 8,7,3
# 9,5,4
points = np.array( [[2, 5, 6], [3, 5, 7], [2, 6, 7], [8, 7, 3], [9, 5, 4]], dtype=np.float32 )

fibers.append( ( points, None, None ) )

io.saveTrk( trkFile, fibers, None, None, True )
Example #39
img_roi = load_image(roi)
img_roi = Image(img_roi.get_data().astype(np.float64), img_roi.coordmap, img_roi.header)
ind_roi = img_roi.get_data().astype(bool) & img_mask.get_data().astype(bool)
ind_noroi = ~img_roi.get_data().astype(bool) & img_mask.get_data().astype(bool)

roi_data = np.random.randn(np.sum(ind_roi))

for i in range(1, n_subjects+1):

    print('subject %02g' % i)

    s_dir = os.path.join(root, '%02g' % i)
    if not os.path.isdir(s_dir):
        os.makedirs(s_dir)
    roi_dir = os.path.join(s_dir, 'roi')
    if not os.path.isdir(roi_dir):
        os.makedirs(roi_dir)

    if mode == 'SVR':
        img_mask.get_data()[ind_roi] = (i - (i > n_subjects / 2) * n_subjects / 2) * roi_data + 0.4*np.random.randn(roi_data.shape[0])
        y = (i - (i > n_subjects / 2) * n_subjects / 2)
    else:
        img_mask.get_data()[ind_roi] = ((1 + (i > n_subjects / 2)) * roi_data +
                                        0.4 * np.random.randn(roi_data.shape[0]))
        y = 1 + (i > n_subjects / 2)
    img_mask.get_data()[ind_noroi] = np.random.randn(np.sum(ind_noroi))
    img_mask = smooth(img_mask, fwhm)

    save_image(img_mask, os.path.join(roi_dir, roi_name))

    json.dump({'y': y}, open(os.path.join(s_dir, 'y_%s.json' % mode), 'w+'))
Example #40
def test_nondiag():
    gimg.affine[0,1] = 3.0
    save_image(gimg, gtmpfile.name)
    img2 = load_image(gtmpfile.name)
    yield assert_true, np.allclose(img2.affine, gimg.affine)
Example #41
trkFile = '/chb/tmp/kihotest.trk'
mappedTrkFile = '/chb/tmp/kihotest_mapped.trk'

# volume
testArr = np.zeros((10, 10, 10))

r = 0
for i in range(testArr.shape[0]):
    for j in range(testArr.shape[1]):
        for k in range(testArr.shape[2]):
            r += 1
            testArr[i, j, k] = r

img = image.fromarray(testArr, 'ijk', 'xyz')

save_image(img, volFile)

# trk file
fibers = []

# 2,5,6
# 3,5,7
# 2,6,7
# 8,7,3
# 9,5,4
points = np.array([[2, 5, 6], [3, 5, 7], [2, 6, 7], [8, 7, 3], [9, 5, 4]],
                  dtype=np.float32)

fibers.append((points, None, None))

io.saveTrk(trkFile, fibers, None, None, True)
Example #42
def space_time_realign(
    input, tr, slice_order="descending", slice_dim=2, slice_dir=1, apply=True, make_figure=False, out_name=None
):
    """
    This is a scripting interface to `nipy.algorithms.registration.SpaceTimeRealign` 

    Parameters
    ----------
    input : str or list
        A full path to a file-name (4D nifti time-series) , or to a directory
        containing 4D nifti time-series, or a list of full-paths to files.
    tr : float
        The repetition time
    slice_order : str (optional)
        This is the order of slice-times in the acquisition. This is used as a
        key into the ``SLICETIME_FUNCTIONS`` dictionary from
        :mod:`nipy.algorithms.slicetiming.timefuncs`. Default: 'descending'.
    slice_dim : int (optional) 
        Denotes the axis in `images` that is the slice axis.  In a 4D image,
        this will often be axis = 2 (default).
    slice_dir : int (optional)
        1 if the slices were acquired slice 0 first (default), slice -1 last,
        or -1 if acquire slice -1 first, slice 0 last.
    apply : bool (optional)
        Whether to apply the transformation and produce an output. Default:
        True. 
    make_figure : bool (optional)
        Whether to generate a .png figure with the parameters across scans.
    out_name : str (optional)
        Specify an output location (full path) for the files that are
        generated. Default: generate files in the path of the inputs (with an
        `_mc` suffix added to the file-names).

    Returns
    -------
    transforms : ndarray
        An (n_times_points,) shaped array containing 
       `nipy.algorithms.registration.affine.Rigid` class instances for each time
        point in the time-series. These can be used as affine transforms by
        referring to their `.as_affine` attribute.
    """
    if make_figure:
        if not HAVE_MPL:
            e_s = "You need to have matplotlib installed to run this function"
            e_s += " with `make_figure` set to `True`"
            raise RuntimeError(e_s)

    # If we got only a single file, we motion correct that one:
    if op.isfile(input):
        if not (input.endswith(".nii") or input.endswith(".nii.gz")):
            e_s = "Input needs to be a nifti file ('.nii' or '.nii.gz'"
            raise ValueError(e_s)
        fnames = [input]
        input = nib.load(input)
    # If this is a full-path to a directory containing files, it's still a
    # string:
    elif isinstance(input, str):
        list_of_files = os.listdir(input)
        fnames = [op.join(input, f) for f in np.sort(list_of_files) if (f.endswith(".nii") or f.endswith(".nii.gz"))]
        input = [nib.load(x) for x in fnames]
    # Assume that it's a list of full-paths to files:
    else:
        input = [nib.load(x) for x in input]

    slice_times = timefuncs[slice_order]
    slice_info = [slice_dim, slice_dir]

    reggy = SpaceTimeRealign(input, tr, slice_times, slice_info)

    reggy.estimate(align_runs=True)

    # We now have the transformation parameters in here:
    transforms = np.squeeze(np.array(reggy._transforms))
    rot = np.array([t.rotation for t in transforms])
    trans = np.array([t.translation for t in transforms])

    if apply:
        new_reggy = reggy.resample(align_runs=True)
        for run_idx, new_im in enumerate(new_reggy):
            # Fix output TR - it was probably lost in the image realign step
            assert new_im.affine.shape == (5, 5)
            new_im.affine[:] = new_im.affine.dot(np.diag([1, 1, 1, tr, 1]))
            # Save it out to a '.nii.gz' file:
            froot, ext, trail_ext = splitext_addext(fnames[run_idx])
            path, fname = op.split(froot)
            # We retain the file-name adding '_mc' regardless of where it's
            # saved
            new_path = path if out_name is None else out_name
            save_image(new_im, op.join(new_path, fname + "_mc.nii.gz"))

    if make_figure:
        # Delay MPL plotting import to latest moment to avoid errors trying
        # import the default MPL backend (such as tkinter, which may not be
        # installed). See: https://github.com/nipy/nipy/issues/414
        import matplotlib.pyplot as plt

        figure, ax = plt.subplots(2)
        figure.set_size_inches([8, 6])
        ax[0].plot(rot)
        ax[0].set_xlabel("Time (TR)")
        ax[0].set_ylabel("Translation (mm)")
        ax[1].plot(trans)
        ax[1].set_xlabel("Time (TR)")
        ax[1].set_ylabel("Rotation (radians)")
        figure.savefig(op.join(os.path.split(fnames[0])[0], "mc_params.png"))

    return transforms
Example #43
def space_time_realign(input,
                       tr,
                       slice_order='descending',
                       slice_dim=2,
                       slice_dir=1,
                       apply=True,
                       make_figure=False,
                       out_name=None):
    """
    This is a scripting interface to `nipy.algorithms.registration.SpaceTimeRealign` 

    Parameters
    ----------
    input : str or list
        A full path to a file-name (4D nifti time-series) , or to a directory
        containing 4D nifti time-series, or a list of full-paths to files.
    tr : float
        The repetition time
    slice_order : str (optional)
        This is the order of slice-times in the acquisition. This is used as a
        key into the ``SLICETIME_FUNCTIONS`` dictionary from
        :mod:`nipy.algorithms.slicetiming.timefuncs`. Default: 'descending'.
    slice_dim : int (optional) 
        Denotes the axis in `images` that is the slice axis.  In a 4D image,
        this will often be axis = 2 (default).
    slice_dir : int (optional)
        1 if the slices were acquired slice 0 first (default), slice -1 last,
        or -1 if acquire slice -1 first, slice 0 last.
    apply : bool (optional)
        Whether to apply the transformation and produce an output. Default:
        True. 
    make_figure : bool (optional)
        Whether to generate a .png figure with the parameters across scans.
    out_name : str (optional)
        Specify an output location (full path) for the files that are
        generated. Default: generate files in the path of the inputs (with an
        `_mc` suffix added to the file-names).
    

    Returns
    -------
    transforms : ndarray
        An (n_times_points,) shaped array containing 
       `nipy.algorithms.registration.affine.Rigid` class instances for each time
        point in the time-series. These can be used as affine transforms by
        referring to their `.as_affine` attribute.
    """
    if make_figure:
        if not HAVE_MPL:
            e_s = "You need to have matplotlib installed to run this function"
            e_s += " with `make_figure` set to `True`"
            raise RuntimeError(e_s)

    # If we got only a single file, we motion correct that one:
    if op.isfile(input):
        if not (input.endswith('.nii') or input.endswith('.nii.gz')):
            e_s = "Input needs to be a nifti file ('.nii' or '.nii.gz'"
            raise ValueError(e_s)
        fnames = [input]
        input = nib.load(input)
    # If this is a full-path to a directory containing files, it's still a
    # string:
    elif isinstance(input, str):
        list_of_files = os.listdir(input)
        fnames = [
            op.join(input, f) for f in np.sort(list_of_files)
            if (f.endswith('.nii') or f.endswith('.nii.gz'))
        ]
        input = [nib.load(x) for x in fnames]
    # Assume that it's a list of full-paths to files:
    else:
        input = [nib.load(x) for x in input]

    slice_times = timefuncs[slice_order]
    slice_info = [slice_dim, slice_dir]

    reggy = SpaceTimeRealign(input, tr, slice_times, slice_info)

    reggy.estimate(align_runs=True)

    # We now have the transformation parameters in here:
    transforms = np.squeeze(np.array(reggy._transforms))
    rot = np.array([t.rotation for t in transforms])
    trans = np.array([t.translation for t in transforms])

    if apply:
        new_reggy = reggy.resample(align_runs=True)
        for run_idx, new_im in enumerate(new_reggy):
            # Fix output TR - it was probably lost in the image realign step
            assert new_im.affine.shape == (5, 5)
            new_im.affine[:] = new_im.affine.dot(np.diag([1, 1, 1, tr, 1]))
            # Save it out to a '.nii.gz' file:
            froot, ext, trail_ext = splitext_addext(fnames[run_idx])
            path, fname = op.split(froot)
            # We retain the file-name adding '_mc' regardless of where it's
            # saved
            new_path = path if out_name is None else out_name
            save_image(new_im, op.join(new_path, fname + '_mc.nii.gz'))

    if make_figure:
        figure, ax = plt.subplots(2)
        figure.set_size_inches([8, 6])
        ax[0].plot(rot)
        ax[0].set_xlabel('Time (TR)')
        ax[0].set_ylabel('Rotation (radians)')
        ax[1].plot(trans)
        ax[1].set_xlabel('Time (TR)')
        ax[1].set_ylabel('Translation (mm)')
        figure.savefig(op.join(os.path.split(fnames[0])[0], 'mc_params.png'))

    return transforms
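
# A minimal usage sketch for the routine above (hedged: the function's real
# name is not shown in this excerpt, so `space_time_realign` below is a
# hypothetical placeholder, and the file path is made up):
#
# transforms = space_time_realign('/data/run1.nii.gz', tr=2.0,
#                                 slice_order='descending', slice_dim=2,
#                                 slice_dir=1, apply=True, make_figure=False)
# # Each element is a Rigid transform; refer to its `.as_affine` for the
# # per-volume rigid-body affine.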
Exemple #44
0
import os

import numpy as np
import tifffile as tif  # assumption: `tif` here is the tifffile package
from nipy.core.api import AffineTransform, Image
from nipy.io.api import save_image


def create(rootdir):

    out = None
    prev = None

    out_is_there = False

    dirs = sorted(os.listdir(rootdir))

    for d in dirs:

        files = os.listdir(os.path.join(rootdir, d))

        for f in files:
            input_image = tif.imread(os.path.join(rootdir, d, f))
            if out_is_there:
                # Stack each new 2D slice along a third axis
                out = np.dstack([out, input_image])
            else:
                out = input_image
                out_is_there = True

    length = out.shape[0]
    # out = out.reshape((512, 512, length / 512 / 512))
    cmap = AffineTransform('kji', 'zxy', np.eye(4))
    img = Image(out, cmap)
    save_image(img, '/tmp/out.nii.gz')
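
# Hedged example invocation (the path is hypothetical): `create` walks the
# sub-directories of `rootdir`, stacks every .tif slice it finds into one 3D
# array, and writes the result to /tmp/out.nii.gz.
# create('/data/tiff_stacks')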
Exemple #45
0
def run_model(subj, run):
    """
    Single subject fitting of FIAC model
    """
    #----------------------------------------------------------------------
    # Set initial parameters of the FIAC dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 121
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'.  It is used in the
    # function design.event_design to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data relevant
    # to this subject/run
    path_info = futil.path_info_run(subj,run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in futil
    # that reformat the original FIAC-supplied format into something where the
    # factorial structure of the design is more explicit.  This has already
    # been run once, and get_experiment_initial() will simply load the
    # newly-formatted design description files (.csv) into record arrays.
    experiment = futil.get_experiment(path_info)

    # Create design matrices for the "initial" and "experiment" factors, saving
    # the default contrasts.

    # The function event_design will create design matrices, which in the case
    # of "experiment" will have num_columns = (# levels of speaker) * (# levels
    # of sentence) * len(delay.spectral) = 2 * 2 * 2 = 8. For "initial", there
    # will be (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1.

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that are described
    # in:
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    #    Evans, A.C. (2002). \'Estimating the delay of the response in fMRI
    #    data.\' NeuroImage, 16:593-606.

    # The contrast definitions in ``cons_exper`` are a dictionary with keys
    # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1', 'sentence_0',
    # 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1'] representing the
    # four default contrasts: constant, main effects + interactions, each
    # convolved with 2 HRFs in delay.spectral. For example, sentence:speaker_0
    # is the interaction of sentence and speaker convolved with the first (=0)
    # of the two HRF basis functions, and sentence:speaker_1 is the interaction
    # convolved with the second (=1) of the basis functions.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.block_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral,
                                              level_contrasts=True)
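    # As a rough orientation (an assumption inferred from the comments above,
    # not verified here): X_exper should have 8 regressor columns -- 2 speaker
    # levels x 2 sentence levels x 2 HRFs -- and cons_exper maps names such as
    # 'speaker_0' or 'sentence:speaker_1' to contrast vectors over those columns.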

    # In addition to factors, there is typically a "drift" term. In this case,
    # the drift is a natural cubic spline with a knot at the midpoint
    # (volume_times.mean())
    vt = volume_times # shorthand
    drift = np.array( [vt**i for i in range(4)] +
                      [(vt-vt.mean())**3 * (np.greater(vt, vt.mean()))] )
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol,5) so that it will have
    # the same number of rows as X_exper.
    drift = drift.T

    # There are helper functions to create these drifts: design.fourier_basis,
    # design.natural_spline.  Therefore, the above is equivalent (except for
    # the normalization by max for numerical stability) to
    #
    # >>> drift = design.natural_spline(t, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts, which have the same
    # keys as cons_exper but whose values are arrays with 15 columns, with the
    # non-zero entries matching the columns of X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    for k in cons.keys():
        if not isestimable(cons[k], X):
            del(cons[k])
            warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on the
    # two coefficients, one for each HRF in delay.spectral

    # We reproduce the same contrasts as in the database, outputting an F
    # statistic using both HRFs, as well as the t statistic using only the
    # first HRF.

    for obj1, obj2 in [('face', 'scrambled'),
                       ('house', 'scrambled'),
                       ('chair', 'scrambled'),
                       ('face', 'house')]:
        cons['%s_vs_%s_F' % (obj1, obj2)] = \
            np.vstack([cons['object_%s_0' % obj1] - 
                       cons['object_%s_0' % obj2],
                       cons['object_%s_1' % obj1] - 
                       cons['object_%s_1' % obj2]])


        cons['%s_vs_%s_t' % (obj1, obj2)] = (cons['object_%s_0' % obj1] - 
                                             cons['object_%s_0' % obj2])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to have
    # time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri_im = futil.get_fmri(path_info) # an Image
    fmri_im = rollimg(fmri_im, 't')
    fmri = fmri_im.get_data() # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nx, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS (ordinary
    # least squares) fit, whose residuals are used to estimate an AR(1)
    # parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nx):
        d = np.array(fmri[:,s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
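        # The ratio below is the lag-1 autocorrelation of the OLS residuals,
        # estimated voxel-wise for slice s.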
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to nearest one-hundredth and group voxels by their rounded
    # ar1 value, fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)
    ar1 *= 100
    ar1 = ar1.astype(int) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}; tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii'
                                    % (n, v)), dtype=np.float64,
                                    shape=volshape, mode='w+')
        output[n] = tempdict

    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix='%sF.nii' % n),
                              dtype=np.float64,
                              shape=volshape, mode='w+')

    # Loop over the unique values of ar1
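    # Grouping voxels by their rounded AR(1) value means each distinct
    # whitening model is set up once and reused for all voxels sharing that
    # value, instead of being refit voxel-by-voxel.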
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:,armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info,tcons,fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, '%s.nii' % v))
    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))
Exemple #46
0
ind_roi = img_roi.get_data().astype(bool) & img_mask.get_data().astype(bool)
ind_noroi = ~img_roi.get_data().astype(bool) & img_mask.get_data().astype(bool)
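# ind_roi marks in-mask voxels inside the ROI (where simulated signal is
# planted below); ind_noroi marks in-mask voxels outside it (filled with
# plain noise).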

roi_data = np.random.randn(np.sum(ind_roi))

for i in range(1, n_subjects + 1):

    print('subject %02g' % i)

    s_dir = os.path.join(root, '%02g' % i)
    if not os.path.isdir(s_dir):
        os.makedirs(s_dir)
    roi_dir = os.path.join(s_dir, 'roi')
    if not os.path.isdir(roi_dir):
        os.makedirs(roi_dir)

    if mode == 'SVR':
        y = i - (i > n_subjects / 2) * n_subjects / 2
        img_mask.get_data()[ind_roi] = (y * roi_data +
                                        0.4 * np.random.randn(roi_data.shape[0]))
    else:
        y = 1 + (i > n_subjects / 2)
        img_mask.get_data()[ind_roi] = (y * roi_data +
                                        0.4 * np.random.randn(roi_data.shape[0]))
    img_mask.get_data()[ind_noroi] = np.random.randn(np.sum(ind_noroi))
    img_mask = smooth(img_mask, fwhm)

    save_image(img_mask, os.path.join(roi_dir, roi_name))

    json.dump({'y': y}, open(os.path.join(s_dir, 'y_%s.json' % mode), 'w+'))
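
# A hedged sketch of reading one subject's simulated data back (assumes
# nipy.io.api.load_image, and the same `root`, `roi_name` and `mode` as above;
# subject '01' is just an illustrative choice):
# from nipy.io.api import load_image
# roi_img = load_image(os.path.join(root, '01', 'roi', roi_name))
# with open(os.path.join(root, '01', 'y_%s.json' % mode)) as f:
#     y = json.load(f)['y']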