def test_output_F():
    # Test output_F convenience function
    rng = np.random.RandomState(ord('F'))
    Y = rng.normal(size=(10, 1)) * 10 + 100
    X = np.c_[rng.normal(size=(10, 3)), np.ones((N,))]
    c1 = np.zeros((X.shape[1],))
    c1[0] = 1
    model = OLSModel(X)
    results = model.fit(Y)
    # Check we get required outputs
    exp_f = results.t(0) ** 2
    assert_array_almost_equal(exp_f, output_F(results, c1))
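
# A small demonstration (a sketch, not part of the original test module) of
# why the expected value above is ``results.t(0) ** 2``: for a contrast with
# a single row, the F statistic is exactly the square of the t statistic.
# This uses only the Tcontrast / Fcontrast methods of the fitted model, as
# used elsewhere in this codebase.
def demo_t_squared_is_F():
    rng = np.random.RandomState(0)
    design = np.c_[rng.normal(size=(20, 3)), np.ones((20,))]
    data = rng.normal(size=(20, 1)) * 10 + 100
    results = OLSModel(design).fit(data)
    contrast = [1, 0, 0, 0]
    t = results.Tcontrast(contrast).t
    F = results.Fcontrast(contrast).F
    assert_array_almost_equal(t ** 2, F)
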
def run_model(subj, run):
    """ Single subject fitting of OpenfMRI ds105 model """
    #----------------------------------------------------------------------
    # Set initial parameters of the OpenfMRI ds105 dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 121
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'.  It is used in the
    # function design.block_design to create the design matrices.
    volume_times_rec = make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data
    # relevant to this subject/run
    path_info = futil.path_info_run(subj, run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in
    # futil that reformat the original OpenfMRI ds105-supplied format into
    # something where the factorial structure of the design is more
    # explicit.  This has already been run once, and get_experiment() will
    # simply load the newly-formatted design description files (.csv) into
    # record arrays.
    experiment = futil.get_experiment(path_info)

    # Create the design matrix for the "experiment" factor, saving the
    # default contrasts.

    # The function block_design will create a design matrix with one column
    # per (condition, HRF) pair - that is,
    # (# levels of the object factor) * len(delay.spectral) columns.

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that are
    # described in:
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    # Evans, A.C. (2002). 'Estimating the delay of the response in fMRI
    # data.' NeuroImage, 16:593-606.

    # The contrast definitions in ``cons_exper`` are a dictionary whose keys
    # include 'constant_0', 'constant_1' and pairs such as 'object_face_0',
    # 'object_face_1' - one contrast per level of the object factor,
    # convolved with each of the 2 HRFs in delay.spectral.  For example,
    # object_face_0 is the effect of the face condition convolved with the
    # first (=0) of the two HRF basis functions, and object_face_1 is the
    # same effect convolved with the second (=1) of the basis functions.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.block_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral,
                                              level_contrasts=True)

    # In addition to factors, there is typically a "drift" term.  In this
    # case, the drift is a natural cubic spline with a knot at the midpoint
    # (volume_times.mean())
    vt = volume_times  # shorthand
    drift = np.array([vt**i for i in range(4)] +
                     [(vt - vt.mean())**3 * (np.greater(vt, vt.mean()))])
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol, 5), giving it the
    # same number of rows as X_exper.
    drift = drift.T

    # There are helper functions to create these drifts:
    # design.fourier_basis, design.natural_spline.
    # Therefore, the above is equivalent (except for the normalization by
    # max for numerical stability) to
    #
    # >>> drift = design.natural_spline(vt, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts, which have the same
    # keys as cons_exper, but whose values are padded with zero columns to
    # the full width of X, with the non-zero entries matching the columns of
    # X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    # (copy the keys into a list first, so we can delete from the dict while
    # iterating)
    for k in list(cons):
        if not isestimable(cons[k], X):
            del cons[k]
            warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may also want to
    # output F-statistics, pooling over the two coefficients - one for each
    # HRF in delay.spectral - for a given comparison.

    # We reproduce the same contrasts as in the database, outputting an F
    # using both HRFs, as well as the t using only the first HRF
    for obj1, obj2 in [('face', 'scrambled'),
                       ('house', 'scrambled'),
                       ('chair', 'scrambled'),
                       ('face', 'house')]:
        cons['%s_vs_%s_F' % (obj1, obj2)] = np.vstack(
            [cons['object_%s_0' % obj1] - cons['object_%s_0' % obj2],
             cons['object_%s_1' % obj1] - cons['object_%s_1' % obj2]])
        cons['%s_vs_%s_t' % (obj1, obj2)] = (cons['object_%s_0' % obj1] -
                                             cons['object_%s_0' % obj2])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to
    # have time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri_im = futil.get_fmri(path_info)  # an Image
    fmri_im = rollimg(fmri_im, 't')
    fmri = fmri_im.get_data()  # now, it's an ndarray

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nx, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS (ordinary
    # least squares) fit, whose residuals are used to estimate an AR(1)
    # parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nx):
        d = np.array(fmri[:, s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to the nearest one-hundredth, to group voxels by their
    # rounded ar1 value before fitting an AR(1) model to each batch of
    # voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)
    ar1 = np.round(ar1 * 100) / 100.
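
    # A quick sanity check (a sketch, not part of the original script): the
    # lag-1 autocorrelation estimate above is bounded by 1 in absolute value
    # (by Cauchy-Schwarz), so after rounding to hundredths there are at most
    # 201 distinct values, which bounds the number of AR model fits below.
    # Voxels with all-zero residuals give nan, so check only finite values.
    assert np.all(np.abs(ar1[np.isfinite(ar1)]) <= 1)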
    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}
    tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii'
                                                       % (n, v)),
                                    dtype=np.float64,
                                    shape=volshape, mode='w+')
        output[n] = tempdict

    for n in fcons:
        output[n] = np.memmap(NamedTemporaryFile(prefix='%sF.nii' % n),
                              dtype=np.float64,
                              shape=volshape, mode='w+')

    # Loop over the unique values of ar1
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:, armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info, tcons, fcons)
    # The coordmap for a single volume in the time series
    vol0_map = fmri_im[0].coordmap
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = Image(output[n][v], vol0_map)
            save_image(im, pjoin(odir, n, '%s.nii' % v))
    for n in fcons:
        im = Image(output[n], vol0_map)
        save_image(im, pjoin(odir, n, "F.nii"))
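
# The contrast split inside ``run_model`` is marked "XXX helper function
# should do this".  A minimal sketch of such a helper (hypothetical, not
# part of the nipy API), assuming only what the loop above assumes: that
# t contrasts squeeze to 1D vectors and F contrasts to 2D matrices.
def split_contrasts(cons):
    """ Split contrast dict `cons` into t (1D) and F (2D) contrasts """
    tcons = {}
    fcons = {}
    for name, value in cons.items():
        value = np.squeeze(value)
        if value.ndim == 1:  # a single row: a t contrast
            tcons[name] = value
        else:  # two or more rows: an F contrast
            fcons[name] = value
    return tcons, fcons
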
import numpy as np

from nipy.algorithms.statistics.api import OLSModel

from ..outputters import output_T, output_F

from nose.tools import assert_true, assert_equal, assert_raises
from numpy.testing import (assert_array_almost_equal,
                           assert_array_equal)

N = 10
X = np.c_[np.linspace(-1, 1, N), np.ones((N,))]
RNG = np.random.RandomState(20110901)
Y = RNG.normal(size=(10, 1)) * 10 + 100
MODEL = OLSModel(X)
RESULTS = MODEL.fit(Y)
C1 = [1, 0]


def test_model():
    # Check basics about the model fit
    # Check we fit the mean
    assert_array_almost_equal(RESULTS.theta[1], np.mean(Y))


def test_output_T():
    # Check we get required outputs
    res = RESULTS.Tcontrast(C1)  # all return values
    # default is all return values
    assert_array_almost_equal([res.effect, res.sd, res.t],
                              output_T(RESULTS, C1))
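
# An additional sanity check (a sketch, not in the original module): the
# effect returned by Tcontrast is the contrast vector applied to the
# estimated coefficients, i.e. dot(C1, theta).
def test_effect_is_contrast_dot_theta():
    res = RESULTS.Tcontrast(C1)
    assert_array_almost_equal(res.effect, np.dot(C1, RESULTS.theta))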