def testOLSdegenerate(self):
    X = W((40, 10))
    X[:, 0] = X[:, 1] + X[:, 2]
    Y = W((40,))
    model = OLSModel(design=X)
    results = model.fit(Y)
    self.assertEquals(results.df_resid, 31)
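# A sketch of why df_resid is 31 here (an illustrative check, not part of the
# original test suite): setting column 0 to the sum of columns 1 and 2 makes
# the 40x10 design rank-deficient, so the residual degrees of freedom are
# n - rank(X) = 40 - 9 = 31 rather than 40 - 10 = 30.  The random design
# below stands in for whatever W((40, 10)) produces.

import numpy as np

X = np.random.standard_normal((40, 10))   # stands in for W((40, 10))
X[:, 0] = X[:, 1] + X[:, 2]               # introduce one linear dependence
rank = np.linalg.matrix_rank(X)           # 9, not 10
assert X.shape[0] - rank == 31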
def test_altprotocol():
    block, bT, bF = protocol(descriptions['block'], 'block', *delay.spectral)
    event, eT, eF = protocol(descriptions['event'], 'event', *delay.spectral)

    blocka, baT, baF = altprotocol(altdescr['block'], 'block', *delay.spectral)
    eventa, eaT, eaF = altprotocol(altdescr['event'], 'event', *delay.spectral)

    for c in bT.keys():
        baf = baT[c]
        if not isinstance(baf, formula.Formula):
            baf = formula.Formula([baf])

        bf = bT[c]
        if not isinstance(bf, formula.Formula):
            bf = formula.Formula([bf])

        X = baf.design(t, return_float=True)
        Y = bf.design(t, return_float=True)
        if X.ndim == 1:
            X.shape = (X.shape[0], 1)
        m = OLSModel(X)
        r = m.fit(Y)
        remaining = (r.resid**2).sum() / (Y**2).sum()
        yield assert_almost_equal, remaining, 0

    for c in bF.keys():
        baf = baF[c]
        if not isinstance(baf, formula.Formula):
            baf = formula.Formula([baf])

        bf = bF[c]
        if not isinstance(bf, formula.Formula):
            bf = formula.Formula([bf])

        X = baf.design(t, return_float=True)
        Y = bf.design(t, return_float=True)
        if X.ndim == 1:
            X.shape = (X.shape[0], 1)
        m = OLSModel(X)
        r = m.fit(Y)
        remaining = (r.resid**2).sum() / (Y**2).sum()
        yield assert_almost_equal, remaining, 0
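# What the test above appears to be checking, as a standalone sketch (all
# names below are illustrative): each term built from the original protocol
# should lie in the column span of the corresponding alternate-protocol
# formula, so regressing Y on X by ordinary least squares should leave
# essentially no residual.

import numpy as np

X = np.random.standard_normal((50, 3))
Y = X.dot(np.array([1.0, 2.0, -1.0]))     # Y in span(X) by construction
beta, _, _, _ = np.linalg.lstsq(X, Y)
resid = Y - X.dot(beta)
remaining = (resid**2).sum() / (Y**2).sum()
assert remaining < 1e-12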
def test_scipy_stats():
    # Using scipy.stats.models
    X, cons = twoway.design(D, contrasts=contrasts)
    Y = D['Days']
    m = OLSModel(X)
    f = m.fit(Y)

    F_m = {}
    df_m = {}
    p_m = {}
    for n, c in cons.items():
        r = f.Fcontrast(c)
        F_m[n] = r.F
        df_m[n] = r.df_num
        p_m[n] = scipy.stats.f.sf(F_m[n], df_m[n], r.df_den)

        assert_almost_equal(F[n], F_m[n])
        assert_almost_equal(df[n], df_m[n])
        assert_almost_equal(p[n], p_m[n])
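# Sketch of the p-value computation used above: scipy.stats.f.sf is the
# survival function (1 - CDF) of the F distribution, giving the upper-tail
# probability P(F > F_obs) for an F statistic with df_num numerator and
# df_den denominator degrees of freedom.  The numbers below are illustrative.

import scipy.stats

F_obs, df_num, df_den = 4.2, 2, 30
p = scipy.stats.f.sf(F_obs, df_num, df_den)   # upper-tail probability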
    tempdict = {}
    for v in ['sd', 't', 'effect']:
        tempdict[v] = np.zeros(mask_array.sum())
    output[contrast_id] = tempdict

########################################
# Perform a GLM analysis
########################################

print 'Fitting a GLM (this takes time)...'
fmri_image = load(data_path)
Y = fmri_image.get_data()[mask_array]
X = design_matrix.matrix
m = OLSModel(X)

# Fit the model, storing an estimate of an AR(1) parameter at each voxel
result = m.fit(Y.T)
ar1 = ((result.resid[1:] * result.resid[:-1]).sum(0) /
       (result.resid ** 2).sum(0))
ar1 *= 100
ar1 = ar1.astype(np.int) / 100.

for val in np.unique(ar1):
    armask = np.equal(ar1, val)
    m = ARModel(X, val)
    d = Y[armask]
    results = m.fit(d.T)
    # Output the results for each contrast
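# The AR(1) estimate above is the lag-1 autocorrelation of the OLS residuals,
# rho = sum_t(e[t] * e[t-1]) / sum_t(e[t]**2), truncated to two decimals so
# that voxels can be grouped into a small number of AR(1) bins.  A standalone
# sketch (the residual array here is illustrative):

import numpy as np

resid = np.random.standard_normal((191, 1000))   # (time, voxels)
rho = (resid[1:] * resid[:-1]).sum(0) / (resid**2).sum(0)
rho_binned = np.trunc(rho * 100) / 100.          # same effect as astype(int) / 100.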
def testOLS(self):
    X = W((40, 10))
    Y = W((40,))
    model = OLSModel(design=X)
    results = model.fit(Y)
    self.assertEquals(results.df_resid, 30)
def run_model(subj, run):
    """ Single subject fitting of FIAC model """
    #----------------------------------------------------------------------
    # Set initial parameters of the FIAC dataset
    #----------------------------------------------------------------------
    # Number of volumes in the fMRI data
    nvol = 191
    # The TR of the experiment
    TR = 2.5
    # The time of the first volume
    Tstart = 0.0
    # The array of times corresponding to each volume in the fMRI data
    volume_times = np.arange(nvol) * TR + Tstart
    # This recarray of times has one column named 't'.  It is used in the
    # function design.event_design to create the design matrices.
    volume_times_rec = formula.make_recarray(volume_times, 't')
    # Get a path description dictionary that contains all the path data
    # relevant to this subject/run
    path_info = futil.path_info(subj, run)

    #----------------------------------------------------------------------
    # Experimental design
    #----------------------------------------------------------------------

    # Load the experimental description from disk.  We have utilities in
    # futil that reformat the original FIAC-supplied format into something
    # where the factorial structure of the design is more explicit.  This
    # has already been run once, and get_experiment_initial() will simply
    # load the newly-formatted design description files (.csv) into record
    # arrays.
    experiment, initial = futil.get_experiment_initial(path_info)

    # Create design matrices for the "initial" and "experiment" factors,
    # saving the default contrasts.

    # The function event_design will create design matrices, which in the
    # case of "experiment" will have num_columns =
    # (# levels of speaker) * (# levels of sentence) * len(delay.spectral) =
    # 2 * 2 * 2 = 8
    # For "initial", there will be
    # (# levels of initial) * len([hrf.glover]) = 1 * 1 = 1

    # Here, delay.spectral is a sequence of 2 symbolic HRFs that are
    # described in
    #
    # Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
    # Evans, A.C. (2002). 'Estimating the delay of the response in fMRI
    # data.' NeuroImage, 16:593-606.

    # The contrasts, cons_exper, is a dictionary with keys:
    # ['constant_0', 'constant_1', 'speaker_0', 'speaker_1',
    #  'sentence_0', 'sentence_1', 'sentence:speaker_0', 'sentence:speaker_1']
    # representing the four default contrasts: constant, main effects +
    # interactions, each convolved with the 2 HRFs in delay.spectral.  Its
    # values are matrices with 8 columns.

    # XXX use the hrf __repr__ for naming contrasts
    X_exper, cons_exper = design.event_design(experiment, volume_times_rec,
                                              hrfs=delay.spectral)

    # The contrasts for 'initial' are ignored as they are "uninteresting"
    # and are included in the model as confounds.
    X_initial, _ = design.event_design(initial, volume_times_rec,
                                       hrfs=[hrf.glover])

    # In addition to factors, there is typically a "drift" term.  In this
    # case, the drift is a natural cubic spline with a knot at the midpoint
    # (volume_times.mean())
    vt = volume_times  # shorthand
    drift = np.array([vt**i for i in range(4)] +
                     [(vt - vt.mean())**3 * (np.greater(vt, vt.mean()))])
    for i in range(drift.shape[0]):
        drift[i] /= drift[i].max()

    # We transpose the drift so that its shape is (nvol, 5), giving it the
    # same number of rows as X_initial and X_exper.
    drift = drift.T

    # There are helper functions to create these drifts:
    # design.fourier_basis, design.natural_spline.
    # Therefore, the above is equivalent (except for the normalization by
    # max for numerical stability) to
    #
    # >>> drift = design.natural_spline(volume_times, [volume_times.mean()])

    # Stack all the designs, keeping the new contrasts, which have the same
    # keys as cons_exper but whose values are arrays with 15 columns, with
    # the non-zero entries matching the columns of X corresponding to X_exper
    X, cons = design.stack_designs((X_exper, cons_exper),
                                   (X_initial, {}),
                                   (drift, {}))

    # Sanity check: delete any non-estimable contrasts
    # XXX - this seems to be broken right now, it's producing bogus warnings.
    ## for k in cons.keys():
    ##     if not isestimable(X, cons[k]):
    ##         del(cons[k])
    ##         warnings.warn("contrast %s not estimable for this run" % k)

    # The default contrasts are all t-statistics.  We may want to output
    # F-statistics for 'speaker', 'sentence', 'speaker:sentence' based on
    # the two coefficients, one for each HRF in delay.spectral

    cons['speaker'] = np.vstack([cons['speaker_0'], cons['speaker_1']])
    cons['sentence'] = np.vstack([cons['sentence_0'], cons['sentence_1']])
    cons['sentence:speaker'] = np.vstack([cons['sentence:speaker_0'],
                                          cons['sentence:speaker_1']])

    #----------------------------------------------------------------------
    # Data loading
    #----------------------------------------------------------------------

    # Load in the fMRI data, saving it as an array.  It is transposed to
    # have time as the first dimension, i.e. fmri[t] gives the t-th volume.
    fmri, anat = futil.get_fmri_anat(path_info)
    fmri = np.transpose(fmri, [3, 0, 1, 2])

    nvol, volshape = fmri.shape[0], fmri.shape[1:]
    nslice, sliceshape = volshape[0], volshape[1:]

    #----------------------------------------------------------------------
    # Model fit
    #----------------------------------------------------------------------

    # The model is a two-stage model, the first stage being an OLS
    # (ordinary least squares) fit, whose residuals are used to estimate an
    # AR(1) parameter for each voxel.
    m = OLSModel(X)
    ar1 = np.zeros(volshape)

    # Fit the model, storing an estimate of an AR(1) parameter at each voxel
    for s in range(nslice):
        d = np.array(fmri[:, s])
        flatd = d.reshape((d.shape[0], -1))
        result = m.fit(flatd)
        ar1[s] = ((result.resid[1:] * result.resid[:-1]).sum(0) /
                  (result.resid**2).sum(0)).reshape(sliceshape)

    # We round ar1 to the nearest one-hundredth and group voxels by their
    # rounded ar1 value, fitting an AR(1) model to each batch of voxels.

    # XXX smooth here?
    # ar1 = smooth(ar1, 8.0)
    ar1 *= 100
    ar1 = ar1.astype(np.int) / 100.

    # We split the contrasts into F-tests and t-tests.
    # XXX helper function should do this
    fcons = {}
    tcons = {}
    for n, v in cons.items():
        v = np.squeeze(v)
        if v.ndim == 1:
            tcons[n] = v
        else:
            fcons[n] = v

    # Setup a dictionary to hold all the output
    # XXX ideally these would be memmap'ed Image instances
    output = {}
    for n in tcons:
        tempdict = {}
        for v in ['sd', 't', 'effect']:
            tempdict[v] = np.memmap(NamedTemporaryFile(prefix='%s%s.nii'
                                                       % (n, v)),
                                    dtype=np.float,
                                    shape=volshape, mode='w+')
        output[n] = tempdict

    for n in fcons:
        # Note: the prefix uses 'F' explicitly here; relying on the leftover
        # loop variable v from the t-contrast loop above would be a bug.
        output[n] = np.memmap(NamedTemporaryFile(prefix='%sF.nii' % n),
                              dtype=np.float,
                              shape=volshape, mode='w+')

    # Loop over the unique values of ar1
    for val in np.unique(ar1):
        armask = np.equal(ar1, val)
        m = ARModel(X, val)
        d = fmri[:, armask]
        results = m.fit(d)

        # Output the results for each contrast
        for n in tcons:
            resT = results.Tcontrast(tcons[n])
            output[n]['sd'][armask] = resT.sd
            output[n]['t'][armask] = resT.t
            output[n]['effect'][armask] = resT.effect
        for n in fcons:
            output[n][armask] = results.Fcontrast(fcons[n]).F

    # Dump output to disk
    odir = futil.output_dir(path_info, tcons, fcons)
    for n in tcons:
        for v in ['t', 'sd', 'effect']:
            im = api.Image(output[n][v], anat.coordmap.copy())
            save_image(im, pjoin(odir, n, '%s.nii' % v))
    for n in fcons:
        im = api.Image(output[n], anat.coordmap.copy())
        save_image(im, pjoin(odir, n, "F.nii"))
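# A minimal sketch of the memmap-backed output buffers used above (shapes and
# names illustrative): np.memmap over a NamedTemporaryFile gives a disk-backed
# array that indexes like an ordinary ndarray, so per-voxel statistics can be
# written under a boolean mask without holding every output volume in memory.

import numpy as np
from tempfile import NamedTemporaryFile

volshape = (4, 5, 6)
buf = np.memmap(NamedTemporaryFile(prefix='t.nii'), dtype=np.float64,
                shape=volshape, mode='w+')
mask = np.zeros(volshape, dtype=bool)
mask[1, 2, 3] = True
buf[mask] = 3.14           # write a result only at masked voxels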