def runsub(sub, thisContrast, r, dstype='raw', roi='grayMatter', filterLen=49,
           filterOrd=3, write=False):
    if dstype == 'raw':
        outdir = 'PyMVPA'
        print "working with raw data"
        thisSub = {sub: subList[sub]}
        dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi)
        thisDS = dsdict[sub]
        mc_params = lmvpa.loadmotionparams(paths, thisSub)
        beta_events = lmvpa.loadevents(paths, thisSub)
        # Savitzky-Golay filtering
        sg.sg_filter(thisDS, filterLen, filterOrd)
        # Gallant group zscores before regression.
        # zscore w.r.t. rest trials:
        # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
        # zscore the entire set. If done chunk-wise, there is no double-dipping
        # (since we leave a chunk out at a time).
        zscore(thisDS, chunks_attr='chunks')
        print "beta extraction"
        ## BETA EXTRACTION ##
        rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
        evds = er.fit_event_hrf_model(rds, events, time_attr='time_coords',
                                      condition_attr=('trial_type', 'chunks'),
                                      design_kwargs={'add_regs': mc_params[sub],
                                                     'hrf_model': 'canonical'},
                                      return_model=True)
        fds = lmvpa.replacetargets(evds, contrasts, thisContrast)
        fds = fds[fds.targets != '0']
    else:
        outdir = os.path.join('LSS', dstype)
        print "loading betas"
        fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi)
        fds.sa['targets'] = fds.sa[thisContrast]
        zscore(fds, chunks_attr='chunks')

    fds = lmvpa.sortds(fds)
    print "searchlights"

    ## initialize classifier
    clf = svm.LinearNuSVMC()
    cv = CrossValidation(clf, NFoldPartitioner())
    from mvpa2.measures.searchlight import sphere_searchlight
    cvSL = sphere_searchlight(cv, radius=r)

    # Now I have betas per chunk. Could just correlate the betas, or correlate
    # the predictions for corresponding runs.
    # Split chunks into the language half and the pictures half.
    lidx = fds.chunks < fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]
    pidx = fds.chunks >= fds.sa['chunks'].unique[len(fds.sa['chunks'].unique) / 2]
    lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))
    pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False))

    if write:
        from mvpa2.base import dataset
        map2nifti(fds, dataset.vstack([lres, pres])).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + thisContrast + '_cvsl.nii.gz'))

    del lres, pres, cvSL
    cvSL = sphere_searchlight(cv, radius=r)
    crossSet = fds.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False))
    if write:
        map2nifti(fds, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + thisContrast + '_P2L.nii.gz'))
        map2nifti(fds, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', outdir,
                         sub + '_' + roi + '_' + thisContrast + '_L2P.nii.gz'))
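# A minimal driver sketch for the searchlight runsub above (values are
# hypothetical; assumes paths/subList/contrasts were populated by
# lmvpa.initpaths and the module-level imports from the setup blocks
# below have already run):
# for sub in subList:
#     # r is the searchlight radius in voxels; dstype='raw' takes the
#     # HRF-model beta-extraction branch, any other value loads LSS betas
#     runsub(sub, thisContrast='verb', r=3, dstype='raw',
#            roi='grayMatter', filterLen=49, filterOrd=3, write=True)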
def runsub(sub, thisContrast, thisContrastStr, filterLen, filterOrd, paramEst,
           chunklen, alphas=np.logspace(0, 3, 20), debug=False, write=False,
           roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group zscores before regression.
    # zscore w.r.t. rest trials:
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set. If done chunk-wise, there is no double-dipping
    # (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel.
    # Huth method: essentially use FIR.
    # Mumford method: deconvolution with canonical HRF.

    # refit events and regress...
    # get timing data from timing files
    # rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # Ridge regression. We can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each condition, make the regressors parametric
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical',
                                                    'drift_model': 'blank'},
                                     regr_attrs=None)
    # want to collapse ap and cr, but keep anim separate
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None,
                               add_regs=mc_params[sub], drift_model='blank')
    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)
    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    # set chunklen and nchunks for the bootstrap alpha search
    nchunks = int(len(thisDS) * paramEst / chunklen)
    nboots = 50
    covarmat = None
    mus = None
    lwts, lalphas, lres, lceil = bsr.bootstrap_ridge(
        rds[lidx], ldes, chunklen=chunklen, nchunks=nchunks,
        cov0=covarmat, mu0=mus, part_attr='chunks', mode='test',
        alphas=alphas, single_alpha=True, normalpha=False, nboots=nboots,
        corrmin=.2, singcutoff=1e-10, joined=None, plot=debug, use_corr=True)
    pwts, palphas, pres, pceil = bsr.bootstrap_ridge(
        rds[pidx], pdes, chunklen=chunklen, nchunks=nchunks,
        part_attr='chunks', mode='test', alphas=alphas, single_alpha=True,
        normalpha=False, nboots=nboots, corrmin=.2, singcutoff=1e-10,
        joined=None, plot=debug, use_corr=True)
    # language within
    print 'language: ' + str(np.mean(lres))
    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    # need to change outstring
    if write:
        from mvpa2.base import dataset
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_weights.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lalphas, palphas])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_alphas.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lalphas, palphas, lceil, pceil

    # relabel chunks so partitioning trains on one modality and tests on the other
    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, calphas, cres, cceil = bsr.bootstrap_ridge(
        crossSet, des, chunklen=chunklen, nchunks=nchunks,
        part_attr='chunks', mode='test', alphas=alphas, single_alpha=True,
        normalpha=False, nboots=nboots, corrmin=.2, singcutoff=1e-10,
        joined=None, use_corr=True)
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_corr.nii.gz'))
        map2nifti(thisDS, cwts[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_weights.nii.gz'))
        map2nifti(thisDS, cwts[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_weights.nii.gz'))
        map2nifti(thisDS, calphas[calphas.chunks == 1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_alphas.nii.gz'))
        map2nifti(thisDS, calphas[calphas.chunks == 2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_alphas.nii.gz'))
        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_ceiling.nii.gz'))
    del cres, cwts, calphas, cceil
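# Worked example of the bootstrap holdout sizing above, using the script
# defaults (illustrative numbers, not measured data): with chunklen = 10 TRs,
# paramEst = .2, and a dataset of, say, 2000 TRs,
#     nchunks = int(2000 * .2 / 10) = 40
# so each of the nboots = 50 bootstrap iterations holds out 40 contiguous
# 10-TR chunks (20% of the timepoints) to estimate the ridge alpha, and
# single_alpha=True then applies one alpha across all voxels in the final
# mode='test' fit.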
def runsub(sub, thisContrast, thisContrastStr, testContrast, filterLen, filterOrd,
           write=False, debug=False, alphas=(1, 1, 1), roi='grayMatter'):
    # alphas holds three precomputed ridge parameters: language, pictures, cross
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group zscores before regression.
    # zscore w.r.t. rest trials:
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set. If done chunk-wise, there is no double-dipping
    # (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel.
    # Huth method: essentially use FIR.
    # Mumford method: deconvolution with canonical HRF.

    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # Ridge regression. We can model out motion and just not use those betas.
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical',
                                                    'drift_model': 'blank'},
                                     regr_attrs=None)  # 'add_regs': mc_params[sub]
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None,
                               add_regs=mc_params[sub], drift_model='blank')
    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)
    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, _, lres, lceil = bsr.bootstrap_ridge(
        ds=rds[lidx], des=ldes, chunklen=1, nchunks=1, cov0=None, mu0=None,
        part_attr='chunks', mode='test', alphas=[alphas[0]], single_alpha=True,
        normalpha=False, nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
        use_corr=True)
    print 'language: ' + str(np.mean(lres))
    pwts, _, pres, pceil = bsr.bootstrap_ridge(
        ds=rds[pidx], des=pdes, chunklen=1, nchunks=1, cov0=None, mu0=None,
        part_attr='chunks', mode='test', alphas=[alphas[1]], single_alpha=True,
        normalpha=False, nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
        use_corr=True)
    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    if write:
        from mvpa2.base import dataset
        map2nifti(thisDS, dataset.vstack([lres, pres])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_la_' + str(alphas[0]) +
                                      '_pa_' + str(alphas[1]) + '_corrs.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_la_' + str(alphas[0]) +
                                      '_pa_' + str(alphas[1]) + '_wts.nii.gz'))
        map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
            .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                      sub + '_' + roi + '_' + thisContrastStr + '_ridge_la_' + str(alphas[0]) +
                                      '_pa_' + str(alphas[1]) + '_ceiling.nii.gz'))

    # score each test contrast against the within-modality fits
    for t in testContrast:
        tstr = '+'.join(t)
        lcorr = lmvpa.testmodel(wts=lwts, des=ldes, ds=rds[lidx], tc=cp.copy(t), use_corr=True)
        pcorr = lmvpa.testmodel(wts=pwts, des=pdes, ds=rds[pidx], tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, dataset.vstack([lcorr, pcorr])) \
                .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                          sub + '_' + roi + '_' + tstr + '_ridge_la_' + str(alphas[0]) +
                                          '_pa_' + str(alphas[1]) + '_test_corrs.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    # cwts, cres, cceil = bsr.ridge(rds[pidx], pdes, mu0=mus, cov0=covarmat,
    #                               part_attr='chunks', mode='test', alphas=alphas[0],
    #                               single_alpha=True, normalpha=False, corrmin=.2,
    #                               singcutoff=1e-10, joined=None, use_corr=True)
    cwts, _, cres, cceil = bsr.bootstrap_ridge(
        ds=crossSet, des=des, chunklen=1, nchunks=1, cov0=None, mu0=None,
        part_attr='chunks', mode='test', alphas=[alphas[2]], single_alpha=True,
        normalpha=False, nboots=1, corrmin=.2, singcutoff=1e-10, joined=None,
        use_corr=True)
    for t in testContrast:
        tstr = '+'.join(t)
        ccorr = lmvpa.testmodel(wts=cwts, des=des, ds=crossSet, tc=cp.copy(t), use_corr=True)
        if write:
            map2nifti(thisDS, ccorr[0]) \
                .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                          sub + '_' + roi + '_' + tstr + '_P2L_ridge_alpha_' +
                                          str(alphas[2]) + '_test_corr.nii.gz'))
            map2nifti(thisDS, ccorr[1]) \
                .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                          sub + '_' + roi + '_' + tstr + '_L2P_ridge_alpha_' +
                                          str(alphas[2]) + '_test_corr.nii.gz'))
    print 'cross: ' + str(np.mean(cres))
    if write:
        map2nifti(thisDS, cres[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_alpha_' +
                         str(alphas[2]) + '_corr.nii.gz'))
        map2nifti(thisDS, cres[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_alpha_' +
                         str(alphas[2]) + '_corr.nii.gz'))
        map2nifti(thisDS, cwts[cwts.chunks == 1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_alpha_' +
                         str(alphas[2]) + '_wts.nii.gz'))
        map2nifti(thisDS, cwts[cwts.chunks == 2]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_alpha_' +
                         str(alphas[2]) + '_wts.nii.gz'))
        map2nifti(thisDS, cceil[0]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_P2L_ridge_alpha_' +
                         str(alphas[2]) + '_ceiling.nii.gz'))
        map2nifti(thisDS, cceil[1]).to_filename(
            os.path.join(paths[0], 'Maps', 'Encoding',
                         sub + '_' + roi + '_' + thisContrastStr + '_L2P_ridge_alpha_' +
                         str(alphas[2]) + '_ceiling.nii.gz'))
    del cres, cwts, cceil
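# Hypothetical invocation of the fixed-alpha variant above. alphas is a
# 3-tuple of precomputed regularization values (language, pictures, cross),
# e.g. taken from the bootstrap_ridge search, and testContrast is a list of
# contrast-name tuples scored with lmvpa.testmodel (names and values here
# are illustrative, not from the study):
# runsub('LMVPA005', thisContrast='verb', thisContrastStr='verb',
#        testContrast=[('anim',), ('ap', 'cr')], filterLen=49, filterOrd=3,
#        write=True, alphas=(10., 10., 10.), roi='grayMatter')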
def runsub(sub, thisContrast, filterLen, filterOrd, thisContrastStr, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]

    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group zscores before regression.
    # zscore w.r.t. rest trials:
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore the entire set. If done chunk-wise, there is no double-dipping
    # (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')

    # Kay method: leave out a model run, use it to fit an HRF for each voxel.
    # Huth method: essentially use FIR.
    # Mumford method: deconvolution with canonical HRF.

    # refit events and regress...
    # get timing data from timing files
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub], contrasts)  # adding features

    # we can model out motion and just not use those betas
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each condition, make the regressors parametric
    desX, rds = lmvpa.make_designmat(rds, events, time_attr='time_coords',
                                     condition_attr=thisContrast,
                                     design_kwargs={'hrf_model': 'canonical',
                                                    'drift_model': 'blank'},
                                     regr_attrs=None)
    # want to collapse ap and cr, but keep anim separate
    des = lmvpa.make_parammat(desX)

    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.chunks >= thisDS.sa['chunks'].unique[len(thisDS.sa['chunks'].unique) / 2]
    ldes = cp.copy(des)
    pdes = cp.copy(des)
    ldes.matrix = ldes.matrix[lidx]
    pdes.matrix = pdes.matrix[pidx]

    lwts, lres, lceil = bsr.bootstrap_linear(rds[lidx], ldes, part_attr='chunks', mode='test')
    pwts, pres, pceil = bsr.bootstrap_linear(rds[pidx], pdes, part_attr='chunks', mode='test')

    # Now I have betas per chunk. Could just correlate the betas, or correlate
    # the predictions for corresponding runs.
    print 'language: ' + str(np.mean(lres))
    # pictures within
    print 'pictures: ' + str(np.mean(pres))

    from mvpa2.base import dataset
    map2nifti(thisDS, dataset.vstack([lres, pres])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                  sub + '_' + roi + '_' + thisContrastStr + '_univar_corr.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lwts, pwts])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                  sub + '_' + roi + '_' + thisContrastStr + '_univar_betas.nii.gz'))
    map2nifti(thisDS, dataset.vstack([lceil, pceil])) \
        .to_filename(os.path.join(paths[0], 'Maps', 'Encoding',
                                  sub + '_' + roi + '_' + thisContrastStr + '_univar_ceiling.nii.gz'))
    del lres, pres, lwts, pwts, lceil, pceil

    crossSet = thisDS.copy()
    crossSet.chunks[lidx] = 1
    crossSet.chunks[pidx] = 2
    cwts, cres, cceil = bsr.bootstrap_linear(crossSet, des, part_attr='chunks', mode='test')
    print 'cross: ' + str(np.mean(cres))
    map2nifti(thisDS, cres[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_P2L_univar.nii.gz'))
    map2nifti(thisDS, cres[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_L2P_univar.nii.gz'))
    map2nifti(thisDS, cwts[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_P2L_betas.nii.gz'))
    map2nifti(thisDS, cwts[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_L2P_betas.nii.gz'))
    map2nifti(thisDS, cceil[0]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_P2L_ceiling.nii.gz'))
    map2nifti(thisDS, cceil[1]).to_filename(
        os.path.join(paths[0], 'Maps', 'Encoding',
                     sub + '_' + roi + '_' + thisContrastStr + '_L2P_ceiling.nii.gz'))
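# Sketch of what the crossSet relabeling above accomplishes, on toy chunk
# labels (assuming 8 runs, first half language, second half pictures):
#     chunks before: [0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7]
#     lidx: chunks < unique[len(unique) / 2], i.e. chunks < 4
#     chunks after:  [1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2]
# With only two chunk values left, partitioning on 'chunks' trains on one
# modality and tests on the other, which is what produces the P2L and L2P maps.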
roi = 'grayMatter'
filterLen = 49
filterOrd = 3
chunklen = 10   # this reflects the length of a complete trial
paramEst = .2   # how much data to hold out for ridge parameter estimation

paths, subList, contrasts, maskList = lmvpa.initpaths(plat)
if debug:
    subList = {'LMVPA005': subList['LMVPA005']}
# ds_all = lmvpa.loadsubdata(paths, subList, m=roi, c='trial_type')
# motion parameters for all subjects
mc_params = lmvpa.loadmotionparams(paths, subList)
# events for beta extraction; everything is added as a sample attribute
beta_events = lmvpa.loadevents(paths, subList)

# import BootstrapRidgeMapper as bsr
import numpy as np
import os
import matplotlib.pyplot as plt
import sklearn.decomposition as dcmp

alphas = np.logspace(0, 3, 20)
words = ['gravy', 'trainer', 'candle', 'bus', 'tree', 'goalie',
         'singer', 'woman', 'touched', 'stretched', 'lit', 'hit',
         'crushed', 'kicked', 'kissed', 'consoled', 'broccoli', 'dancer',
         'match', 'truck', 'car', 'referee', 'guitarist', 'man']
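# A serial per-subject driver that would typically follow this setup (a
# sketch, assuming the bootstrap-ridge runsub defined above):
# for s in subList.keys():
#     runsub(s, thisContrast='verb', thisContrastStr='verb',
#            filterLen=filterLen, filterOrd=filterOrd, paramEst=paramEst,
#            chunklen=chunklen, alphas=alphas, debug=debug, write=True, roi=roi)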
thisContrast = 'verb'
chance = .125
roi = 'grayMatter'
nVox = 1000     # number of voxels to select
nComp = 10      # number of SVD components
filterLen = 49  # for SG filter
filterOrd = 3   # for SG filter

if debug:
    subList = {'LMVPA003': subList['LMVPA003']}
# this would load all data at once; not memory efficient
# ds_all = lmvpa.loadsubdata(paths, subList, m=roi, c='trial_type')
# motion parameters for all subjects
mc_params = lmvpa.loadmotionparams(paths, subList)
# events for beta extraction; everything is added as a sample attribute
beta_events = lmvpa.loadevents(paths, subList)

from mvpa2.datasets.mri import map2nifti
from mvpa2.mappers.zscore import zscore
import SavGolFilter as sg
import mvpa2.datasets.eventrelated as er
from mvpa2.mappers.svd import SVDMapper
from mvpa2.datasets.base import StaticFeatureSelection, ChainMapper
from mvpa2.featsel.helpers import FixedNElementTailSelector
import mvpa2.featsel as fs
from mvpa2.clfs import svm
from mvpa2.clfs.warehouse import OneWayAnova, LDA
from mvpa2.clfs.meta import FeatureSelectionClassifier, MappedClassifier
from mvpa2.measures.base import CrossValidation
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.misc import errorfx
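# nVox and nComp are not used in the snippets shown here; a sketch of the
# classifiers these imports suggest (an assumption, not the confirmed
# pipeline): ANOVA-based selection of the nVox most informative voxels
# wrapped around the linear SVM, and an SVD mapper keeping nComp components.
# anova_sel = fs.base.SensitivityBasedFeatureSelection(
#     OneWayAnova(),
#     FixedNElementTailSelector(nVox, tail='upper', mode='select'))
# fsclf = FeatureSelectionClassifier(svm.LinearNuSVMC(), anova_sel)
# svd_map = ChainMapper([SVDMapper(), StaticFeatureSelection(slice(None, nComp))])
# svdclf = MappedClassifier(svm.LinearNuSVMC(), svd_map)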
def runsub(sub, thisContrast, r, dstype="raw", roi="grayMatter", filterLen=49, filterOrd=3, write=False): if dstype == "raw": outdir = "PyMVPA" print "working with raw data" thisSub = {sub: subList[sub]} dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi) thisDS = dsdict[sub] mc_params = lmvpa.loadmotionparams(paths, thisSub) beta_events = lmvpa.loadevents(paths, thisSub) # savitsky golay filtering sg.sg_filter(thisDS, filterLen, filterOrd) # gallant group zscores before regression. # zscore w.r.t. rest trials # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks') # zscore entire set. if done chunk-wise, there is no double-dipping (since we leave a chunk out at a time). zscore(thisDS, chunks_attr="chunks") print "beta extraction" ## BETA EXTRACTION ## rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub]) evds = er.fit_event_hrf_model( rds, events, time_attr="time_coords", condition_attr=("trial_type", "chunks"), design_kwargs={"add_regs": mc_params[sub], "hrf_model": "canonical"}, return_model=True, ) fds = lmvpa.replacetargets(evds, contrasts, thisContrast) fds = fds[fds.targets != "0"] else: outdir = os.path.join("LSS", dstype) print "loading betas" fds = lmvpa.loadsubbetas(paths, sub, btype=dstype, m=roi) fds.sa["targets"] = fds.sa[thisContrast] zscore(fds, chunks_attr="chunks") fds = lmvpa.sortds(fds) print "searchlights" ## initialize classifier clf = svm.LinearNuSVMC() cv = CrossValidation(clf, NFoldPartitioner()) from mvpa2.measures.searchlight import sphere_searchlight cvSL = sphere_searchlight(cv, radius=r) # now I have betas per chunk. could just correlate the betas, or correlate the predictions for corresponding runs lidx = fds.chunks < fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2] pidx = fds.chunks >= fds.sa["chunks"].unique[len(fds.sa["chunks"].unique) / 2] lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False)) pres = sl.run_cv_sl(cvSL, fds[pidx].copy(deep=False)) if write: from mvpa2.base import dataset map2nifti(fds, dataset.vstack([lres, pres])).to_filename( os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + thisContrast + "_cvsl.nii.gz") ) del lres, pres, cvSL cvSL = sphere_searchlight(cv, radius=r) crossSet = fds.copy() crossSet.chunks[lidx] = 1 crossSet.chunks[pidx] = 2 cres = sl.run_cv_sl(cvSL, crossSet.copy(deep=False)) if write: map2nifti(fds, cres[0]).to_filename( os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_P2L.nii.gz") ) map2nifti(fds, cres[1]).to_filename( os.path.join(paths[0], "Maps", outdir, sub + "_" + roi + "_" + (thisContrast) + "_L2P.nii.gz") )