Code example #1
# 2. cross validation __________________________________________________________________
# setting up classifier
clf = mv.LinearCSVMC(space='targets')
cv = mv.CrossValidation(clf, mv.NFoldPartitioner(attr='chunks'))
cv_within = cv(ds_q2)
print(cv_within)
print(np.mean(cv_within))
# why is the mean lower?
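# (cv_within has one value per fold; with PyMVPA's default errorfx,
# mean_mismatch_error, these are error rates, so lower means better)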

# 3. searchlight _______________________________________________________________________
fsaverage_gii = os.path.join(main_dir, 'fs_templates',
                             hemisphere + '.pial.gii')
surf = mv.surf.read(fsaverage_gii)
# note: surf.vertices.shape is (40962, 3) and surf.faces.shape is (81920, 3)
qe = mv.SurfaceQueryEngine(surf, radius=radii, distance_metric='dijkstra')
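# dijkstra distances follow the cortical mesh (geodesic) rather than
# straight-line euclidean distance, so discs respect the folded surface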
sl = mv.Searchlight(cv, queryengine=qe, roi_ids=cortical_vertices)
sl_q2 = sl(ds_q2)

# 4. save output _______________________________________________________________________
if not os.path.exists(
        os.path.join(main_dir, 'analysis', 'searchlight', sub_name)):
    os.makedirs(os.path.join(main_dir, 'analysis', 'searchlight', sub_name))

# save as NIML dataset
niml_q2_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-02_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.niml.dset')
mv.niml.write(niml_q2_filename, sl_q2)

# save as GIFTI, e.g. sub-rid000001_ques-02_lh_searchlight_radii-10
searchlight_q2_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-02_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.gii')
mv.map2gifti(sl_q2, searchlight_q2_filename)
Code example #2
    d.sa['conditions'] = conditions
    d.sa['taxonomy'] = taxonomy
    d.sa['behavior'] = behavior
    if ds is None:
        ds = d
    else:
        ds = mv.vstack((ds, d))
ds.fa['node_indices'] = range(ds.shape[1])
# zscore all of our samples
mv.zscore(ds, chunks_attr='chunks', dtype='float32')
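# (with chunks_attr='chunks' the normalization is computed within each run,
# removing run-wise signal offsets)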
# load in surface and get searchlight query
radius = 10
surface = mv.surf.read(join(data_path, '{0}.pial.gii'.format(hemi)))
# this is an arbitrary radius and distance metric!
query = mv.SurfaceQueryEngine(surface, radius, distance_metric='dijkstra')
# based on the PyMVPA tutorial
clf = mv.LinearNuSVMC(space=predict)

cv = mv.CrossValidation(clf,
                        mv.NFoldPartitioner(attr=train_on),
                        errorfx=lambda p, t: np.mean(p == t),
                        enable_ca=['stats'])
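# note: this errorfx returns accuracy (fraction of correct predictions),
# so higher is better, unlike PyMVPA's default error rate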
searchlights = mv.Searchlight(cv,
                              queryengine=query,
                              postproc=mv.mean_sample(),
                              roi_ids=None)
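# (postproc=mean_sample() averages the per-fold accuracies into a single map;
# roi_ids=None uses every surface node as a searchlight center)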
sl_clf_results = searchlights(ds)
outstr = save_path + 'results/sub' + sub + '_sl_clf_' + predict + '_' + hemi
res = np.array(sl_clf_results)
np.save(outstr, res)
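
A minimal follow-up sketch (an addition, not part of the original example;
the paths and the mvpa2.suite alias are assumed): the accuracy map is saved
above as a bare .npy array, so for surface visualization it can be wrapped
in a dataset and exported as GIFTI, mirroring code example #10 below.

import numpy as np
import mvpa2.suite as mv

# hypothetical path following the naming scheme used above
res = np.load('results/sub01_sl_clf_conditions_lh.npy')
res_ds = mv.Dataset(res)  # wrap the (1, n_nodes) accuracy map in a dataset
mv.map2gifti(res_ds, 'results/sub01_sl_clf_conditions_lh.gii')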
Code example #3
# 2. cross validation __________________________________________________________________
# setting up classifier
clf = mv.LinearCSVMC()
cv = mv.CrossValidation(clf, mv.NFoldPartitioner())
cv_within = cv(ds_q3)
print(cv_within)
print(np.mean(cv_within))
# why is the mean lower?

# 3. searchlight _______________________________________________________________________
fsaverage_gii = os.path.join(main_dir, 'fs_templates',
                             hemisphere + '.pial.gii')
surf = mv.surf.read(fsaverage_gii)
# note: surf.vertices.shape is (40962, 3) and surf.faces.shape is (81920, 3)
qe = mv.SurfaceQueryEngine(surf, radius=radii, distance_metric='dijkstra')
sl = mv.Searchlight(cv, queryengine=qe, nproc=4)
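# nproc=4 distributes the searchlight centers across four worker processes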
sl_q3 = sl(ds_q3)

# 4. save output _______________________________________________________________________
if not os.path.exists(
        os.path.join(main_dir, 'analysis', 'searchlight', sub_name)):
    os.makedirs(os.path.join(main_dir, 'analysis', 'searchlight', sub_name))
# save as NIML dataset
niml_q3_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-03_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.niml.dset')
mv.niml.write(niml_q3_filename, sl_q3)

# save as GIFTI, e.g. sub-rid000001_ques-03_lh_searchlight_radii-10
searchlight_q3_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-03_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.gii')
mv.map2gifti(sl_q3, searchlight_q3_filename)
Code example #4
from scipy.spatial.distance import pdist
import glob
import sys
from os.path import join as pjoin

# assumed aliases for the np/mv/rsa names used below
import numpy as np
import mvpa2.suite as mv
from mvpa2.measures import rsa

hemi = sys.argv[1]
subid = [1, 12, 17, 27, 32, 33, 34, 36, 37, 41]
subjs = ['{:0>6}'.format(i) for i in subid]
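# (zero-pads each id to six digits, e.g. 1 -> '000001')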
taxonomy = np.repeat(['bird', 'insect', 'primate', 'reptile', 'ungulate'], 4)
behavior = np.tile(['eating', 'fighting', 'running', 'swimming'], 5)
conditions = [' '.join((beh, tax)) for beh, tax in zip(behavior, taxonomy)]
data_path = '/dartfs-hpc/scratch/psyc164/mvpaces/glm/'
dsm = rsa.PDist(center_data=True)
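# (PDist returns the pairwise dissimilarities between the samples in each
# searchlight disc; center_data=True removes the feature means first)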
radius = 9
surface = mv.surf.read(pjoin(data_path, '{0}.pial.gii'.format(hemi)))
# this is an arbitrary radius and distance metric!
query = mv.SurfaceQueryEngine(surface, radius, distance_metric='dijkstra')
sl = mv.Searchlight(dsm, query)
mv.debug.active += ['SLC']
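# ('SLC' turns on PyMVPA's searchlight progress reporting)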
print('made our sls')

all_slres = []
for sub in subjs:
    # get all our data files for this subj
    ds = None
    prefix = data_path + 'sub-rid' + sub
    suffix = hemi + '.coefs.gii'
    fn = prefix + '*' + suffix
    files = sorted(glob.glob(fn))
    for x in range(len(files)):
        if x < 5:
            chunks = [x + 1] * 20
        else:
Code example #5
# cortical_vertices = np.load(join(mvpa_dir, 'cortical_vertices_{0}.npy'.format(hemi))).tolist()

# Z-score each sample across features (note: this differs from the
# commented-out mv.zscore call, which would normalize each feature across
# samples within runs)
# mv.zscore(ds, chunks_attr='runs')
ds.samples = ((ds.samples - np.mean(ds.samples, axis=1)[:, None]) /
              np.std(ds.samples, axis=1)[:, None])

clf = mv.LinearCSVMC(space=targets)

cv = mv.CrossValidation(clf,
                        mv.NFoldPartitioner(attr=chunks),
                        errorfx=mv.mean_match_accuracy)
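# (mean_match_accuracy scores each fold as the proportion of correct predictions)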

sl = mv.Searchlight(cv,
                    queryengine=qe,
                    enable_ca=['roi_sizes'],
                    nproc=1,
                    roi_ids=cortical_vertices)
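# (roi_ids restricts searchlight centers to cortical vertices, skipping the
# medial wall; enable_ca=['roi_sizes'] records each disc's size for the
# report below)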
#sl = mv.Searchlight(cv_rsa, queryengine=qe, enable_ca=['roi_sizes'],
#                    nproc=1, results_backend='native', roi_ids=cortical_vertices)
#tmp_prefix='/local/tmp/sam_sl_p{0}_{1}_'.format(participant_id, hemi)
mv.debug.active += ['SLC']
sl_result = sl(ds)

# Report the mean searchlight size, then average across folds and finalize
# the result on the surface
print("Average searchlight size = {0}".format(np.mean(sl.ca.roi_sizes)))

assert sl_result.shape[1] == len(cortical_vertices)
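# project the masked result back onto the full mesh; vertices outside
# cortical_vertices (e.g. the medial wall) keep a value of zero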
sl_final = np.zeros((1, n_vertices))
np.put(sl_final, cortical_vertices, np.mean(sl_result, axis=0))
assert sl_final.shape == (1, n_vertices)
Code example #6
    # define a lean postproc that returns just the samples
    from mvpa2.testing.tools import assert_collections_equal
    from mvpa2.base.collections import SampleAttributesCollection

    def lean_errorfx(ds):
        # sample attributes are identical across folds, so verify and then
        # strip them to keep each ROI result lean (they are restored later)
        assert_collections_equal(ds.sa, target_sa)
        ds.sa = SampleAttributesCollection()
        return ds
    # cross-validated CDist, using the lean postproc defined above
    cv_rsa = mv.CrossValidation(mv.CDist(pairwise_metric='correlation'),
                                 mv.HalfPartitioner(attr='sessions'),
                                 errorfx=None, postproc=lean_errorfx)

    sl = mv.Searchlight(cv_rsa, queryengine=qe, enable_ca=['roi_sizes'],
                        nproc=1, results_backend='native')
    #sl = mv.Searchlight(cv_rsa, queryengine=qe, enable_ca=['roi_sizes'],
    #                    nproc=1, results_backend='native', roi_ids=cortical_vertices)
    #tmp_prefix='/local/tmp/sam_sl_p{0}_{1}_'.format(participant_id, hemi)
    mv.debug.active += ['SLC']
    sl_result = sl(ds)
    assert len(sl_result.sa) == 0  # we didn't pass any
    sl_result.sa = target_sa

    print('>>> {0} {1}'.format(np.mean(sl.ca.roi_sizes), np.std(sl.ca.roi_sizes)))

    sl_means = np.mean(np.dstack((sl_result.samples[:n_conditions**2, :],
                                  sl_result.samples[n_conditions**2:, :])),
                       axis=2)
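    # sl_means is now a single cross-validated dissimilarity matrix, averaged
    # over the two half-partitions of n_conditions**2 rows each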
    sl_final = mv.Dataset(
         sl_means,
Code example #7
            ds = d
        else:
            ds = mv.vstack((ds, d))
    ds.fa['node_indices'] = range(n_vertices)
    ds.samples = zscore(ds.samples, axis=1)
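    # average samples within each condition, yielding one mean pattern per
    # condition for the RSA below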
    mtgs = mean_group_sample(['conditions'])
    mtds = mtgs(ds)

    query = mv.SurfaceQueryEngine(surface,
                                  radius,
                                  distance_metric='dijkstra',
                                  fa_node_key='node_indices')
    query.train(ds)
    dsm = rsa.PDist(square=False)
    print('made dsms')
    # restrict searchlight centers to the disc of nodes around max_node
    sl = mv.Searchlight(dsm, query, roi_ids=query.query_byid(max_node))
    mv.debug.active += ['SLC']  # enable progress output before running
    print('made our sls')
    slres = sl(mtds)
    slres.samples = np.nan_to_num(slres.samples)
    all_ROI_res.append(slres.samples)

all_ROI_res = np.array(all_ROI_res)
# this array is (#participants, 190, #nodes)
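# (190 = C(20, 2), the number of pairwise distances among the 20 conditions)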
#all_ROI_res = np.swapaxes(all_slres, 0, 2)

results = np.mean(all_ROI_res, axis=0)
respath = '/dartfs-hpc/scratch/psyc164/mvpaces/lab2/results/'
resname = 'rsa_eb_roi_{0}_{1}'.format(region, hemi)
np.save(respath + resname, results)
Code example #8
        self._subj2 = subj2

    def _call(self, evds):
        res = 1 - sd.pdist(
            np.hstack(
                (evds[evds.sa.subj == self._subj1].samples,
                 evds[evds.sa.subj == self._subj2].samples)).T, 'correlation')
        return mvpa.Dataset(np.array(res)[np.newaxis])


# Instantiate the inter-subject correlation measure
cor = Corr(subj1, subj2)

# Prepare single voxel Searchlight
sl = mvpa.Searchlight(cor,
                      mvpa.IndexQueryEngine(voxel_indices=mvpa.Sphere(0)),
                      nproc=int(nproc))
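# Sphere(0) contains only the center voxel, so this yields a voxel-wise
# inter-subject correlation map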

#Iterate the inter-subject correlation measure over all voxels
slmap = sl(evds)

# Name the output
filename = 'sl_' + subj1 + '_' + subj2 + dsfile

# Mapping of the searchlight results into group template space
nimg = mvpa.map2nifti(ds, slmap.samples[:, :ds.nfeatures], imghdr=ds.a.imghdr)

#Save result as nifti in maps directory
try:
    os.mkdir(os.path.join(path, 'maps'))
except OSError:
    pass  # directory already exists
Code example #9
        self._subj2 = subj2

    def _call(self, evds):
        # compute each subject's representational dissimilarity matrix (RDM)
        dsm1 = sd.pdist(evds[evds.sa.subj == self._subj1].samples, metric='correlation')
        dsm2 = sd.pdist(evds[evds.sa.subj == self._subj2].samples, metric='correlation')
        # Bio.Cluster's distancematrix with dist='s' returns Spearman distances;
        # 1 - distance is the Spearman correlation between the two RDMs
        res = 1 - Bio.Cluster.distancematrix(np.vstack((dsm1, dsm2)), dist='s')[1][0]
        return mvpa.Dataset(np.array(res)[np.newaxis])

#Instantiate the representational similarity analysis measure
rsa = RSA(subj1, subj2)

#Prepare Searchlight
sl = mvpa.Searchlight(rsa,
                      mvpa.IndexQueryEngine(
                          voxel_indices=mvpa.Sphere(sp),
                          event_offsetidx=lambda x: range(int(samples_size / TR))),
                      roi_ids='1stidx',
                      postproc=mvpa.mean_sample(),
                      results_fx=spherepack.fx,
                      nproc=int(nproc),
                      enable_ca=['roi_feature_ids'])

#Iterate the Representational similarity analysis measure over all searchlight spheres
slmap = sl(evds)

#Name the output
filename = 'sl_'+subj1+'_'+subj2+'_s'+str(sp)+'_sparse'+str(sparse)+dsfile

#Mapping of the searchlight results into group template space
nimg = mvpa.map2nifti(ds, slmap.samples[:,:ds.nfeatures], imghdr=ds.a.imghdr)

#Save result as nifti in maps directory 
Code example #10
# 2. cross validation __________________________________________________________________
# setting up classifier
clf = mv.LinearCSVMC()
cv = mv.CrossValidation(clf, mv.NFoldPartitioner())
cv_within = cv(ds_q2)
print(cv_within)
print(np.mean(cv_within))
# why is the mean lower?

# 3. searchlight _______________________________________________________________________
fsaverage_gii = os.path.join(main_dir, 'fs_templates',
                             hemisphere + '.pial.gii')
surf = mv.surf.read(fsaverage_gii)
# note: surf.vertices.shape is (40962, 3) and surf.faces.shape is (81920, 3)
qe = mv.SurfaceQueryEngine(surf, radius=radii, distance_metric='dijkstra')
sl = mv.Searchlight(cv, queryengine=qe)
sl_q2 = sl(ds_q2)

# 4. save output _______________________________________________________________________

# save as NIML dataset
niml_q2_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-02_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.niml.dset')
mv.niml.write(niml_q2_filename, sl_q2)

# save as GIFTI, e.g. sub-rid000001_ques-02_lh_searchlight_radii-10
searchlight_q2_filename = os.path.join(
    main_dir, 'analysis', 'searchlight', sub_name, sub_name + '_ques-02_' +
    hemisphere + '_searchlight_radii-' + str(radii) + '.gii')
nimg = mv.map2gifti(sl_q2, searchlight_q2_filename)