Esempio n. 1
0
# Wrap the base classifier with a feature-selection step: `clf` and `sbfs`
# are defined earlier in the file (not visible in this excerpt).
fclf = FeatureSelectionClassifier(clf, sbfs)

from mvpa2.measures.base import CrossValidation
from mvpa2.misc import errorfx
from mvpa2.generators.partition import NFoldPartitioner

# Leave-one-chunk-out cross-validation of the feature-selecting classifier,
# scored as mean classification accuracy.
cv = CrossValidation(fclf,
                     NFoldPartitioner(attr='chunks'),
                     errorfx=errorfx.mean_match_accuracy)

import numpy as np
from mvpa2.misc.io.base import SampleAttributes
# Per-sample attribute labels for this contrast; `os`, `paths` and `con`
# are presumably defined earlier in the file -- TODO confirm.
cv_attr = SampleAttributes(os.path.join(paths[3], (con + "_attribute_labels.txt")))

from mvpa2.measures import rsa
# RSA dissimilarity measure; square=True yields a full square matrix.
dsm = rsa.PDist(square=True)
# searchlight
# import searchlightutils as sl
# from mvpa2.measures.searchlight import sphere_searchlight
# cvSL = sphere_searchlight(cv, radius=r)
# lres = sl.run_cv_sl(cvSL, fds[lidx].copy(deep=False))

# Result accumulators; only rsaresults is filled in the visible part of the
# loop below -- the others are presumably appended to later in the file.
lresults = []
presults = []
l2presults = []
p2lresults = []
rsaresults = []
for sub in subList.keys():
    # Load this subject's beta estimates for the ROI, attaching the
    # sample attributes loaded above; `lmvpa` and `roi` come from outside
    # this excerpt -- TODO confirm.
    betas = lmvpa.loadsubbetas(paths, sub, m=roi, a=cv_attr)
    rsaresults.append(dsm(betas))
Esempio n. 2
0
    pl.clim((0, 2))
    pl.colorbar()
    
"""
As a start, we want to inspect the dissimilarity structure of the stimulation conditions in the entire ROI. For this purpose, we average all samples of each condition into a single exemplar, using an FxMapper() instance.
"""

# compute a dataset with the mean samples for all conditions
from mvpa2.mappers.fx import mean_group_sample
# Average all samples sharing the same 'targets' value into one exemplar
# per condition (an FxMapper under the hood), then apply it to `ds`.
mtgs = mean_group_sample(['targets'])
mtds = mtgs(ds)
After these preparations we can use the PDist() measure to compute the desired distance matrix – by default using correlation distance as a metric. The square argument will cause a full square matrix to be produced, instead of a leaner upper-triangular matrix in vector form.

# basic ROI RSA -- dissimilarity matrix for the entire ROI
from mvpa2.measures import rsa
# square=True produces the full NxN dissimilarity matrix (correlation
# distance by default) across the condition exemplars in `mtds`.
dsm = rsa.PDist(square=True)
res = dsm(mtds)
# `plot_mtx` is a helper defined elsewhere in the file -- TODO confirm.
plot_mtx(res, mtds.sa.targets, 'ROI pattern correlation distances')
Inspecting the figure we can see that there is not much structure in the matrix, except for the face and the house condition being slightly more dissimilar than others.

"""
Now, let’s take a look at the variation of similarity structure through the brain. We can plug the PDist() measure into a searchlight to quickly scan the brain and harvest this information.
"""

# same as above, but done in a searchlight fashion
from mvpa2.measures.searchlight import sphere_searchlight
# Vector-form (upper-triangular) distances suffice per searchlight sphere.
dsm = rsa.PDist(square=False)
sl = sphere_searchlight(dsm, 2)  # sphere radius of 2 voxels
slres = sl(mtds)

"""
Esempio n. 3
0
        # Attach per-feature and per-sample bookkeeping attributes that the
        # searchlight machinery downstream expects.
        ds.fa['node_indices'] = range(ds.shape[1])
        ds.fa['center_ids'] = range(ds.shape[1])
        ds.sa['targets'] = range(ds.shape[0])
        # ds.sa.pop('labels')

        if hyperalign:
            # Project this participant's data into the common space using
            # the precomputed hyperalignment mapper.
            ds = mappers[i][participant].forward(ds)
            print("Hyperaligned participant {0}".format(participant))
            if zscore_features:
                mv.zscore(ds, chunks_attr=None)
            # Re-attach feature attributes after the forward mapping.
            ds.fa['node_indices'] = range(ds.shape[1])
            ds.fa['center_ids'] = range(ds.shape[1])

    # Stack the four datasets sample-wise, merging feature attributes.
    ds_all = mv.vstack((ds1, ds2, ds3, ds4), fa='update')
    # NOTE(review): the return value of PDist(**kwargs) is discarded here,
    # so this line appears to have no effect -- verify whether the result
    # was meant to be assigned or the line removed.
    rsa.PDist(**kwargs)
    #variant_ids = mv.remove_invariant_features(ds_both).fa.center_ids.tolist()

    # Set up cross-validated RSA
    cv_rsa_ = mv.CrossValidation(mv.CDist(pairwise_metric='correlation'),
                                 mv.HalfPartitioner(attr='sessions'),
                                 errorfx=None)

    # cv_rsa above would return all kinds of .sa which are important
    # but must be the same across searchlights. so we first apply it
    # to the entire ds to capture them
    cv_rsa_out = cv_rsa_(ds_all)
    target_sa = cv_rsa_out.sa.copy(deep=True)

    # And now create a postproc which would verify and strip them off
    # to just return samples
Esempio n. 4
0
# Surface-based searchlight RSA setup.
# Fix: the original used `np` (L. numpy) and `pjoin` without importing them,
# which raises NameError at runtime -- both imports are added here.
import mvpa2.suite as mv
import glob
import sys
from os.path import join as pjoin

import numpy as np
from scipy.stats import zscore
from scipy.spatial.distance import pdist
from mvpa2.mappers.fx import mean_group_sample
from mvpa2.measures import rsa

# Hemisphere identifier taken from the command line (used in file names
# below, e.g. '<hemi>.pial.gii') -- presumably 'lh'/'rh'; TODO confirm.
hemi = sys.argv[1]
# Subject numeric IDs, zero-padded to the 6-character form used in paths.
subid = [1, 12, 17, 27, 32, 33, 34, 36, 37, 41]
subjs = ['{:0>6}'.format(i) for i in subid]
# 20 conditions: each of 4 behaviors paired with each of 5 taxa,
# e.g. 'eating bird'.
taxonomy = np.repeat(['bird', 'insect', 'primate', 'reptile', 'ungulate'], 4)
behavior = np.tile(['eating', 'fighting', 'running', 'swimming'], 5)
conditions = [' '.join((beh, tax)) for beh, tax in zip(behavior, taxonomy)]
data_path = '/dartfs-hpc/scratch/psyc164/mvpaces/glm/'
# Pairwise-distance RSA measure; center_data removes each feature's mean
# before computing distances.
dsm = rsa.PDist(center_data=True)
radius = 9
surface = mv.surf.read(pjoin(data_path, '{0}.pial.gii'.format(hemi)))
# this is an arbitrary radius and distance metric!
query = mv.SurfaceQueryEngine(surface, radius, distance_metric='dijkstra')
sl = mv.Searchlight(dsm, query)
mv.debug.active += ['SLC']
print('made our sls')

# One searchlight result per subject will be collected here.
all_slres = []
for sub in subjs:
    # get all our data files for this subj
    ds = None
    prefix = data_path + 'sub-rid' + sub
    suffix = hemi + '.coefs.gii'
    # Glob pattern matching this subject's coefficient files; the remainder
    # of the loop body continues beyond this excerpt.
    fn = prefix + '*' + suffix
Esempio n. 5
0
        # Label this run's samples with the 20 condition names.
        d.sa['conditions'] = conditions
        if ds is None:
            ds = d
        else:
            ds = mv.vstack((ds, d))
    ds.fa['node_indices'] = range(n_vertices)
    # Z-score each sample (row) across features so distances reflect the
    # pattern shape rather than overall response magnitude.
    ds.samples = zscore(ds.samples, axis=1)
    # Collapse repeated presentations into one mean exemplar per condition.
    mtgs = mean_group_sample(['conditions'])
    mtds = mtgs(ds)

    # Surface-based searchlight neighborhoods over the cortical mesh, using
    # geodesic (Dijkstra) distance within `radius`.
    query = mv.SurfaceQueryEngine(surface,
                                  radius,
                                  distance_metric='dijkstra',
                                  fa_node_key='node_indices')
    query.train(ds)
    # Vector-form pairwise distances per searchlight center.
    dsm = rsa.PDist(square=False)
    print('made dsms')
    # Restrict the searchlight to the neighborhood of `max_node`, which is
    # defined outside this excerpt -- TODO confirm.
    sl = mv.Searchlight(dsm, query, roi_ids=query.query_byid(max_node))
    slres = sl(mtds)
    mv.debug.active += ['SLC']
    print('made our sls')
    # Replace NaNs (e.g. from zero-variance spheres) with zeros before pooling.
    slres.samples = np.nan_to_num(slres.samples)
    all_ROI_res.append(slres.samples)

# Pool the per-participant searchlight outputs into a single array of shape
# (#participants, 190, #nodes), then average across participants.
all_ROI_res = np.array(all_ROI_res)
# this array is (#participants, 190, #nodes)

results = all_ROI_res.mean(axis=0)

# Output location and file stem for this region/hemisphere combination.
resname = 'rsa_eb_roi_{0}_{1}'.format(region, hemi)
respath = '/dartfs-hpc/scratch/psyc164/mvpaces/lab2/results/'