Example #1
def spatiotemporal(ds, **kwargs):

    onset = 0
    permutations = 0  # default, so the check below never hits an unbound name

    for arg in kwargs:
        if arg == 'onset':
            onset = kwargs[arg]
        if arg == 'duration':
            duration = kwargs[arg]
        if arg == 'enable_results':
            enable_results = kwargs[arg]
        if arg == 'permutations':
            permutations = int(kwargs[arg])

    events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)

    if 'duration' in locals():
        events = [e for e in events if e['duration'] >= duration]
    else:
        duration = np.min([ev['duration'] for ev in events])

    for e in events:
        e['onset'] += onset
        e['duration'] = duration

    evds = eventrelated_dataset(ds, events=events)

    [fclf, cvte] = setup_classifier(**kwargs)

    logger.info('Performing cross-validation...')
    res = cvte(evds)

    print(cvte.ca.stats)

    if permutations != 0:
        print(cvte.ca.null_prob.samples)
        # Compare the observed error (1 - accuracy) against each estimated
        # null distribution; err_arr repeats the single observed error value.
        dist_len = len(cvte.null_dist.dists())
        err_arr = np.zeros(dist_len)
        for i in range(dist_len):
            err_arr[i] = 1 - cvte.ca.stats.stats['ACC']

        total_p_value = np.mean(cvte.null_dist.p(err_arr))
        p_value = cvte.ca.null_prob.samples
    else:
        total_p_value = 0.
        p_value = np.array([0, 0])

    # If the classifier lacks a sensitivity analyzer, return partial results.
    try:
        sensana = fclf.get_sensitivity_analyzer()
        res_sens = sensana(evds)
    except Exception as err:
        allowed_keys = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
            'perm_pvalue', 'p'
        ]

        allowed_results = [
            None, None, cvte.ca.stats, evds.a.mapper, fclf, evds, p_value,
            total_p_value
        ]

        results_dict = dict(zip(allowed_keys, allowed_results))
        results = dict()
        if 'enable_results' not in locals():
            enable_results = allowed_keys[:]
        for elem in enable_results:
            if elem in allowed_keys:
                results[elem] = results_dict[elem]

        return results

    sens_comb = res_sens.get_mapped(mean_sample())
    mean_map = map2nifti(evds, evds.a.mapper.reverse1(sens_comb))

    l_maps = []
    for m in res_sens:
        maps = ds.a.mapper.reverse1(m)
        nifti = map2nifti(evds, maps)
        l_maps.append(nifti)

    l_maps.append(mean_map)
    # Packing results (to be replaced by a helper function)
    results = dict()
    if 'enable_results' not in locals():
        enable_results = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
            'pvalue', 'p'
        ]

    allowed_keys = [
        'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
        'pvalue', 'p'
    ]

    allowed_results = [
        l_maps, res_sens, cvte.ca.stats, evds.a.mapper, fclf, evds, p_value,
        total_p_value
    ]

    results_dict = dict(zip(allowed_keys, allowed_results))

    for elem in enable_results:

        if elem in allowed_keys:
            results[elem] = results_dict[elem]
        else:
            print('******** ' + elem + ' result is not allowed! *********')

    return results
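
A minimal usage sketch (hypothetical: `ds` is assumed to be a PyMVPA dataset with `targets` and `chunks` sample attributes, the helpers above to come from `from mvpa2.suite import *`, and `setup_classifier` to wire up the null distribution when permutations are requested):

# Hypothetical call; `ds` and the kwarg values are assumptions.
res = spatiotemporal(ds,
                     onset=2,           # shift every event onset by 2 volumes
                     duration=4,        # drop events shorter than 4 volumes
                     permutations=100,  # Monte Carlo null distribution
                     enable_results=['stats', 'map', 'p'])

print(res['stats'])  # cross-validation confusion matrix
res['map'][-1].to_filename('mean_sensitivity.nii.gz')  # last map is the mean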
Example #2
def spatial(ds, **kwargs):
    #    gc.enable()
    #    gc.set_debug(gc.DEBUG_LEAK)

    cvte = None
    permutations = 0

    for arg in kwargs:
        if arg == 'clf_type':
            clf_type = kwargs[arg]
        if arg == 'enable_results':
            enable_results = kwargs[arg].split(',')
        if arg == 'permutations':
            permutations = int(kwargs[arg])
        if arg == 'cvte':
            cvte = kwargs[arg]
            fclf = cvte.learner  # reuse the learner from the supplied cross-validation object

    if cvte is None:
        [fclf, cvte] = setup_classifier(**kwargs)

    logger.info('Performing cross-validation...')
    error_ = cvte(ds)

    logger.debug(cvte.ca.stats)
    #print error_.samples

    #Plot permutations
    #plot_cv_results(cvte, error_, 'Permutations analysis')

    if permutations != 0:
        # As in Example #1: observed error tested against each null distribution.
        dist_len = len(cvte.null_dist.dists())
        err_arr = np.zeros(dist_len)
        for i in range(dist_len):
            err_arr[i] = 1 - cvte.ca.stats.stats['ACC']

        total_p_value = np.mean(cvte.null_dist.p(err_arr))
        p_value = cvte.ca.null_prob.samples

    else:
        p_value = np.array([0, 0])
        total_p_value = 0

    predictions_ds = fclf.predict(ds)

    # If the classifier lacks a sensitivity analyzer, return partial results
    try:
        sensana = fclf.get_sensitivity_analyzer()
    except Exception as err:
        allowed_keys = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds_src',
            'perm_pvalue', 'p'
        ]

        allowed_results = [
            None, None, cvte.ca.stats, ds.a.mapper, fclf, ds, p_value,
            total_p_value
        ]

        results_dict = dict(zip(allowed_keys, allowed_results))
        results = dict()
        if 'enable_results' not in locals():
            enable_results = allowed_keys[:]
        for elem in enable_results:
            if elem in allowed_keys:
                results[elem] = results_dict[elem]

        return results

    res_sens = sensana(ds)

    sens_comb = res_sens.get_mapped(mean_sample())
    mean_map = map2nifti(ds, ds.a.mapper.reverse1(sens_comb))

    l_maps = []
    for m in res_sens:
        maps = ds.a.mapper.reverse1(m)
        nifti = map2nifti(ds, maps)
        l_maps.append(nifti)

    l_maps.append(mean_map)

    # Packing results (to be replaced by a helper function)
    results = dict()

    classifier = fclf

    allowed_keys = [
        'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds_src',
        'pvalue', 'p'
    ]
    allowed_results = [
        l_maps, res_sens, cvte.ca.stats, ds.a.mapper, classifier, ds, p_value,
        total_p_value
    ]

    results_dict = dict(zip(allowed_keys, allowed_results))

    if 'enable_results' not in locals():
        enable_results = allowed_keys[:]

    for elem in enable_results:

        if elem in allowed_keys:
            results[elem] = results_dict[elem]
        else:
            logger.error('******** ' + elem +
                         ' result is not allowed! *********')

    return results
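
Unlike spatiotemporal, spatial accepts a prebuilt cross-validation object through the `cvte` keyword and splits `enable_results` on commas. A sketch under those assumptions (`ds` is again an assumed PyMVPA dataset, classes from `from mvpa2.suite import *`):

# Sketch: reuse a cross-validation object instead of calling setup_classifier.
clf = LinearCSVMC()
cvte = CrossValidation(clf, NFoldPartitioner(), enable_ca=['stats'])

# spatial() splits enable_results on commas, so pass a single string.
res = spatial(ds, cvte=cvte, enable_results='stats,classifier')
print(res['stats'])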
Example #3
# (snippet begins mid-loop: each per-run dataset `d` receives sample
# attributes, then is stacked into `ds`)
    d.sa['conditions'] = conditions
    d.sa['taxonomy'] = taxonomy
    d.sa['behavior'] = behavior
    if ds is None:
        ds = d
    else:
        ds = mv.vstack((ds, d))
ds.fa['node_indices'] = range(ds.shape[1])
# zscore all of our samples
mv.zscore(ds, chunks_attr='chunks', dtype='float32')
# load in surface and get searchlight query
radius = 10
surface = mv.surf.read(join(data_path, '{0}.pial.gii'.format(hemi)))
# this is an arbitrary radius and distance metric!
query = mv.SurfaceQueryEngine(surface, radius, distance_metric='dijkstra')
# based on the PyMVPA tutorial
clf = mv.LinearNuSVMC(space=predict)

cv = mv.CrossValidation(clf,
                        mv.NFoldPartitioner(attr=train_on),
                        errorfx=lambda p, t: np.mean(p == t),  # returns accuracy, not error
                        enable_ca=['stats'])
searchlights = mv.Searchlight(cv,
                              queryengine=query,
                              postproc=mv.mean_sample(),
                              roi_ids=None)
sl_clf_results = searchlights(ds)
outstr = save_path + 'results/sub' + sub + '_sl_clf_' + predict + '_' + hemi
res = np.array(sl_clf_results)
np.save(outstr, res)
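
A quick sanity check on the saved map (only NumPy is assumed; np.save appends the .npy extension):

import numpy as np

# Hypothetical follow-up: reload and inspect the node-wise accuracies.
acc = np.load(outstr + '.npy')  # shape (1, n_nodes) after mean_sample()
print('peak accuracy:', acc.max())
print('nodes above 0.6 accuracy:', (acc > 0.6).sum())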
Example #4
    def _call(self, evds):
        # Method of a custom PyMVPA Measure subclass; the class definition is
        # missing from this snippet. Compares two subjects' dissimilarity
        # matrices via Spearman correlation (Bio.Cluster's 's' distance).
        dsm1 = sd.pdist(evds[evds.sa.subj == self._subj1].samples, metric='correlation')
        dsm2 = sd.pdist(evds[evds.sa.subj == self._subj2].samples, metric='correlation')
        res = 1 - Bio.Cluster.distancematrix(np.vstack((dsm1, dsm2)), dist='s')[1][0]
        return mvpa.Dataset(np.array(res)[np.newaxis])

# Create the representational similarity analysis measure
rsa = RSA(subj1,subj2)

#Prepare Searchlight
sl = mvpa.Searchlight(rsa,
                      mvpa.IndexQueryEngine(
                          voxel_indices=mvpa.Sphere(sp),
                          event_offsetidx=lambda x: range(int(samples_size / TR))),
                      roi_ids='1stidx',
                      postproc=mvpa.mean_sample(),
                      results_fx=spherepack.fx,
                      nproc=int(nproc),
                      enable_ca=['roi_feature_ids'])

#Iterate the Representational similarity analysis measure over all searchlight spheres
slmap = sl(evds)

#Name the output
filename = 'sl_'+subj1+'_'+subj2+'_s'+str(sp)+'_sparse'+str(sparse)+dsfile

#Mapping of the searchlight results into group template space
nimg = mvpa.map2nifti(ds, slmap.samples[:,:ds.nfeatures], imghdr=ds.a.imghdr)

#Save result as nifti in maps directory 
try:
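
The snippet breaks off at the `try:`. A plausible completion, assuming the map is written to a `maps/` directory (the path and error handling are assumptions, not the original author's code):

import os

# Hypothetical completion of the truncated try-block.
try:
    nimg.to_filename(os.path.join('maps', filename + '.nii.gz'))
except IOError:
    # create the output directory on first use, then retry
    os.makedirs('maps')
    nimg.to_filename(os.path.join('maps', filename + '.nii.gz'))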