Code Example #1
File: main_wu.py  Project: robbisg/mvpa_itab_wu
from mvpa2.base import debug
from mvpa2.suite import (CrossValidation, HalfPartitioner, LinearCSVMC,
                         map2nifti, sphere_searchlight)


def searchlight(ds, **kwargs):

    if __debug__:
        debug.active += ["SLC"]

    # Searchlight sphere radius in voxels; can be overridden via kwargs.
    radius = kwargs.get('radius', 3)

    # Alternative classifier setup, disabled:
    # [fclf, cvte] = setup_classifier(**kwargs)
    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, HalfPartitioner(attr='chunks'))

    sl = sphere_searchlight(cv, radius, space='voxel_indices')

    #sl = Searchlight(MahalanobisMeasure, queryengine, add_center_fa, results_postproc_fx, results_backend, results_fx, tmp_prefix, nblocks)
    #sl = sphere_searchlight(MahalanobisMeasure(), 3, space='voxel_indices')
    sl_map = sl(ds)

    # CrossValidation yields error rates; convert to accuracies (1 - error).
    sl_map.samples *= -1
    sl_map.samples += 1

    # Map the searchlight results back into a NIfTI image.
    nif = map2nifti(sl_map, imghdr=ds.a.imghdr)
    nif.set_qform(ds.a.imgaffine)

    # Results packing
    d_result = {'map': nif, 'radius': radius}

    return d_result
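
A minimal usage sketch for the function above; the file names are hypothetical, and it assumes a standard PyMVPA dataset with targets and chunks sample attributes:

from mvpa2.suite import SampleAttributes, fmri_dataset

# Hypothetical inputs: a 4D BOLD image, per-volume attributes, a brain mask.
attr = SampleAttributes('attributes.txt')
ds = fmri_dataset(samples='bold.nii.gz', targets=attr.targets,
                  chunks=attr.chunks, mask='mask.nii.gz')

result = searchlight(ds, radius=5)
result['map'].to_filename('sl_accuracy_r5.nii.gz')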
Code Example #2
def main():
    sac_attr = SampleAttributes(TSTATS_DIR + 'sac_attr.txt')
    nav_attr = SampleAttributes(TSTATS_DIR + 'nav_bin_attr.txt')

    # labels
    xtask_run = [
        # down/powerless, up/powerful
        [1, 2],  # down/powerless
        [2, 1],  # up/powerful
    ]
    intask_run = [  # no comparison within a task
        [0, 0], [0, 0]
    ]
    # Stack run blocks into a 16x16 (8 runs x 2 conditions) model matrix:
    # the first six runs belong to one task, the last two to the other, so
    # only cross-task cells carry comparison weights. squareform() condenses
    # the square matrix into the vector form expected by the measure.
    labels = squareform(
        np.vstack([np.hstack([intask_run] * 6 + [xtask_run] * 2)] * 6 +
                  [np.hstack([xtask_run] * 6 + [intask_run] * 2)] * 2))

    for subj in subject_list:
        subj_mask = MASK_DIR + '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj
        dataset = get_nav_sac_data(nav_attr, sac_attr, subj, subj_mask)
        dataset = remove_invariant_features(dataset)
        print(dataset.targets, dataset.chunks)

        # searchlight
        similarity = CustomDist(labels)
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + '%s_%s_%dvox_sim.nii.gz' %
                          (subj, DIRCT, SEARCHLIGHT_RADIUS))
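
CustomDist is a project-specific dissimilarity measure that is not shown in these snippets. PyMVPA ships a comparable representational-similarity measure; a rough stand-in, assuming `labels` is the condensed model dissimilarity vector built above, could look like:

from mvpa2.measures.rsa import PDistTargetSimilarity

# Correlates each sphere's neural dissimilarity structure with the model
# vector, yielding a similarity score per searchlight center.
similarity = PDistTargetSimilarity(labels)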
Code Example #3
def main():
    subject_list = sys.argv[1:] if len(sys.argv) > 1 else EVERYONE
    print(subject_list)

    attr = SampleAttributes(TSTATS_DIR + TSTATS_NAME + '_attr.txt')

    for subj in subject_list:
        tstats_file = TSTATS_DIR + TSTATS_NAME + '_tstats/%s_%s.nii.gz' % (
            subj, TSTATS_NAME)
        dataset = fmri_dataset(samples=tstats_file,
                               mask=MASK_DIR +
                               '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj)
        dataset.sa['chunks'] = attr.chunks
        dataset.sa['targets'] = attr.targets
        dataset = remove_invariant_features(dataset)

        # searchlight over the model dissimilarity structure
        similarity = CustomDist(squareform(LABELS_NAV))
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + OUTFILE % (subj, SEARCHLIGHT_RADIUS))
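
The two scripts above rely on module-level constants defined elsewhere in the project; the values below are purely hypothetical placeholders that only illustrate the expected shape of the configuration:

# Hypothetical placeholder values; the real configuration (including
# LABELS_NAV, EVERYONE, and subject_list) is defined elsewhere.
TSTATS_DIR = '/data/tstats/'
TSTATS_NAME = 'nav'
MASK_DIR = '/data/masks/'
OUTDIR = '/data/results/'
OUTFILE = '%s_%dvox_sim.nii.gz'
DIRCT = 'nav2sac'
SEARCHLIGHT_RADIUS = 3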
Code Example #4
# Collect the first 54 unique target labels as stimuli.
stimuli = [ds.uniquetargets[i] for i in range(54)]

# Create all possible stimulus pairs for the confusion matrix.
pair_list = list(itertools.combinations(range(len(stimuli)), 2))
pair_list2 = [[stimuli[i], stimuli[j]] for i, j in pair_list]

# enable debug output for searchlight call
#if __debug__:
mvpa2.debug.active += ["SLC"]

sl = mvpa2.sphere_searchlight(clf_wrapper,
                              radius=3,
                              space='voxel_indices',
                              nproc=1)
ds = ds.copy(deep=False,
             sa=['targets', 'chunks'],
             fa=['voxel_indices'],
             a=['mapper'])
sl_map = sl(ds)

data_file1 = os.path.join(job_path, filename)

# Pickle requires a binary-mode handle; the context manager also
# guarantees the file is closed.
with open(data_file1, 'wb') as file_path_full:
    pickle.dump(sl_map, file_path_full)

#jobs up to 22000 now... (12 hours each?)
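
clf_wrapper is not defined in this snippet; a plausible stand-in, purely as a sketch, is a cross-validated classification measure whose per-sphere accuracy (and confusion statistics) can later be inspected against the pair list built above:

from mvpa2.suite import CrossValidation, LinearCSVMC, NFoldPartitioner
from mvpa2.misc.errorfx import mean_match_accuracy

# Hypothetical clf_wrapper: leave-one-chunk-out classification accuracy.
clf_wrapper = CrossValidation(LinearCSVMC(), NFoldPartitioner(),
                              errorfx=mean_match_accuracy,
                              enable_ca=['stats'])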
Code Example #5
y[ds.targets == ds.uniquetargets[0]] = 1

# The chunks must be rewritten so that sklearn can be used for partitioning.
ds.chunks = np.arange(len(ds.chunks))

permut_ = []

i = 3

partitioner = SKLCrossValidation(StratifiedKFold(y, n_folds=i))

cvte = CrossValidation(clf,
                       partitioner,
                       enable_ca=['stats', 'probabilities'])

sl = sphere_searchlight(cvte, radius=3, space='voxel_indices')

maps = []

for p_ in range(100):

    print('-------- ' + str(p_ + 1) + ' of 100 ------------')

    # Shuffle the target labels to build a permutation null distribution.
    y_perm = permutation(range(len(ds.targets)))

    ds.targets = ds.targets[y_perm]

    sl_map = sl(ds)
    # Convert error rates to accuracies (accuracy = 1 - error).
    sl_map.samples *= -1
    sl_map.samples += 1
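
The snippet ends mid-loop. A plausible continuation, offered only as a sketch, appends each permuted map and stacks the samples into a null distribution for later thresholding:

    maps.append(sl_map)

# After the loop: one row per permutation, one column per voxel.
null_dist = np.vstack([m.samples for m in maps])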