Example #1
def searchlight(ds, **kwargs):

    if __debug__:
        debug.active += ["SLC"]

    # Sphere radius in voxels; default 3, overridable via kwargs.
    radius = kwargs.get('radius', 3)

    # Alternative: [fclf, cvte] = setup_classifier(**kwargs)
    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, HalfPartitioner(attr='chunks'))

    sl = sphere_searchlight(cv, radius, space='voxel_indices')

    #sl = Searchlight(MahalanobisMeasure, queryengine, add_center_fa, results_postproc_fx, results_backend, results_fx, tmp_prefix, nblocks)
    #sl = sphere_searchlight(MahalanobisMeasure(), 3, space='voxel_indices')
    sl_map = sl(ds)

    # Convert error maps to accuracy maps: acc = 1 - err
    sl_map.samples *= -1
    sl_map.samples += 1

    nif = map2nifti(sl_map, imghdr=ds.a.imghdr)
    nif.set_qform(ds.a.imgaffine)

    # Results packing
    d_result = {'map': nif, 'radius': radius}

    return d_result
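A minimal driver for this wrapper might look like the following; the filenames are placeholders and the wrapper's PyMVPA dependencies (debug, LinearCSVMC, etc.) are assumed to be imported at module level, e.g. via mvpa2.suite:

# Hypothetical driver; 'attr.txt' and 'bold.nii.gz' are placeholder filenames.
from mvpa2.suite import SampleAttributes, fmri_dataset

attr = SampleAttributes('attr.txt')
ds = fmri_dataset(samples='bold.nii.gz', targets=attr.targets, chunks=attr.chunks)

res = searchlight(ds, radius=4)
res['map'].to_filename('searchlight_r%d.nii.gz' % res['radius'])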
Example #3
def main():
    sac_attr = SampleAttributes(TSTATS_DIR + 'sac_attr.txt')
    nav_attr = SampleAttributes(TSTATS_DIR + 'nav_bin_attr.txt')

    # labels
    xtask_run = [
        # down/powerless, up/powerful
        [1, 2],  # down/powerless
        [2, 1],  # up/powerful
    ]
    intask_run = [  # no comparison within a task
        [0, 0], [0, 0]
    ]
    labels = squareform(
        np.vstack([np.hstack([intask_run] * 6 + [xtask_run] * 2)] * 6 +
                  [np.hstack([xtask_run] * 6 + [intask_run] * 2)] * 2))

    for subj in subject_list:
        subj_mask = MASK_DIR + '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj
        dataset = get_nav_sac_data(nav_attr, sac_attr, subj, subj_mask)
        dataset = remove_invariant_features(dataset)
        print(dataset.targets, dataset.chunks)

        # searchlight
        similarity = CustomDist(labels)
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + '%s_%s_%dvox_sim.nii.gz' %
                          (subj, DIRCT, SEARCHLIGHT_RADIUS))
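The label-matrix construction above can be checked in isolation; a small sketch assuming, as the comments suggest, six runs of one task followed by two of the other, with two conditions per run:

import numpy as np
from scipy.spatial.distance import squareform

xtask_run = [[1, 2], [2, 1]]   # cross-task cells: down/powerless vs. up/powerful
intask_run = [[0, 0], [0, 0]]  # within-task cells carry no comparison

labels = squareform(
    np.vstack([np.hstack([intask_run] * 6 + [xtask_run] * 2)] * 6 +
              [np.hstack([xtask_run] * 6 + [intask_run] * 2)] * 2))
print(labels.shape)  # the 16x16 matrix condensed to a (120,) vector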
Example #4
def sl2nifti(ds, remap, outfile):
    '''
    No return value; converts searchlight output and saves a nifti file
    to the working directory.

    ds: dataset of searchlight results
    remap: dataset to remap to
    outfile: output filename string, including the .nii.gz extension
    '''

    nimg = map2nifti(data=ds, dataset=remap)
    nimg.to_filename(outfile)
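A hedged usage sketch; 'measure' stands for any PyMVPA dataset measure and the output name is a placeholder:

from mvpa2.suite import sphere_searchlight

sl = sphere_searchlight(measure, radius=3)   # hypothetical measure
sl_map = sl(dataset)
sl2nifti(sl_map, dataset, 'sub01_searchlight.nii.gz')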
Example #5
def main():
    subject_list = sys.argv[1:] if len(sys.argv) > 1 else EVERYONE
    print(subject_list)

    attr = SampleAttributes(TSTATS_DIR + TSTATS_NAME + '_attr.txt')

    for subj in subject_list:
        tstats_file = TSTATS_DIR + TSTATS_NAME + '_tstats/%s_%s.nii.gz' % (
            subj, TSTATS_NAME)
        dataset = fmri_dataset(samples=tstats_file,
                               mask=MASK_DIR +
                               '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj)
        dataset.sa['chunks'] = attr.chunks
        dataset.sa['targets'] = attr.targets
        dataset = remove_invariant_features(dataset)

        similarity = CustomDist(squareform(LABELS_NAV))
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + OUTFILE % (subj, SEARCHLIGHT_RADIUS))
Example #6
def spatial(ds, **kwargs):
    #    gc.enable()
    #    gc.set_debug(gc.DEBUG_LEAK)

    cvte = None
    permutations = 0

    for arg in kwargs:
        if arg == 'clf_type':
            clf_type = kwargs[arg]
        if arg == 'enable_results':
            enable_results = kwargs[arg].split(',')
        if arg == 'permutations':
            permutations = int(kwargs[arg])
        if arg == 'cvte':
            # Reuse a prebuilt cross-validation object and its classifier
            cvte = kwargs[arg]
            fclf = cvte.learner

    if cvte is None:
        [fclf, cvte] = setup_classifier(**kwargs)

    logger.info('Performing cross-validation ...')
    error_ = cvte(ds)

    logger.debug(cvte.ca.stats)
    #print error_.samples

    #Plot permutations
    #plot_cv_results(cvte, error_, 'Permutations analysis')

    if permutations != 0:
        # One entry per null distribution, all set to the observed error rate
        dist_len = len(cvte.null_dist.dists())
        err_arr = np.full(dist_len, 1 - cvte.ca.stats.stats['ACC'])

        total_p_value = np.mean(cvte.null_dist.p(err_arr))
        p_value = cvte.ca.null_prob.samples

    else:
        p_value = np.array([0, 0])
        total_p_value = 0

    predictions_ds = fclf.predict(ds)

    # If the classifier provides no sensitivity analyzer, return partial results
    try:
        sensana = fclf.get_sensitivity_analyzer()
    except Exception as err:
        allowed_keys = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds_src',
            'perm_pvalue', 'p'
        ]

        allowed_results = [
            None, None, cvte.ca.stats, ds.a.mapper, fclf, ds, p_value,
            total_p_value
        ]

        results_dict = dict(zip(allowed_keys, allowed_results))
        results = dict()
        if 'enable_results' not in locals():
            enable_results = allowed_keys[:]
        for elem in enable_results:
            if elem in allowed_keys:
                results[elem] = results_dict[elem]

        return results

    res_sens = sensana(ds)

    sens_comb = res_sens.get_mapped(mean_sample())
    mean_map = map2nifti(ds, ds.a.mapper.reverse1(sens_comb))

    l_maps = []
    for m in res_sens:
        maps = ds.a.mapper.reverse1(m)
        nifti = map2nifti(ds, maps)
        l_maps.append(nifti)

    l_maps.append(mean_map)

    # Packing results (to be replaced with a function)
    results = dict()

    classifier = fclf

    allowed_keys = [
        'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds_src',
        'pvalue', 'p'
    ]
    allowed_results = [
        l_maps, res_sens, cvte.ca.stats, ds.a.mapper, classifier, ds, p_value,
        total_p_value
    ]

    results_dict = dict(zip(allowed_keys, allowed_results))

    if 'enable_results' not in locals():
        enable_results = allowed_keys[:]

    for elem in enable_results:

        if elem in allowed_keys:
            results[elem] = results_dict[elem]
        else:
            logger.error('******** ' + elem +
                         ' result is not allowed! *********')

    return results
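The packing logic above is duplicated in both exit paths; a sketch of the helper the comment asks for (the name pack_results is an assumption, and it reuses the module-level logger):

def pack_results(allowed_keys, allowed_results, enable_results=None):
    # Hypothetical helper consolidating the result-packing repeated above.
    results_dict = dict(zip(allowed_keys, allowed_results))
    if enable_results is None:
        enable_results = allowed_keys[:]
    results = dict()
    for elem in enable_results:
        if elem in allowed_keys:
            results[elem] = results_dict[elem]
        else:
            logger.error('******** ' + elem + ' result is not allowed! *********')
    return results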
Example #8
f.close()

# display results
P.figure()
P.title(
    str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) +
    '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(
    os.path.join(
        sessionPath,
        'confMatrixAvTrial{0}-{1}-LanguageThinking-Japanese_English.png'.
        format(boldDelay, stimulusWidth)))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy',
      N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%',
      datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(
    postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename(
    "anovaSensitivity{0}-{1}-LanguageThink-Japanese_English.nii".format(
        boldDelay, stimulusWidth))

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights
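On the last comment: a PyMVPA FeatureSelectionClassifier keeps the mapper it was trained with, so the selected-space weights can plausibly be projected back; a hedged, untested sketch (it assumes weights is a 2-D selected-features x classes array):

w = N.asarray(weights)
# Assumption: .mapper is the anova-based feature selection learned in training.
full_w = anovaSelectedSMLR.mapper.reverse1(w[:, 0])  # back to the ~28k space
M.map2nifti(dataset, full_w).to_filename('smlrWeightsFullSpace.nii')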
Example #9
def buildremapper(ds_type,
                  sub,
                  data,
                  rootdir='.',
                  anatdir='ses-movie/anat',
                  rois=['FFA', 'LOC', 'PPA', 'VIS', 'EBA', 'OFA'],
                  ):
    """During the hdf5 dataset creation, mapping information was lost :-(
    This function attempts to recover it: for full datasets, we load the
    brain group template -- for stripped datasets, we build a new mask from
    only the participant's ROIs. Loading this as an fmri_dataset back into
    the analysis should yield a mapper through which the unmapped dataset
    can be projected back into brain space.
    """
    # TODO: make rootdir and anatdir less hardcoded

    # Q: do I need to load each participant's brain warped into group space
    # individually, or is one general template enough?
    if ds_type == 'full':
        brain = 'sourcedata/tnt/{}/bold3Tp2/in_grpbold3Tp2/head.nii.gz'.format(sub)
        mask = 'sourcedata/tnt/{}/bold3Tp2/in_grpbold3Tp2/brain_mask.nii.gz'.format(sub)
        #maybe take the study-template here.
      #  brain = 'sourcedata/tnt/templates/grpbold3Tp2/brain.nii.gz'
      #  head = 'sourcedata/tnt/templates/grpbold3Tp2/head.nii.gz'
        dummy = mv.fmri_dataset(brain, mask=mask)

    # # WIP -- still debating whether this is necessary.
    # elif ds_type == 'stripped':
    #     # if the dataset is stripped, we have to make a custom mask... yet pondering whether that is worth the work...
    #     # we have to build the masks participant-wise, because each participant has custom masks per run (possibly several)...
    #     # create a dummy outlay: (first dim of hrf estimates should be number of voxel)
    #     all_rois_mask = np.array([['placeholder'] * data.shape[1]]).astype('S10')
    #     for roi in rois:
    #         if roi == 'VIS':
    #             roi_fns = sorted(glob(rootdir + participant + anatdir + \
    #                                       '{0}_*_mask_tmpl.nii.gz'.format(roi)))
    #         else:
    #             if bilateral:
    #                 # if its bilateralized we don't need to segregate based on hemispheres
    #
    #             else:
    #                 # we need to segregate based on hemispheres
    #                 left_roi_fns = sorted(glob(rootdir + participant + anatdir + \
    #                                            'l{0}*mask_tmpl.nii.gz'.format(roi)))
    #                 right_roi_fns = sorted(glob(rootdir + participant + anatdir + \
    #                                             'r{0}*mask_tmpl.nii.gz'.format(roi)))
    #                 roi_fns = left_roi_fns + right_roi_fns
    #             if len(roi_fns) > 1:
    #                 # if there are more than 1 mask, combine them
    #                 roi_mask = np.sum([mv.fmri_dataset(roi_fn, mask=mask_fn).samples for roi_fn in roi_fns], axis=0)
    #                 # Set any voxels that might exceed 1 to 1
    #                 roi_mask = np.where(roi_mask > 0, 1, 0)
    #             elif len(roi_fns) == 0:
    #                 # if there are no masks, we get zeros
    #                 print("ROI {0} does not exist for participant {1}; appending all zeros".format(roi, participant))
    #                 roi_mask = np.zeros((1, data_ds.shape[1]))
    #             elif len(roi_fns) == 1:
    #                 roi_mask = mv.fmri_dataset(roi_fns[0], mask=mask_fn).samples
    #                 ## continue here

    # Now that we have a dummy ds with a mapper, we can project the betas into
    # a brain volume -- map2nifti does that. If we save the result, we should
    # be able to load it into FSL.
    return mv.map2nifti(dummy, data)
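A hedged usage sketch; the subject ID, the betas array (hdf5-derived samples in the recovered feature space), and the output name are all placeholders:

nifti = buildremapper('full', 'sub-01', betas)  # hypothetical inputs
nifti.to_filename('sub-01_betas_grpspace.nii.gz')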
Example #10
f = open("withinPredictionResult.csv", "a")
f.write(st)
f.close()

# display results
P.figure()
P.title(
    str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) +
    '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(
    os.path.join(
        sessionPath, 'confMatrixAvTrial{0}-{1}-keepswitch.png'.format(
            boldDelay, stimulusWidth)))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy',
      N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%',
      datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(
    postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename(
    "anovaSensitivity{0}-{1}-keepswitch.nii".format(boldDelay, stimulusWidth))

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights

def make_neurimg(parent_ds, child_ds):
    # Replace the samples of a template dataset and map back to a nifti image
    parent_ds.samples = child_ds
    print(parent_ds.shape)
    nimg = mvpa2.map2nifti(parent_ds)
    return nimg

level = '/narps_level2'
msk = pwd + model_dir + '/narps_level3/interceptAllSubs.gfeat/cope1.feat/intercept_msk.nii.gz'

cope_num = 3  # 1 = intercept, 2 = SV (subjective value), 3 = DE (decision entropy)

work_dir = pwd + model_dir + level
fldrs = os.listdir(work_dir)
fldrs.sort()

for fldr in fldrs:
    print(fldr)
    sub_fldr = work_dir + '/' + fldr
    z_stat1 = sub_fldr + '/cope' + str(cope_num) + '.feat/stats/zstat1.nii.gz' #cope3.feat is for entropy
    ds_tmp = mvpa2.fmri_dataset(z_stat1)
    ds_tmp.samples = ds_tmp.samples * -1  # flip the sign of the z-stat
    nimg = mvpa2.map2nifti(ds_tmp)
    nimg.to_filename(sub_fldr + '/cope' + str(cope_num) + '.feat/stats/zstat2.nii.gz')

# compute which variable wins w.r.t. absolute value
mn_dir = '/second_level_diffs/signed_diffs/flip_DE_sign' #'/second_level_diffs/signed_diffs/zstat1s' #
entropies = pwd + model_dir + mn_dir + '/entropies_z.nii.gz'
subvals = pwd + model_dir + mn_dir + '/subval_z.nii.gz'

ds_DE = mvpa2.fmri_dataset(entropies)
ds_SV = mvpa2.fmri_dataset(subvals)

ds_DE_mn = np.mean(ds_DE.samples,axis=0)
ds_SV_mn = np.mean(ds_SV.samples,axis=0)

DE_msk = np.abs(ds_DE_mn)>np.abs(ds_SV_mn)
SV_msk = np.abs(ds_DE_mn)<np.abs(ds_SV_mn)
Example #14
    conf['label_included'] = ','.join([str(n) for n in np.array([-5, -3, -1, 1, 3, 5])])

    ds = preprocess_dataset(ds, task_, **conf)
    ds.targets = np.float_(ds.targets)
    ds.targets = (ds.targets - np.mean(ds.targets)) / np.std(ds.targets)
    cv = CrossValidation(slsim.RegressionMeasure(),
                         partitioner,
                         # NFoldPartitioner(cvtype=1),
                         errorfx=None)

    kwa = dict(voxel_indices=Sphere(3))
    queryengine = IndexQueryEngine(**kwa)

    sl = Searchlight(cv, queryengine=queryengine)

    map_ = sl(ds)

    map_nii = map2nifti(map_, imghdr=ds.a.imghdr)
    name = "%s_%s_regression_fold_%s" % (subj, task_, str(i))

    result_dict['radius'] = 3
    result_dict['map'] = map_nii

    subj_result = rs.SubjectResult(name, result_dict, savers)
    collection.add(subj_result)
Example #15
        res = 1 - sd.pdist(
            np.hstack((evds[evds.sa.subj == self._subj1].samples,
                       evds[evds.sa.subj == self._subj2].samples)).T,
            'correlation')
        return mvpa.Dataset(np.array(res)[np.newaxis])


# Call inter-subject correlation measure
cor = Corr(subj1, subj2)

# Prepare single voxel Searchlight
sl = mvpa.Searchlight(cor,
                      mvpa.IndexQueryEngine(voxel_indices=mvpa.Sphere(0)),
                      nproc=int(nproc))

#Iterate the inter-subject correlation measure over all voxels
slmap = sl(evds)

# Name the output
filename = 'sl_' + subj1 + '_' + subj2 + dsfile

# Mapping of the searchlight results into group template space
nimg = mvpa.map2nifti(ds, slmap.samples[:, :ds.nfeatures], imghdr=ds.a.imghdr)

# Create the maps output directory if needed
try:
    os.mkdir(os.path.join(path, 'maps'))
except OSError:
    print('maps directory already exists')

# Save result as nifti in maps directory
nb.Nifti1Image(nimg.get_data(),
               nimg.get_header().get_best_affine()).to_filename(
                   os.path.join(path, 'maps', filename + '_univariate.nii.gz'))
Example #16
def spatiotemporal(ds, **kwargs):

    onset = 0
    permutations = 0

    for arg in kwargs:
        if arg == 'onset':
            onset = kwargs[arg]
        if arg == 'duration':
            duration = kwargs[arg]
        if arg == 'enable_results':
            enable_results = kwargs[arg]
        if arg == 'permutations':
            permutations = int(kwargs[arg])

    events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)

    if 'duration' in locals():
        events = [e for e in events if e['duration'] >= duration]
    else:
        duration = np.min([ev['duration'] for ev in events])

    for e in events:
        e['onset'] += onset
        e['duration'] = duration

    evds = eventrelated_dataset(ds, events=events)

    [fclf, cvte] = setup_classifier(**kwargs)

    logger.info('Performing cross-validation ...')
    res = cvte(evds)

    print(cvte.ca.stats)

    if permutations != 0:
        print(cvte.ca.null_prob.samples)
        dist_len = len(cvte.null_dist.dists())
        err_arr = np.zeros(dist_len)
        for i in range(dist_len):
            err_arr[i] = 1 - cvte.ca.stats.stats['ACC']

        total_p_value = np.mean(cvte.null_dist.p(err_arr))
        p_value = cvte.ca.null_prob.samples
    else:
        total_p_value = 0.
        p_value = np.array([0, 0])

    try:
        sensana = fclf.get_sensitivity_analyzer()
        res_sens = sensana(evds)
    except Exception as err:
        allowed_keys = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
            'perm_pvalue', 'p'
        ]

        allowed_results = [
            None, None, cvte.ca.stats, evds.a.mapper, fclf, evds, p_value,
            total_p_value
        ]

        results_dict = dict(zip(allowed_keys, allowed_results))
        results = dict()
        if 'enable_results' not in locals():
            enable_results = allowed_keys[:]
        for elem in enable_results:
            if elem in allowed_keys:
                results[elem] = results_dict[elem]

        return results

    sens_comb = res_sens.get_mapped(mean_sample())
    mean_map = map2nifti(evds, evds.a.mapper.reverse1(sens_comb))

    l_maps = []
    for m in res_sens:
        maps = ds.a.mapper.reverse1(m)
        nifti = map2nifti(evds, maps)
        l_maps.append(nifti)

    l_maps.append(mean_map)
    # Packing results (to be replaced with a function)
    results = dict()
    if 'enable_results' not in locals():
        enable_results = [
            'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
            'pvalue', 'p'
        ]

    allowed_keys = [
        'map', 'sensitivities', 'stats', 'mapper', 'classifier', 'ds',
        'pvalue', 'p'
    ]

    allowed_results = [
        l_maps, res_sens, cvte.ca.stats, evds.a.mapper, fclf, evds, p_value,
        total_p_value
    ]

    results_dict = dict(zip(allowed_keys, allowed_results))

    for elem in enable_results:

        if elem in allowed_keys:
            results[elem] = results_dict[elem]
        else:
            print('******** ' + elem + ' result is not allowed! *********')

    return results
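A minimal invocation sketch for the function above, assuming ds is a preprocessed fmri_dataset with targets and chunks set (all values illustrative):

results = spatiotemporal(ds,
                         onset=1,       # shift event onsets by one volume
                         duration=4,    # clip every event to four volumes
                         permutations=0,
                         enable_results=['stats', 'map'])
results['map'][-1].to_filename('spatiotemporal_mean_sens.nii.gz')  # mean map is appended last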
st = str(boldDelay) + ',' + str(stimulusWidth) + ',' + str(precision) + '\n'
f = open("withinPredictionResult.csv", "a")
f.write(st)
f.close()


# display results
P.figure()
P.title(str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) + '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(os.path.join(sessionPath, 'confMatrixAvTrial-LanguageSwitch-Japanese_English-GNB.png'))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy', N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%', datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename("anovaSensitivity_" + sessionID + '.nii')

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights


st = str(boldDelay) + ',' + str(stimulusWidth) + ',' + str(precision) + '\n'
f = open("withinPredictionResult.csv", "a")
f.write(st)
f.close()


# display results
P.figure()
P.title(str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) + '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(os.path.join(sessionPath, 'confMatrixAvTrial{0}-{1}-Mammal_Tool.png'.format(boldDelay, stimulusWidth)))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy', N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%', datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename("anovaSensitivity{0}-{1}-mammal_tool.nii".format(boldDelay, stimulusWidth))

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights

Example #19
st = str(boldDelay) + ',' + str(stimulusWidth) + ',' + str(precision) + '\n'
f = open("withinPredictionResult.csv", "a")
f.write(st)
f.close()


# display results
P.figure()
P.title(str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) + '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(os.path.join(sessionPath, 'confMatrixAvTrial{0}-{1}-LanguagePresented-Japanese_English.png'.format(boldDelay, stimulusWidth)))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy', N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%', datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename("anovaSensitivity{0}-{1}-LanguagePresented.nii".format(boldDelay, stimulusWidth))

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights


st = str(boldDelay) + ',' + str(stimulusWidth) + ',' + str(precision) + '\n'
f = open("withinPredictionResult.csv", "a")
f.write(st)
f.close()


# display results
P.figure()
P.title(str(N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1)) + '%, n-fold SMLR with anova FS x 500')
foldwiseCvedAnovaSelectedSMLR.ca.stats.plot()
P.savefig(os.path.join(sessionPath, 'confMatrixAvTrial{0}-{1}-LanguageSwitch-Japanese_English-SVM.png'.format(boldDelay, stimulusWidth)))
print(foldwiseCvedAnovaSelectedSMLR.ca.stats.matrix)

print('accuracy', N.round(foldwiseCvedAnovaSelectedSMLR.ca.stats.stats['ACC%'], 1), '%', datetime.datetime.now())

# this should give the average ANOVA measure over the folds - though in practice it is much the same as taking a single fold
sensana = anovaSelectedSMLR.get_sensitivity_analyzer(postproc=M.maxofabs_sample())
cv_sensana = M.RepeatedMeasure(sensana, M.NFoldPartitioner())
sens = cv_sensana(dataset)
print(sens.shape)
M.map2nifti(dataset, N.mean(sens, 0)).to_filename("anovaSensitivity{0}-{1}-LanguageSwitch-Japanese-English-SVM.nii".format(boldDelay, stimulusWidth))

# this looks good, but there is no obvious way to get back from this feature-selected space (of 500) to the whole space of ~28k features for output
weights = anovaSelectedSMLR.clf.weights

Example #21
#k = 11
print(fn)

ds = mvpa2.fmri_dataset(fn)

RSS = np.sum(np.power(ds.samples, 2), axis=0)

k = int(sys.argv[2])
print(k, " PEs")

n = ds.shape[0]
print(n, " data points")

# this was to verify against sigmasquareds.nii.gz
#fn2 = '/scratch/scratch/ucjtbob/narps1_only_entropy_model/narps_level1/sub001_run01.feat/stats/sigmasquareds.nii.gz'
#ds2 = mvpa2.fmri_dataset(fn2)
#RSS2 = ds2.samples * (n-k)

BIC = k * np.log(n) + n * np.log(RSS / n)
BIC[~np.isfinite(BIC)] = 0
print(np.sum(BIC), ' BIC')
print(BIC.shape)

ds.samples = BIC
print(ds.shape)
#print(ds)

nimg = mvpa2.map2nifti(ds)
nimg.to_filename(sys.argv[1] + 'BIC.nii.gz')
#nimg.to_filename(fn + 'BIC.nii.gz')
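For reference, the formula above is the Gaussian-likelihood BIC with constant terms dropped, BIC = k*ln(n) + n*ln(RSS/n). A tiny self-contained check that it matches the sigmasquareds route hinted at in the comments, under the assumption (not verified here) that sigmasquareds stores RSS/(n - k):

import numpy as np

n, k = 450, 11                    # illustrative: volume count and number of PEs
rss = np.array([1e3, 5e2, 2e3])   # stand-in per-voxel residual sums of squares
sigmasq = rss / (n - k)           # hypothetical content of sigmasquareds.nii.gz

bic_from_rss = k * np.log(n) + n * np.log(rss / n)
bic_from_sigmasq = k * np.log(n) + n * np.log(sigmasq * (n - k) / n)
print(np.allclose(bic_from_rss, bic_from_sigmasq))  # True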