Example #1
def main():
    sac_attr = SampleAttributes(TSTATS_DIR + 'sac_attr.txt')
    nav_attr = SampleAttributes(TSTATS_DIR + 'nav_bin_attr.txt')

    # labels
    xtask_run = [
        # down/powerless, up/powerful
        [1, 2],  # down/powerless
        [2, 1],  # up/powerful
    ]
    intask_run = [  # no comparison within a task
        [0, 0], [0, 0]
    ]
    labels = squareform(
        np.vstack([np.hstack([intask_run] * 6 + [xtask_run] * 2)] * 6 +
                  [np.hstack([xtask_run] * 6 + [intask_run] * 2)] * 2))

    for subj in subject_list:
        subj_mask = MASK_DIR + '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj
        dataset = get_nav_sac_data(nav_attr, sac_attr, subj, subj_mask)
        dataset = remove_invariant_features(dataset)
        print(dataset.targets, dataset.chunks)

        # searchlight
        similarity = CustomDist(labels)
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + '%s_%s_%dvox_sim.nii.gz' %
                          (subj, DIRCT, SEARCHLIGHT_RADIUS))
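
The hypothesis matrix built above interleaves within-task blocks (weight 0, ignored) with cross-task blocks (weights 1 and 2), and scipy's squareform then flattens the symmetric square matrix into the condensed vector that a distance-based measure expects. A minimal toy version with one run per task (the real script uses six runs of one task and two of the other):

import numpy as np
from scipy.spatial.distance import squareform

xtask = [[1, 2],
         [2, 1]]   # cross-task cells carry the comparison weights
intask = [[0, 0],
          [0, 0]]  # within-task cells are ignored

toy = np.vstack([np.hstack([intask, xtask]),
                 np.hstack([xtask, intask])])  # 4x4 symmetric, zero diagonal
print(toy)
print(squareform(toy))  # condensed upper triangle: [0 1 2 2 1 0]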
Example #2
def main():
    subject_list = sys.argv[1:] if len(sys.argv) > 1 else EVERYONE
    print(subject_list)

    attr = SampleAttributes(TSTATS_DIR + TSTATS_NAME + '_attr.txt')

    for subj in subject_list:
        tstats_file = TSTATS_DIR + TSTATS_NAME + '_tstats/%s_%s.nii.gz' % (
            subj, TSTATS_NAME)
        dataset = fmri_dataset(samples=tstats_file,
                               mask=MASK_DIR +
                               '%s_ribbon_rsmp0_dil3mm.nii.gz' % subj)
        dataset.sa['chunks'] = attr.chunks
        dataset.sa['targets'] = attr.targets
        dataset = remove_invariant_features(dataset)

        similarity = CustomDist(squareform(LABELS_NAV))
        searchlight = sphere_searchlight(similarity, SEARCHLIGHT_RADIUS)
        searchlight_map = searchlight(dataset)

        # save files
        nifti = map2nifti(data=searchlight_map, dataset=dataset)
        nifti.to_filename(OUTDIR + OUTFILE % (subj, SEARCHLIGHT_RADIUS))
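
CustomDist is not part of stock PyMVPA; it is defined elsewhere in this project. A rough stand-alone sketch of what such a measure plausibly computes inside each searchlight sphere (an assumption, not the project's code): correlate the neural dissimilarities with the nonzero hypothesis weights.

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr

def label_similarity(sphere_samples, condensed_labels):
    # sphere_samples: (n_samples, n_voxels) array for one sphere
    condensed_labels = np.asarray(condensed_labels)
    neural = pdist(sphere_samples, metric='correlation')  # condensed neural RDM
    keep = condensed_labels != 0                          # drop within-task cells
    rho, _ = spearmanr(neural[keep], condensed_labels[keep])
    return rho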
Example #3
import os
import itertools
import numpy as np
import mvpa2.suite as mvpa2  # assumed import style for the mvpa2.* calls below

# job_table, sub, cwd1 and fset_num are defined earlier in the original script
print(np.array(job_table).shape)

behav_file = 'sub' + sub + '_attr.txt'

bold_fname = os.path.join(cwd1, sub, 'betas_sub' + sub +
                          '.nii.gz')  # full functional time series (beta series)

attr_fname = os.path.join(cwd1, 'all_attr',
                          behav_file)  # codes stimulus number and run number
attr = mvpa2.SampleAttributes(attr_fname)  # loads attributes into PyMVPA

ds = mvpa2.fmri_dataset(bold_fname, targets=attr.targets, chunks=attr.chunks)

ds = mvpa2.remove_nonfinite_features(ds)
ds = mvpa2.remove_invariant_features(ds)

# break the brain up into ~100 chunks of 1000 voxels each (to parallelize the searchlight)
try:
    ds = ds[:, fset_num * 1000:(fset_num * 1000) + 1000]
except IndexError:  # the last chunk may hold fewer than 1000 voxels
    ds = ds[:, fset_num * 1000:]
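
# A toy illustration of the 1000-voxel chunking above (the 2500-voxel total
# is made up for the demo); note the last chunk simply comes out shorter:
toy = np.arange(2 * 2500).reshape(2, 2500)
for k in range(3):
    print(k, toy[:, k * 1000:(k + 1) * 1000].shape)  # (2, 1000), (2, 1000), (2, 500)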

# keep the first 54 unique stimulus labels
stimuli = list(ds.uniquetargets[:54])

#create all possible pairs for confusion matrix
pair_list = list(itertools.combinations(range(len(stimuli)), 2))
pair_list2 = []
for x in range(len(pair_list)):
    # loop body truncated in the source; a plausible completion maps each
    # index pair back to its stimulus labels
    pair_list2.append((stimuli[pair_list[x][0]], stimuli[pair_list[x][1]]))
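
# A quick illustration of the pair construction above (stimulus names are made up):
toy_stims = ['face', 'house', 'tool']
idx_pairs = list(itertools.combinations(range(len(toy_stims)), 2))
print(idx_pairs)  # [(0, 1), (0, 2), (1, 2)]
print([(toy_stims[i], toy_stims[j]) for i, j in idx_pairs])
# -> [('face', 'house'), ('face', 'tool'), ('house', 'tool')]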
Example #4
import numpy as np
import mvpa2.suite as mvpa2  # assumed import style for the mvpa2.* calls below

fn3 = '/scratch/scratch/ucjtbob/narps1_subval_entropy/BIC_level2/BIC_medians.nii.gz'

#fn_BIC_diff = '/scratch/scratch/ucjtbob//BIC_diffs_results/subval_minus_entropy_means.nii.gz_T.nii.gz_tfce_corrp_tstat1.nii.gz'
#ds_diff = mvpa2.fmri_dataset(fn_BIC_diff)

accumbens = '/scratch/scratch/ucjtbob/narps_masks/Accumbens_narps.nii.gz'
amygdala = '/scratch/scratch/ucjtbob/narps_masks/Amygdala_narps.nii.gz'
fmc = '/scratch/scratch/ucjtbob/narps_masks/Frontal_Medial_Cortex_narps.nii.gz'

msk = None  # set to one of the ROI masks above to restrict the comparison

# fn1 and fn2 (BIC maps for the other two models) are defined earlier in the original script
ds1 = mvpa2.fmri_dataset(fn1, mask=msk)
ds2 = mvpa2.fmri_dataset(fn2, mask=msk)
ds3 = mvpa2.fmri_dataset(fn3, mask=msk)

ds1 = mvpa2.remove_invariant_features(ds1)
ds2 = mvpa2.remove_invariant_features(ds2)
ds3 = mvpa2.remove_invariant_features(ds3)

bic_sums = [np.sum(ds1.samples), np.sum(ds2.samples), np.sum(ds3.samples)]
print(np.argsort(bic_sums))  # rank the three models by summed BIC (lowest first)

bic_means = [np.mean(ds1.samples), np.mean(ds2.samples), np.mean(ds3.samples)]
print(np.argsort(bic_means))  # rank by mean BIC (lowest first)

#bic_means[0]/bic_means[1]
#bic_means

bic_mins = [np.min(ds1.samples), np.min(ds2.samples), np.min(ds3.samples)]
bic_medians = [
    np.median(ds1.samples),
    np.median(ds2.samples),
    np.median(ds3.samples),
]
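
# np.argsort sorts ascending, so for BIC (lower is better) the first index is
# the best-fitting model. The names below are placeholders; the true
# identities of fn1/fn2/fn3 are set elsewhere in the original script.
model_names = ['model_fn1', 'model_fn2', 'model_fn3']
best = model_names[np.argsort(bic_means)[0]]
print('best model by mean BIC:', best)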