Example #1
def run_anal(mask_path: str, bold_path: str, sl_rad: float, max_blk_edge: int,
             pool_size: int, nn_paths: list, part: int, save_dir: str,
             rsa) -> bool:
    """
    DOCS: TODO:
 
    """
    # extract the subject string from the bold path
    sub = bold_path.split("/")[-1].split(".")[0].split("_")[-1]
    print("starting run {}".format(sub))

    # this is to account for if we're using part 1 of the movie clip or part 2
    if part == 1:
        tr_start_idx = 0
        tr_end_idx = 946
    elif part == 2:
        tr_start_idx = 946
        tr_end_idx = 1976
    else:
        raise ValueError("part must be 1 or 2, got {}".format(part))

    # get the correct correlation matrices for alexnet
    nncor_files = nnpart_files(part, nn_paths)

    # nibabel load, this loads an object but not the numpy arrays of the data
    mask_obj = nib.load(mask_path)
    bold_obj = nib.load(bold_path)

    # converting to numpy arrays
    data = np.array(bold_obj.dataobj)[:, :, :, tr_start_idx:tr_end_idx]
    mask = np.array(mask_obj.dataobj).astype(int)

    sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)
    sl.distribute([data], mask)

    counter = 0
    for nn_path in nncor_files:
        # this is alexnet
        bcvar = np.load(nn_path)
        save_name = nn_path.split("/")[-1].split(".")[0]

        # broadcast the NN matrix
        sl.broadcast(bcvar)

        # now the rsa part
        sl_result = sl.run_searchlight(rsa, pool_size=pool_size)
        # run_searchlight returns an object array; voxels outside the mask
        # come back as None, so zero them before casting to float
        sl_result[sl_result == None] = 0  # noqa: E711 (element-wise compare)
        sl_data = np.array(sl_result, dtype=float)
        sl_img = nib.Nifti1Image(sl_data, affine=mask_obj.affine)

        # save the result to file as a nifti image
        save_path = os.path.join(save_dir, save_name + "_" + sub)
        sl_img.to_filename(save_path + ".nii.gz")

        counter += 1

    if counter == len(nncor_files):
        print("\nCOMPLETE SUCCESSFULLY\n")
        return True

    print("\nSOMETHING FAILED\n")
    return False
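The `rsa` argument to run_anal is never defined in this example. A minimal sketch of a compatible voxel function, assuming the sfn(l, msk, myrad, bcast_var) signature used throughout these examples and modeled on the correlation-based sfn in Example #16; the name and logic are illustrative, not the author's code:

import numpy as np
from scipy.spatial.distance import cdist
from scipy.stats import pearsonr

def rsa(l, msk, myrad, bcast_var):
    # timepoint-by-timepoint correlation matrix of the searchlight's voxels
    b = l[0][msk, :].T
    c = 1 - cdist(b, b, 'correlation').ravel()
    # compare it against the broadcast network correlation matrix
    return pearsonr(c, bcast_var.ravel())[0]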
Example #2
    def block_test(data, mask, max_blk_edge, rad):

        comm = MPI.COMM_WORLD
        rank = comm.rank

        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(mask)
        global_outputs = sl.run_block_function(block_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2, 0]))
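block_test_sfn is not defined in this snippet. A minimal sketch, assuming it mirrors the inline sfn from Example #3 below, with the closed-over rad replaced by the myrad argument the searchlight passes in:

def block_test_sfn(l, msk, myrad, bcast_var):
    # first TR of the block; each voxel holds its own [d1, d2, d3, tr] pattern
    outmat = l[0][:, :, :, 0]
    outmat[~msk] = None
    # trim the myrad-wide border that only provides context for the block
    return outmat[myrad:-myrad, myrad:-myrad, myrad:-myrad]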
Example #3
def block_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var):
        outmat = l[0][:, :, :, 0]
        outmat[~msk] = None
        return outmat[rad:-rad, rad:-rad, rad:-rad]

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_block_function(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2, 0]))
Example #5
def block_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var, extra_params):
        outmat = l[0][:, :, :, 0]
        outmat[~msk] = None
        return outmat[rad:-rad, rad:-rad, rad:-rad]

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_block_function(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2, 0]))
def voxel_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var):
        # Check each point
        for subj in l:
            for _tr in range(subj.shape[3]):
                tr = subj[:, :, :, _tr]
                midpt = tr[rad, rad, rad]
                for d0 in range(tr.shape[0]):
                    for d1 in range(tr.shape[1]):
                        for d2 in range(tr.shape[2]):
                            assert np.array_equal(
                                tr[d0, d1, d2] - midpt,
                                np.array([d0 - rad, d1 - rad, d2 - rad, 0]))

        # Determine midpoint
        midpt = l[0][rad, rad, rad, 0]
        midpt = (midpt[0], midpt[1], midpt[2])

        for d0 in range(msk.shape[0]):
            for d1 in range(msk.shape[1]):
                for d2 in range(msk.shape[2]):
                    pt = (midpt[0] - rad + d0,
                          midpt[1] - rad + d1,
                          midpt[2] - rad + d2)
                    assert bcast_var[pt] == msk[d0, d1, d2]

        # Return midpoint
        return midpt

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_searchlight(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2]))
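Note that the per-voxel assignment subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr]) only works when each subject array has dtype=object. A minimal sketch of inputs for these tests under that assumption (the sizes and two-subject list are illustrative):

import numpy as np

dim0, dim1, dim2, ntr = 10, 10, 10, 5
mask = np.ones((dim0, dim1, dim2), dtype=bool)
# object dtype so a single cell can hold a 4-vector; under MPI, non-root
# ranks may pass None entries instead (hence the `is not None` guards)
data = [np.empty((dim0, dim1, dim2, ntr), dtype=object) for _ in range(2)]

block_test(data, mask, max_blk_edge=5, rad=1)
voxel_test(data, mask, max_blk_edge=5, rad=1)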
Example #7
for i in range(len(subjs)):
    # Load functional data and mask data
    data1 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder1.nii')
    data2 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder2.nii')
    data1 = data1.get_data()
    data2 = data2.get_data()

    np.seterr(divide='ignore', invalid='ignore')

    # Create and run searchlight
    sl = Searchlight(sl_rad=1, max_blk_edge=5)
    sl.distribute([data1, data2], mask_img)
    sl.broadcast(None)
    print('Running Searchlight...')
    global_outputs = sl.run_searchlight(corr2_coeff)
    global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
global_outputs = np.array(global_outputs_avg, dtype=float)
global_nonans = global_outputs[np.not_equal(global_outputs, None)]
global_nonans = np.reshape(global_nonans, (91, 109, 91))
min1 = np.min(global_nonans[~np.isnan(global_nonans)])
max1 = np.max(global_nonans[~np.isnan(global_nonans)])
img = nib.Nifti1Image(global_nonans, np.eye(4))
img.header['cal_min'] = min1
for i in range(len(subjs)):
    print('Subject: ', subjs[i])
    data_run1 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/analysis/run1.feat/trans_filtered_func_data.nii'
                 ).get_data()[:, :, :, 0:628])
    runs.append(data_run1)
for i in range(len(subjs)):
    data_run2 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/data/avg_reorder2.nii').get_data())
    runs.append(data_run2)

print("All Subjects Loaded")
#np.seterr(divide='ignore',invalid='ignore')

# Create and run searchlight
sl = Searchlight(sl_rad=5, max_blk_edge=5)
sl.distribute(runs, mask_img)
sl.broadcast([nfeature, niter, loo_idx])
print('Running Searchlight...')
global_outputs = sl.run_searchlight(corr2_coeff)
global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
#maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
#minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
global_outputs_avg = np.array(global_outputs_avg, dtype=float)
#global_nonans = global_outputs_avg[np.not_equal(global_outputs_avg,None)]
#global_nonans = np.reshape(global_nonans,(91,109,91))
#img = nib.Nifti1Image(global_nonans, np.eye(4))
#img.header['cal_min'] = minval
#img.header['cal_max'] = maxval
#nib.save(img,datadir + 'prototype/link/scripts/data/searchlight_output/janice_srm_results/loo_' + subjs[loo_idx])
Example #9
bcvar = [metas]

# say some things about the mask.
print('mask dimensions: {}'.format(mask.shape))
print('number of voxels in mask: {}'.format(np.sum(mask)))

sl_rad = radius
max_blk_edge = 5
pool_size = 1

# Create the searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)

# Distribute the information to the searchlights (preparing it to run)
sl.distribute(data, mask)
sl.broadcast(bcvar)
slstart = time.time()
sl_result = sl.run_searchlight(Class)

#result = Class(data, np.zeros((5,5,5)), 2, bcvar)

SL = time.time() - slstart
tot = time.time() - starttime
print('total time: {}, searchlight time: {}'.format(tot, SL))
# Only save the data if this is the first core
if rank == 0:
    output = ('{}/{}_r{}.npy'.format(outloc, subject, radius))
    #np.save(output, sl_result)

    sl_result = sl_result.astype('double')
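rank is never set in this fragment; presumably it comes from the standard mpi4py setup used in the other examples:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.rank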
Example #10
        train_tmp = np.nan_to_num(
            stats.zscore(movie_data[:, :, :, :int(ntr / 2), s], axis=3,
                         ddof=1))
        test_tmp = np.nan_to_num(
            stats.zscore(movie_data[:, :, :, int(ntr / 2):, s], axis=3,
                         ddof=1))
    all_data.append(np.concatenate((train_tmp, test_tmp), axis=3))

# Generate mask
mask = np.ones((dim1, dim2, dim3), dtype=bool)

# Create searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)

# Distribute data to processes
sl.distribute(all_data, mask)
sl.broadcast([n_iter, nfeature])


# time segment matching experiment
def timesegmentmatching_accuracy(data, win_size=6):
    nsubjs = len(data)
    (ndim, nsample) = data[0].shape
    accu = np.zeros(shape=nsubjs)

    nseg = nsample - win_size
    # mysseg prediction
    trn_data = np.zeros((ndim * win_size, nseg))

    # the trn data also include the tst data, but will be subtracted when
    # calculating A
    for m in range(nsubjs):
        for w in range(win_size):
            trn_data[w * ndim:(w + 1) * ndim, :] += data[m][:, w:(w + nseg)]
Example #11
# compute shifted recall correlation matrix
shifted_corrmat = np.corrcoef(shifted)

# isolate off-diagonal values with video model temporal correlations > 0
# this was precomputed to save permutation runtime with:
# for k in range(1976):
#     d = np.diag(np.corrcoef(video_model), k=k)
#     if ~(d > 0).any():
#         DIAG_LIMIT = k
#         break
DIAG_LIMIT = 238
diag_mask = np.zeros_like(shifted_corrmat, dtype=bool)

for k in range(1, DIAG_LIMIT):
    ix = kth_diag_indices(diag_mask, k)
    diag_mask[ix] = True

recall_corrs = shifted_corrmat[diag_mask]
to_broadcast = (recall_corrs, diag_mask)

# create Searchlight object
sl = Searchlight(sl_rad=2)

# distribute data to processes
sl.distribute([scan_data], mask)
sl.broadcast(to_broadcast)

# run searchlight, save data
result = sl.run_searchlight(sfn)
np.save(result_path, result)
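kth_diag_indices is not shown in this snippet. A common implementation it presumably resembles (an assumption, not necessarily the author's helper):

import numpy as np

def kth_diag_indices(a, k):
    # row/column indices of the k-th diagonal of a square 2D array
    rows, cols = np.diag_indices_from(a)
    if k > 0:
        return rows[:-k], cols[k:]
    elif k < 0:
        return rows[-k:], cols[:k]
    return rows, cols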
        kernel[i, j, k] = np.exp(-np.dot(arr.T, arr))
  kernel = kernel / np.sum(kernel)

  for (idx, l) in enumerate(labels):
    if l:
      data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
           pt[2]:pt[2] + kernel_dim, idx] += kernel * weight
    else:
      data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
           pt[2]:pt[2] + kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond,
                 min_active_voxels_proportion=0)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)

# Define voxel function
def sfn(l, msk, myrad, bcast_var):
  import sklearn.svm
  import sklearn.model_selection
  classifier = sklearn.svm.SVC()
  data = l[0][msk,:].T
  return np.mean(sklearn.model_selection.cross_val_score(
      classifier, data, bcast_var, n_jobs=1))

# Run searchlight
global_outputs = sl.run_searchlight(sfn)

# Visualize result
if rank == 0:
  print(global_outputs)
Example #13
for i in range(len(subjs)):
    data_run1 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/analysis/run1.feat/trans_filtered_func_data.nii').get_data(
                 )[:, :, :, 0:628])
    runs.append(data_run1)
for i in range(len(subjs)):
    data_run2 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/data/avg_reorder2.nii').get_data())
    runs.append(data_run2)

print("All Subjects Loaded")

#np.seterr(divide='ignore',invalid='ignore')

# Create and run searchlight
sl = Searchlight(sl_rad=5, max_blk_edge=5)
sl.distribute(runs, mask_img)
sl.broadcast([nfeature, niter, loo_idx, exclude_songs])
print('Running Searchlight...')
global_outputs = sl.run_searchlight(corr2_coeff, pool_size=1)
global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
#maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
#minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
global_outputs_avg = np.array(global_outputs_avg, dtype=float)
#global_nonans = global_outputs_avg[np.not_equal(global_outputs_avg,None)]
#global_nonans = np.reshape(global_nonans,(91,109,91))
#img = nib.Nifti1Image(global_nonans, np.eye(4))
#img.header['cal_min'] = minval
#img.header['cal_max'] = maxval
#nib.save(img,datadir + 'prototype/link/scripts/data/searchlight_output/janice_srm_results/loo_' + subjs[loo_idx])
    jazz_within = np.mean(corrAB[8:16, 8:16])
    classJazz_between = np.mean(corrAB[8:16, 0:8])
    jazzClass_between = np.mean(corrAB[0:8, 8:16])
    within_genre = np.mean([classical_within, jazz_within])
    between_genre = np.mean([classJazz_between, jazzClass_between])
    diff = within_genre - between_genre
    return diff

comm.Barrier()
begin_time = time.time()
comm.Barrier()

# Create and run searchlight
sl = Searchlight(sl_rad=1, max_blk_edge=5)
sl.distribute([data1, data2], mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)

comm.Barrier()
end_time = time.time()
comm.Barrier()

# Plot searchlight results
if rank == 0:
    print('Searchlight Done: ', end_time - begin_time)
    maxval = np.max(global_outputs[np.not_equal(global_outputs, None)])
    minval = np.min(global_outputs[np.not_equal(global_outputs, None)])
    global_outputs = np.array(global_outputs, dtype=float)
    print(global_outputs)

    # Save searchlight images
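The snippet breaks off at the save step; a minimal sketch of what likely follows, based on the save pattern in Examples #7 and #13 (the output filename is illustrative):

    img = nib.Nifti1Image(global_outputs, np.eye(4))
    img.header['cal_min'] = minval
    img.header['cal_max'] = maxval
    nib.save(img, 'searchlight_result.nii.gz')  # illustrative output path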
def run_rsa_sub(sub,
                model_rdms,
                procedure,
                corr,
                tasks=TASKS,
                overwrite=False,
                val_label=None,
                pred=None,
                dist='correlation'):
    """
    sub: str subject ID
    model_rdms: dictionary with model names as keys and lower triangles of RDMs as elements
    tasks: array of task names: friend, number, avg
    """
    # check type of input arguments

    # subject id
    sub = convert2subid(sub)

    # get labels
    corr_label = 'spear' if corr == 'corr' else 'reg'
    if val_label is None:
        val_label = 'r' if corr == 'corr' else 'beta'
    elif val_label == "R2" and corr != 'reg':
        print("ERROR: cannot calculate R2 with corr label '" + corr +
              "'. Must be 'reg'")
        exit(1)

    parc_label = SL + str(SL_RADIUS) if isSl(procedure) else PARC_LAB
    # make output directories if they don't exist
    if not os.path.exists(out_dir % (sub, corr_label, dist)):
        os.makedirs(out_dir % (sub, corr_label, dist))

    # tasks to run
    if not isinstance(tasks, list):
        tasks = [tasks]

    # dictionary of model representational dissimilarity matrices (lower triangles)
    if not isinstance(model_rdms, dict):
        if isinstance(model_rdms, list):
            model_rdms = {'model': model_rdms}
        else:
            print("ERROR: model_rdms input must be dictionary")
            exit(1)

    # procedure to run
    if procedure not in PROCEDURES_ALL:
        print("ERROR: procedure is not in PROCEDURES_ALL")
        print(PROCEDURES_ALL)
        exit(1)

    # reference image for saving parcellation output
    parcellation_template = nib.load(MNI_PARCELLATION, mmap=False)
    parcellation_template.set_data_dtype(np.double)

    # get model names
    model_keys = [pred] if val_label == "R2" else model_rdms.keys()
    print(
        str(datetime.now()) + ": Using the following models to get " +
        val_label + " values")
    print(model_rdms.keys())
    # turn model dictionary into matrix, s.t. column = model RDM lower triangle
    for i, k in enumerate(model_rdms.keys()):
        if i == 0:
            # if first model, setup matrix
            model_rdms_mat = [model_rdms[k]]
        else:
            # add next matrix as row
            model_rdms_mat = np.vstack((model_rdms_mat, model_rdms[k]))

    # transpose so that each column corresponds to each measure
    model_rdms_mat = np.transpose(model_rdms_mat)

    out_tasks = {}
    # iterate through inputted tasks
    for task in tasks:
        print(str(datetime.now()) + ": Task " + task)
        # read in subject's image
        sub_template = nib.load(data_fnames % (sub, sub, TASKS[0], 0),
                                mmap=False)
        sub_dims = sub_template.get_data().shape + (N_NODES, )
        sub_data = np.empty(sub_dims)
        for n in range(N_NODES):
            print(str(datetime.now()) + ": Reading in node " + str(n))
            if task == 'avg':
                # average this node's data from both runs
                d1 = load_nii(data_fnames % (sub, sub, TASKS[0], n))
                d2 = load_nii(data_fnames % (sub, sub, TASKS[1], n))
                d = (d1 + d2) / 2
            else:
                d = load_nii(data_fnames % (sub, sub, task, n))
            # save to fourth dimension
            sub_data[:, :, :, n] = d

        out_data_dict = {}
        if isParc(procedure):
            # out csv filename
            sub_csv_fname = csv_fname % (sub, corr_label, dist, sub, task,
                                         corr_label, parc_label, val_label)
            if (not overwrite) and os.path.isfile(sub_csv_fname):
                read_bool = True
                # read in csv if already exists
                sub_csv = pd.read_csv(sub_csv_fname)
                # remove row column
                sub_csv = sub_csv.iloc[:, 1:]
                # save all completed ROIs except for last one (since may not have been finished)
                completed_rois = np.unique(sub_csv['roi'])
                sub_csv = sub_csv[sub_csv['roi'].isin(completed_rois)]
                sub_csv.to_csv(sub_csv_fname)
                out_csv_array = sub_csv.values.tolist()
                completed_preds = np.unique(sub_csv['predictor'])
            else:
                if os.path.isfile(sub_csv_fname):
                    os.remove(sub_csv_fname)
                    print("Deleted " + sub_csv_fname)
                read_bool = False
                out_csv_array = []
                completed_rois = []
                completed_preds = []
            wtr = csv.writer(open(sub_csv_fname, 'a'),
                             delimiter=',',
                             lineterminator='\n')
            # column names for csv file
            colnames = ['sub', 'task', 'roi', 'predictor', val_label]
            if not read_bool:
                # write out to csv
                wtr.writerow(colnames)
            ref_img = parcellation_template
            # make mask
            parcellation = sub_parc % (sub, sub)
            print(str(datetime.now()) + ": Using parcellation " + parcellation)
            parc_data = load_nii(parcellation)
            roi_list = np.unique(parc_data)
            # remove 0 (i.e., the background)
            roi_list = np.delete(roi_list, 0)
            # check if number of parcels matches global variable
            if N_PARCELS != len(roi_list):
                print("WARNING: Number of parcels found (" +
                      str(len(roi_list)) + ") does not equal N_PARCELS (" +
                      str(N_PARCELS) + ")")

            # Run regression on each parcellation
            print(
                str(datetime.now()) + ": Starting parcellation " +
                str(N_PARCELS))
            # get the voxels from parcellation nii
            out_data = ref_img.get_data().astype(np.double)
            # create a dictionary of nii's: one per predictor
            for i, k in enumerate(model_keys):
                out_data_dict[k] = deepcopy(out_data)
            # iterate through each ROI of parcellation and run regression
            for r, parc_roi in enumerate(roi_list):
                roi_done = parc_roi in completed_rois and all(
                    mk in completed_preds for mk in model_keys)
                if roi_done:
                    print(
                        str(datetime.now()) + ': ROI ' + str(parc_roi) +
                        ' already saved.')
                    # read in values from dataframe for nii
                    res = get_roi_csv_val(sub_csv, parc_roi, val_label)
                else:
                    perc_done = round(((r + 1) / len(roi_list)) * 100, 3)
                    print(
                        str(datetime.now()) + ': Analyzing ROI ' +
                        str(parc_roi) + ' -- ' + str(perc_done) + '%')
                    # create mask for this ROI
                    roi_mask = parc_data == parc_roi
                    roi_mask = roi_mask.astype(int)
                    roi_data = get_roi_data(sub_data, roi_mask)
                    res_dict = run_rsa_roi(roi_data,
                                           model_rdms_mat,
                                           corr=corr,
                                           val_label=val_label,
                                           dist=dist)
                    res = res_dict['result']
                # for each model, save the result to its image in out_data_dict
                for i, k in enumerate(model_keys):
                    # save to dataframe if not already there
                    if not roi_done:
                        val = res[i]
                        csv_row = [sub, task, parc_roi, k, val]
                        out_csv_array.append(csv_row)
                        # write out to csv
                        wtr.writerow(csv_row)
                    else:
                        val = res[i]
                    # update voxels
                    model_data = out_data_dict[k]
                    model_data[model_data == parc_roi] = val
                    out_data_dict[k] = model_data
        elif isSl(procedure):
            ref_img = sub_template
            # mask
            if use_mask:
                # load all functional masks and make largest mask
                t = task if task in TASKS else "*"
                print(str(datetime.now()) + ": Reading in masks")
                func_mask_names = glob.glob(sub_mask_fname % (sub, sub, t))
                for i, m in enumerate(func_mask_names):
                    print(m)
                    m_data = load_nii(m)
                    if i == 0:
                        m_sum = deepcopy(m_data)
                    else:
                        m_sum += m_data
                whole_brain_mask = np.where(m_sum > 0, 1, 0)
                whole_brain_mask_dil = binary_dilation(
                    whole_brain_mask,
                    iterations=int(SL_RADIUS)).astype(whole_brain_mask.dtype)
                mask = whole_brain_mask_dil
            else:
                mask = deepcopy(d)
                mask.fill(1)

            # Create the searchlight object
            begin_time = time.time()
            sl = Searchlight(sl_rad=sl_rad,
                             max_blk_edge=max_blk_edge,
                             shape=Ball)
            print(str(datetime.now()) + ": Setup searchlight inputs")
            print(
                str(datetime.now()) + ": Input data shape: " +
                str(sub_data.shape))

            # Distribute the information to the searchlights (preparing it to run)
            print(str(datetime.now()) + ": Distributing searchlight")
            sl.distribute([sub_data], mask)
            # Data that is needed for all searchlights is sent to all cores via
            # the sl.broadcast function. In this example, we are sending the
            # labels for classification to all searchlights.
            print(str(datetime.now()) + ": Broadcasting bcvar")
            sl.broadcast(bcvar)

            print(str(datetime.now()) + ": Shape of searchlight")
            print(sl.shape)
            # turn model dictionary into matrix, s.t. column = model RDM lower triangle
            sl_result = run_rsa_searchlight(sl,
                                            model_rdms_mat,
                                            corr=corr,
                                            val_label=val_label,
                                            dist=dist)
            end_time = time.time()

            # Print outputs
            print(
                str(datetime.now()) + ": Number of searchlights run: " +
                str(len(sl_result[mask == 1])))
            print(
                str(datetime.now()) +
                ': Total searchlight duration (including start up time): %.2f min'
                % ((end_time - begin_time) / 60))
            # separate values
            for i, k in enumerate(model_keys):
                out_data_dict[k] = deepcopy(sl_result)
                for x in range(sl_result.shape[0]):
                    for y in range(sl_result.shape[1]):
                        for z in range(sl_result.shape[2]):
                            val = sl_result[x, y, z]
                            if val is None:
                                out_val = 0.
                            else:
                                out_val = val[i]
                            out_data_dict[k][x, y, z] = out_val

                out_data_dict[k] = out_data_dict[k].astype('double')

            # unindent if saving images in parcellation (in addition to csv)
            # save images
            for k in out_data_dict.keys():
                fname = out_fname % (sub, corr_label, dist, sub, task,
                                     corr_label, parc_label, val_label, k)
                save_nii(out_data_dict[k], ref_img, fname)
        # add to output array
        out_tasks[task] = out_data_dict
        out_tasks['ref_img'] = ref_img
    return out_tasks
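A hedged usage sketch for run_rsa_sub; the subject ID and RDM values are placeholders, and the call assumes the module-level globals (TASKS, PROCEDURES_ALL, the output path templates) are configured:

import numpy as np

# lower triangles of two hypothetical 10-condition model RDMs
n_conditions = 10
n_pairs = n_conditions * (n_conditions - 1) // 2
model_rdms = {'friend_model': np.random.rand(n_pairs),
              'number_model': np.random.rand(n_pairs)}

out_tasks = run_rsa_sub(sub='01',
                        model_rdms=model_rdms,
                        procedure=PROCEDURES_ALL[0],
                        corr='corr',
                        tasks=['avg'])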
Example #16
subid = sys.argv[1]
data = load_img(
    '/idata/cdl/data/fMRI/andy/sherlock/data/sherlock_movie_s%s_10000.nii.gz' %
    str(subid)).get_data()

mask = data[:, :, :, 0] != 10000
model = np.load('/idata/cdl/data/fMRI/andy/sherlock/data/movie_corrmat.npy')
params = dict(sl_rad=5)

# Create searchlight object
sl = Searchlight(**params)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(model)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    from scipy.spatial.distance import cdist
    from scipy.stats import pearsonr
    b = l[0][msk, :].T
    c = 1 - cdist(b, b, 'correlation').ravel()
    return pearsonr(c, bcast_var.ravel())[0]


# Run searchlight
result = sl.run_searchlight(sfn)

np.save(
Example #17
    kernel = kernel / np.sum(kernel)

    for (idx, l) in enumerate(labels):
        if l:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] += kernel * weight
        else:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    import sklearn.svm
    import sklearn.model_selection
    classifier = sklearn.svm.SVC()
    data = l[0][msk, :].T
    return np.mean(
        sklearn.model_selection.cross_val_score(classifier,
                                                data,
                                                bcast_var,
                                                n_jobs=1))
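
The fragment is cut off before the searchlight is actually run; in the otherwise parallel Example #11, the continuation is:

# Run searchlight
global_outputs = sl.run_searchlight(sfn)

# Visualize result
if rank == 0:
    print(global_outputs)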

print('brain data dimension is {}-by-{}-by-{}'.format(dim1, dim2, dim3))

# Generate mask: mask is a 3D binary array, with active voxels being 1. I simply set 
# all voxels to be active in this example, but you should set the mask to fit your ROI
# in practice.
mask = np.ones((dim1, dim2, dim3), dtype=bool)

# Create searchlight object
sl = Searchlight(sl_rad=sl_rad)

# Distribute data to processes
# the first argument of "distribute" is a list of 4D arrays, and each 4D array is data 
# from a single subject
sl.distribute(all_data, mask)
# broadcast something that should be shared by all ranks 
sl.broadcast([niter, nfeature])

# time segment matching experiment. Define your own experiment function here
def time_segment_matching_accuracy(data, win_size=6): 
    nsubjs = len(data)
    (ndim, nsample) = data[0].shape
    accu = np.zeros(shape=nsubjs)
    nseg = nsample - win_size 
    # mysseg prediction
    trn_data = np.zeros((ndim * win_size, nseg), order='f')
    # the trn data also include the tst data, but will be subtracted when
    # calculating A
    for m in range(nsubjs):
        for w in range(win_size):
            trn_data[w * ndim:(w + 1) * ndim, :] += data[m][:, w:(w + nseg)]
    for tst_subj in range(nsubjs):