Example #1

def test_searchlight_with_diamond():
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
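
This test assumes a `diamond_sfn` kernel defined elsewhere. A minimal sketch consistent with the assertions above (only the spot's center voxel sees the full Diamond(3) footprint in-mask, so only it returns a value):

import numpy as np
from brainiak.searchlight.searchlight import Diamond

def diamond_sfn(l, msk, myrad, bcast_var):
    # return 1.0 only when the whole diamond footprint is inside the mask,
    # which happens at the center of the spot placed in the test mask
    if np.all(msk[Diamond(myrad).mask_]):
        return 1.0
    return None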
Example #3
def predict(sample, models, mask):
    test_searchlight = Searchlight(sl_rad=2, shape=Diamond)
    test_searchlight.distribute([sample, models[..., np.newaxis]], mask)
    test_classes = test_searchlight.run_searchlight(test_models)
    # start from NaN so voxels matching no label are not left uninitialized
    test_display = np.full(test_classes.shape, np.nan, dtype=float)
    test_display[test_classes == "face"] = 0.5
    test_display[test_classes == "house"] = 1
    test_display[test_classes == None] = np.nan
    return test_display
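
`test_models` is the searchlight kernel this snippet assumes; a hypothetical sketch that matches the string labels used above (not the original implementation):

import numpy as np

def test_models(l, msk, myrad, bcast_var):
    # l[0]: the test sample; l[1]: class model patterns, assumed to be
    # stacked on the last axis in the order (face, house)
    sample = l[0][msk, :].ravel()
    models = l[1][msk, :]
    # correlate the sample with each class model and pick the best match
    scores = [np.corrcoef(sample, models[..., c].ravel())[0, 1]
              for c in range(models.shape[-1])]
    return ("face", "house")[int(np.argmax(scores))]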
Example #4
def block_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var, extra_params):
        outmat = l[0][:, :, :, 0]
        outmat[~msk] = None
        return outmat[rad:-rad, rad:-rad, rad:-rad]

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_block_function(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2, 0]))
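
A sketch of how this test might be driven under MPI (sizes are hypothetical, mirroring the data construction in Example #1):

import numpy as np
from mpi4py import MPI
from brainiak.searchlight.searchlight import Searchlight

comm = MPI.COMM_WORLD
dims = (10, 10, 10)
ntr = 2
nsubj = 3
mask = np.ones(dims, dtype=bool)
# each rank allocates only the subjects assigned to it; others stay None
data = [np.empty(dims + (ntr,), dtype=object)
        if i % comm.size == comm.rank else None
        for i in range(nsubj)]
block_test(data, mask, max_blk_edge=4, rad=1)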
Example #6
def run_anal(mask_path: str, bold_path: str, sl_rad: float, max_blk_edge: int,
             pool_size: int, nn_paths: list, part: int, save_dir: str,
             rsa) -> bool:
    """
    Run a searchlight RSA of one subject's BOLD data against each network
    RDM in nn_paths, saving one NIfTI result per RDM. Returns True if
    every RDM produced a saved result.
    """
    # extract the subject string from the bold path
    sub = bold_path.split("/")[-1].split(".")[0].split("_")[-1]
    print("starting run {}".format(sub))

    # this is to account for if we're using part 1 of the movie clip or part 2
    if part == 1:
        tr_start_idx = 0
        tr_end_idx = 946
    elif part == 2:
        tr_start_idx = 946
        tr_end_idx = 1976
    else:
        raise ValueError("part must be 1 or 2, got {}".format(part))

    # get the correct correlation matrices for alexnet
    nncor_files = nnpart_files(part, nn_paths)

    # nibabel load, this loads an object but not the numpy arrays of the data
    mask_obj = nib.load(mask_path)
    bold_obj = nib.load(bold_path)

    # converting to numpy arrays
    data = np.array(bold_obj.dataobj)[:, :, :, tr_start_idx:tr_end_idx]
    mask = np.array(mask_obj.dataobj).astype(int)

    sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)
    sl.distribute([data], mask)

    counter = 0
    for nn_path in nncor_files:
        # this is alexnet
        bcvar = np.load(nn_path)
        save_name = nn_path.split("/")[-1].split(".")[0]

        # broadcast the NN matrix
        sl.broadcast(bcvar)

        # now the rsa part
        sl_result = sl.run_searchlight(rsa, pool_size=pool_size)
        sl_result[sl_result == None] = 0
        sl_data = np.array(sl_result, dtype=float)
        sl_img = nib.Nifti1Image(sl_data, affine=mask_obj.affine)

        # save the result to file as a nifti image
        save_path = os.path.join(save_dir, save_name + "_" + sub)
        sl_img.to_filename(save_path + ".nii.gz")

        counter += 1

    if counter == len(nncor_files):
        print("\nCOMPLETED SUCCESSFULLY\n")
        return True

    print("\nSOMETHING FAILED\n")
    return False
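
The `rsa` argument is the searchlight kernel, defined by the caller. A minimal sketch, assuming the broadcast NN matrix is a square correlation matrix to be rank-correlated with the sphere's neural RDM (hypothetical, not the original):

import numpy as np
from scipy.spatial.distance import pdist, squareform
from scipy.stats import spearmanr

def rsa(l, msk, myrad, bcast_var):
    # neural RDM: correlation distances between TR patterns in the sphere
    bold = l[0][msk, :].T                    # (ntr, nvox)
    neural_rdm = pdist(bold, 'correlation')  # condensed lower triangle
    # bcast_var: the broadcast NN correlation matrix (assumed square)
    nn_rdm = squareform(1 - bcast_var, checks=False)
    return spearmanr(neural_rdm, nn_rdm).correlation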
Example #7
    def block_test(data, mask, max_blk_edge, rad):

        comm = MPI.COMM_WORLD
        rank = comm.rank

        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(mask)
        global_outputs = sl.run_block_function(block_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2, 0]))
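
This version assumes a module-level `block_test_sfn`. A sketch that matches the inline `sfn` from Example #4 (here the radius arrives through `myrad`):

def block_test_sfn(l, msk, myrad, bcast_var, extra_params):
    # take the first TR of the first subject and blank out-of-mask voxels
    outmat = l[0][:, :, :, 0]
    outmat[~msk] = None
    # trim the rad-wide apron so outputs align with the block interior
    return outmat[myrad:-myrad, myrad:-myrad, myrad:-myrad]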
Example #8
def block_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var):
        outmat = l[0][:, :, :, 0]
        outmat[~msk] = None
        return outmat[rad:-rad, rad:-rad, rad:-rad]

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_block_function(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2, 0]))
Example #9
def voxel_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var):
        # Check each point
        for subj in l:
            for _tr in range(subj.shape[3]):
                tr = subj[:, :, :, _tr]
                midpt = tr[rad, rad, rad]
                for d0 in range(tr.shape[0]):
                    for d1 in range(tr.shape[1]):
                        for d2 in range(tr.shape[2]):
                            assert np.array_equal(
                                tr[d0, d1, d2] - midpt,
                                np.array([d0 - rad, d1 - rad, d2 - rad, 0]))

        # Determine midpoint
        midpt = l[0][rad, rad, rad, 0]
        midpt = (midpt[0], midpt[1], midpt[2])

        for d0 in range(msk.shape[0]):
            for d1 in range(msk.shape[1]):
                for d2 in range(msk.shape[2]):
                    pt = (midpt[0] - rad + d0,
                          midpt[1] - rad + d1,
                          midpt[2] - rad + d2)
                    assert bcast_var[pt] == msk[d0, d1, d2]

        # Return midpoint
        return midpt

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_searchlight(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2]))
Example #10

def test_mvpa_voxel_selection():
    data = prng.rand(5, 5, 5, 8).astype(np.float32)
    # all MPI processes read the mask; the mask file is small
    mask = np.ones([5, 5, 5], dtype=bool)
    mask[0, 0, :] = False
    labels = [0, 1, 0, 1, 0, 1, 0, 1]
    # 2 subjects, 4 epochs per subject
    sl = Searchlight(sl_rad=1)
    mvs = MVPAVoxelSelector(data, mask, labels, 2, sl)
    # for cross validation, use an SVM with an RBF kernel
    clf = svm.SVC(kernel='rbf', C=10)
    result_volume, results = mvs.run(clf)
    if MPI.COMM_WORLD.Get_rank() == 0:
        output = []
        for tup in results:
            if tup[1] > 0:
                output.append(int(8 * tup[1]))
        expected_output = [6, 6, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4,
                           4, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 1]
        assert np.allclose(output, expected_output, atol=1), \
            'voxel selection via SVM does not provide correct results'
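
`prng` is assumed to be a seeded generator created at module level, e.g.:

import numpy as np

# any fixed seed works, but the expected_output values above were produced
# with the module's own seed, so exact numbers depend on it
prng = np.random.RandomState(0)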
Example #11
else:
    _data = None
    data.append(_data)

bcvar = [metas]

# say some things about the mask.
print('mask dimensions: {}'.format(mask.shape))
print('number of voxels in mask: {}'.format(np.sum(mask)))

sl_rad = radius
max_blk_edge = 5
pool_size = 1

# Create the searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)

# Distribute the information to the searchlights (preparing it to run)
sl.distribute(data, mask)
sl.broadcast(bcvar)
slstart = time.time()
sl_result = sl.run_searchlight(Class)

#result = Class(data, np.zeros((5,5,5)), 2, bcvar)

SL = time.time() - slstart
tot = time.time() - starttime
print('total time: {}, searchlight time: {}'.format(tot, SL))
Example #12

# Only load the data on the first core
if rank == 0:
    data_i, mask, affine_mat, dimsize = load_fs_data(sub_id)
    data.append(data_i)
else:
    data.append(None)
bcvar_i = load_fs_label(sub_id)
bcvar.append(bcvar_i)

sl_rad = 1
max_blk_edge = 5
pool_size = 1

coords = np.where(mask)

# Create the searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)
# print("Setup searchlight inputs")
# print("Number of subjects: " + str(len(data)))
# print("Input data shape: " + str(data[0].shape))
# print("Input mask shape: " + str(mask.shape) + "\n")

# Distribute the information to the searchlights (preparing it to run)
sl.distribute(data, mask)

# Broadcast variables
sl.broadcast(bcvar)


# Set up the kernel function, in this case an SVM
def calc_svm(data, sl_mask, myrad, bcvar):
    accuracy = []
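
The listing cuts the kernel off here; a minimal completion sketch (restating the visible lines, and assuming `bcvar` carries the per-TR condition labels):

import numpy as np

def calc_svm(data, sl_mask, myrad, bcvar):
    import sklearn.svm
    import sklearn.model_selection
    labels = bcvar  # assumed: one condition label per TR
    # reshape the sphere's voxels into a (samples, features) matrix
    bolddata_sl = data[0][sl_mask, :].T
    clf = sklearn.svm.SVC(kernel='linear', C=1)
    accuracy = sklearn.model_selection.cross_val_score(
        clf, bolddata_sl, labels, cv=3)
    return np.mean(accuracy)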
Example #13
for s in range(nsubj):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        train_tmp = np.nan_to_num(
            stats.zscore(movie_data[:, :, :, :int(ntr / 2), s], axis=3,
                         ddof=1))
        test_tmp = np.nan_to_num(
            stats.zscore(movie_data[:, :, :, int(ntr / 2):, s], axis=3,
                         ddof=1))
    all_data.append(np.concatenate((train_tmp, test_tmp), axis=3))

# Generate mask
mask = np.ones((dim1, dim2, dim3), dtype=bool)

# Create searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)

# Distribute data to processes
sl.distribute(all_data, mask)
sl.broadcast([n_iter, nfeature])


# time segment matching experiment
def timesegmentmatching_accuracy(data, win_size=6):
    nsubjs = len(data)
    (ndim, nsample) = data[0].shape
    accu = np.zeros(shape=nsubjs)

    nseg = nsample - win_size
    # mysseg prediction
    trn_data = np.zeros((ndim * win_size, nseg))
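
The listing truncates the function here. For reference, a completion sketch in the spirit of the BrainIAK functional-alignment examples (not necessarily the original code):

import numpy as np
from scipy import stats

def timesegmentmatching_accuracy(data, win_size=6):
    nsubjs = len(data)
    (ndim, nsample) = data[0].shape
    accu = np.zeros(shape=nsubjs)
    nseg = nsample - win_size
    trn_data = np.zeros((ndim * win_size, nseg))
    # concatenate each window of win_size TRs into one long feature vector,
    # summed over all subjects
    for m in range(nsubjs):
        for w in range(win_size):
            trn_data[w * ndim:(w + 1) * ndim, :] += data[m][:, w:(w + nseg)]
    for tst_subj in range(nsubjs):
        tst_data = np.zeros((ndim * win_size, nseg))
        for w in range(win_size):
            tst_data[w * ndim:(w + 1) * ndim, :] = \
                data[tst_subj][:, w:(w + nseg)]
        # training data = sum over all subjects minus the test subject
        A = np.nan_to_num(stats.zscore(trn_data - tst_data, axis=0, ddof=1))
        B = np.nan_to_num(stats.zscore(tst_data, axis=0, ddof=1))
        # correlate every test segment with every candidate segment
        corr_mtx = B.T.dot(A)
        # exclude overlapping (but not identical) segments from the match
        for i in range(nseg):
            for j in range(nseg):
                if abs(i - j) < win_size and i != j:
                    corr_mtx[i, j] = -np.inf
        max_idx = np.argmax(corr_mtx, axis=1)
        accu[tst_subj] = np.sum(max_idx == np.arange(nseg)) / nseg
    return accu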
Example #14
        # for non-parametric statistical analysis.
        # There are three random options:
        # RandomType.NORANDOM is the default
        # RandomType.REPRODUCIBLE permutes the voxels in the same way every run
        # RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
        # example
        #from brainiak.fcma.preprocessing import RandomType
        #data, labels = prepare_searchlight_mvpa_data(images, conditions,
        #                                                    random=RandomType.UNREPRODUCIBLE)

        # the following line is an example to leaving a subject out
        #epoch_info = [x for x in epoch_info if x[1] != 0]

    num_subjs = int(sys.argv[5])
    # create a Searchlight object
    sl = Searchlight(sl_rad=1)
    mvs = MVPAVoxelSelector(data, mask, labels, num_subjs, sl)
    clf = svm.SVC(kernel='linear', shrinking=False, C=1, gamma='auto')
    # only rank 0 has meaningful return values
    score_volume, results = mvs.run(clf)
    # this output is just for result checking
    if MPI.COMM_WORLD.Get_rank() == 0:
        score_volume = np.nan_to_num(score_volume.astype(float))
        io.save_as_nifti_file(score_volume, mask_image.affine,
                              'result_score.nii.gz')
        seq_volume = np.zeros(mask.shape, dtype=int)
        seq = np.zeros(len(results), dtype=int)
        with open('result_list.txt', 'w') as fp:
            for idx, tup in enumerate(results):
                fp.write(str(tup[0]) + ' ' + str(tup[1]) + '\n')
                seq[tup[0]] = idx
Example #15
# print information
if rank == 0:
    print('searchlight radius is {}'.format(sl_rad))
    print('number of features in SRM: {}'.format(nfeature))
    print('number of subjects is: {}'.format(len(all_data)))
    print('number of TRs is: {}'.format(ntr))
    print('brain data dimension is {}-by-{}-by-{}'.format(dim1, dim2, dim3))

# Generate mask: mask is a 3D binary array, with active voxels being 1.
# All voxels are set active in this example, but in practice the mask
# should fit your ROI.
mask = np.ones((dim1, dim2, dim3), dtype=bool)

# Create searchlight object
sl = Searchlight(sl_rad=sl_rad)

# Distribute data to processes
# the first argument of "distribute" is a list of 4D arrays, and each 4D array is data 
# from a single subject
sl.distribute(all_data, mask)
# broadcast something that should be shared by all ranks 
sl.broadcast([niter,nfeature])

# time segment matching experiment; define your own experiment function here
def time_segment_matching_accuracy(data, win_size=6):
    nsubjs = len(data)
    (ndim, nsample) = data[0].shape
    accu = np.zeros(shape=nsubjs)
    nseg = nsample - win_size
    # mysseg prediction
Example #16
import sys
import numpy as np
from nilearn.image import load_img
from brainiak.searchlight.searchlight import Searchlight

subid = sys.argv[1]
data = load_img(
    '/idata/cdl/data/fMRI/andy/sherlock/data/sherlock_movie_s%s_10000.nii.gz' %
    str(subid)).get_data()

mask = data[:, :, :, 0] != 10000
model = np.load('/idata/cdl/data/fMRI/andy/sherlock/data/movie_corrmat.npy')
params = dict(sl_rad=5)

# Create searchlight object
sl = Searchlight(**params)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(model)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    from scipy.spatial.distance import cdist
    from scipy.stats import pearsonr
    b = l[0][msk, :].T
    c = 1 - cdist(b, b, 'correlation').ravel()
    return pearsonr(c, bcast_var.ravel())[0]
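
The listing stops after the kernel; a natural continuation (a sketch, following the save pattern used elsewhere in this listing; the output path is hypothetical) runs the searchlight and saves the rank-0 result:

from mpi4py import MPI

result = sl.run_searchlight(sfn)
if MPI.COMM_WORLD.rank == 0:
    # unreached voxels come back as None; convert before saving
    result[result == None] = np.nan
    np.save('sherlock_s%s_searchlight.npy' % str(subid),
            np.array(result, dtype=float))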

Example #17

for i in range(len(subjs)):
    # Load functional data and mask data
    print('Subject: ', subjs[i])
    data_run1 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/analysis/run1.feat/trans_filtered_func_data.nii'
                 ).get_data()[:, :, :, 0:628])
    runs.append(data_run1)
for i in range(len(subjs)):
    data_run2 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/data/avg_reorder2.nii').get_data())
    runs.append(data_run2)
    
print("All Subjects Loaded")
         
            
#np.seterr(divide='ignore',invalid='ignore')

# Create and run searchlight
sl = Searchlight(sl_rad=5, max_blk_edge=5)
sl.distribute(runs, mask_img)
sl.broadcast([nfeature, niter, loo_idx])
print('Running Searchlight...')
global_outputs = sl.run_searchlight(corr2_coeff)
global_outputs_all[:, :, :, i] = global_outputs
        
# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
#maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
#minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
global_outputs_avg = np.array(global_outputs_avg, dtype=float)
#global_nonans = global_outputs_avg[np.not_equal(global_outputs_avg,None)]
#global_nonans = np.reshape(global_nonans,(91,109,91))
#img = nib.Nifti1Image(global_nonans, np.eye(4))
#img.header['cal_min'] = minval
Example #18
# compute shifted recall correlation matrix
shifted_corrmat = np.corrcoef(shifted)

# isolate off-diagonal values with video model temporal correlations > 0
# this was precomputed to save permutation runtime with:
# for k in range(1976):
#     d = np.diag(np.corrcoef(video_model), k=k)
#     if ~(d > 0).any():
#         DIAG_LIMIT = k
#         break
DIAG_LIMIT = 238
diag_mask = np.zeros_like(shifted_corrmat, dtype=bool)

for k in range(1, DIAG_LIMIT):
    ix = kth_diag_indices(diag_mask, k)
    diag_mask[ix] = True

recall_corrs = shifted_corrmat[diag_mask]
to_broadcast = (recall_corrs, diag_mask)

# create Searchlight object
sl = Searchlight(sl_rad=2)

# distribute data to processes
sl.distribute([scan_data], mask)
sl.broadcast(to_broadcast)

# run searchlight, save data
result = sl.run_searchlight(sfn)
np.save(result_path, result)
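
This snippet assumes a `kth_diag_indices` helper (and an `sfn` kernel) defined elsewhere; a standard sketch of the helper for k > 0:

import numpy as np

def kth_diag_indices(a, k):
    # row/column indices of the k-th superdiagonal of a square array
    rows, cols = np.diag_indices_from(a)
    return rows[:-k], cols[k:]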
Example #19
  kernel = np.zeros((kernel_dim, kernel_dim, kernel_dim))
  for i in range(kernel_dim):
    for j in range(kernel_dim):
      for k in range(kernel_dim):
        arr = np.array([i - (kernel_dim / 2),
                        j - (kernel_dim / 2),
                        k - (kernel_dim / 2)])
        kernel[i, j, k] = np.exp(-np.dot(arr.T, arr))
  kernel = kernel / np.sum(kernel)

  for (idx, l) in enumerate(labels):
    if l:
      data[pt[0]:pt[0] + kernel_dim,
           pt[1]:pt[1] + kernel_dim,
           pt[2]:pt[2] + kernel_dim, idx] += kernel * weight
    else:
      data[pt[0]:pt[0] + kernel_dim,
           pt[1]:pt[1] + kernel_dim,
           pt[2]:pt[2] + kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond,
                 min_active_voxels_proportion=0)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)

# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    import sklearn.svm
    import sklearn.model_selection
    classifier = sklearn.svm.SVC()
    data = l[0][msk, :].T
    return np.mean(sklearn.model_selection.cross_val_score(
        classifier, data, bcast_var, n_jobs=1))

# Run searchlight
global_outputs = sl.run_searchlight(sfn)
Example #20

def corr2_coeff(l, msk, myrad, bcast_var):
    # A and B are assumed to be the two subjects' masked searchlight
    # patterns, each with 16 columns (8 classical + 8 jazz conditions)
    A = l[0][msk, :]
    B = l[1][msk, :]
    corrAB = np.corrcoef(A.T, B.T)[16:, :16]
    classical_within = np.mean(corrAB[0:8,0:8])
    jazz_within = np.mean(corrAB[8:16,8:16])
    classJazz_between = np.mean(corrAB[8:16,0:8])
    jazzClass_between = np.mean(corrAB[0:8,8:16])
    within_genre = np.mean([classical_within,jazz_within])
    between_genre = np.mean([classJazz_between,jazzClass_between])
    diff = within_genre - between_genre
    return diff

comm.Barrier()
begin_time = time.time()
comm.Barrier()

# Create and run searchlight
sl = Searchlight(sl_rad=1, max_blk_edge=5)
sl.distribute([data1, data2], mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)

comm.Barrier()
end_time = time.time()
comm.Barrier()

# Plot searchlight results
if rank == 0:
    print('Searchlight Done: ', end_time - begin_time)
    maxval = np.max(global_outputs[np.not_equal(global_outputs, None)])
    minval = np.min(global_outputs[np.not_equal(global_outputs, None)])
    global_outputs = np.array(global_outputs, dtype=float)
    print(global_outputs)
Example #21
        # Compute difference score for permuted matrices
        np.random.seed(0)
        diff_perm_holder = np.zeros((100, 1))
        for i in range(100):
            corrAB_perm = corrAB[np.random.permutation(16), :]
            same_songs_perm = corrAB_perm[corr_eye == 1]
            diff_songs_perm = corrAB_perm[corr_eye == 0]
            diff_perm_holder[i] = np.mean(same_songs_perm) - np.mean(
                diff_songs_perm)

        z = (same_song_minus_diff_song -
             np.mean(diff_perm_holder)) / np.std(diff_perm_holder)
        return z

    # Create and run searchlight
    sl = Searchlight(sl_rad=2, max_blk_edge=5)
    sl.distribute([leftout, avg_others], mask_img)
    sl.broadcast(None)
    print('Running Searchlight...')
    global_outputs = sl.run_searchlight(corr2_coeff)
    global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
global_outputs_avg = np.array(global_outputs_avg, dtype=float)
global_nonans = global_outputs_avg[np.not_equal(global_outputs_avg, None)]
global_nonans = np.reshape(global_nonans, (91, 109, 91))
min1 = np.min(global_nonans[~np.isnan(global_nonans)])
max1 = np.max(global_nonans[~np.isnan(global_nonans)])
Example #22

node_brain = nii.get_data()

# Specify paths
searchlights_path = './community_structure/simulated_data/searchlights/'

# Set the names
sub_idx = brain_file.find('_brain/') + 7
subjectName = brain_file[sub_idx:]
output_name = searchlights_path + subjectName

# Make the mask
mask = node_brain != 0
mask = mask[:, :, :, 0]

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5)

# Distribute data to processes
sl.distribute([node_brain], mask)
sl.broadcast(None)

# Run clusters
sl_outputs = sl.run_searchlight(rsa_sl, pool_size=1)

if rank == 0:

    # Convert the output into what can be used
    sl_outputs = sl_outputs.astype('double')
    sl_outputs[np.isnan(sl_outputs)] = 0

    # Save the volume
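
A sketch of the save step the trailing comment announces (`nii` and `output_name` are defined above; nibabel import assumed):

import nibabel as nib

sl_nii = nib.Nifti1Image(sl_outputs, nii.affine)
nib.save(sl_nii, output_name)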
Example #23

def run_rsa_sub(sub,
                model_rdms,
                procedure,
                corr,
                tasks=TASKS,
                overwrite=False,
                val_label=None,
                pred=None,
                dist='correlation'):
    """
    sub: str subject ID
    model_rdms: dictionary with model names as keys and lower triangles
        of RDMs as elements
    procedure: analysis procedure; must be one of PROCEDURES_ALL
    corr: 'corr' for Spearman correlation, 'reg' for regression
    tasks: array of task names: friend, number, avg
    val_label: statistic to save ('r', 'beta', or 'R2'; R2 requires 'reg')
    dist: distance metric used to build the neural RDM
    """
    # check type of input arguments

    # subject id
    sub = convert2subid(sub)

    # get labels
    corr_label = 'spear' if corr == 'corr' else 'reg'
    if val_label is None:
        val_label = 'r' if corr == 'corr' else 'beta'
    elif val_label == "R2" and corr != 'reg':
        print("ERROR: cannot calculate R2 with corr label '" + corr +
              "'. Must be 'reg'")
        exit(1)

    parc_label = SL + str(SL_RADIUS) if isSl(procedure) else PARC_LAB
    # make output directories if they don't exist
    if not os.path.exists(out_dir % (sub, corr_label, dist)):
        os.makedirs(out_dir % (sub, corr_label, dist))

    # tasks to run
    if not isinstance(tasks, list):
        tasks = [tasks]

    # dictionary of model representational dissimilarity matrices (lower triangles)
    if not isinstance(model_rdms, dict):
        if isinstance(model_rdms, list):
            model_rdms = {'model': model_rdms}
        else:
            print("ERROR: model_rdms input must be dictionary")
            exit(1)

    # procedure to run
    if procedure not in PROCEDURES_ALL:
        print("ERROR: procedure is not in PROCEDURES_ALL")
        print(PROCEDURES_ALL)
        exit(1)

    # reference image for saving parcellation output
    parcellation_template = nib.load(MNI_PARCELLATION, mmap=False)
    parcellation_template.set_data_dtype(np.double)

    # get model names
    model_keys = [pred] if val_label == "R2" else model_rdms.keys()
    print(
        str(datetime.now()) + ": Using the following models to get " +
        val_label + " values")
    print(model_rdms.keys())
    # turn model dictionary into matrix, s.t. column = model RDM lower triangle
    for i, k in enumerate(model_rdms.keys()):
        if i == 0:
            # if first model, setup matrix
            model_rdms_mat = [model_rdms[k]]
        else:
            # add next matrix as row
            model_rdms_mat = np.vstack((model_rdms_mat, model_rdms[k]))

    # transpose so that each column corresponds to each measure
    model_rdms_mat = np.transpose(model_rdms_mat)

    out_tasks = {}
    # iterate through inputted tasks
    for task in tasks:
        print(str(datetime.now()) + ": Task " + task)
        # read in subject's image
        sub_template = nib.load(data_fnames % (sub, sub, TASKS[0], 0),
                                mmap=False)
        sub_dims = sub_template.get_data().shape + (N_NODES, )
        sub_data = np.empty(sub_dims)
        for n in range(N_NODES):
            print(str(datetime.now()) + ": Reading in node " + str(n))
            if task == 'avg':
                # average this node's data from both runs
                d1 = load_nii(data_fnames % (sub, sub, TASKS[0], n))
                d2 = load_nii(data_fnames % (sub, sub, TASKS[1], n))
                d = (d1 + d2) / 2
            else:
                d = load_nii(data_fnames % (sub, sub, task, n))
            # save to fourth dimension
            sub_data[:, :, :, n] = d

        out_data_dict = {}
        if isParc(procedure):
            # out csv filename
            sub_csv_fname = csv_fname % (sub, corr_label, dist, sub, task,
                                         corr_label, parc_label, val_label)
            if (not overwrite) and os.path.isfile(sub_csv_fname):
                read_bool = True
                # read in csv if already exists
                sub_csv = pd.read_csv(sub_csv_fname)
                # remove row column
                sub_csv = sub_csv.iloc[:, 1:]
                # keep rows only for ROIs that have already been completed
                completed_rois = np.unique(sub_csv['roi'])
                sub_csv = sub_csv[sub_csv['roi'].isin(completed_rois)]
                sub_csv.to_csv(sub_csv_fname)
                out_csv_array = sub_csv.values.tolist()
                completed_preds = np.unique(sub_csv['predictor'])
            else:
                if os.path.isfile(sub_csv_fname):
                    os.remove(sub_csv_fname)
                    print("Deleted " + sub_csv_fname)
                read_bool = False
                out_csv_array = []
                completed_rois = []
                completed_preds = []
            wtr = csv.writer(open(sub_csv_fname, 'a'),
                             delimiter=',',
                             lineterminator='\n')
            # column names for csv file
            colnames = ['sub', 'task', 'roi', 'predictor', val_label]
            if not read_bool:
                # write out to csv
                wtr.writerow(colnames)
            ref_img = parcellation_template
            # make mask
            parcellation = sub_parc % (sub, sub)
            print(str(datetime.now()) + ": Using parcellation " + parcellation)
            parc_data = load_nii(parcellation)
            roi_list = np.unique(parc_data)
            # remove 0 (i.e., the background)
            roi_list = np.delete(roi_list, 0)
            # check if number of parcels matches global variable
            if N_PARCELS != len(roi_list):
                print("WARNING: Number of parcels found (" +
                      str(len(roi_list)) + ") does not equal N_PARCELS (" +
                      str(N_PARCELS) + ")")

            # Run regression on each parcellation
            print(
                str(datetime.now()) + ": Starting parcellation " +
                str(N_PARCELS))
            # get the voxels from parcellation nii
            out_data = ref_img.get_data().astype(np.double)
            # create a dictionary of nii's: one per predictor
            for i, k in enumerate(model_keys):
                out_data_dict[k] = deepcopy(out_data)
            # iterate through each ROI of parcellation and run regression
            for r, parc_roi in enumerate(roi_list):
                roi_done = parc_roi in completed_rois and all(
                    mk in completed_preds for mk in model_keys)
                if roi_done:
                    print(
                        str(datetime.now()) + ': ROI ' + str(parc_roi) +
                        ' already saved.')
                    # read in values from dataframe for nii
                    res = get_roi_csv_val(sub_csv, parc_roi, val_label)
                else:
                    perc_done = round(((r + 1) / len(roi_list)) * 100, 3)
                    print(
                        str(datetime.now()) + ': Analyzing ROI ' +
                        str(parc_roi) + ' -- ' + str(perc_done) + '%')
                    # create mask for this ROI
                    roi_mask = parc_data == parc_roi
                    roi_mask = roi_mask.astype(int)
                    roi_data = get_roi_data(sub_data, roi_mask)
                    res_dict = run_rsa_roi(roi_data,
                                           model_rdms_mat,
                                           corr=corr,
                                           val_label=val_label,
                                           dist=dist)
                    res = res_dict['result']
                # for each model, save the result to its image in out_data_dict
                for i, k in enumerate(model_keys):
                    # save to dataframe if not already there
                    if not roi_done:
                        val = res[i]
                        csv_row = [sub, task, parc_roi, k, val]
                        out_csv_array.append(csv_row)
                        # write out to csv
                        wtr.writerow(csv_row)
                    else:
                        val = res[i]
                    # update voxels
                    model_data = out_data_dict[k]
                    model_data[model_data == parc_roi] = val
                    out_data_dict[k] = model_data
        elif isSl(procedure):
            ref_img = sub_template
            # mask
            if use_mask:
                # load all functional masks and make largest mask
                t = task if task in TASKS else "*"
                print(str(datetime.now()) + ": Reading in masks")
                func_mask_names = glob.glob(sub_mask_fname % (sub, sub, t))
                for i, m in enumerate(func_mask_names):
                    print(m)
                    m_data = load_nii(m)
                    if i == 0:
                        m_sum = deepcopy(m_data)
                    else:
                        m_sum += m_data
                whole_brain_mask = np.where(m_sum > 0, 1, 0)
                whole_brain_mask_dil = binary_dilation(
                    whole_brain_mask,
                    iterations=int(SL_RADIUS)).astype(whole_brain_mask.dtype)
                mask = whole_brain_mask_dil
            else:
                mask = deepcopy(d)
                mask.fill(1)

            # Create the searchlight object
            begin_time = time.time()
            sl = Searchlight(sl_rad=sl_rad,
                             max_blk_edge=max_blk_edge,
                             shape=Ball)
            print(str(datetime.now()) + ": Setup searchlight inputs")
            print(
                str(datetime.now()) + ": Input data shape: " +
                str(sub_data.shape))

            # Distribute the information to the searchlights (preparing it to run)
            print(str(datetime.now()) + ": Distributing searchlight")
            sl.distribute([sub_data], mask)
            # Data that is needed for all searchlights is sent to all cores via
            # the sl.broadcast function. In this example, we are sending the
            # labels for classification to all searchlights.
            print(str(datetime.now()) + ": Broadcasting bcvar")
            sl.broadcast(bcvar)

            print(str(datetime.now()) + ": Shape of searchlight")
            print(sl.shape)
            # turn model dictionary into matrix, s.t. column = model RDM lower triangle
            sl_result = run_rsa_searchlight(sl,
                                            model_rdms_mat,
                                            corr=corr,
                                            val_label=val_label,
                                            dist=dist)
            end_time = time.time()

            # Print outputs
            print(
                str(datetime.now()) + ": Number of searchlights run: " +
                str(len(sl_result[mask == 1])))
            print(
                str(datetime.now()) +
                ': Total searchlight duration (including start up time): %.2f min'
                % ((end_time - begin_time) / 60))
            # separate values
            for i, k in enumerate(model_keys):
                out_data_dict[k] = deepcopy(sl_result)
                for x in range(sl_result.shape[0]):
                    for y in range(sl_result.shape[1]):
                        for z in range(sl_result.shape[2]):
                            val = sl_result[x, y, z]
                            if val is None:
                                out_val = 0.
                            else:
                                out_val = val[i]
                            out_data_dict[k][x, y, z] = out_val

                out_data_dict[k] = out_data_dict[k].astype('double')

            # unindent if saving images in parcellation (in addition to csv)
            # save images
            for k in out_data_dict.keys():
                fname = out_fname % (sub, corr_label, dist, sub, task,
                                     corr_label, parc_label, val_label, k)
                save_nii(out_data_dict[k], ref_img, fname)
        # add to output array
        out_tasks[task] = out_data_dict
        out_tasks['ref_img'] = ref_img
    return out_tasks
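
`run_rsa_searchlight` is project code not shown in this listing; a plausible sketch of the Spearman ('corr') path, with hypothetical internals (the kernel returns one value per model, matching the unpacking loop above):

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import rankdata

def run_rsa_searchlight(sl, model_rdms_mat, corr='corr',
                        val_label='r', dist='correlation'):
    # kernel: rank-correlate the sphere's neural RDM with each model RDM
    def rsa_kernel(l, msk, myrad, bcast_var):
        patterns = l[0][msk, :].T            # (conditions, voxels)
        neural_rank = rankdata(pdist(patterns, dist))
        vals = [np.corrcoef(neural_rank,
                            rankdata(model_rdms_mat[:, m]))[0, 1]
                for m in range(model_rdms_mat.shape[1])]
        return np.array(vals)                # one Spearman r per model
    return sl.run_searchlight(rsa_kernel)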
Example #24
    return z


for i in range(len(subjs)):
    # Load functional data and mask data
    data1 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder1.nii')
    data2 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder2.nii')
    data1 = data1.get_data()
    data2 = data2.get_data()

    np.seterr(divide='ignore', invalid='ignore')

    # Create and run searchlight
    sl = Searchlight(sl_rad=1, max_blk_edge=5)
    sl.distribute([data1, data2], mask_img)
    sl.broadcast(None)
    print('Running Searchlight...')
    global_outputs = sl.run_searchlight(corr2_coeff)
    global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
global_outputs = np.array(global_outputs_avg, dtype=float)
global_nonans = global_outputs[np.not_equal(global_outputs, None)]
global_nonans = np.reshape(global_nonans, (91, 109, 91))
min1 = np.min(global_nonans[~np.isnan(global_nonans)])
max1 = np.max(global_nonans[~np.isnan(global_nonans)])
Example #25

def test_instantiate():
    sl = Searchlight(sl_rad=5, max_blk_edge=10)
    assert sl
Example #26
                    i - (kernel_dim / 2), j - (kernel_dim / 2),
                    k - (kernel_dim / 2)
                ])
                kernel[i, j, k] = np.exp(-np.dot(arr.T, arr))
    kernel = kernel / np.sum(kernel)

    for (idx, l) in enumerate(labels):
        if l:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] += kernel * weight
        else:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5, shape=Diamond)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    import sklearn.svm
    import sklearn.model_selection
    classifier = sklearn.svm.SVC()
    data = l[0][msk, :].T
    return np.mean(
        sklearn.model_selection.cross_val_score(classifier, data,
                                                bcast_var, n_jobs=1))
Example #27
                    k - (kernel_dim / 2)
                ])
                kernel[i, j, k] = np.exp(-np.dot(arr.T, arr))
    kernel = kernel / np.sum(kernel)

    for (idx, l) in enumerate(labels):
        if l:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] += kernel * weight
        else:
            data[pt[0]:pt[0] + kernel_dim, pt[1]:pt[1] + kernel_dim,
                 pt[2]:pt[2] + kernel_dim, idx] -= kernel * weight

# Create searchlight object
sl = Searchlight(sl_rad=1,
                 max_blk_edge=5,
                 shape=Diamond,
                 min_active_voxels_proportion=0)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    import sklearn.svm
    import sklearn.model_selection
    classifier = sklearn.svm.SVC()
    data = l[0][msk, :].T
    return np.mean(
        sklearn.model_selection.cross_val_score(classifier, data,
                                                bcast_var, n_jobs=1))