Example #1
def test_searchlight_with_diamond():
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
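The kernel diamond_sfn passed to run_searchlight above is not shown in this snippet. A minimal sketch of a kernel consistent with the assertions in this test (an assumption for illustration, not necessarily the package's actual helper): the kernel receives the local data list, the local boolean mask cube, the radius, and the broadcast variable, and returns 1.0 only when the local mask covers the full Diamond(3) footprint, which happens only at the diamond's centre voxel.

import numpy as np
from brainiak.searchlight.searchlight import Diamond

def diamond_sfn(l, msk, myrad, bcast_var):
    # msk is the (7, 7, 7) boolean cube around the centre voxel (sl_rad=3)
    if np.all(msk[Diamond(3).mask_]):
        return 1.0
    return None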
Example #2
    def voxel_test(data, mask, max_blk_edge, rad):

        comm = MPI.COMM_WORLD
        rank = comm.rank

        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(MaskRadBcast(mask, rad))
        global_outputs = sl.run_searchlight(voxel_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2]))
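MaskRadBcast and voxel_test_sfn are not shown in this helper. A plausible minimal sketch (assumed for illustration; the real test helpers may differ): MaskRadBcast simply bundles the full mask with the radius for broadcasting, and the kernel reads the centre voxel's stored pattern, whose first three entries are that voxel's own coordinates, and returns them so the final assertion can compare each output against its location.

from collections import namedtuple

# Assumed container pairing the full-volume mask with the searchlight radius
MaskRadBcast = namedtuple('MaskRadBcast', ['mask', 'rad'])

def voxel_test_sfn(l, msk, myrad, bcast_var):
    # bcast_var carries the full mask and the radius (unused in this sketch).
    # Each voxel stores np.array([d1, d2, d3, tr]), so the centre voxel's
    # first three entries are its global coordinates.
    midpt = l[0][myrad, myrad, myrad, 0]
    return (int(midpt[0]), int(midpt[1]), int(midpt[2]))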
Example #3
    def voxel_test(data, mask, max_blk_edge, rad):

        comm = MPI.COMM_WORLD
        rank = comm.rank

        (dim0, dim1, dim2) = mask.shape

        # Initialize dataset with known pattern
        for subj in data:
            if subj is not None:
                for tr in range(subj.shape[3]):
                    for d1 in range(dim0):
                        for d2 in range(dim1):
                            for d3 in range(dim2):
                                subj[d1, d2, d3, tr] = np.array(
                                    [d1, d2, d3, tr])

        sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
        sl.distribute(data, mask)
        sl.broadcast(MaskRadBcast(mask, rad))
        global_outputs = sl.run_searchlight(voxel_test_sfn)

        if rank == 0:
            for d0 in range(rad, global_outputs.shape[0]-rad):
                for d1 in range(rad, global_outputs.shape[1]-rad):
                    for d2 in range(rad, global_outputs.shape[2]-rad):
                        if mask[d0, d1, d2]:
                            assert np.array_equal(
                                np.array(global_outputs[d0, d1, d2]),
                                np.array([d0, d1, d2]))
Example #4
def test_searchlight_with_diamond():
    sl = Searchlight(sl_rad=3, shape=Diamond)
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    dim0, dim1, dim2 = (50, 50, 50)
    ntr = 30
    nsubj = 3
    mask = np.zeros((dim0, dim1, dim2), dtype=bool)
    data = [np.empty((dim0, dim1, dim2, ntr), dtype=object)
            if i % size == rank
            else None
            for i in range(0, nsubj)]

    # Put a spot in the mask
    mask[10:17, 10:17, 10:17] = Diamond(3).mask_

    sl.distribute(data, mask)
    global_outputs = sl.run_searchlight(diamond_sfn)

    if rank == 0:
        assert global_outputs[13, 13, 13] == 1.0
        global_outputs[13, 13, 13] = None

        for i in range(global_outputs.shape[0]):
            for j in range(global_outputs.shape[1]):
                for k in range(global_outputs.shape[2]):
                    assert global_outputs[i, j, k] is None
Example #5
def run_anal(mask_path: str, bold_path: str, sl_rad: float, max_blk_edge: int,
             pool_size: int, nn_paths: list, part: int, save_dir: str,
             rsa) -> bool:
    """
    DOCS: TODO:
 
    """
    # extract the subject string from the bold path
    sub = bold_path.split("/")[-1].split(".")[0].split("_")[-1]
    print("starting run {}".format(sub))

    # this is to account for if we're using part 1 of the movie clip or part 2
    if part == 1:
        tr_start_idx = 0
        tr_end_idx = 946
    elif part == 2:
        tr_start_idx = 946
        tr_end_idx = 1976
    else:
        raise ValueError("part must be 1 or 2")

    # get the correct correlation matrices for alexnet
    nncor_files = nnpart_files(part, nn_paths)

    # nibabel load, this loads an object but not the numpy arrays of the data
    mask_obj = nib.load(mask_path)
    bold_obj = nib.load(bold_path)

    # converting to numpy arrays
    data = np.array(bold_obj.dataobj)[:, :, :, tr_start_idx:tr_end_idx]
    mask = np.array(mask_obj.dataobj).astype(int)

    sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)
    sl.distribute([data], mask)

    counter = 0
    for nn_path in nncor_files:
        # this is alexnet
        bcvar = np.load(nn_path)
        save_name = nn_path.split("/")[-1].split(".")[0]

        # broadcast the NN matrix
        sl.broadcast(bcvar)

        # now the rsa part
        sl_result = sl.run_searchlight(rsa, pool_size=pool_size)
        sl_result[sl_result == None] = 0
        sl_data = np.array(sl_result, dtype=float)
        sl_img = nib.Nifti1Image(sl_data, affine=mask_obj.affine)

        # save the result to file as a nifti image
        save_path = os.path.join(save_dir, save_name + "_" + sub)
        sl_img.to_filename(save_path + ".nii.gz")

        counter += 1

    if counter == len(nncor_files):
        print("\nCOMPLETE SUCCESSFULLY\n")
        return True

    print("\nSOMETHING FAILED\n")
    return False
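The rsa kernel is passed into run_anal as an argument and is not defined in this snippet. A minimal sketch of a representational-similarity kernel of the kind this driver expects (an assumption; the original may use a different similarity measure, and it assumes the broadcast network matrix is timepoint-by-timepoint like the neural one): build the timepoint-by-timepoint correlation matrix of the voxels inside the searchlight and rank-correlate its upper triangle with the broadcast matrix.

import numpy as np
from scipy.stats import spearmanr

def rsa(l, msk, myrad, bcast_var):
    # BOLD inside the searchlight, shape (n_trs, n_voxels)
    bold = l[0][msk, :].T
    # Neural similarity between timepoints within this searchlight
    neural_sim = np.corrcoef(bold)
    # Rank-correlate the upper triangles of the neural and network matrices
    iu = np.triu_indices_from(neural_sim, k=1)
    rho, _ = spearmanr(neural_sim[iu], bcast_var[iu])
    return rho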
Example #6
def predict(sample, models, mask):
    test_searchlight = Searchlight(sl_rad=2, shape=Diamond)
    test_searchlight.distribute([sample, models[..., np.newaxis]], mask)
    test_classes = test_searchlight.run_searchlight(test_models)
    test_display = np.empty(test_classes.shape, dtype=float)
    test_display[test_classes == "face"] = 0.5
    test_display[test_classes == "house"] = 1
    test_display[test_classes == None] = np.nan
    return test_display
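The test_models kernel used above is defined elsewhere. A hedged sketch of a kernel with the behaviour predict relies on (the shapes assumed here are guesses: a single test volume with a trailing time axis of one, and one template volume per class stacked on the last axis of the second dataset): correlate the test pattern inside the searchlight with each class template and return the better-matching label.

import numpy as np

def test_models(l, msk, myrad, bcast_var):
    # l[0]: test sample, l[1]: class templates (assumed order: face, house)
    test_pattern = l[0][msk].ravel()
    templates = l[1][msk]                     # (n_voxels_in_searchlight, 2)
    r_face = np.corrcoef(test_pattern, templates[:, 0])[0, 1]
    r_house = np.corrcoef(test_pattern, templates[:, 1])[0, 1]
    return "face" if r_face >= r_house else "house"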
Example #7
def voxel_test(data, mask, max_blk_edge, rad):

    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size

    nsubj = len(data)
    (dim0, dim1, dim2) = mask.shape

    # Initialize dataset with known pattern
    for subj in data:
        if subj is not None:
            for tr in range(subj.shape[3]):
                for d1 in range(dim0):
                    for d2 in range(dim1):
                        for d3 in range(dim2):
                            subj[d1, d2, d3, tr] = np.array([d1, d2, d3, tr])

    def sfn(l, msk, myrad, bcast_var):
        # Check each point
        for subj in l:
            for _tr in range(subj.shape[3]):
                tr = subj[:, :, :, _tr]
                midpt = tr[rad, rad, rad]
                for d0 in range(tr.shape[0]):
                    for d1 in range(tr.shape[1]):
                        for d2 in range(tr.shape[2]):
                            assert np.array_equal(
                                tr[d0, d1, d2] - midpt,
                                np.array([d0 - rad, d1 - rad, d2 - rad, 0]))

        # Determine midpoint
        midpt = l[0][rad, rad, rad, 0]
        midpt = (midpt[0], midpt[1], midpt[2])

        for d0 in range(msk.shape[0]):
            for d1 in range(msk.shape[1]):
                for d2 in range(msk.shape[2]):
                    pt = (midpt[0] - rad + d0,
                          midpt[1] - rad + d1,
                          midpt[2] - rad + d2)
                    assert bcast_var[pt] == msk[d0, d1, d2]

        # Return midpoint
        return midpt

    sl = Searchlight(sl_rad=rad, max_blk_edge=max_blk_edge)
    sl.distribute(data, mask)
    sl.broadcast(mask)
    global_outputs = sl.run_searchlight(sfn)

    if rank == 0:
        for d0 in range(rad, global_outputs.shape[0] - rad):
            for d1 in range(rad, global_outputs.shape[1] - rad):
                for d2 in range(rad, global_outputs.shape[2] - rad):
                    if mask[d0, d1, d2]:
                        assert np.array_equal(
                            np.array(global_outputs[d0, d1, d2]),
                            np.array([d0, d1, d2]))
Example #8
    # Load functional data and mask data
    data1 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder1.nii')
    data2 = load_img(datadir + 'subjects/' + subjs[i] +
                     '/data/avg_reorder2.nii')
    data1 = data1.get_data()
    data2 = data2.get_data()

    np.seterr(divide='ignore', invalid='ignore')

    # Create and run searchlight
    sl = Searchlight(sl_rad=1, max_blk_edge=5)
    sl.distribute([data1, data2], mask_img)
    sl.broadcast(None)
    print('Running Searchlight...')
    global_outputs = sl.run_searchlight(corr2_coeff)
    global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg, None)])
global_outputs = np.array(global_outputs_avg, dtype=float)
global_nonans = global_outputs[np.not_equal(global_outputs, None)]
global_nonans = np.reshape(global_nonans, (91, 109, 91))
min1 = np.min(global_nonans[~np.isnan(global_nonans)])
max1 = np.max(global_nonans[~np.isnan(global_nonans)])
img = nib.Nifti1Image(global_nonans, np.eye(4))
img.header['cal_min'] = min1
img.header['cal_max'] = max1
nib.save(img, 'classical_within_permuted.nii.gz')
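The corr2_coeff kernel is defined elsewhere in this script. A hedged sketch of a two-run searchlight kernel of this kind (the statistic in the original may differ): correlate the multivoxel pattern at each timepoint of run 1 with the pattern at the corresponding timepoint of run 2 and return the mean correlation.

import numpy as np

def corr2_coeff(l, msk, myrad, bcast_var):
    # Two runs, each reshaped to (timepoints, voxels-in-searchlight)
    run1 = l[0][msk, :].T
    run2 = l[1][msk, :].T
    # Pattern correlation between corresponding timepoints of the two runs
    rs = [np.corrcoef(a, b)[0, 1] for a, b in zip(run1, run2)]
    return float(np.mean(rs))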
Example #9
subjectName = brain_file[sub_idx:]
output_name = searchlights_path + subjectName

# Make the mask
mask = node_brain != 0
mask = mask[:, :, :, 0]

# Create searchlight object
sl = Searchlight(sl_rad=1, max_blk_edge=5)

# Distribute data to processes
sl.distribute([node_brain], mask)
sl.broadcast(None)

# Run clusters
sl_outputs = sl.run_searchlight(rsa_sl, pool_size=1)

if rank == 0:

    # Convert the output into what can be used
    sl_outputs = sl_outputs.astype('double')
    sl_outputs[np.isnan(sl_outputs)] = 0

    # Save the volume
    sl_nii = nibabel.Nifti1Image(sl_outputs, nii.affine)
    hdr = sl_nii.header
    hdr.set_zooms((dimsize[0], dimsize[1], dimsize[2]))
    nibabel.save(sl_nii, output_name)  # Save


Example #10
# say some things about the mask.
print('mask dimensions: {}'.format(mask.shape))
print('number of voxels in mask: {}'.format(np.sum(mask)))

sl_rad = radius
max_blk_edge = 5
pool_size = 1

# Create the searchlight object
sl = Searchlight(sl_rad=sl_rad, max_blk_edge=max_blk_edge)

# Distribute the information to the searchlights (preparing it to run)
sl.distribute(data, mask)
sl.broadcast(bcvar)
slstart = time.time()
sl_result = sl.run_searchlight(Class)

#result = Class(data, np.zeros((5,5,5)), 2, bcvar)

SL = time.time() - slstart
tot = time.time() - starttime
print('total time: {}, searchlight time: {}'.format(tot, SL))
'''
# Only save the data if this is the first core
if rank == 0:
    output = ('{}/{}_r{}.npy'.format(outloc, subject, radius))
    #np.save(output, sl_result)

    sl_result = sl_result.astype('double')
    sl_result[np.isnan(sl_result)] = 0  # If there are nans we want this
Example #11
        labels_train = np.array(labels_train)

        # Train classifier
        clf = SVC(kernel='linear', C=1)
        clf.fit(bolddata_sl_train, labels_train)

        # Test classifier
        score = clf.score(bolddata_sl_test, labels_test)
        accuracy.append(score)

    return accuracy


# Run the searchlight analysis
print("Begin SearchLight in rank %s\n" % rank)
all_sl_result = sl.run_searchlight(calc_svm, pool_size=pool_size)
print("End SearchLight in rank %s\n" % rank)

# Only save the data if this is the first core
if rank == 0:
    all_sl_result = all_sl_result[mask == 1]
    all_sl_result = [num_subj * [0] if not n else n
                     for n in all_sl_result]  # replace all None
    # The average result
    avg_vol = np.zeros((mask.shape[0], mask.shape[1], mask.shape[2]))

    # Loop over subjects
    for sub_id in range(1, num_subj + 1):
        sl_result = [r[sub_id - 1] for r in all_sl_result]
        # reshape
        result_vol = np.zeros((mask.shape[0], mask.shape[1], mask.shape[2]))
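The top of calc_svm is cut off in the snippet above. A self-contained sketch of a comparable kernel (assumptions: the broadcast variable carries labels and train/test indices for each subject; the real kernel's cross-validation scheme may differ) that, like the original, returns one accuracy per subject:

import numpy as np
from sklearn.svm import SVC

def calc_svm(data, sl_mask, myrad, bcast_var):
    # One cross-validated accuracy per subject in this searchlight
    accuracy = []
    for subj_data, (labels, train_idx, test_idx) in zip(data, bcast_var):
        bold = subj_data[sl_mask == 1].T          # (n_trs, n_voxels)
        labels = np.array(labels)
        clf = SVC(kernel='linear', C=1)
        clf.fit(bold[train_idx], labels[train_idx])
        accuracy.append(clf.score(bold[test_idx], labels[test_idx]))
    return accuracy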
Example #12
    srm = SRM(bcast_var[0], bcast_var[1])
    srm.fit(train_data)
    # transform test data
    shared_data = srm.transform(test_data)
    for s in range(len(l)):
        shared_data[s] = np.nan_to_num(
            stats.zscore(shared_data[s], axis=1, ddof=1))
    # experiment
    accu = timesegmentmatching_accuracy(shared_data, 6)

    return np.mean(accu), stats.sem(
        accu)  # multiple outputs will be saved as tuples


# Run searchlight
global_outputs = sl.run_searchlight(sfn)  # output is in shape (dim1,dim2,dim3)

# Unpack and save result
if rank == 0:
    acc = np.zeros((dim1, dim2, dim3))
    se = np.zeros((dim1, dim2, dim3))
    for i in range(dim1):
        for j in range(dim2):
            for k in range(dim3):
                if global_outputs[i][j][k] is not None:
                    acc[i][j][k] = global_outputs[i][j][k][0]
                    se[i][j][k] = global_outputs[i][j][k][1]
    print(acc)
    np.savez_compressed('data/sherlock/searchlight_srm_tsm_acc.npz',
                        acc=acc,
                        se=se)
Example #13
                 min_active_voxels_proportion=0)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)

# Define voxel function
def sfn(l, msk, myrad, bcast_var):
  import sklearn.svm
  import sklearn.model_selection
  classifier = sklearn.svm.SVC()
  data = l[0][msk,:].T
  return np.mean(sklearn.model_selection.cross_val_score(classifier, data, bcast_var,n_jobs=1))

# Run searchlight
global_outputs = sl.run_searchlight(sfn)

# Visualize result
if rank == 0:
  print(global_outputs)
  global_outputs = np.array(global_outputs, dtype=float)
  import matplotlib.pyplot as plt
  for (cnt, img) in enumerate(global_outputs):
    plt.imshow(img,cmap='hot',vmin=0,vmax=1)
    plt.savefig('img' + str(cnt) + '.png')
    plt.clf()




Example #14
for i in range(len(subjs)):
    data_run2 = np.nan_to_num(
        load_img(datadir + 'subjects/' + subjs[i] +
                 '/data/avg_reorder2.nii').get_data())
    runs.append(data_run2)

print("All Subjects Loaded")

#np.seterr(divide='ignore',invalid='ignore')

# Create and run searchlight
sl = Searchlight(sl_rad=5, max_blk_edge=5)
sl.distribute(runs, mask_img)
sl.broadcast([nfeature, niter, loo_idx, exclude_songs])
print('Running Searchlight...')
global_outputs = sl.run_searchlight(corr2_coeff, pool_size=1)
global_outputs_all[:, :, :, i] = global_outputs

# Plot and save searchlight results
global_outputs_avg = np.mean(global_outputs_all, 3)
#maxval = np.max(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
#minval = np.min(global_outputs_avg[np.not_equal(global_outputs_avg,None)])
global_outputs_avg = np.array(global_outputs_avg, dtype=float)
#global_nonans = global_outputs_avg[np.not_equal(global_outputs_avg,None)]
#global_nonans = np.reshape(global_nonans,(91,109,91))
#img = nib.Nifti1Image(global_nonans, np.eye(4))
#img.header['cal_min'] = minval
#img.header['cal_max'] = maxval
#nib.save(img,datadir + 'prototype/link/scripts/data/searchlight_output/janice_srm_results/loo_' + subjs[loo_idx])
np.save(
    datadir +
Example #15
    classJazz_between = np.mean(corrAB[8:16,0:8])
    jazzClass_between = np.mean(corrAB[0:8,8:16])
    within_genre = np.mean([classical_within,jazz_within])
    between_genre = np.mean([classJazz_between,jazzClass_between])
    diff = within_genre - between_genre
    return diff

comm.Barrier()
begin_time = time.time()
comm.Barrier()

# Create and run searchlight
sl = Searchlight(sl_rad=1,max_blk_edge=5)
sl.distribute([data1,data2],mask_img)
sl.broadcast(None)
global_outputs = sl.run_searchlight(corr2_coeff)

comm.Barrier()
end_time = time.time()
comm.Barrier()

# Plot searchlight results
if rank == 0:
    print('Searchlight Done: ', end_time - begin_time)
    maxval = np.max(global_outputs[np.not_equal(global_outputs,None)])
    minval = np.min(global_outputs[np.not_equal(global_outputs,None)])
    global_outputs = np.array(global_outputs, dtype=float)
    print(global_outputs)

    # Save searchlight images
    out_dir = "searchlight_images"
Example #16
    '/idata/cdl/data/fMRI/andy/sherlock/data/sherlock_movie_s%s_10000.nii.gz' %
    str(subid)).get_data()

mask = data[:, :, :, 0] != 10000
model = np.load('/idata/cdl/data/fMRI/andy/sherlock/data/movie_corrmat.npy')
params = dict(sl_rad=5)

# Create searchlight object
sl = Searchlight(**params)

# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(model)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    from scipy.spatial.distance import cdist
    from scipy.stats import pearsonr
    b = l[0][msk, :].T
    c = 1 - cdist(b, b, 'correlation').ravel()
    return pearsonr(c, bcast_var.ravel())[0]


# Run searchlight
result = sl.run_searchlight(sfn)

np.save(
    '/idata/cdl/data/fMRI/andy/sherlock/analyses/searchlight_movie/s%s' %
    str(subid), result)
Example #17
# Distribute data to processes
sl.distribute([data], mask)
sl.broadcast(labels)


# Define voxel function
def sfn(l, msk, myrad, bcast_var):
    import sklearn.svm
    import sklearn.model_selection
    classifier = sklearn.svm.SVC()
    data = l[0][msk, :].T
    return np.mean(
        sklearn.model_selection.cross_val_score(classifier,
                                                data,
                                                bcast_var,
                                                n_jobs=1))


# Run searchlight
global_outputs = sl.run_searchlight(sfn)

# Visualize result
if rank == 0:
    print(global_outputs)
    global_outputs = np.array(global_outputs, dtype=float)
    import matplotlib.pyplot as plt
    for (cnt, img) in enumerate(global_outputs):
        plt.imshow(img, cmap='hot', vmin=0, vmax=1)
        plt.savefig('img' + str(cnt) + '.png')
        plt.clf()
Example #18
    d1,d2,d3,ntr = l[0].shape
    nvx = d1*d2*d3
    for s in l:
        train_data.append(np.reshape(s[:,:,:,:int(ntr/2)],(nvx,int(ntr/2))))
        test_data.append(np.reshape(s[:,:,:,int(ntr/2):],(nvx,ntr-int(ntr/2))))
    # train an srm model 
    srm = SRM(bcast_var[0],bcast_var[1])
    srm.fit(train_data)
    # transform test data
    shared_data = srm.transform(test_data)
    for s in range(len(l)):
        shared_data[s] = np.nan_to_num(stats.zscore(shared_data[s],axis=1,ddof=1))
    # run experiment
    accu = time_segment_matching_accuracy(shared_data)

    # return: can also return several values. In that case, the final output will be 
    # a 3D array of tuples
    return np.mean(accu) 

# Run searchlight
acc = sl.run_searchlight(sfn) # output is a 3D array in shape (dim1,dim2,dim3)

# save result
if rank == 0:
    print (acc)
    np.savez_compressed('data/searchlight_srm_tsm_acc.npz',acc=acc)
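time_segment_matching_accuracy is defined elsewhere. A simplified sketch of the standard time-segment-matching experiment it refers to (assumptions: the input is a list of (features, timepoints) arrays, one per subject, and overlapping neighbour segments are not excluded as stricter implementations do): for each held-out subject, every window of win_size TRs must correlate more strongly with the group average of the same window than with any other window.

import numpy as np

def time_segment_matching_accuracy(data, win_size=6):
    nsubj = len(data)
    nfeat, ntr = data[0].shape
    nseg = ntr - win_size
    accu = np.zeros(nsubj)
    for s in range(nsubj):
        # Group average of all other subjects
        others = np.mean([data[m] for m in range(nsubj) if m != s], axis=0)
        # Unroll every window of win_size TRs into one vector per segment
        test = np.stack([data[s][:, i:i + win_size].ravel() for i in range(nseg)])
        ref = np.stack([others[:, i:i + win_size].ravel() for i in range(nseg)])
        # Cross-correlation block between test segments and reference segments
        corr = np.corrcoef(test, ref)[:nseg, nseg:]
        accu[s] = np.mean(np.argmax(corr, axis=1) == np.arange(nseg))
    return accu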