def test_getRegressor():
    """Compare getRegressor's convolved gain/loss regressors and drift
    terms against a hand-rolled reference convolution.

    The reference convolution follows the method described in detail at:
    http://practical-neuroimaging.github.io/on_convolution.html
    Also checks the ``standard=True`` branch: the convolved signals must
    match the default branch and the drift terms are not returned.
    """
    # Set up and load in subject 1 run 1 test fixtures.
    TR = 2
    n_vols = 240
    tr_times = np.arange(0, 30, TR)
    hrf_signal = hrf(tr_times)
    behav_cond = pathtotest + 'test_behavdata.txt'
    task_cond1 = pathtotest + 'test_cond001.txt'
    task_cond2 = pathtotest + 'test_cond002.txt'
    task_cond3 = pathtotest + 'test_cond003.txt'
    task_cond4 = pathtotest + 'test_cond004.txt'
    parameters = merge_cond(behav_cond, task_cond1, task_cond2,
                            task_cond3, task_cond4)
    neural_signal = events2neural_extend(parameters, TR, n_vols)
    gain_signal = neural_signal[:, 1]
    loss_signal = neural_signal[:, 2]
    N = neural_signal.shape[0]  # length of the neural signal
    M = len(hrf_signal)         # length of the HRF
    # Reference convolution: add a shifted, scaled copy of the HRF at
    # every time point, then trim the M-1 tail samples.
    convolved_gain = np.zeros(N + M - 1)
    convolved_loss = np.zeros(N + M - 1)
    for i in range(N):
        input_value_g = gain_signal[i]
        input_value_l = loss_signal[i]
        convolved_gain[i:i + M] += hrf_signal * input_value_g
        convolved_loss[i:i + M] += hrf_signal * input_value_l
    n_to_remove = M - 1
    convolved_gain = convolved_gain[:-n_to_remove]
    convolved_loss = convolved_loss[:-n_to_remove]
    # Linear and mean-centred quadratic drift regressors.
    lin_dr = np.linspace(-1, 1, n_vols)
    quad_dr = lin_dr ** 2
    quad_dr -= np.mean(quad_dr)
    # ----------------------------------------------------------------- #
    # Function under test, default and standard-template branches.
    myconv_gain, myconv_loss, my_lin, my_quad = getRegressor(
        TR, n_vols, hrf_signal, neural_signal)
    myconv_gain1, myconv_loss1, my_lin1, my_quad1 = getRegressor(
        TR, n_vols, hrf_signal, neural_signal, standard=True)
    # ----------------------------------------------------------------- #
    # BUG FIX: the original used ``max(abs(diff) < .0001)``, which is
    # True as soon as ANY element is within tolerance.  Every element
    # must be within tolerance, i.e. np.all(...).
    assert np.all(np.abs(convolved_gain - myconv_gain) < .0001)
    assert np.all(np.abs(convolved_loss - myconv_loss) < .0001)
    assert np.all(np.abs(quad_dr - my_quad) < .0001)
    assert np.all(np.abs(lin_dr - my_lin) < .0001)
    # Check standard template: same convolved signals, no drift terms.
    assert_allclose(myconv_gain, myconv_gain1)
    assert_allclose(myconv_loss, myconv_loss1)
    assert my_lin1 is None
    assert my_quad1 is None
def test_getRegressor_basic():
    """Basic check of getRegressor (no standard-template branch).

    NOTE(review): renamed from ``test_getRegressor`` — this module
    defines two tests with that name, so Python's last binding wins and
    pytest silently ran only one of them.  Renaming un-shadows both.

    The reference convolution follows the method described in detail at:
    http://practical-neuroimaging.github.io/on_convolution.html
    """
    # Set up and load in subject 1 run 1 test fixtures.
    TR = 2
    n_vols = 240
    tr_times = np.arange(0, 30, TR)
    hrf_signal = hrf(tr_times)
    behav_cond = pathtotest + 'test_behavdata.txt'
    task_cond1 = pathtotest + 'test_cond001.txt'
    task_cond2 = pathtotest + 'test_cond002.txt'
    task_cond3 = pathtotest + 'test_cond003.txt'
    task_cond4 = pathtotest + 'test_cond004.txt'
    parameters = merge_cond(behav_cond, task_cond1, task_cond2,
                            task_cond3, task_cond4)
    neural_signal = events2neural_extend(parameters, TR, n_vols)
    gain_signal = neural_signal[:, 1]
    loss_signal = neural_signal[:, 2]
    N = neural_signal.shape[0]  # length of the neural signal
    M = len(hrf_signal)         # length of the HRF
    # Reference convolution: add a shifted, scaled copy of the HRF at
    # every time point, then trim the M-1 tail samples.
    convolved_gain = np.zeros(N + M - 1)
    convolved_loss = np.zeros(N + M - 1)
    for i in range(N):
        input_value_g = gain_signal[i]
        input_value_l = loss_signal[i]
        convolved_gain[i:i + M] += hrf_signal * input_value_g
        convolved_loss[i:i + M] += hrf_signal * input_value_l
    n_to_remove = M - 1
    convolved_gain = convolved_gain[:-n_to_remove]
    convolved_loss = convolved_loss[:-n_to_remove]
    # Linear and mean-centred quadratic drift regressors.
    linear_dr = np.linspace(-1, 1, n_vols)
    quadratic_dr = linear_dr ** 2
    quadratic_dr -= np.mean(quadratic_dr)
    # ----------------------------------------------------------------- #
    # Function under test.
    myconv_gain, myconv_loss, my_lin, my_quad = getRegressor(
        TR, n_vols, hrf_signal, neural_signal)
    # ----------------------------------------------------------------- #
    # BUG FIX: the original used ``max(abs(diff) < .0001)``, which is
    # True as soon as ANY element is within tolerance.  Every element
    # must be within tolerance, i.e. np.all(...).
    assert np.all(np.abs(convolved_gain - myconv_gain) < .0001)
    assert np.all(np.abs(convolved_loss - myconv_loss) < .0001)
    assert np.all(np.abs(quadratic_dr - my_quad) < .0001)
    assert np.all(np.abs(linear_dr - my_lin) < .0001)
def test_mergecond():
    """merge_cond should lay out its columns as: cond001 (3 cols),
    last column of cond002/003/004 (cols 3-5), then the behavioural
    data minus its first column (cols 6+)."""
    behav_path = pathtotest + 'test_behavdata.txt'
    cond_paths = (pathtotest + 'test_cond001.txt',
                  pathtotest + 'test_cond002.txt',
                  pathtotest + 'test_cond003.txt',
                  pathtotest + 'test_cond004.txt')
    # Function under test.
    merged = merge_cond(behav_path, *cond_paths)
    # Load the same fixtures directly for comparison (skip the
    # behavioural file's header row).
    behav = np.loadtxt(behav_path, skiprows=1)
    cond1, cond2, cond3, cond4 = (np.loadtxt(p) for p in cond_paths)
    # Column-by-column agreement with the raw files.
    assert_allclose(behav[:, 1:], merged[:, 6:])
    assert_allclose(cond1, merged[:, :3])
    assert_allclose(cond2[:, -1], merged[:, 3])
    assert_allclose(cond3[:, -1], merged[:, 4])
    assert_allclose(cond4[:, -1], merged[:, 5])
pathtodata + "ds005/sub0" + str(i).zfill(2) + "/model/model001/onsets/task001_run00" + ` j ` + "/cond003.txt" ) task_cond4 = ( pathtodata + "ds005/sub0" + str(i).zfill(2) + "/model/model001/onsets/task001_run00" + ` j ` + "/cond004.txt" ) parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4) neural_prediction = events2neural_extend(parameters, TR, n_vols) gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction) data, gain, loss, linear_dr, quad_dr = deleteOutliers( data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out ) data_full = np.concatenate((data_full, data), axis=3) gain_full = np.concatenate((gain_full, gain), axis=0) loss_full = np.concatenate((loss_full, loss), axis=0) linear_full = np.concatenate((linear_full, linear_dr), axis=0) quad_full = np.concatenate((quad_full, quad_dr), axis=0) d_shape = data_full.shape[:3] mea = calcMRSS(data_full, gain_full, loss_full, linear_full, quad_full) X, Y, beta = calcBeta(data_full, gain_full, loss_full, linear_full, quad_full) # ------------------------------------------------------------------------------# # Take the 40,000 voxel
loss_full = np.empty([0,]) linear_full = np.empty([0,]) quad_full = np.empty([0,]) run_count = np.zeros(3) for j in range(1,4): direct='ds005/sub0'+str(i).zfill(2)+'/BOLD/task001_run00'+`j`+'/' boldname = pathtofolder + direct+'bold.nii.gz' img=nib.load(boldname) data=img.get_data() run = j behav_cond = pathtofolder + 'ds005/sub0'+str(i).zfill(2)+'/behav/task001_run00'+`j`+'/behavdata.txt' task_cond1 = pathtofolder + 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond001.txt' task_cond2 = pathtofolder + 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond002.txt' task_cond3 = pathtofolder + 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond003.txt' task_cond4 = pathtofolder + 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond004.txt' parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4) neural_prediction = events2neural_extend(parameters,TR, n_vols) gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction) data, gain, loss, linear_dr, quad_dr = deleteOutliers(data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out) run_count[j-1] = data.shape[3] ## dummy variable indicating the groups data_full = np.concatenate((data_full,data),axis=3) gain_full = np.concatenate((gain_full,gain),axis=0) loss_full = np.concatenate((loss_full,loss),axis=0) linear_full = np.concatenate((linear_full,linear_dr),axis=0) quad_full = np.concatenate((quad_full,quad_dr),axis=0) run_group = np.concatenate((np.repeat(1, run_count[0]), np.repeat(2, run_count[1]), np.repeat(3, run_count[2])), axis=0) thrshd = 400 ## set a threshold to idenfity the voxels inside the brain print "calculating parameters of subject "+str(i) beta = calcBetaLme(data_full, gain_full, loss_full, linear_full, quad_full, run_group, thrshd)