import numpy as np
from numpy.testing import assert_allclose
# NOTE: deleteOutliers is imported from the project's own module (import path not shown in this excerpt).


def test_deleteOutliers():
    # Create some test arrays/dictionaries
    t_data = np.arange(8).reshape((1, 1, 1, 8))
    t_gain = np.arange(8) + 2
    t_loss = np.arange(8) + 4
    t_lin = np.linspace(-1, 1, 8)
    t_quad = t_lin ** 2
    t_quad -= np.mean(t_quad)
    dvars_out = {"sub1run1": [2, 3], "sub2run2": [1, 2]}
    fd_out = {"sub1run1": [1, 2, 3], "sub2run2": [4, 5, 6]}
    # sub 1 run 1
    sub1 = 1
    run1 = 1
    # t_outliers = [1, 2, 3] (union of the dvars_out and fd_out entries for sub1run1)
    t_nonoutliers1 = [0, 4, 5, 6, 7]
    t_data1 = t_data[:, :, :, t_nonoutliers1]
    t_gain1 = t_gain[t_nonoutliers1]
    t_loss1 = t_loss[t_nonoutliers1]
    t_lin1 = t_lin[t_nonoutliers1]
    t_quad1 = t_quad[t_nonoutliers1]
    # --------------------------------------------------------------------------#
    # my function
    my_data1, my_gain1, my_loss1, my_lin1, my_quad1 = deleteOutliers(
        t_data, t_gain, t_loss, t_lin, t_quad, sub1, run1, dvars_out, fd_out
    )
    # --------------------------------------------------------------------------#
    # assert 1
    assert_allclose(my_data1, t_data1)
    assert_allclose(my_gain1, t_gain1)
    assert_allclose(my_loss1, t_loss1)
    assert_allclose(my_lin1, t_lin1)
    assert_allclose(my_quad1, t_quad1)

    # --------------------------------------------------------------------------#
    # sub 2 run 2
    sub2 = 2
    run2 = 2
    # t_outliers = [1,2,4,5,6]
    t_nonoutliers2 = [0, 3, 7]
    t_data2 = t_data[:, :, :, t_nonoutliers2]
    t_gain2 = t_gain[t_nonoutliers2]
    t_loss2 = t_loss[t_nonoutliers2]
    t_lin2 = t_lin[t_nonoutliers2]
    t_quad2 = t_quad[t_nonoutliers2]
    # --------------------------------------------------------------------------#
    # my function
    my_data2, my_gain2, my_loss2, my_lin2, my_quad2 = deleteOutliers(
        t_data, t_gain, t_loss, t_lin, t_quad, sub2, run2, dvars_out, fd_out
    )
    # --------------------------------------------------------------------------#
    # assert 2
    assert_allclose(my_data2, t_data2)
    assert_allclose(my_gain2, t_gain2)
    assert_allclose(my_loss2, t_loss2)
    assert_allclose(my_lin2, t_lin2)
    assert_allclose(my_quad2, t_quad2)
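
The test above pins down what deleteOutliers is expected to do: look up the outlier volume indices stored under the key 'sub<sub>run<run>' in both the DVARS and the framewise-displacement dictionaries, drop the union of those indices from the last axis of the 4-D data and from each regressor, and return the trimmed arrays. The sketch below is a hypothetical reference implementation inferred from those expectations; the key format, the union rule, and the name deleteOutliers_sketch are assumptions, not taken from the project source.

import numpy as np

def deleteOutliers_sketch(data, gain, loss, linear_dr, quad_dr, sub, run, dvars_out, fd_out):
    # Build the dictionary key the way the test fixtures are keyed, e.g. 'sub1run1'.
    key = 'sub' + str(sub) + 'run' + str(run)
    # Union of DVARS-flagged and FD-flagged volumes for this subject/run.
    outliers = sorted(set(dvars_out[key]) | set(fd_out[key]))
    # Remove the flagged volumes from the 4-D image (time is the last axis)
    # and the matching rows from every regressor so they stay aligned.
    data_clean = np.delete(data, outliers, axis=3)
    gain_clean = np.delete(gain, outliers, axis=0)
    loss_clean = np.delete(loss, outliers, axis=0)
    lin_clean = np.delete(linear_dr, outliers, axis=0)
    quad_clean = np.delete(quad_dr, outliers, axis=0)
    return data_clean, gain_clean, loss_clean, lin_clean, quad_clean

Applied to the fixtures above, this sketch reproduces the expected sub1run1 result: volumes 1, 2, and 3 are dropped, leaving indices [0, 4, 5, 6, 7].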
         + str(j)
         + "/cond003.txt"
     )
     task_cond4 = (
         pathtodata
         + "ds005/sub0"
         + str(i).zfill(2)
         + "/model/model001/onsets/task001_run00"
         + str(j)
         + "/cond004.txt"
     )
     parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4)
     neural_prediction = events2neural_extend(parameters, TR, n_vols)
     gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction)
     data, gain, loss, linear_dr, quad_dr = deleteOutliers(
         data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out
     )
     data_full = np.concatenate((data_full, data), axis=3)
     gain_full = np.concatenate((gain_full, gain), axis=0)
     loss_full = np.concatenate((loss_full, loss), axis=0)
     linear_full = np.concatenate((linear_full, linear_dr), axis=0)
     quad_full = np.concatenate((quad_full, quad_dr), axis=0)
 d_shape = data_full.shape[:3]
 mea = calcMRSS(data_full, gain_full, loss_full, linear_full, quad_full)
 X, Y, beta = calcBeta(data_full, gain_full, loss_full, linear_full, quad_full)
 # ------------------------------------------------------------------------------#
 # Take the 40,000th voxel as an example
 fitted = X.dot(beta[:, 40000])
 residuals = Y[:, 40000] - fitted
 qqplot(residuals, saveit=True)
 res_var(fitted, residuals, name="fitted", saveit=True)
Example #4
 run_count = np.zeros(3)
 for j in range(1, 4):
     direct = 'ds005/sub0' + str(i).zfill(2) + '/BOLD/task001_run00' + str(j) + '/'
     boldname = pathtofolder + direct + 'bold.nii.gz'
     img = nib.load(boldname)
     data = img.get_data()
     run = j
     behav_cond = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/behav/task001_run00' + str(j) + '/behavdata.txt'
     task_cond1 = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/model/model001/onsets/task001_run00' + str(j) + '/cond001.txt'
     task_cond2 = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/model/model001/onsets/task001_run00' + str(j) + '/cond002.txt'
     task_cond3 = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/model/model001/onsets/task001_run00' + str(j) + '/cond003.txt'
     task_cond4 = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/model/model001/onsets/task001_run00' + str(j) + '/cond004.txt'
     parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4)
     neural_prediction = events2neural_extend(parameters, TR, n_vols)
     gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction)
     data, gain, loss, linear_dr, quad_dr = deleteOutliers(data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out)
     run_count[j-1] = data.shape[3]     ## number of volumes kept in this run
     data_full = np.concatenate((data_full, data), axis=3)
     gain_full = np.concatenate((gain_full, gain), axis=0)
     loss_full = np.concatenate((loss_full, loss), axis=0)
     linear_full = np.concatenate((linear_full, linear_dr), axis=0)
     quad_full = np.concatenate((quad_full, quad_dr), axis=0)
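 # run_count collected the per-run volume counts above, so run_group labels every
 # remaining volume with its run number (1-3) as the grouping factor for calcBetaLme.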
     
 run_group = np.concatenate((np.repeat(1, run_count[0]), 
                             np.repeat(2, run_count[1]), np.repeat(3, run_count[2])), axis=0)
 thrshd = 400 ## set a threshold to identify the voxels inside the brain
 print("calculating parameters of subject " + str(i))
 beta = calcBetaLme(data_full, gain_full, loss_full, linear_full, quad_full, run_group, thrshd)
 sig_level = 0.05
 sig_gain_prop[i-1], sig_loss_prop[i-1] = calcSigProp(beta, sig_level)
 write = pathtofolder + 'ds005/sub0' + str(i).zfill(2) + '/model/model001/onsets/sub0' + str(i).zfill(2) + '_lme_beta.txt'