def test_calcBeta():
    """Unit test for calcBeta.

    Builds a small design by hand (columns constructed the same way
    calcBeta(data, gain, loss) builds its design matrix), fits the same
    data with sklearn's LinearRegression as a reference, and checks that
    calcBeta's betas, fitted values ("time by voxel") and design matrix
    agree with it.  Also checks that passing a threshold argument changes
    the betas/fitted values, and exercises the two-regressor "standard
    template" path.
    """
    # X, Y are constructed like the way in calcBeta(data, gain, loss).
    # True model used below: Y = X1 + 2*X2 + X3 + X4 + 1 (no noise).
    X = np.ones((6, 4))
    X[:, 0] = np.array([1, 2, 3, 4, 5, 6])
    X[:, 1] = np.array([2, 4, 6, 8, 10, 12])
    X[:, 2] = np.linspace(-1, 1, 6)
    t_quad = X[:, 2] ** 2
    t_quad -= np.mean(t_quad)  # de-mean the quadratic drift column
    X[:, 3] = t_quad
    Y = X[:, 0] + X[:, 1] * 2 + X[:, 2] + X[:, 3] + 1
    # Reference fit: sklearn fits the intercept separately from coef_.
    regr = linear_model.LinearRegression()
    regr.fit(X, Y)
    test_beta = np.append(regr.coef_, regr.intercept_)
    # ------------------------------------------------------------------ #
    # my function
    design, t_by_v, my_beta = calcBeta(Y, X[:, 0], X[:, 1], X[:, 2], X[:, 3])
    # ------------------------------------------------------------------ #
    # assert betas: the tolerance here is a bit high since the two
    # implementations give some variation in betas for a small sample
    # (X's first two columns are collinear, so betas are not unique).
    # BUG FIX: compare |difference| — without abs() a beta far *below*
    # the reference value would still pass the assertion.
    assert all(abs(my_beta.ravel() - test_beta) < 0.1)
    # assert time_by_voxel (predictions)
    assert_allclose(t_by_v.ravel(), regr.predict(X))
    # assert design
    assert_allclose(X, design[:, :4])

    # ------------------------------------------------------------------ #
    # my function when thrs != None (threshold argument supplied)
    design1, t_by_v1, my_beta1 = calcBeta(Y, X[:, 0], X[:, 1], X[:, 2], X[:, 3], 1)

    # assert that thresholding produces different betas and fitted values
    assert (t_by_v.ravel() != t_by_v1.ravel()).any()
    assert (my_beta.ravel() != my_beta1.ravel()).all()
    # assert design still the same
    assert_allclose(X, design1[:, :4])

    # ------------------------------------------------------------------ #
    # Standard Template test: only two regressors.
    X_s = np.ones((6, 2))
    X_s[:, 0] = np.array([1, 2, 3, 4, 5, 6])
    X_s[:, 1] = np.array([2, 4, 6, 8, 10, 12])
    Y_s = X_s[:, 0] + X_s[:, 1] * 2 + 1
    regr_s = linear_model.LinearRegression()
    regr_s.fit(X_s, Y_s)

    # my function, standard template
    design_s, t_by_v_s, my_beta_s = calcBeta(Y_s, X_s[:, 0], X_s[:, 1])

    # assert fitted values match sklearn's predictions
    assert_allclose(t_by_v_s.ravel(), regr_s.predict(X_s))
    # assert design
    assert_allclose(X_s, design_s[:, :2])
def test_calcBeta():
    """Truncated duplicate of the test_calcBeta defined above.

    NOTE(review): this redefinition shadows the earlier test_calcBeta, so
    a test runner would collect only this one.  The function is also cut
    off after the design assertion — the lines that follow in this file
    belong to an unrelated fragment — so this looks like a copy/paste or
    extraction artifact; confirm against the original repository before
    keeping it.
    """
    # X, Y are constructed like the way in calcBeta(data, gain, loss).
    # Create a simple linear model based on Y = 2X1 + 5X2 + e
    # NOTE(review): the coefficients actually used below are 1, 2, 1, 1
    # with intercept 1 — the comment above does not match the code.
    X = np.ones((6, 4))
    X[:, 0] = np.array([1,2,3,4,5,6])
    X[:, 1] = np.array([2,4,6,8,10,12])
    X[:, 2] = np.linspace(-1,1,6)
    t_quad = X[:,2] ** 2
    # de-mean the quadratic drift column
    t_quad -= np.mean(t_quad)
    X[:, 3] = t_quad
    Y = X[:,0] + X[:,1]*2 + X[:,2] + X[:,3] + 1
    # Create linear regression object
    regr = linear_model.LinearRegression()
    # Train the model using the training sets
    regr.fit(X, Y)
    # sklearn keeps the intercept separate from coef_; append it so the
    # layout matches calcBeta's beta vector.
    test_beta = np.append(regr.coef_ , regr.intercept_)
    #--------------------------------------------------------------------------#
    # my function
    design, t_by_v, my_beta = calcBeta(Y, X[:,0], X[:,1], X[:,2], X[:,3])
    #--------------------------------------------------------------------------#
    # assert betas: the threshold here is a bit high since 2 methods of 
    # implementation for a small sample size gives some variation in betas
    # NOTE(review): missing abs() — a beta far below the reference value
    # would still satisfy this assertion.
    assert all(my_beta.ravel()-test_beta < 0.1)
    # assert time_by_voxel (predictions)
    assert_allclose(t_by_v.ravel(), regr.predict(X))
    # assert design
    assert_allclose(X, design[:,:4])
            # NOTE(review): fragment — the enclosing function and the loop
            # header (over runs) are missing from this chunk; code below is
            # kept verbatim.  This line continues a path-concatenation
            # expression whose beginning is not visible.
            + "/cond004.txt"
        )
        # Merge behavioural data with the four onset-condition files —
        # presumably into one parameter table; verify merge_cond's contract.
        parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4)
        neural_prediction = events2neural_extend(parameters, TR, n_vols)
        # Build the four regressors from the neural prediction; presumably
        # HRF convolution given hrf_at_trs is passed — TODO confirm.
        gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction)
        # Drop outlier volumes (dvars_out / fd_out suggest DVARS and
        # framewise-displacement criteria — confirm in deleteOutliers).
        data, gain, loss, linear_dr, quad_dr = deleteOutliers(
            data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out
        )
        # Accumulate across runs: data along the time axis (axis 3),
        # regressors along axis 0.
        data_full = np.concatenate((data_full, data), axis=3)
        gain_full = np.concatenate((gain_full, gain), axis=0)
        loss_full = np.concatenate((loss_full, loss), axis=0)
        linear_full = np.concatenate((linear_full, linear_dr), axis=0)
        quad_full = np.concatenate((quad_full, quad_dr), axis=0)
    # Spatial (x, y, z) shape, used later to recover the voxel position.
    d_shape = data_full.shape[:3]
    # NOTE(review): `mea` is assigned but never used in the visible span.
    mea = calcMRSS(data_full, gain_full, loss_full, linear_full, quad_full)
    X, Y, beta = calcBeta(data_full, gain_full, loss_full, linear_full, quad_full)
    # ------------------------------------------------------------------------------#
    # Take the 40,000 voxel
    # NOTE(review): voxel index 40000 is hard-coded here and below.
    fitted = X.dot(beta[:, 40000])
    residuals = Y[:, 40000] - fitted
    # Diagnostic plots saved to disk (qqplot / residual-vs-fitted).
    qqplot(residuals, saveit=True)
    res_var(fitted, residuals, name="fitted", saveit=True)

    # possibly transform data:
    # log1p-style transform (log(data + 1)) to check whether residuals
    # look more Gaussian after transformation.
    X_log, Y_log, beta_log = calcBeta(np.log(data_full + 1), gain_full, loss_full, linear_full, quad_full)
    residuals_log = Y_log[:, 40000] - X_log.dot(beta_log[:, 40000])
    qqplot(residuals_log, saveit=True)
    res_var(X_log.dot(beta_log[:, 40000]), residuals_log, name="fitted_log", saveit=True)
    # Convert the flat voxel index back to (x, y, z) for reporting.
    vox_pos = np.unravel_index(40000, d_shape)
    print("Voxel used: " + str(vox_pos))
        # NOTE(review): fragment — the enclosing function and the loop header
        # (over run index j) are missing from this chunk.  Backtick repr
        # (`j`) is Python 2-only syntax; under Python 3 every such line is a
        # SyntaxError and should become str(j).
        # Load the MNI-registered ("standard template") BOLD run and smooth it.
        boldname='ds005/sub0'+str(i).zfill(2)+'/model/model001/task001_run00'+`j`+'.feat/filtered_func_data_mni.nii.gz'
        img=nib.load(boldname)
        data=img.get_data()
        data=smooth_spatial(data)
        run = j
        # Paths to the behavioural file and the four onset-condition files.
        behav_cond = 'ds005/sub0'+str(i).zfill(2)+'/behav/task001_run00'+`j`+'/behavdata.txt'
        task_cond1 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond001.txt'
        task_cond2 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond002.txt'
        task_cond3 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond003.txt'
        task_cond4 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond004.txt'
        parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4)
        neural_prediction = events2neural_extend(parameters,TR, n_vols)
        # standard=True: regressors for the standard-template pipeline.
        gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction, standard = True)
        # NOTE(review): here deleteOutliers is called WITHOUT linear_dr /
        # quad_dr (cf. the other fragment, which passes them) yet the result
        # is still unpacked into five values — verify the function's
        # signature supports both call shapes.
        data, gain, loss, linear_dr, quad_dr = deleteOutliers(data, gain, loss, i, run, dvars_out, fd_out)
        # Accumulate runs: data along time (axis 3), regressors along axis 0.
        data_full = np.concatenate((data_full,data),axis=3)
        gain_full = np.concatenate((gain_full,gain),axis=0)
        loss_full = np.concatenate((loss_full,loss),axis=0)
    # mea=calcMRSS(data_full, gain_full, loss_full, None, None, threshold)
    # Fit with gain/loss only (drift regressors passed as None).
    X, Y, beta=calcBeta(data_full, gain_full, loss_full, None, None, threshold)
    # calculate t values
    # NOTE(review): the voxel count 902629 (= 91*109*91, the MNI 2mm grid)
    # is hard-coded — confirm it matches Y.shape[1].
    t_val=np.zeros((2,902629))
    for k in range(Y.shape[1]):
        t_val[:,k] = significant(X,Y[:,k], beta[:,k])
    # file names for beta and t
    beta_file='../results/texts/sub0'+str(i).zfill(2)+'_standard_beta.txt'
    t_file='../results/texts/sub0'+str(i).zfill(2)+'_standard_tvals.txt'
    # save beta and t values to file
    np.savetxt(beta_file, beta)
    np.savetxt(t_file, t_val)

# --- Example #5 ---
# (the stray "0" that followed this header was a vote-count artifact from
# the page this snippet was scraped from; both lines were bare prose and a
# SyntaxError in Python, so they are preserved here as comments)
    # NOTE(review): fragment — the enclosing function header (and subject
    # loop over i) is missing from this chunk.  `data_full` is concatenated
    # below but never initialized in the visible span — its initialization
    # must live in the missing part.  Backtick repr (`j`, `i`) is Python
    # 2-only syntax; under Python 3 it is a SyntaxError (use str(j)).
    # Empty accumulators for the per-run regressors.
    gain_full = np.empty([0,])
    loss_full = np.empty([0,])
    linear_full = np.empty([0,])
    quad_full = np.empty([0,])
    # Three runs per subject (run001..run003).
    for j in range(1,4):
        direct='ds005/sub0'+str(i).zfill(2)+'/BOLD/task001_run00'+`j`+'/'
        boldname = direct+'bold.nii.gz'
        # Load the raw BOLD run and smooth it spatially.
        img=nib.load(boldname)
        data=img.get_data()
        data=smooth_spatial(data)
        run = j
        # Paths to the behavioural file and the four onset-condition files.
        behav_cond = 'ds005/sub0'+str(i).zfill(2)+'/behav/task001_run00'+`j`+'/behavdata.txt'
        task_cond1 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond001.txt'
        task_cond2 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond002.txt'
        task_cond3 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond003.txt'
        task_cond4 = 'ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/task001_run00'+`j`+'/cond004.txt'
        parameters = merge_cond(behav_cond, task_cond1, task_cond2, task_cond3, task_cond4)
        neural_prediction = events2neural_extend(parameters,TR, n_vols)
        gain, loss, linear_dr, quad_dr = getRegressor(TR, n_vols, hrf_at_trs, neural_prediction)
        # Drop outlier volumes before accumulating.
        data, gain, loss, linear_dr, quad_dr = deleteOutliers(data, gain, loss, linear_dr, quad_dr, i, run, dvars_out, fd_out)
        # Accumulate runs: data along time (axis 3), regressors along axis 0.
        data_full = np.concatenate((data_full,data),axis=3)
        gain_full = np.concatenate((gain_full,gain),axis=0)
        loss_full = np.concatenate((loss_full,loss),axis=0)
        linear_full = np.concatenate((linear_full,linear_dr),axis=0)
        quad_full = np.concatenate((quad_full,quad_dr),axis=0)
    # NOTE(review): `mea` result unused in the visible span.
    mea=calcMRSS(data_full, gain_full, loss_full, linear_full, quad_full, threshold)
    X, Y, beta=calcBeta(data_full, gain_full, loss_full, linear_full, quad_full, threshold)
    # write='ds005/sub0'+str(i).zfill(2)+'/model/model001/onsets/sub'+`i`+'_beta.txt'
    # np.savetxt(write, beta)