Code Example #1
def test_hypothesis1():
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions. 
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix. 
    
    beta, t, df, p = t_stat(data, convolved, [1, 1])
    beta2, t2, df2, p2 = t_stat(data, convolved, [0, 1])

    assert_almost_equal(beta, beta2)
    # Note: t.all() collapses each t-map to a single boolean, so this line only
    # checks that both contrasts produce all-nonzero t values.
    assert t.all() == t2.all()
    assert beta.shape[1] == np.prod(data.shape[0:-1])
Code Example #2
def test_bh():
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions. 
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix. 

    beta, t, df, p = t_stat(data, convolved, [1, 1])
    beta2, t2, df2, p2 = t_stat(data, convolved, [0, 1])

    Q = 1.0
    pval = p.T
    useless_bh = bh_procedure(pval, Q)

    # Since the FDR is 100%, the bh_procedure should return the exact same thing as the original data.
    # assert_almost_equal(data[...,7], useless_bh[...,7])
    # assert_almost_equal(np.ravel(pval), useless_bh)

    Q_real = 0.25
    real_bh = bh_procedure(pval, Q_real)
    # assert_not_equals(data[...,7], real_bh[...,7])
    assert not np.all(np.ravel(pval) != real_bh)
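The tests above treat bh_procedure as a black box. For readers following along, here is a minimal, self-contained sketch of the standard Benjamini-Hochberg step-up rule that the Q = 1.0 and Q = 0.25 cases rely on; the function name bh_reject and its boolean return value are illustrative assumptions, not the project's actual bh_procedure API.

import numpy as np

def bh_reject(p_values, Q):
    """Illustrative Benjamini-Hochberg step-up rule (not the project's bh_procedure).

    Returns a boolean mask of hypotheses rejected at false discovery rate Q.
    """
    p = np.ravel(np.asarray(p_values))
    m = p.size
    order = np.argsort(p)                       # p-values in ascending order
    thresholds = Q * np.arange(1, m + 1) / m    # BH line: Q * k / m
    passed = p[order] <= thresholds
    reject = np.zeros(m, dtype=bool)
    if passed.any():
        k = np.nonzero(passed)[0].max()         # largest k with p_(k) <= Q * k / m
        reject[order[:k + 1]] = True            # reject the k smallest p-values
    return reject

# With Q = 1.0 the largest p-value is compared against a threshold of 1.0, so
# every hypothesis is rejected (hence "useless_bh"); with Q = 0.25 only the
# smaller p-values survive.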
Code Example #3
File: test_bh.py  Project: karenceli/project-alpha
def test_bh():
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions.
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix.

    beta, t, df, p = t_stat(data, convolved, [1, 1])
    beta2, t2, df2, p2 = t_stat(data, convolved, [0, 1])

    Q = 1.0
    pval = p.T
    useless_bh = bh_procedure(pval, Q)

    # Since the FDR is 100%, the bh_procedure should return the exact same thing as the original data.
    # assert_almost_equal(data[...,7], useless_bh[...,7])
    # assert_almost_equal(np.ravel(pval), useless_bh)

    Q_real = 0.25
    real_bh = bh_procedure(pval, Q_real)
    # assert_not_equals(data[...,7], real_bh[...,7])
    assert not (np.all(np.ravel(pval) != real_bh))
Code Example #4
def test_hypothesis2():
    # example from http://www.jarrodmillman.com/rcsds/lectures/glm_intro.html
    # Note that this test only exercises t_stat with simple linear regression.

    psychopathy = [11.416, 4.514, 12.204, 14.835,
                   8.416, 6.563, 17.343, 13.02,
                   15.19, 11.902, 22.721, 22.324]
    clammy = [0.389, 0.2, 0.241, 0.463,
              4.585, 1.097, 1.642, 4.972,
              7.957, 5.585, 5.527, 6.964]

    Y = np.asarray(psychopathy)
    B, t, df, p = t_stat(Y, clammy, [0, 1])

    assert np.round(t, 6) == np.array([[1.914389]])
    assert np.round(p, 6) == np.array([[0.042295]])
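t_stat itself is not listed on this page; as a reference for the asserted numbers, this is a minimal sketch of the standard GLM contrast t-test that the example appears to exercise. The formulas and the one-sided p-value are assumptions inferred from the expected values above, not the project's code.

import numpy as np
from scipy import stats

# Same toy data as in the test above.
psychopathy = np.array([11.416, 4.514, 12.204, 14.835, 8.416, 6.563,
                        17.343, 13.02, 15.19, 11.902, 22.721, 22.324])
clammy = np.array([0.389, 0.2, 0.241, 0.463, 4.585, 1.097,
                   1.642, 4.972, 7.957, 5.585, 5.527, 6.964])

Y = psychopathy
X = np.column_stack((np.ones(len(clammy)), clammy))   # design: intercept + clammy
B = np.linalg.pinv(X) @ Y                              # least-squares estimates
df = X.shape[0] - np.linalg.matrix_rank(X)             # residual degrees of freedom
MRSS = np.sum((Y - X @ B) ** 2) / df                   # mean residual sum of squares
c = np.array([0, 1])                                   # contrast: slope only
SE = np.sqrt(MRSS * c @ np.linalg.pinv(X.T @ X) @ c)
t_slope = c @ B / SE                                   # ~1.914389 if this matches t_stat
p_slope = 1 - stats.t.cdf(t_slope, df)                 # one-sided p, ~0.042295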
Code Example #5
def test_hypothesis_3():
    # new multiple-regression
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions.
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix.
    X = np.ones((convolved.shape[0], 2))
    X[:, 1] = convolved

    beta, t, df, p = t_stat(data, convolved, [0, 1])
    beta2, t2, df2, p2 = t_stat_mult_regression_single(data, X)

    beta3, t3, df3, p3 = t_stat_mult_regression(data, X)

    assert_array_equal(t, t2)
    assert_array_equal(t, np.atleast_2d(t3[1, :]))
Code Example #6
def test_hypothesis2():
    # example from http://www.jarrodmillman.com/rcsds/lectures/glm_intro.html
    # Note that this test only exercises t_stat with simple linear regression.

    psychopathy = [11.416, 4.514, 12.204, 14.835,
                   8.416, 6.563, 17.343, 13.02,
                   15.19, 11.902, 22.721, 22.324]
    clammy = [0.389, 0.2, 0.241, 0.463,
              4.585, 1.097, 1.642, 4.972,
              7.957, 5.585, 5.527, 6.964]

    Y = np.asarray(psychopathy)
    B, t, df, p = t_stat(Y, clammy, [0, 1])

    assert np.round(t, 6) == np.array([[1.914389]])
    assert np.round(p, 6) == np.array([[0.042295]])
Code Example #7
def test_hypothesis_3():
    # new multiple-regression
    img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
    data = img.get_data()[..., 4:]
    # Read in the convolutions. 
    convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
    # Create design matrix. 
    X = np.ones((convolved.shape[0], 2))
    X[:, 1] = convolved

    beta, t, df, p = t_stat(data, convolved, [0, 1])
    beta2, t2, df2, p2 = t_stat_mult_regression_single(data, X)

    beta3, t3, df3, p3 = t_stat_mult_regression(data, X)

    assert_array_equal(t, t2)
    assert_array_equal(t, np.atleast_2d(t3[1, :]))
Code Example #8
    hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
    n_vols = data.shape[-1]

    # creating the .txt file for the events2neural function
    cond_all = np.row_stack((cond1, cond2, cond3))
    cond_all = sorted(cond_all, key=lambda x: x[0])
    np.savetxt(pathtodata + i + "/model/model001/onsets/task001_run001/cond_all.txt", cond_all)

    neural_prediction = events2neural(pathtodata + i + "/model/model001/onsets/task001_run001/cond_all.txt", TR, n_vols)
    convolved = np.convolve(neural_prediction, hrf_at_trs)  # hrf_at_trs sample data

    N = len(neural_prediction)  # N == n_vols == 173
    M = len(hrf_at_trs)  # M == 12
    np_hrf = convolved[:N]

    B, t, df, p = t_stat(data, np_hrf, np.array([0, 1]))

    # Simple mask function
    mask = nib.load(pathtodata + i + '/anatomy/inplane001_brain_mask.nii.gz')
    mask_data = mask.get_data()

    t_mean[..., int(i[-1])] = make_mask(np.reshape(t, (64, 64, 34)), mask_data, fit=True)


final = present_3d(np.mean(t_mean, axis=3))


#######################
# Plot the results    #
#######################
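The slice convolved[:N] in the example above drops the tail that np.convolve appends in its default 'full' mode. A quick standalone check of the lengths involved (173 volumes and a 12-sample HRF come from the comments above; the stand-in regressor and HRF shape below are arbitrary):

import numpy as np

N = 173                      # number of retained volumes (per the comments above)
M = 12                       # length of the sampled HRF
neural = np.random.randint(0, 2, size=N).astype(float)  # stand-in on/off regressor
hrf = np.hanning(M)                                      # stand-in HRF shape

full = np.convolve(neural, hrf)      # 'full' mode: length N + M - 1 == 184
assert full.shape[0] == N + M - 1
np_hrf = full[:N]                    # keep one value per volume, drop the tail
assert np_hrf.shape[0] == N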
Code Example #9
# Path to the subject 009 fMRI data used in class. 
pathtoclassdata = "data/ds114/"

# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))

# Load our benjamini-hochberg function
from benjamini_hochberg import bh_procedure
from hypothesis import t_stat

img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
data = img.get_data()[..., 4:]
# Read in the convolutions. 
convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
# Create design matrix. 
beta, t, df, p = t_stat(data, convolved, [1, 1])
beta2, t2, df2, p2 = t_stat(data, convolved, [0, 1])


def test_bh():

    Q = 1.0
    p_vals = p.T
    useless_bh = bh_procedure(p_vals, Q)

    # Since the FDR is 100%, the bh_procedure should return the exact same thing as the original data.
    # assert_almost_equal(data[...,7], useless_bh[...,7])
    # assert_almost_equal(np.ravel(p_vals), useless_bh)

    Q_real = 0.25
    real_bh = bh_procedure(p_vals, Q_real)
Code Example #10
# creating the .txt file for the events2neural function
cond_all = np.row_stack((cond1, cond2, cond3))
cond_all = sorted(cond_all, key=lambda x: x[0])
np.savetxt(condition_location + "cond_all.txt", cond_all)

neural_prediction = events2neural(condition_location + "cond_all.txt", TR,
                                  n_vols)
convolved = np.convolve(neural_prediction,
                        hrf_at_trs)  # hrf_at_trs sample data
N = len(neural_prediction)  # N == n_vols == 173
M = len(hrf_at_trs)  # M == 12
np_hrf = convolved[:N]

#=================================================
""" Run hypothesis testing script"""

B_my, t_my, df, p_my = t_stat(data, my_hrf, np.array([0, 1]))

print("'my' convolution single regression (t,p):")
print(t_my, p_my)
print("means of (t,p) for 'my' convolution: (" + str(np.mean(t_my)) +
      str(np.mean(p_my)) + ")")

B_np, t_np, df, p_np = t_stat(data, np_hrf, np.array([0, 1]))

print("np convolution single regression (t,p):")
print(t_np, p_np)
print("means of (t,p) for np convolution: (" + str(np.mean(t_np)) +
      str(np.mean(p_np)) + ")")
B, t, df, p = t_stat(data, my_hrf, np.array([0, 1]))
Code Example #11
img = nib.load(pathtodata + "BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[..., 6:]  # Knock off the first 6 observations.

#Load convolution files
cond1 = np.loadtxt(condition_location + "cond001.txt")
cond2 = np.loadtxt(condition_location + "cond002.txt")
cond3 = np.loadtxt(condition_location + "cond003.txt")

#Convolution and t-values
all_stimuli = np.array(
    sorted(list(cond2[:, 0]) + list(cond3[:, 0]) + list(cond1[:, 0])))
my_hrf = convolution_specialized(all_stimuli, np.ones(len(all_stimuli)),
                                 hrf_single, np.linspace(0, 239 * 2 - 2, 239))

B, t, df, p = t_stat(data, my_hrf, np.array([0, 1]))

#########################
# Benjamini-Hochberg #
#########################

print(
    "# ==== BEGIN Visualization of Masked data over original brain data ==== #"
)

p_vals = p.T  # shape of p_vals is (139264, 1)

print("# ==== No Mask, bh_procedure ==== #")
# a fairly large false discovery rate
Q = 0.4
significant_pvals = bh_procedure(p_vals, Q)
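convolution_specialized is only called here, with event onsets that need not align to the TR grid; the sketch below shows one common way such an event-wise convolution can be written. The names event_convolve and hrf_shape, the HRF bump itself, and the summation are illustrative assumptions, not the project's implementation.

import numpy as np

def hrf_shape(t):
    """Stand-in haemodynamic response: a simple gamma-like bump (illustrative)."""
    t = np.asarray(t, dtype=float)
    out = np.zeros_like(t)
    pos = t > 0
    out[pos] = t[pos] ** 5 * np.exp(-t[pos]) / 120.0
    return out

def event_convolve(onsets, amplitudes, hrf, times):
    """Sum a scaled HRF placed at each event onset, evaluated on `times`."""
    signal = np.zeros(len(times))
    for onset, amp in zip(onsets, amplitudes):
        signal += amp * hrf(np.asarray(times) - onset)
    return signal

# Example with arbitrary (non-TR-aligned) onsets, as in all_stimuli above.
times = np.linspace(0, 239 * 2 - 2, 239)      # one time point per volume (TR = 2 s)
onsets = [3.5, 20.0, 41.25]
signal = event_convolve(onsets, np.ones(len(onsets)), hrf_shape, times)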
Code Example #12
# creating the .txt file for the events2neural function
cond_all = np.row_stack((cond1, cond2, cond3))
cond_all = sorted(cond_all, key=lambda x: x[0])
np.savetxt(condition_location + "cond_all.txt", cond_all)

neural_prediction = events2neural(condition_location + "cond_all.txt", TR, n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs)  # hrf_at_trs sample data
N = len(neural_prediction)  # N == n_vols == 173
M = len(hrf_at_trs)  # M == 12
np_hrf = convolved[:N]


#=================================================

""" Run hypothesis testing script"""

B_my, t_my, df, p_my = t_stat(data, my_hrf, np.array([0, 1]))

print("'my' convolution single regression (t,p):")
print(t_my, p_my)
print("means of (t,p) for 'my' convolution: (" + str(np.mean(t_my)) + ", " +
      str(np.mean(p_my)) + ")")

B_np, t_np, df, p_np = t_stat(data, np_hrf, np.array([0, 1]))

print("np convolution single regression (t,p):")
print(t_np, p_np)
print("means of (t,p) for np convolution: (" + str(np.mean(t_np)) + ", " +
      str(np.mean(p_np)) + ")")
B, t, df, p = t_stat(data, my_hrf, np.array([0, 1]))

Code Example #13
    t_final2 = np.zeros((data_smooth.shape[:-1]))
    t_final3 = np.zeros((data_smooth.shape[:-1]))

    # Run per slice in order to correct for time
    for j in range(data_smooth.shape[2]):

        data_smooth_slice = data_smooth[:, :, j, :]
        data_rough_slice = data_rough[:, :, j, :]
        # Create design matrix
        X = np.ones((n_vols, 7))
        X[:, 1] = convolve[:, j]
        X[:, 2] = np.linspace(-1, 1, num=X.shape[0])  # drift
        X[:, 3:] = fourier_creation(X.shape[0], 2)[:, 1:]

        beta1, t1, df1, p1 = t_stat_mult_regression(data_rough_slice, X)
        beta2, t2, df2, p2 = t_stat(data_smooth_slice, convolve[:, j], c=[0, 1])
        beta3, t3, df3, p3 = t_stat(data_rough_slice, convolve[:, j], c=[0, 1])

        t1 = t1[1, :]
        t2 = t2.T
        t3 = t3.T

        t_final1[:, :, j] = t1.reshape(data_rough_slice.shape[:-1])
        t_final2[:, :, j] = t2.reshape(data_smooth_slice.shape[:-1])
        t_final3[:, :, j] = t3.reshape(data_rough_slice.shape[:-1])

    np.save("../data/t_stat/" + i + "_tstat_rough_full.npy", t_final1)
Code Example #14
# Path to the subject 009 fMRI data used in class.
pathtoclassdata = "data/ds114/"

# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))

# Load our benjamini-hochberg function
from benjamini_hochberg import bh_procedure
from hypothesis import t_stat

img = nib.load(pathtoclassdata + "ds114_sub009_t2r1.nii")
data = img.get_data()[..., 4:]
# Read in the convolutions.
convolved = np.loadtxt(pathtoclassdata + "ds114_sub009_t2r1_conv.txt")[4:]
# Create design matrix.
beta, t, df, p = t_stat(data, convolved, [1, 1])
beta2, t2, df2, p2 = t_stat(data, convolved, [0, 1])


def test_bh():

    Q = 1.0
    p_vals = p.T
    useless_bh = bh_procedure(p_vals, Q)

    # Since the FDR is 100%, the bh_procedure should return the exact same thing as the original data.
    # assert_almost_equal(data[...,7], useless_bh[...,7])
    # assert_almost_equal(np.ravel(p_vals), useless_bh)

    Q_real = 0.25
    real_bh = bh_procedure(p_vals, Q_real)
Code Example #15
    t_final = np.zeros((data.shape[:-1]))
    # p_final = np.zeros((data.shape[:-1]))
    t_final2 = np.zeros((data.shape[:-1]))

    for j in range(data.shape[2]):

        data_slice = data[:, :, j, :]
        X = np.ones((n_vols, 6))
        X[:, 1] = convolve[:, j]
        X[:, 2] = np.linspace(-1, 1, num=X.shape[0])  # drift
        X[:, 3:] = fourier_creation(X.shape[0], 3)[:, 1:]

        beta, t, df, p = t_stat_mult_regression(data_slice, X)

        beta2, t2, df2, p2 = t_stat(data_slice, convolve[:, j], c=[0, 1])

        t = t[1, :]
        # p = p[1,:]
        t2 = t2.T

        MRSS, fitted, residuals = glm_diagnostics(beta, X, data_slice)

        t_final[:, :, j] = t.reshape(data_slice.shape[:-1])
        # p_final[:,:,j] = p.reshape(data_slice.shape[:-1])
        # t_final2[:,:,j] = t2.reshape(data_slice.shape[:-1])

        # residual_final[:,:,j,:] = residuals.reshape(data_slice.shape)

        np.save("../data/glm/t_stat/" + i + "_tstat.npy", t_final)