def test_getRegressor():
    """Compare getRegressor's regressors against a hand-rolled reference.

    Builds the gain/loss BOLD regressors by explicitly summing shifted,
    scaled copies of the HRF (the convolution method described at
    http://practical-neuroimaging.github.io/on_convolution.html), plus
    linear and mean-centered quadratic drift terms, and checks that
    getRegressor reproduces all four.  Also checks that ``standard=True``
    leaves the convolved regressors unchanged while returning ``None``
    for the drift terms.

    NOTE(review): a second ``test_getRegressor`` is defined later in this
    file and shadows this one at import time — rename one of them so both
    actually run under pytest.
    """
    # Set up and load in subject 1 run 1.
    TR = 2
    n_vols = 240
    tr_times = np.arange(0, 30, TR)
    hrf_signal = hrf(tr_times)
    behav_cond = pathtotest + 'test_behavdata.txt'
    task_cond1 = pathtotest + 'test_cond001.txt'
    task_cond2 = pathtotest + 'test_cond002.txt'
    task_cond3 = pathtotest + 'test_cond003.txt'
    task_cond4 = pathtotest + 'test_cond004.txt'
    parameters = merge_cond(behav_cond, task_cond1, task_cond2,
                            task_cond3, task_cond4)
    # Neural (stimulus) time courses; columns 1 and 2 are presumed to be
    # the gain and loss amplitudes — confirm against events2neural_extend.
    neural_signal = events2neural_extend(parameters, TR, n_vols)
    gain_signal = neural_signal[:, 1]
    loss_signal = neural_signal[:, 2]
    N = neural_signal.shape[0]  # length of the neural time course
    M = len(hrf_signal)         # length of the HRF kernel
    # Reference convolution: sum shifted, scaled HRF copies.  The full
    # convolution has length N + M - 1 (includes the tail).
    convolved_gain = np.zeros(N + M - 1)
    convolved_loss = np.zeros(N + M - 1)
    for i in range(N):
        convolved_gain[i:i + M] += hrf_signal * gain_signal[i]
        convolved_loss[i:i + M] += hrf_signal * loss_signal[i]
    # Drop the convolution tail so the regressors match the scan length.
    n_to_remove = M - 1
    convolved_gain = convolved_gain[:-n_to_remove]
    convolved_loss = convolved_loss[:-n_to_remove]
    # Linear and mean-centered quadratic drift regressors.
    lin_dr = np.linspace(-1, 1, n_vols)
    quad_dr = lin_dr ** 2
    quad_dr -= np.mean(quad_dr)

    #--------------------------------------------------------------------------#
    # Function under test
    myconv_gain, myconv_loss, my_lin, my_quad = getRegressor(
        TR, n_vols, hrf_signal, neural_signal)
    myconv_gain1, myconv_loss1, my_lin1, my_quad1 = getRegressor(
        TR, n_vols, hrf_signal, neural_signal, standard=True)

    #--------------------------------------------------------------------------#
    # Assert checks.
    # BUG FIX: the original used max(abs(diff) < tol), i.e. max of a
    # boolean array, which passes when ANY element is within tolerance.
    # Require ALL elements to be within tolerance instead.
    assert np.all(np.abs(convolved_gain - myconv_gain) < 1e-4)
    assert np.all(np.abs(convolved_loss - myconv_loss) < 1e-4)
    assert np.all(np.abs(quad_dr - my_quad) < 1e-4)
    assert np.all(np.abs(lin_dr - my_lin) < 1e-4)
    # standard=True must not change the convolved regressors and must
    # return None for both drift terms.
    assert_allclose(myconv_gain, myconv_gain1)
    assert_allclose(myconv_loss, myconv_loss1)
    assert my_lin1 is None
    assert my_quad1 is None
def test_getRegressor():
    """Compare getRegressor's regressors against a hand-rolled reference.

    Builds the gain/loss BOLD regressors by explicitly summing shifted,
    scaled copies of the HRF (the convolution method described at
    http://practical-neuroimaging.github.io/on_convolution.html), plus
    linear and mean-centered quadratic drift terms, and checks that
    getRegressor reproduces all four.

    NOTE(review): this redefines a ``test_getRegressor`` declared earlier
    in this file, shadowing it at import time — rename one of them so
    both actually run under pytest.
    """
    # Set up and load in subject 1 run 1.
    TR = 2
    n_vols = 240
    tr_times = np.arange(0, 30, TR)
    hrf_signal = hrf(tr_times)
    behav_cond = pathtotest + 'test_behavdata.txt'
    task_cond1 = pathtotest + 'test_cond001.txt'
    task_cond2 = pathtotest + 'test_cond002.txt'
    task_cond3 = pathtotest + 'test_cond003.txt'
    task_cond4 = pathtotest + 'test_cond004.txt'
    parameters = merge_cond(behav_cond, task_cond1, task_cond2,
                            task_cond3, task_cond4)
    # Neural (stimulus) time courses; columns 1 and 2 are presumed to be
    # the gain and loss amplitudes — confirm against events2neural_extend.
    neural_signal = events2neural_extend(parameters, TR, n_vols)
    gain_signal = neural_signal[:, 1]
    loss_signal = neural_signal[:, 2]
    N = neural_signal.shape[0]  # length of the neural time course
    M = len(hrf_signal)         # length of the HRF kernel
    # Reference convolution: sum shifted, scaled HRF copies.  The full
    # convolution has length N + M - 1 (includes the tail).
    convolved_gain = np.zeros(N + M - 1)
    convolved_loss = np.zeros(N + M - 1)
    for i in range(N):
        convolved_gain[i:i + M] += hrf_signal * gain_signal[i]
        convolved_loss[i:i + M] += hrf_signal * loss_signal[i]
    # Drop the convolution tail so the regressors match the scan length.
    n_to_remove = M - 1
    convolved_gain = convolved_gain[:-n_to_remove]
    convolved_loss = convolved_loss[:-n_to_remove]
    # Linear and mean-centered quadratic drift regressors.
    linear_dr = np.linspace(-1, 1, n_vols)
    quadratic_dr = linear_dr ** 2
    quadratic_dr -= np.mean(quadratic_dr)

    #--------------------------------------------------------------------------#
    # Function under test
    myconv_gain, myconv_loss, my_lin, my_quad = getRegressor(
        TR, n_vols, hrf_signal, neural_signal)
    #--------------------------------------------------------------------------#
    # Assert checks.
    # BUG FIX: the original used max(abs(diff) < tol), i.e. max of a
    # boolean array, which passes when ANY element is within tolerance.
    # Require ALL elements to be within tolerance instead.
    assert np.all(np.abs(convolved_gain - myconv_gain) < 1e-4)
    assert np.all(np.abs(convolved_loss - myconv_loss) < 1e-4)
    assert np.all(np.abs(quadratic_dr - my_quad) < 1e-4)
    assert np.all(np.abs(linear_dr - my_lin) < 1e-4)
def test_hrf():
    """hrf() should match a direct double-gamma reference computation."""
    sample_times = np.arange(0, 20, 0.2)
    # Reference HRF: gamma(shape=6) peak minus 0.35 x gamma(shape=12)
    # undershoot, rescaled so the curve's maximum is 0.6.
    peak = gamma.pdf(sample_times, 6)
    undershoot = gamma.pdf(sample_times, 12)
    expected = peak - 0.35 * undershoot
    expected = expected / np.max(expected) * 0.6
    # Compare against the function under test.
    assert_almost_equal(expected, hrf(sample_times))
def test_hrf():
    """Verify hrf() reproduces the canonical two-gamma HRF shape.

    NOTE(review): this redefines a ``test_hrf`` declared earlier in this
    file — only one of the two runs under pytest.
    """
    t = np.arange(0, 20, 0.2)
    # Build the reference curve as the difference of two gamma densities
    # (peak at shape 6, undershoot at shape 12 weighted by 0.35)...
    reference = gamma.pdf(t, 6) - 0.35 * gamma.pdf(t, 12)
    # ...normalised so its maximum value equals 0.6.
    reference = reference / np.max(reference) * 0.6
    assert_almost_equal(reference, hrf(t))
# Path to function
pathtofunction = "../utils/functions"
# Append path to sys
sys.path.append(pathtofunction)

from graph_lindiagnostics import qqplot, res_var
from behavtask_tr import events2neural_extend, merge_cond
from regression_functions import hrf, getRegressor, calcBeta, calcMRSS, deleteOutliers

# This first part is same as regression_script, except path changes
# ------------------------------------------------------------------------------#
# Acquisition parameters: 240 volumes acquired at a TR of 2 s.
n_vols = 240
TR = 2
# Sample the HRF at TR resolution over a 30 s window.
tr_times = np.arange(0, 30, TR)
hrf_at_trs = hrf(tr_times)

# Root directory holding the study data files.
pathtodata = "../../data/"

# Pre-computed outlier volume indices (DVARS- and FD-based), stored as JSON.
# NOTE(review): the open() handles are never closed — acceptable for a
# one-shot script, but a `with` block would be cleaner.
dvars_out = json.load(open(pathtodata + "dvarsOutliers.txt"))
fd_out = json.load(open(pathtodata + "fdOutliers.txt"))
for i in range(2, 3):
    # first three dimension for data shape is 64, 64, 34.
    # create array to store the combined dataset of three runs
    data_full = np.empty([64, 64, 34, 0])
    gain_full = np.empty([0])
    loss_full = np.empty([0])
    linear_full = np.empty([0])
    quad_full = np.empty([0])
    for j in range(1, 4):
# --- Ejemplo n.º 6 (scrape artifact: sample separator; the stray "0" was a vote count, not code) ---
import json
import sys

# Path to function
pathtofunction = '../utils'
# Append path to sys
sys.path.append(pathtofunction)

from behavtask_tr import events2neural_extend, merge_cond
from regression_functions import hrf, getRegressor, calcBeta, calcMRSS, deleteOutliers
from lme_functions import calcBetaLme, calcSigProp, calcAnov, anovStat

# Acquisition parameters: 240 volumes acquired at a TR of 2 s.
n_vols=240
TR=2
# Sample the HRF at TR resolution over a 30 s window.
tr_times = np.arange(0, 30, TR)
hrf_at_trs = hrf(tr_times)


# Root directory holding the study data files.
pathtofolder = '../../data/'

# Pre-computed outlier volume indices (DVARS- and FD-based), stored as JSON.
# NOTE(review): the open() handles are never closed — acceptable for a
# one-shot script, but a `with` block would be cleaner.
dvars_out = json.load(open(pathtofolder + "dvarsOutliers.txt"))
fd_out = json.load(open(pathtofolder + "fdOutliers.txt"))

# Per-subject summary statistics for 16 subjects, filled in by the loop below.
sig_gain_prop = np.empty(16)
sig_loss_prop = np.empty(16)
anov_prop = np.empty(16)
for i in range(1,17):
    # first three dimension for data shape is 64, 64, 34.
    # create array to store the combined dataset of three runs
    data_full = np.empty([64, 64, 34, 0])
    gain_full = np.empty([0,])