def test_convolution_specialized():
	stimuli=np.array([0,5,15])
	on_off1=np.array([0,1,0])
	on_off2=np.array([1,0,1])
	x=np.linspace(0,45,91) # 0, .5, 1, 1.5, 2, ... 45
	HRF1=convolution_specialized(stimuli,on_off1,hrf_single,x)
	y1=np.array([hrf_single(x_i-5) for x_i in x]) # what it should be doing
	HRF2=convolution_specialized(stimuli,on_off2,hrf_single,x)
	y2=np.array([hrf_single(x_i)+hrf_single(x_i-15) for x_i in x]) #what it should be doing

	assert all(HRF1 == y1)
	assert all(HRF2 == y2)
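
# A minimal sketch of the behaviour the test above assumes convolution_specialized
# to have (the signature is taken from the calls above; the project's actual
# implementation may differ): at every requested time point, sum one HRF copy per
# stimulus onset, weighted by that onset's on/off amplitude.
def convolution_specialized_sketch(stimuli, on_off, hrf_function, record_times):
	result = np.zeros(len(record_times))
	for i, t in enumerate(record_times):
		# one shifted HRF per stimulus onset, weighted by its on/off amplitude
		result[i] = sum(amp * hrf_function(t - onset) for onset, amp in zip(stimuli, on_off))
	return result
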
##############
# Example #4 #
##############

# Load the image data for subject 1.
img = nib.load(pathtodata + "BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[..., 6:]  # Knock off the first 6 observations.

cond1 = np.loadtxt(condition_location + "cond001.txt")
cond2 = np.loadtxt(condition_location + "cond002.txt")
cond3 = np.loadtxt(condition_location + "cond003.txt")

#######################
# Smart   convolution #
#######################

all_stimuli = np.array(
    sorted(list(cond2[:, 0]) + list(cond3[:, 0]) +
           list(cond1[:, 0])))  # could also just use x_s_array
my_hrf = convolution_specialized(all_stimuli, np.ones(len(all_stimuli)),
                                 hrf_single, np.linspace(0, 239 * 2 - 2, 239))
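# (np.linspace(0, 239 * 2 - 2, 239) is a TR-spaced recording grid: 239 scan
#  times at TR = 2 s, i.e. 0, 2, ..., 476 s -- presumably one per retained volume.)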

##################
#    np.convolve #
##################

# Suppose that TR=2. We know this is not a good assumption.
# Also need to look into the hrf function.
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols = data.shape[-1]
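
# A minimal sketch of the np.convolve route these values feed into (the same
# trimming pattern appears in the later examples); `neural_prediction_example`
# is only a hypothetical stand-in for an on/off course sampled once per TR.
neural_prediction_example = np.zeros(n_vols)
neural_prediction_example[10:15] = 1  # hypothetical block of "on" volumes
hrf_long_example = np.convolve(neural_prediction_example, hrf_at_trs)
# np.convolve returns n_vols + len(hrf_at_trs) - 1 samples; keep one per volume
hrf_per_volume_example = hrf_long_example[:n_vols]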

# creating the .txt file for the events2neural function
cond_all = np.row_stack((cond1, cond2, cond3))
all_tr_times = np.arange(n_vols) * TR


cond_all = np.loadtxt(condition_location + "cond_all.txt")


#################
# First Attempt #
#################

# The first approach lets the Fourier terms be fit separately to each voxel,
# potentially over-correcting and masking some of the response to neural stimulation.

# X matrix
X = np.ones((n_vols, 9))  # 9 columns: extra room for the Fourier regressors
X[:, 1] = convolution_specialized(cond_all[:, 0], np.ones(len(cond_all)), hrf_single, all_tr_times)
X[:, 2] = np.linspace(-1, 1, num=X.shape[0])  # drift
X[:, 3:] = fourier_creation(X.shape[0], 3)[:, 1:]
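
# A sketch of the kind of basis fourier_creation is used for here. Assumption:
# it returns an intercept column followed by sine/cosine pairs over the scan,
# which would explain why column 0 is dropped above; the project's real
# implementation may order or scale the terms differently.
def fourier_creation_sketch(n, num_pairs):
    t = np.arange(n)
    basis = np.ones((n, 2 * num_pairs + 1))  # column 0: intercept
    for k in range(1, num_pairs + 1):
        basis[:, 2 * k - 1] = np.sin(2 * np.pi * k * t / n)
        basis[:, 2 * k] = np.cos(2 * np.pi * k * t / n)
    return basis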

# modeling voxel hemodynamic response
beta, junk = glm_multiple(data, X)
MRSS, fitted, residuals = glm_diagnostics(beta, X, data)
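
# A plain least-squares sketch of what the glm_multiple / glm_diagnostics pair
# is assumed to compute (the project's functions may differ in return values
# and in how they handle the 4-D layout): per-voxel coefficients, fitted time
# courses, residuals, and the mean residual sum of squares.
def glm_sketch(data_4d, design):
    n_scans = design.shape[0]
    Y = data_4d.reshape(-1, n_scans).T             # time x voxels
    beta_hat = np.linalg.pinv(design).dot(Y)       # least-squares coefficients
    fitted_2d = design.dot(beta_hat)               # predicted time courses
    residuals_2d = Y - fitted_2d
    df = n_scans - np.linalg.matrix_rank(design)
    mrss = np.sum(residuals_2d ** 2, axis=0) / df  # one value per voxel
    return beta_hat, fitted_2d, residuals_2d, mrss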

# individual voxel analysis

plt.plot(all_tr_times, data[41, 47, 2], label="actual", color="b")
plt.plot(all_tr_times, fitted[41, 47, 2], label="predicted", color="r")
plt.title("Data for sub001, voxel [41, 47, 2],fourier 3 fit to voxel")
plt.xlabel("Time")
plt.ylabel("Hemodynamic response")
plt.legend(loc="upper right", shadow=True, fontsize="smaller")
##############
# Example #7 #
##############

def test_convolution():
	#################
	# i. Can the user-created functions match np.convolve on its own territory?

	TR = 2.5
	tr_times = np.arange(0, 30, TR)
	hrf_at_trs = np.array([hrf_single(x) for x in tr_times])

	n_vols = 173
	neural_prediction = events2neural(location_to_class_data+'ds114_sub009_t2r1_cond.txt',TR,n_vols)
	all_tr_times = np.arange(173) * TR


	##################
	# a. np.convolve #
	##################


	testconv_np = np.convolve(neural_prediction, hrf_at_trs)  # convolve the on/off course with the HRF sampled at the TRs
	N = len(neural_prediction)  # N == n_vols == 173
	M = len(hrf_at_trs)  # M == 12
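	# np.convolve returns N + M - 1 samples; keep only the first N, one per volume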
	testconv_np=testconv_np[:N]

	#####################
	# b. user functions #
	#####################

	#--------#
	# second #

	testconv_2 = convolution(all_tr_times,neural_prediction,hrf_single)


	#-------#
	# third #

	testconv_3 = convolution_specialized(all_tr_times,neural_prediction,
		hrf_single,all_tr_times)


	#--------#
	# fourth #

	on_off = np.zeros(174)
	real_times,on_off[:-1] = np.linspace(0,432.5,173+1),neural_prediction
	hrf_function,TR,record_cuts= hrf_single, 2.5 ,np.linspace(0,432.5,173+1)
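	# real_times / record_cuts: 174 points spaced TR = 2.5 s apart over [0, 432.5];
	# on_off pads the 173-sample neural_prediction with one trailing zero so its
	# length matches real_times.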
	#
	testconv_4_1 = np_convolve_30_cuts(real_times,on_off,hrf_function,TR,record_cuts,cuts=1)

	testconv_4_15 = np_convolve_30_cuts(real_times,on_off,hrf_function,TR,record_cuts,cuts=15)


	testconv_4_30 = np_convolve_30_cuts(real_times,on_off,hrf_function,TR,record_cuts,cuts=30)


	#-------#
	# fifth #

	testconv_5 = fast_convolution(all_tr_times,neural_prediction,fast_hrf,all_tr_times)

	additional_runs=[testconv_np,testconv_2,testconv_3,testconv_4_1,testconv_4_15,testconv_4_30,testconv_5]
	names=["testconv_np","testconv_2","testconv_3","testconv_4_1","testconv_4_15","testconv_4_30","testconv_5"]
	print("Max difference between model and testconv_np:")
	for i,my_convolved in enumerate(additional_runs):
		if my_convolved.shape[0]==testconv_np.shape[0]:
			print(names[i],max(abs(testconv_np-my_convolved)))
		else:
			print(names[i],max(abs(testconv_np-my_convolved[:-1])))


	# Actual asserts
	for i,my_convolved in enumerate(additional_runs):
		if my_convolved.shape[0]==testconv_np.shape[0]:
			assert max(abs(testconv_np-my_convolved)) < .0001
		else:
			assert max(abs(testconv_np-my_convolved[:-1])) < .0001
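
# A slightly more idiomatic way to express the same tolerance check (a sketch,
# not part of the original test): np.testing.assert_allclose raises with a
# readable report when any element differs by more than atol.
def check_against_reference(reference, candidate, atol=1e-4):
	trimmed = candidate[:reference.shape[0]]  # drop a possible trailing sample
	np.testing.assert_allclose(trimmed, reference, atol=atol)
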
##############
# Example #9 #
##############

for i,name in enumerate(cond_string):
	neural_prediction = events2neural(condition_location+name,TR,n_vols)
	hrf_long = np.convolve(neural_prediction, hrf_at_trs)
	X_np[:,i+1] = hrf_long[:-(len(hrf_at_trs)-1)]

all_tr_times = np.arange(n_vols) * TR


###############################
#     convolution_specialized #
###############################
X_my = np.ones((n_vols,4))

conds = [cond1[:,0],cond2[:,0],cond3[:,0]]
for i,cond in enumerate(conds):
	X_my[:,i+1]=convolution_specialized(cond,np.ones(len(cond)),hrf_single,all_tr_times)
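# X_my: column 0 is the intercept; columns 1-3 hold one convolved regressor per
# condition, built directly on the TR time grid with convolution_specialized
# (mirroring the per-condition np.convolve design X_np above).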


##########
#    GLM #  
##########


###################
#     np.convolve #
###################

B_np,junk=glm_multiple(data,X_np)

###############################
#     convolution_specialized #
###############################
###############
# Example #10 #
###############

one_zeros=np.zeros(40)  # assumed definition: a 40-point on/off indicator, implied by the scatter over np.arange(40) below
one_zeros[16:20]=1


plt.scatter(np.arange(40),one_zeros)
plt.xlim(-1,40)
plt.title("Stimulus pattern")
plt.savefig(location_of_created_images+"on_off_pattern.png")
plt.close()


plt.plot(np.linspace(0,30,200),np.array([hrf_single(x) for x in np.linspace(0,30,200)]))
plt.title("Single HRF, started at t=0")
plt.savefig(location_of_created_images+"hrf_pattern.png")
plt.close()

convolved=convolution_specialized(np.arange(40),one_zeros,hrf_single,np.linspace(0,60,300))
plt.plot(np.linspace(0,60,300),convolved)
plt.title("Convolution")
plt.savefig(location_of_created_images+"initial_convolved.png")
plt.close()

colors=["#CCCCFF","#C4C3D0","#92A1CF","#2A52BE","#003399","#120A8F","#000080","#002366"]

###############
# Example #11 #
###############

one_zeros = np.zeros(40)  # assumed definition: a 40-point on/off indicator, implied by the scatter over np.arange(40) below
one_zeros[4] = 1
one_zeros[16:20] = 1

plt.scatter(np.arange(40), one_zeros)
plt.xlim(-1, 40)
plt.title("Stimulus pattern")
plt.savefig(location_of_created_images + "on_off_pattern.png")
plt.close()

plt.plot(np.linspace(0, 30, 200),
         np.array([hrf_single(x) for x in np.linspace(0, 30, 200)]))
plt.title("Single HRF, started at t=0")
plt.savefig(location_of_created_images + "hrf_pattern.png")
plt.close()

convolved = convolution_specialized(np.arange(40), one_zeros, hrf_single,
                                    np.linspace(0, 60, 300))
plt.plot(np.linspace(0, 60, 300), convolved)
plt.title("Convolution")
plt.savefig(location_of_created_images + "initial_convolved.png")
plt.close()

colors = [
    "#CCCCFF", "#C4C3D0", "#92A1CF", "#2A52BE", "#003399", "#120A8F",
    "#000080", "#002366"
]

xx = np.linspace(0, 30, 3001)

i = 3
one_zeros_2 = np.zeros(3001)
one_zeros_2[(2 * i * 100 - 15):(2 * i * 100 + 15)] = .6
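# xx has 3001 samples over 0-30 s (100 per second), so this marks a ~0.3 s "on"
# block of amplitude 0.6 centred near t = 2 * i = 6 s.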
###############
# Example #12 #
###############

TR = 2
all_tr_times = np.arange(n_vols) * TR

cond_all = np.loadtxt(condition_location + "cond_all.txt")

#################
# First Attempt #
#################

# The first approach lets the Fourier terms be fit separately to each voxel,
# potentially over-correcting and masking some of the response to neural stimulation.

# X matrix
X = np.ones((n_vols, 9))  # 9 columns: extra room for the Fourier regressors
X[:, 1] = convolution_specialized(cond_all[:, 0], np.ones(len(cond_all)),
                                  hrf_single, all_tr_times)
X[:, 2] = np.linspace(-1, 1, num=X.shape[0])  # drift
X[:, 3:] = fourier_creation(X.shape[0], 3)[:, 1:]

# modeling voxel hemodynamic response
beta, junk = glm_multiple(data, X)
MRSS, fitted, residuals = glm_diagnostics(beta, X, data)

# individual voxel analysis

plt.plot(all_tr_times, data[41, 47, 2], label="actual", color="b")
plt.plot(all_tr_times, fitted[41, 47, 2], label="predicted", color="r")
plt.title("Data for sub001, voxel [41, 47, 2],fourier 3 fit to voxel")
plt.xlabel("Time")
plt.ylabel("Hemodynamic response")
plt.legend(loc='upper right', shadow=True, fontsize="smaller")
cond_all = np.array(
    sorted(list(cond2[:, 0]) + list(cond3[:, 0]) +
           list(cond1[:, 0])))  # could also just use x_s_array

#--------#
# second #
#--------#

conv_2 = convolution(cond_all, np.ones(len(cond_all)), hrf_single)
scaled_2 = (conv_2 - np.mean(conv_2)) / (2 * np.std(conv_2)) + .4
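# (conv_2 - mean) / (2 * std) centres the convolved course and shrinks it to
# roughly half a standard-deviation unit; the + .4 shifts the baseline,
# presumably so it overlays nicely on the 0/1 stimulus when plotted.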

#-------#
# third #
#-------#

conv_3 = convolution_specialized(cond_all, np.ones(len(cond_all)), hrf_single,
                                 np.linspace(0, 239 * 2 - 2, 239))
scaled_3 = (conv_3 - np.mean(conv_3)) / (2 * np.std(conv_3)) + .4

#--------#
# fourth #
#--------#

real_times, on_off = cond_all, np.ones(len(cond_all))
hrf_function, TR, record_cuts = hrf_single, 2, np.linspace(0, 239 * 2 - 2, 239)

conv_4_15 = np_convolve_30_cuts(real_times,
                                on_off,
                                hrf_function,
                                TR,
                                record_cuts,
                                cuts=15)

scaled_4_15 = (conv_4_15 - np.mean(conv_4_15)) / (2 * np.std(conv_4_15)) + .4

conv_4_30 = np_convolve_30_cuts(real_times,
                                on_off,
                                hrf_function,
                                TR,
                                record_cuts,
                                cuts=30)