/
probabilisticRewardTaskPerformance_SpikeAnalysis.py
374 lines (315 loc) · 16.3 KB
/
probabilisticRewardTaskPerformance_SpikeAnalysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
import glob

import matplotlib
import matplotlib as mpl
import numpy as np
import scipy as sp
import scipy.io  # required: 'import scipy' alone does not expose sp.io.loadmat
import tables
from matplotlib import mlab
from matplotlib import pyplot as plt
from neo import io
from numpy import sin, linspace, pi, loadtxt
from plexon import plexfile

from basicAnalysis import computePSTH, remap_spike_channels
def probabilisticRewardTask_PSTH(hdf_filename, filename, block_num):
    '''
    Compute and plot PSTHs aligned to the center-hold state of successful
    trials in the probabilistic reward task.

    Four figures are saved to the Mario_Performance_figs directory: the raw
    PSTH over all successful trials, a smoothed PSTH over all successful
    trials, and smoothed PSTHs restricted to trials where the low-value (LV)
    or high-value (HV) target was selected.

    Parameters:
    hdf_filename -- name of the behavior .hdf file under /storage/rawdata/hdf/
    filename -- TDT tank name (e.g. 'Mario20160524')
    block_num -- block number within the TDT tank

    Returns: None (figures are written to disk).
    '''
    # Define file paths and names
    plx_filename1 = 'Offline_eNe1.plx'
    plx_filename2 = 'Offline_eNe2.plx'
    hdf_location = '/storage/rawdata/hdf/' + hdf_filename
    block_dir = '/home/srsummerson/storage/tdt/' + filename + '/Block-' + str(block_num) + '/'

    # Get spike data from the two Plexon files.
    plx1 = plexfile.openFile(block_dir + plx_filename1)
    spike_file1 = plx1.spikes[:].data
    plx2 = plexfile.openFile(block_dir + plx_filename2)
    spike_file2 = plx2.spikes[:].data
    print("Loaded spike data.")

    # Unpack behavioral data.
    hdf = tables.openFile(hdf_location)
    state = hdf.root.task_msgs[:]['msg']        # task state labels
    state_time = hdf.root.task_msgs[:]['time']  # hdf row index of each state

    # A successful trial ends in 'check_reward'; the target-hold state is 2
    # states earlier and the center hold 4 states earlier.
    ind_check_reward_states = np.ravel(np.nonzero(state == 'check_reward'))
    ind_hold_center_states = ind_check_reward_states - 4
    center_hold_times = state_time[ind_hold_center_states]

    # NOTE(review): the original also tabulated per-block target choices
    # (target1/target3, trial1/trial3, reward schedules, cursor, ...) that
    # were never used afterwards and crashed on sessions with fewer than
    # 100/200 successful trials; that dead code has been removed.

    # Alignment times (seconds on the TDT clock) for each center hold.
    neural_data_center_hold_times = np.zeros(len(center_hold_times))

    # Load syncing data for hdf file and TDT recording.
    hdf_times = dict()
    mat_filename = filename + '_b' + str(block_num) + '_syncHDF.mat'
    sp.io.loadmat('/home/srsummerson/storage/syncHDF/' + mat_filename, hdf_times)
    print("Loaded sync data.")
    # BUGFIX: keep hdf_rows as an array -- the original converted it to a
    # Python list ("so the index method can be used", which never happened)
    # and the subtraction below only worked via numpy scalar reflection.
    hdf_rows = np.ravel(hdf_times['row_number'])
    dio_tdt_sample = np.ravel(hdf_times['tdt_samplenumber'])
    dio_freq = np.ravel(hdf_times['tdt_dio_samplerate'])

    # Find corresponding timestamps for neural data from behavioral time points.
    for i, time in enumerate(center_hold_times):
        hdf_index = np.argmin(np.abs(hdf_rows - time))
        neural_data_center_hold_times[i] = dio_tdt_sample[hdf_index] / dio_freq

    # PSTH parameters.
    window_before = 2   # PSTH time window before alignment point in seconds
    window_after = 3    # PSTH time window after alignment point in seconds
    binsize = 100       # spike bin size in ms

    # Trial subsets (free-choice and instructed) by which target was held.
    target_state = state[ind_check_reward_states - 2]
    choose_lv = np.ravel(np.nonzero(target_state == 'hold_targetL'))
    choose_hv = np.ravel(np.nonzero(target_state == 'hold_targetH'))

    psth_all_trials, smooth_psth_all_trials, labels_all_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times,
        window_before, window_after, binsize)
    psth_lv_trials, smooth_psth_lv_trials, labels_lv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_lv],
        window_before, window_after, binsize)
    psth_hv_trials, smooth_psth_hv_trials, labels_hv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_hv],
        window_before, window_after, binsize)
    psth_time_window = np.arange(-window_before,
                                 window_after - float(binsize) / 1000,
                                 float(binsize) / 1000)

    print("Plotting results.")
    fig_prefix = ('/home/srsummerson/code/analysis/Mario_Performance_figs/'
                  + filename + '_b' + str(block_num))
    cmap_all = mpl.cm.brg

    def _plot_psth(psth_dict, title, fig_suffix, min_rate=None, legend=False):
        # One curve per unit; units whose peak rate is <= min_rate are skipped.
        plt.figure()
        n_units = float(len(psth_dict))
        for i, unit_name in enumerate(psth_dict.keys()):
            if (min_rate is None) or (np.max(psth_dict[unit_name]) > min_rate):
                plt.plot(psth_time_window, psth_dict[unit_name],
                         color=cmap_all(i / n_units), label=unit_name)
        plt.xlabel('Time (s)')
        plt.ylabel('spks/s')
        plt.title(title)
        if legend:
            plt.legend()
        plt.savefig(fig_prefix + fig_suffix)

    _plot_psth(psth_all_trials, 'PSTH', '_PSTH-CenterHold.svg')
    _plot_psth(smooth_psth_all_trials, 'Smooth PSTH',
               '_SmoothPSTH-CenterHold.svg', min_rate=10, legend=True)
    _plot_psth(smooth_psth_lv_trials,
               'Smooth PSTH for Trials with LV Target Selection',
               '_SmoothPSTH-CenterHold-LV.svg', min_rate=20, legend=True)
    _plot_psth(smooth_psth_hv_trials,
               'Smooth PSTH for Trials with HV Target Selection',
               '_SmoothPSTH-CenterHold-HV.svg', min_rate=20, legend=True)
    # BUGFIX: close every figure (the original closed only the last one and
    # leaked the other three); matches the sibling function's plt.close("all").
    plt.close("all")
    hdf.close()
    return
def probabilisticRewardTask_PSTH_WithChanMapping(hdf_filename, filename, block_num):
    '''
    Compute PSTHs for all sorted single-unit/multi-unit data using the plx
    files plus .txt files containing the channel numbers represented in each
    plx file. Use this when not all 96/64 channels are represented in the plx
    files. Assumes the channel numbers are stored in a .txt file of the same
    name as the plx file, with channel numbers delimited by commas.

    PSTHs are aligned to the center-hold state of successful trials and four
    figures are saved: raw and smoothed PSTHs over all successful trials, and
    smoothed PSTHs restricted to trials where the low-value (LV) or
    high-value (HV) target was selected.

    Parameters:
    hdf_filename -- name of the behavior .hdf file under /storage/rawdata/hdf/
    filename -- TDT tank name (e.g. 'Mario20160524')
    block_num -- block number within the TDT tank

    Returns: None (figures are written to disk).
    '''
    # Define file paths and names
    plx_filename1_prefix = 'Offline_eNe1'
    plx_filename2_prefix = 'Offline_eNe2'
    TDT_tank = '/home/srsummerson/storage/tdt/' + filename
    hdf_location = '/storage/rawdata/hdf/' + hdf_filename
    block_dir = TDT_tank + '/Block-' + str(block_num) + '/'

    # Unpack behavioral data.
    hdf = tables.openFile(hdf_location)
    state = hdf.root.task_msgs[:]['msg']        # task state labels
    state_time = hdf.root.task_msgs[:]['time']  # hdf row index of each state

    # A successful trial ends in 'check_reward'; the target-hold state is 2
    # states earlier and the center hold 4 states earlier.
    ind_check_reward_states = np.ravel(np.nonzero(state == 'check_reward'))
    ind_hold_center_states = ind_check_reward_states - 4
    center_hold_times = state_time[ind_hold_center_states]

    # NOTE(review): the original also tabulated per-block target choices
    # (target1/target3, trial1/trial3, reward schedules, cursor, ...) that
    # were never used afterwards and crashed on sessions with fewer than
    # 100/200 successful trials; that dead code has been removed.

    # Alignment times (seconds on the TDT clock) for each center hold.
    neural_data_center_hold_times = np.zeros(len(center_hold_times))

    # Load syncing data for hdf file and TDT recording.
    hdf_times = dict()
    mat_filename = filename + '_b' + str(block_num) + '_syncHDF.mat'
    sp.io.loadmat('/home/srsummerson/storage/syncHDF/' + mat_filename, hdf_times)
    print("Loaded sync data.")
    # BUGFIX: keep hdf_rows as an array -- the original converted it to a
    # Python list, which only worked by accident via numpy scalar reflection
    # in the subtraction below.
    hdf_rows = np.ravel(hdf_times['row_number'])
    dio_tdt_sample = np.ravel(hdf_times['tdt_samplenumber'])
    dio_freq = np.ravel(hdf_times['tdt_dio_samplerate'])

    # Find corresponding timestamps for neural data from behavioral time points.
    for i, time in enumerate(center_hold_times):
        hdf_index = np.argmin(np.abs(hdf_rows - time))
        neural_data_center_hold_times[i] = dio_tdt_sample[hdf_index] / dio_freq

    # PSTH parameters.
    window_before = 2   # PSTH time window before alignment point in seconds
    window_after = 3    # PSTH time window after alignment point in seconds
    binsize = 100       # spike bin size in ms

    # Trial subsets (free-choice and instructed) by which target was held.
    target_state = state[ind_check_reward_states - 2]
    choose_lv = np.ravel(np.nonzero(target_state == 'hold_targetL'))
    choose_hv = np.ravel(np.nonzero(target_state == 'hold_targetH'))

    print("Getting spike data.")
    # Channel numbers actually present in each plx file; remap the recorded
    # spike channel labels onto the true electrode numbering.
    eNe1_channs = loadtxt(block_dir + plx_filename1_prefix + '.txt', delimiter=',')
    eNe2_channs = loadtxt(block_dir + plx_filename2_prefix + '.txt', delimiter=',')
    plx1 = plexfile.openFile(block_dir + plx_filename1_prefix + '.plx')
    spike_file1 = remap_spike_channels(plx1.spikes[:].data, eNe1_channs)
    plx2 = plexfile.openFile(block_dir + plx_filename2_prefix + '.plx')
    spike_file2 = remap_spike_channels(plx2.spikes[:].data, eNe2_channs)

    print("Computing PSTHs.")
    psth_all_trials, smooth_psth_all_trials, labels_all_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times,
        window_before, window_after, binsize)
    psth_lv_trials, smooth_psth_lv_trials, labels_lv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_lv],
        window_before, window_after, binsize)
    psth_hv_trials, smooth_psth_hv_trials, labels_hv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_hv],
        window_before, window_after, binsize)
    psth_time_window = np.arange(-window_before,
                                 window_after - float(binsize) / 1000,
                                 float(binsize) / 1000)

    print("Plotting.")
    fig_prefix = ('/home/srsummerson/code/analysis/Mario_Performance_figs/'
                  + filename + '_b' + str(block_num))
    cmap_all = mpl.cm.brg

    def _plot_psth(psth_dict, title, fig_suffix, min_rate=None, legend=False):
        # One curve per unit; units whose peak rate is <= min_rate are skipped.
        # BUGFIX: iterate over the PSTH dict itself instead of indexing its
        # keys by range(len(all_channs)), which raised IndexError whenever
        # the number of sorted units differed from the number of channels.
        plt.figure()
        n_units = float(len(psth_dict))
        for i, unit_name in enumerate(psth_dict.keys()):
            if (min_rate is None) or (np.max(psth_dict[unit_name]) > min_rate):
                plt.plot(psth_time_window, psth_dict[unit_name],
                         color=cmap_all(i / n_units), label=unit_name)
        plt.xlabel('Time (s)')
        plt.ylabel('spks/s')
        plt.title(title)
        if legend:
            plt.legend()
        plt.savefig(fig_prefix + fig_suffix)

    _plot_psth(psth_all_trials, 'PSTH', '_PSTH-CenterHold.svg')
    _plot_psth(smooth_psth_all_trials, 'Smooth PSTH',
               '_SmoothPSTH-CenterHold.svg', min_rate=10, legend=True)
    _plot_psth(smooth_psth_lv_trials,
               'Smooth PSTH for Trials with LV Target Selection',
               '_SmoothPSTH-CenterHold-LV.svg', min_rate=20, legend=True)
    _plot_psth(smooth_psth_hv_trials,
               'Smooth PSTH for Trials with HV Target Selection',
               '_SmoothPSTH-CenterHold-HV.svg', min_rate=20, legend=True)
    plt.close("all")
    hdf.close()
    return
# Set up code for particular day and block
#hdf_filename = 'mari20160524_11_te2135.hdf'
#filename = 'Mario20160524'
#block_num = 1
#probabilisticRewardTask_PSTH(hdf_filename, filename, block_num)  # (example call; the '_SepSpikeFiles' variant named previously does not exist in this file)