def generate_rasters(self):
    """Build per-stimulus spike rasters for the currently selected test.

    Reads the test selected in the UI from the current HDF5 file,
    detects spikes in every repetition with the UI threshold, and
    returns a dict mapping '<freq>_<spl>' -> pandas.DataFrame whose
    columns are repetition indices ('0', '1', ...) and whose values are
    spike times in milliseconds.  Returns None when the filename is
    invalid, the test cannot be found, or the stimulus/data layout is
    unsupported.
    """
    filename = self.filename
    # Validate filename
    if not self.valid_filename(filename):
        return

    target_test = self.ui.comboBox_test_num.currentText()
    thresh = self.ui.doubleSpinBox_threshold.value()

    h_file = h5py.File(unicode(filename), 'r')
    try:  # FIX: guarantee the file is closed on every exit path
        # Find target segment (last match wins, as before)
        target_seg = None
        for segment in h_file.keys():
            for test in h_file[segment].keys():
                if target_test == test:
                    target_seg = segment
                    target_test = test
        if target_seg is None:
            # FIX: previously fell through to an UnboundLocalError
            self.add_message('Test "' + str(target_test) + '" not found in file.')
            return

        fs = h_file[target_seg].attrs['samplerate_ad']
        reps = h_file[target_seg][target_test].attrs['reps']
        trace_data = h_file[target_seg][target_test].value
        # NOTE(review): 'stim' attr is eval'd as Python source; unsafe on
        # untrusted files -- consider json/ast.literal_eval upstream.
        stim_info = eval(h_file[target_seg][target_test].attrs['stim'])

        if stim_info[1]['components'][0]['stim_type'] != 'Pure Tone':
            self.add_message('Cannot generate raster with stim type "' +
                             str(stim_info[1]['components'][0]['stim_type']) + '".')
            return

        ndim = len(trace_data.shape)
        if ndim == 4:
            # Channel choice is loop-invariant; read the combobox once.
            tchan = int(self.ui.comboBox_channel.currentText()
                        .replace('channel_', '')) - 1
        elif ndim != 3:
            # FIX: this early return used to leak the open file handle
            self.add_message('Cannot handle trace_data of shape: ' +
                             str(trace_data.shape))
            return

        autoRasters = {}
        # Stimulus 0 is the control/silence slot; start at 1.
        for tStim in range(1, len(stim_info)):
            freq = int(stim_info[tStim]['components'][0]['frequency'])
            spl = int(stim_info[tStim]['components'][0]['intensity'])
            trace_key = str(freq) + '_' + str(spl)
            spikeTrains = pd.DataFrame([])
            nspk = 0
            for tRep in range(reps):
                if ndim == 3:
                    trace = trace_data[tStim][tRep]
                else:
                    trace = trace_data[tStim][tRep][tchan]
                spike_times = 1000 * np.array(get_spike_times(trace, thresh, fs))
                spike_times_s = pd.Series(spike_times)
                if spike_times_s.size > nspk:
                    # Widen the index so the longest spike train fits.
                    spikeTrains = spikeTrains.reindex(spike_times_s.index)
                    nspk = spike_times_s.size
                spikeTrains[str(tRep)] = spike_times_s
            autoRasters[trace_key] = spikeTrains
        return autoRasters
    finally:
        h_file.close()
def graph_multi_io_test(self):
    """Overlay intensity/response (I/O) curves for every checked test.

    For each checked row, builds intensity-keyed rasters (threshold taken
    from that row's spinbox, channel from that row's combobox), summarizes
    them with ResponseStats, and plots all curves on one shared axes.
    """
    h_file = h5py.File(unicode(self.filename), 'r')
    try:  # FIX: guarantee the file is closed on every exit path
        # Rows whose checkbox is ticked select the tests to plot.
        target_rows = []
        for i in range(len(self.checkboxes)):
            if self.checkboxes[i].checkState():
                target_rows.append(i)

        axes = []
        for row in target_rows:
            # Find the segment holding this row's test (last match wins).
            target_seg = None
            target_test = None
            for segment in h_file.keys():
                for test in h_file[segment].keys():
                    if self.checkboxes[row].text() == test:
                        target_seg = segment
                        target_test = test
            if target_seg is None:
                # FIX: previously an UnboundLocalError; skip missing tests.
                continue

            fs = h_file[target_seg].attrs['samplerate_ad']
            reps = h_file[target_seg][target_test].attrs['reps']
            trace_data = h_file[target_seg][target_test].value
            # NOTE(review): 'stim' attr is eval'd; unsafe on untrusted files.
            stim_info = eval(h_file[target_seg][target_test].attrs['stim'])

            autoRasters = {}
            # Stimulus 0 is the control/silence slot; start at 1.
            for tStim in range(1, len(stim_info)):
                spl = int(stim_info[tStim]['components'][0]['intensity'])
                traceKey = 'None_' + str(spl).zfill(2)
                spikeTrains = pd.DataFrame([])
                nspk = 0
                for tRep in range(reps):
                    if len(trace_data.shape) == 3:
                        trace = trace_data[tStim][tRep]
                    elif len(trace_data.shape) == 4:
                        tchan = int(self.comboboxes[row].currentText()
                                    .replace('channel_', '')) - 1
                        trace = trace_data[tStim][tRep][tchan]
                    else:
                        # FIX: this return used to leak the open file handle
                        self.add_message('Cannot handle trace_data of shape: ' +
                                         str(trace_data.shape))
                        return
                    spike_times = 1000 * np.array(
                        get_spike_times(trace, self.spnboxes[row].value(), fs))
                    spike_times_s = pd.Series(spike_times)
                    if spike_times_s.size >= nspk:
                        # Widen the index so the longest spike train fits.
                        spikeTrains = spikeTrains.reindex(spike_times_s.index)
                        nspk = spike_times_s.size
                    spikeTrains[str(tRep)] = spike_times_s
                autoRasters[traceKey] = spikeTrains

            # Summarize each intensity's raster into mean/STD response.
            tuning = []
            for traceKey in sorted(autoRasters.keys()):
                spl = int(traceKey.split('_')[-1])
                res = ResponseStats(autoRasters[traceKey])
                tuning.append({'intensity': spl,
                               'response': res[0],
                               'responseSTD': res[1]})
            tuningCurves = pd.DataFrame(tuning)

            label = (str(target_test) + ' : ' +
                     str(self.spnboxes[row].value()) + ' V')
            if axes:
                # Reuse the axes created by the first row's plot.
                tuningCurves.plot(x='intensity', y='response', ax=axes,
                                  yerr='responseSTD', capthick=1, label=label)
            else:
                axes = tuningCurves.plot(x='intensity', y='response',
                                         yerr='responseSTD', capthick=1,
                                         label=label)

        plt.legend(loc='upper left', fontsize=12, frameon=True)
        sns.despine()
        plt.grid(False)
        plt.xlabel('Intensity (dB)', size=14)
        plt.ylabel('Response Rate (Hz)', size=14)
        plt.tick_params(axis='both', which='major', labelsize=14)
        plt.title(str.split(str(self.filename), '/')[-1].replace('.hdf5', '') +
                  ' Multi I/O')
        plt.show()
    finally:
        h_file.close()
def graph_io_test(self):
    """Plot the intensity/response (I/O) curve for the selected test.

    Builds intensity-keyed rasters from the selected test, summarizes
    each with ResponseStats, and shows response rate vs. intensity with
    STD error bars.
    """
    filename = self.filename = self.ui.lineEdit_file_name.text()
    # Validate filename
    if not self.valid_filename(filename):
        return

    target_test = self.ui.comboBox_test_num.currentText()
    thresh = self.ui.doubleSpinBox_threshold.value()

    h_file = h5py.File(unicode(filename), 'r')
    try:  # FIX: guarantee the file is closed on every exit path
        # Find target segment (last match wins, as before)
        target_seg = None
        for segment in h_file.keys():
            for test in h_file[segment].keys():
                if target_test == test:
                    target_seg = segment
                    target_test = test
        if target_seg is None:
            # FIX: previously fell through to an UnboundLocalError
            self.add_message('Test "' + str(target_test) + '" not found in file.')
            return

        fs = h_file[target_seg].attrs['samplerate_ad']
        reps = h_file[target_seg][target_test].attrs['reps']
        trace_data = h_file[target_seg][target_test].value
        # NOTE(review): 'stim' attr is eval'd; unsafe on untrusted files.
        stim_info = eval(h_file[target_seg][target_test].attrs['stim'])

        ndim = len(trace_data.shape)
        if ndim == 4:
            # Channel choice is loop-invariant; read the combobox once.
            tchan = int(self.ui.comboBox_channel.currentText()
                        .replace('channel_', '')) - 1
        elif ndim != 3:
            # FIX: this early return used to leak the open file handle
            self.add_message('Cannot handle trace_data of shape: ' +
                             str(trace_data.shape))
            return

        autoRasters = {}
        # Stimulus 0 is the control/silence slot; start at 1.
        for tStim in range(1, len(stim_info)):
            spl = int(stim_info[tStim]['components'][0]['intensity'])
            traceKey = 'None_' + str(spl).zfill(2)
            spikeTrains = pd.DataFrame([])
            nspk = 0
            for tRep in range(reps):
                if ndim == 3:
                    trace = trace_data[tStim][tRep]
                else:
                    trace = trace_data[tStim][tRep][tchan]
                spike_times = 1000 * np.array(get_spike_times(trace, thresh, fs))
                spike_times_s = pd.Series(spike_times)
                if spike_times_s.size >= nspk:
                    # Widen the index so the longest spike train fits.
                    spikeTrains = spikeTrains.reindex(spike_times_s.index)
                    nspk = spike_times_s.size
                spikeTrains[str(tRep)] = spike_times_s
            autoRasters[traceKey] = spikeTrains
    finally:
        h_file.close()

    # Summarize each intensity's raster into mean/STD response.
    tuning = []
    for traceKey in sorted(autoRasters.keys()):
        spl = int(traceKey.split('_')[-1])
        res = ResponseStats(autoRasters[traceKey])
        tuning.append({'intensity': spl,
                       'response': res[0],
                       'responseSTD': res[1]})
    tuningCurves = pd.DataFrame(tuning)
    tuningCurves.plot(x='intensity', y='response',
                      yerr='responseSTD', capthick=1, label=str(target_test))
    plt.legend(loc='upper left', fontsize=12, frameon=True)
    sns.despine()
    plt.grid(False)
    plt.xlabel('Intensity (dB)', size=14)
    plt.ylabel('Response Rate (Hz)', size=14)
    plt.tick_params(axis='both', which='major', labelsize=14)
    plt.title(str.split(str(filename), '/')[-1].replace('.hdf5', '') + ' ' +
              str(self.ui.comboBox_test_num.currentText()).replace('test_', 'Test '))
    plt.show()
def graph_historgram(self):
    """Show a histogram (1 ms bins) of spike times for the selected trace.

    Pools spike times across all repetitions of the trace selected in the
    UI and plots their distribution over the trace duration.  Returns the
    histogram axes, or None on invalid input.

    NOTE(review): the method name ('historgram') is misspelled but kept
    unchanged for caller compatibility.
    """
    filename = self.filename = self.ui.lineEdit_file_name.text()
    # Validate filename
    if not self.valid_filename(filename):
        return

    target_test = self.ui.comboBox_test_num.currentText()
    thresh = self.ui.doubleSpinBox_threshold.value()

    h_file = h5py.File(unicode(filename), 'r')
    try:  # FIX: guarantee the file is closed on every exit path
        # Find target segment (last match wins, as before)
        target_seg = None
        for segment in h_file.keys():
            for test in h_file[segment].keys():
                if target_test == test:
                    target_seg = segment
                    target_test = test
        if target_seg is None:
            # FIX: previously fell through to an UnboundLocalError
            self.add_message('Test "' + str(target_test) + '" not found in file.')
            return

        target_trace = int(self.ui.comboBox_trace.currentText()
                           .replace('trace_', '')) - 1
        fs = h_file[target_seg].attrs['samplerate_ad']
        reps = h_file[target_seg][target_test].attrs['reps']
        trace_data = h_file[target_seg][target_test].value
        # Trace length in milliseconds; sets the histogram range/bins.
        duration = trace_data.shape[-1] / fs * 1000

        ndim = len(trace_data.shape)
        if ndim == 4:
            # Channel choice is loop-invariant; read the combobox once.
            tchan = int(self.ui.comboBox_channel.currentText()
                        .replace('channel_', '')) - 1
        elif ndim != 3:
            # FIX: this early return used to leak the open file handle
            self.add_message('Cannot handle trace_data of shape: ' +
                             str(trace_data.shape))
            return

        tStim = target_trace
        spikeTrains = pd.DataFrame([])
        nspk = 0
        for tRep in range(reps):
            if ndim == 3:
                trace = trace_data[tStim][tRep]
            else:
                trace = trace_data[tStim][tRep][tchan]
            spike_times = 1000 * np.array(get_spike_times(trace, thresh, fs))
            spike_times_s = pd.Series(spike_times)
            if spike_times_s.size > nspk:
                # Widen the index so the longest spike train fits.
                spikeTrains = spikeTrains.reindex(spike_times_s.index)
                nspk = spike_times_s.size
            spikeTrains[str(tRep)] = spike_times_s
    finally:
        h_file.close()

    rasters = spikeTrains
    # Pool spikes across repetitions (columns) into one flat Series.
    # (The original also built parallel spks/trns arrays here that were
    # never used; that dead work has been removed.)
    if len(rasters.shape) > 1:
        spikeTimes = rasters.stack()
    else:
        spikeTimes = rasters.dropna()

    # --- Histogram of spike times (1 ms bins) ---
    sns.set_style("white")
    sns.set_style("ticks")
    histogram_f = plt.figure(figsize=(8, 3))
    axHist = spikeTimes.hist(bins=int(duration / 1), range=(0, duration))
    sns.despine()
    plt.xlim(0, duration)
    plt.xlabel('Time (ms)', size=14)
    plt.ylabel('Number of spikes', size=14)
    plt.title(str.split(str(filename), '/')[-1].replace('.hdf5', '') + ' ' +
              str(self.ui.comboBox_test_num.currentText()).replace('test_', 'Test '))
    plt.tick_params(axis='both', which='major', labelsize=14)
    plt.grid(False)
    histogram_f.show()
    return axHist
def generate_tuning_curve(self):
    """Draw a frequency/intensity tuning-curve contour plot.

    For every non-silence stimulus of the selected test, counts spikes
    (mean per presentation) at the UI threshold and renders a filled
    contour of response over frequency (kHz) x intensity (dB).
    """
    if self.valid_filename():
        h_file = h5py.File(unicode(self.filename), 'r')
        target_test = self.ui.comboBox_test_num.currentText()
    else:
        return

    try:  # FIX: the file handle was previously never closed at all
        # Find target segment (last match wins, as before)
        target_seg = None
        for segment in h_file.keys():
            for test in h_file[segment].keys():
                if target_test == test:
                    target_seg = segment
                    target_test = test
        if target_seg is None:
            # FIX: previously produced a KeyError on h_file[[]]
            self.add_message('Test "' + str(target_test) + '" not found in file.')
            return

        trace_data = h_file[target_seg][target_test].value
        fs = h_file[target_seg].attrs['samplerate_ad']
        # NOTE(review): 'stim' attr is eval'd; unsafe on untrusted files.
        stim_info = eval(h_file[target_seg][target_test].attrs['stim'])
    finally:
        h_file.close()

    traces = trace_data.shape[0]
    reps = trace_data.shape[1]
    ndim = len(trace_data.shape)
    # Multi-channel recordings are 4-D: (trace, rep, channel, sample).
    channels = trace_data.shape[2] if ndim > 3 else 1

    if ndim == 4:
        if self.ui.comboBox_channel.currentText() != '':
            target_chan = int(self.ui.comboBox_channel.currentText()
                              .replace('channel_', '')) - 1
    elif ndim != 3:
        # FIX: unsupported shapes used to crash later at min(frequency)
        self.add_message('Cannot handle trace_data of shape: ' +
                         str(trace_data.shape))
        return

    # Get the values from the spinbox
    thresh = self.ui.doubleSpinBox_threshold.value()

    # Optional analysis window, hoisted out of the loops (loop-invariant).
    windowed = self.ui.groupBoxWindow.isChecked()
    if windowed:
        # FIX: int() added -- float slice indices from np.floor are
        # rejected by modern numpy.
        x_min = int(np.floor(self.ui.doubleSpinBox_xmin.value() * fs))
        x_max = int(np.floor(self.ui.doubleSpinBox_xmax.value() * fs))

    frequency = []
    intensity = []
    spike_count = {}
    for t in range(traces):
        comp = stim_info[t]['components'][0]
        if comp['stim_type'] == 'silence':
            continue
        intensity.append(comp['intensity'])
        frequency.append(comp['frequency'] / 1000)
        spikes = 0
        for r in range(reps):
            if ndim == 4:
                trace = trace_data[t][r][target_chan]
                # Windowing was only ever applied to 4-D data originally.
                if windowed:
                    trace = trace[x_min:x_max]
            else:
                trace = trace_data[t][r]
            spike_times = 1000 * np.array(
                get_spike_times(trace, thresh, fs, self.ui.view._abs))
            spikes += len(spike_times)
        # Mean spikes per presentation, keyed by (kHz, dB).
        spike_count[(comp['frequency'] / 1000,
                     comp['intensity'])] = float(spikes) / float(reps)

    # Unique, sorted axis values.
    frequency = sorted(set(frequency))
    intensity = sorted(set(intensity))
    xlist = np.linspace(min(frequency), max(frequency), len(frequency))
    ylist = np.linspace(min(intensity), max(intensity), len(intensity))
    X, Y = np.meshgrid(xlist, ylist)
    Z = np.empty([len(intensity), len(frequency)])
    for y in range(len(intensity)):
        for x in range(len(frequency)):
            Z[y][x] = spike_count[(frequency[x], intensity[y])]

    plt.figure()
    if windowed:
        # Set the min, max and number of contour levels from the UI.
        levels = np.linspace(self.ui.doubleSpinBox_zmin.value(),
                             self.ui.doubleSpinBox_zmax.value(),
                             num=(self.ui.spinBoxContourLevels.value() + 1))
        cp = plt.contourf(X, Y, Z, levels)
    else:
        # Auto assign contour levels
        cp = plt.contourf(X, Y, Z)
    plt.colorbar(cp, label='Mean Spikes Per Presentation')

    if channels == 1:
        plt.title(
            str.split(str(self.filename), '/')[-1].replace('.hdf5', '') + ' ' +
            str(self.ui.comboBox_test_num.currentText()).replace('test_', 'Test '))
    else:
        plt.title(
            str.split(str(self.filename), '/')[-1].replace('.hdf5', '') + ' ' +
            str(self.ui.comboBox_test_num.currentText()).replace('test_', 'Test ') +
            ' ' +
            str(self.ui.comboBox_channel.currentText()).replace('channel_', 'Channel '))
    plt.xlabel('Frequency (kHz)')
    plt.ylabel('Intensity (dB)')
    plt.figtext(
        .02, .02,
        'Threshold: ' + str(self.ui.doubleSpinBox_threshold.value()) + ' V')
    plt.show()