def plot_traces(plotwindow=None, ichannel=0, vchannel=1):
    """
    Show traces in a figure

    Parameters
    ----------
    plotwindow : (float, float), optional
        Plot window (in ms from beginning of trace)
        None for whole trace. Default: None
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1
    """
    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 1)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, 0], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, 0], hasx=False, hasy=False, sharex=ax_currents)

    if plotwindow is not None:
        istart = int(plotwindow[0]/dt)
        istop = int(plotwindow[1]/dt)
    else:
        istart = 0
        istop = None

    for ntrace in range(stf.get_size_channel()):
        stf.set_trace(ntrace)

        # Plot current trace
        stf.set_channel(ichannel)
        trace = stf.get_trace()[istart:istop]
        ax_currents.plot(np.arange(len(trace))*dt, trace)

        # Plot voltage trace
        stf.set_channel(vchannel)
        trace = stf.get_trace()[istart:istop]
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

    # Reset active channel
    stf.set_channel(ichannel)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=ichannel))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=vchannel))
def baseline_from_linear_regression():
    """Fit a straight line to the active trace and return the regression
    results (slope, intercept, r value, p value, standard error)."""
    y_values_trace = stf.get_trace()
    x_values_trace = range(0, len(stf.get_trace()))
    results = scipy.stats.linregress(x_values_trace, y_values_trace)
    return results
def find_sample_points_of_detected_events(whole_trace_file,
                                          extracted_events_file, sweep_num):
    """Takes the window of detected events from stimfit and, for each event,
    runs through the full trace to pull out the time (in samples) of the
    event."""
    # Open and load the trace from the whole file
    stf.file_open(whole_trace_file)
    stf.set_trace(sweep_num)
    whole_trace = stf.get_trace()
    sampling_interval = stf.get_sampling_interval()

    # Open the extracted events file
    stf.file_open(extracted_events_file)
    time_points = []
    for trace in range(stf.get_size_channel()):
        stf.set_trace(trace)
        trace_to_search = stf.get_trace(trace)
        # Run the search with an updated start index;
        # start at sample 0 for the first pass
        if len(time_points) == 0:
            sample_start = 0
        else:
            sample_start = int(time_points[-1] / sampling_interval)
        output_index = sub_func_find_trace(trace_to_search, whole_trace,
                                           sample_start)
        time_point = output_index * sampling_interval
        time_points.append(time_point)
    return time_points
def hpfilter(n):
    """
    Perform a median smoothing filter on the active trace. Computationally
    this is achieved by a central simple moving median over a sliding window
    of n points. The function then subtracts the smoothed trace from the
    original trace. The function uses reflect (or bounce) end corrections.
    """
    # Check that the number of points in the sliding window is odd
    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply the smoothing filter
    l = stf.get_size_trace()
    padded_trace = np.pad(stf.get_trace(), (n - 1) // 2, 'reflect')
    filtered_trace = np.array(
        [np.median(padded_trace[j:n + j]) for j in range(l)])
    print("Window width was %g ms" % (stf.get_sampling_interval() * (n - 1)))

    # Subtract the smoothed trace from the original
    subtracted_trace = stf.get_trace() - filtered_trace

    return stf.new_window_list([subtracted_trace])
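# A minimal usage sketch for hpfilter(), assuming a trace is open in Stimfit
# and this module has been imported in the embedded Python shell; the window
# width of 199 points is an arbitrary illustration, not a recommended value.
#
#   hpfilter(199)   # median-smooth over 199 points and subtract the result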
def get_dv_dt(slice_indicies=(0, 0)):
    """Take the first derivative of the voltage trace and return an array
    with V values and dV/dt values for plotting. Pass a (start, end) tuple
    of sample indices to use a slice of the trace; the default (0, 0) uses
    the whole trace."""
    # Determine whether to use the whole trace or a slice
    if slice_indicies != (0, 0):
        sample_start = slice_indicies[0]
        sample_end = slice_indicies[1]
    else:
        sample_start = 0
        sample_end = len(stf.get_trace())

    # Get the sampling interval to form the dt part of dV/dt
    si = stf.get_sampling_interval()

    # Read V values from the trace
    V_values = stf.get_trace()[sample_start:sample_end]

    # Compute dV by iterating over the voltage vector
    dv = [V_values[i + 1] - V_values[i] for i in range(len(V_values) - 1)]

    # Compute dV/dt
    dv_dt = [dv[i] / si for i in range(len(dv))]

    # V values for a dV/dt vs V graph: the trace with the final sample removed
    V_plot = V_values[:-1]

    # Combine for plotting / further manipulation
    V_dv_dt = np.vstack([V_plot, dv_dt])
    stf.new_window(dv_dt)
    return V_dv_dt
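# A sketch of how the array returned by get_dv_dt() could be used for a
# phase-plane plot, assuming matplotlib is available, the trace is in mV and
# the sampling interval is in ms; the sample indices (1000, 3000) are purely
# illustrative.
#
#   import matplotlib.pyplot as plt
#   V_dv_dt = get_dv_dt((1000, 3000))
#   plt.plot(V_dv_dt[0], V_dv_dt[1])   # x: membrane potential, y: dV/dt
#   plt.xlabel('V (mV)')
#   plt.ylabel('dV/dt (mV/ms)')
#   plt.show()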
def monoexpfit(optimization=True, Tn=20):
    """
    Fits a monoexponential function with offset to data between the fit
    cursors in the current trace of the active channel using a
    Chebyshev-Levenberg-Marquardt hybrid algorithm. Optimization requires
    Scipy. Setting optimization to False forces this function to use just
    the Chebyshev algorithm. The maximum order of the Chebyshev polynomials
    can be set using Tn.
    """
    # Get data
    fit_start = stf.get_fit_start()
    fit_end = stf.get_fit_end()
    y = np.double(stf.get_trace()[fit_start:fit_end])
    si = stf.get_sampling_interval()
    l = len(y)
    t = si * np.arange(0, l, 1, np.double)

    # Define the monoexponential function
    def f(t, *p):
        return p[0] + p[1] * np.exp(-t / p[2])

    # Get initial values from the Chebyshev transform fit
    init = chebexp(1, Tn)
    p0 = (init.get('Offset'), init.get('Amp_0'), init.get('Tau_0'))

    # Optimize (if applicable)
    if optimization:
        # Refine the fit using the Levenberg-Marquardt algorithm
        options = {"ftol": 2.22e-16, "xtol": 2.22e-16, "gtol": 2.22e-16}
        p, pcov = optimize.curve_fit(f, t, y, p0, **options)
    else:
        p = list(p0)
    fit = f(t, *p)

    # Calculate the sum of squared errors
    SSE = np.sum((y - fit)**2)

    # Plot the fit in a new window
    matrix = np.zeros((2, stf.get_size_trace())) * np.nan
    matrix[0, :] = stf.get_trace()
    matrix[1, fit_start:fit_end] = fit
    stf.new_window_matrix(matrix)

    # Create a table of results
    retval = [("p0_Offset", p[0]),
              ("p1_Amp_0", p[1]),
              ("p2_Tau_0", p[2]),
              ("SSE", SSE),
              ("dSSE", 1.0 - np.sum((y - f(t, *p0))**2) / SSE),
              ("Time fit begins", fit_start * si),
              ("Time fit ends", fit_end * si)]
    retval = dict(retval)
    stf.show_table(retval,
                   "monoexpfit, Section #%i" % (stf.get_trace_index() + 1))

    return
def plot_screen(self):
    import stf

    tsl = []
    try:
        l = stf.get_selected_indices()
        for idx in l:
            tsl.append(
                stfio_plot.Timeseries(stf.get_trace(idx),
                                      stf.get_sampling_interval(),
                                      yunits=stf.get_yunits(),
                                      color='0.2'))
            fit = stf.get_fit(idx)
            if fit is not None:
                self.axes.plot(fit[0], fit[1], color='0.4', alpha=0.5, lw=5.0)
    except:
        pass

    tsl.append(
        stfio_plot.Timeseries(stf.get_trace(),
                              stf.get_sampling_interval(),
                              yunits=stf.get_yunits()))

    if stf.get_size_recording() > 1:
        tsl2 = [
            stfio_plot.Timeseries(
                stf.get_trace(trace=-1, channel=stf.get_channel_index(False)),
                stf.get_sampling_interval(),
                yunits=stf.get_yunits(
                    trace=-1, channel=stf.get_channel_index(False)),
                color='r', linestyle='-r')
        ]
        stfio_plot.plot_traces(tsl, traces2=tsl2, ax=self.axes,
                               textcolor2='r',
                               xmin=stf.plot_xmin(), xmax=stf.plot_xmax(),
                               ymin=stf.plot_ymin(), ymax=stf.plot_ymax(),
                               y2min=stf.plot_y2min(), y2max=stf.plot_y2max())
    else:
        stfio_plot.plot_traces(tsl, ax=self.axes,
                               xmin=stf.plot_xmin(), xmax=stf.plot_xmax(),
                               ymin=stf.plot_ymin(), ymax=stf.plot_ymax())

    fit = stf.get_fit()
    if fit is not None:
        self.axes.plot(fit[0], fit[1], color='0.2', alpha=0.5, lw=5.0)
def average_sweeps(*argv):
    """Average the traces whose zero-based indices are given as arguments
    and display the mean in a new window."""
    sweeps = np.vstack([stf.get_trace(sweep) for sweep in argv])
    sweeps_mean = np.mean(sweeps, axis=0)
    stf.new_window(sweeps_mean)
    return sweeps_mean
def normalize():
    """
    Normalize to the peak amplitude of the selected trace and
    scale all other traces in the currently active channel by
    the same factor.

    Ensure that you subtract the baseline before normalizing.
    """
    # Find the index of the selected trace
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to normalize to')

    # Measure the peak amplitude in the selected trace
    stf.set_trace(idx[0])
    refval = np.abs(stf.get_peak())

    # Apply normalization
    scaled_traces = [
        stf.get_trace(i) / refval for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
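# A minimal usage sketch for normalize(), assuming baseline-subtracted traces
# are open in the active window; trace index 0 is an arbitrary choice of the
# reference trace.
#
#   stf.select_trace(0)    # select the reference trace
#   normalize()            # scale every trace in the channel by its peak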
def get_traces(start, end):
    """Show each trace with index in [start, end) in a new window and return
    the traces as a list."""
    trace_list = []
    for x in range(start, end):
        trace = stf.get_trace(x)
        trace_list.append(trace)
        stf.new_window(trace)
    return trace_list
def detect(template, mode, th, min_int):
    """
    Detect events using the given template and the algorithm specified in
    'mode', with a threshold 'th' and a minimal interval of 'min_int' (ms)
    between events.
    Returns event amplitudes, onset times and the detection criterion trace.
    """
    import stf

    # Compute the detection criterion
    crit = stf.detect_events(template, mode=mode, norm=False,
                             lowpass=0.1, highpass=0.001)

    dt = stf.get_sampling_interval()

    # Find event onset times (corresponding to peaks in the criterion)
    onsets_i = stf.peak_detection(crit, th, int(min_int / dt))

    trace = stf.get_trace()

    # Use event onset times to find event amplitudes (negative for EPSCs)
    peak_window_i = int(min_int / dt)
    amps_i = np.array([
        int(np.argmin(trace[onset_i:onset_i + peak_window_i]) + onset_i)
        for onset_i in onsets_i
    ], dtype=int)
    amps = trace[amps_i]
    onsets = onsets_i * dt

    return amps, onsets, crit
def peakalign():
    """
    Shift the selected traces in the currently active channel to align
    their peaks.
    """
    # Measure peak indices in the selected traces
    pidx = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        pidx.append(stf.peak_index())

    # Find the earliest peak
    pref = min(pidx)

    # Align the traces
    shifted_traces = []
    for j, i in enumerate(stf.get_selected_indices()):
        stf.set_trace(i)
        shift = int(pref - pidx[j])
        shifted_traces.append(np.roll(stf.get_trace(), shift))

    return stf.new_window_list(shifted_traces)
def risealign():
    """
    Shift the selected traces in the currently active channel to align
    their rising phases.
    """
    # Measure low rise-time indices in the selected traces
    rtidx = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        rtidx.append(stf.rtlow_index())

    # Find the earliest rise
    rtref = min(rtidx)

    # Align the traces
    shifted_traces = []
    for j, i in enumerate(stf.get_selected_indices()):
        stf.set_trace(i)
        shift = int(round(rtref - rtidx[j]))
        shifted_traces.append(np.roll(stf.get_trace(), shift))

    return stf.new_window_list(shifted_traces)
def median_filter(n):
    """
    Perform a median smoothing filter on the selected traces. Computationally
    this is achieved by a central simple moving median over a sliding window
    of n points. The function uses reflect (or bounce) end corrections.
    """
    # Check that at least one trace was selected
    if not stf.get_selected_indices():
        raise IndexError('No traces were selected')

    # Check that the number of points in the sliding window is odd
    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply the smoothing filter
    filtered_traces = []
    for i in stf.get_selected_indices():
        l = stf.get_size_trace(i)
        padded_trace = np.pad(stf.get_trace(i), (n - 1) // 2, 'reflect')
        filtered_traces.append(
            [np.median(padded_trace[j:n + j]) for j in range(l)])

    print("Window width was %g ms" % (stf.get_sampling_interval() * (n - 1)))

    return stf.new_window_list(filtered_traces)
def peakscale():
    """
    Scale the selected traces in the currently active channel to
    their mean peak amplitude.
    """
    # Measure the baseline in the selected traces
    base = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        base.append(stf.get_base())

    # Subtract the baseline from the selected traces
    stf.subtract_base()

    # Measure peak amplitudes in the baseline-subtracted traces
    stf.select_all()
    peak = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        peak.append(stf.get_peak())

    # Calculate scale factors that make each peak equal to the mean peak
    # amplitude
    scale_factor = np.array(peak) / np.mean(peak)

    # Scale the traces and apply an offset equal to the mean baseline
    scaled_traces = [
        stf.get_trace(i) / scale_factor[i] + np.mean(base)
        for i in stf.get_selected_indices()
    ]

    # Close the window of baseline-subtracted traces
    stf.close_this()

    return stf.new_window_list(scaled_traces)
def sloping_base(trace=-1, method='scale'):
    """
    Correct a linear sloping baseline in the displayed trace of the active
    channel. Useful for approximate correction of photobleaching during
    short periods of imaging. Available methods are 'scale' and 'subtract'.
    """
    # Get the trace and its attributes
    selected_trace = stf.get_trace(trace)
    fit_start = stf.get_base_start()
    fit_end = stf.get_base_end()

    # Linear fit to the baseline region
    fit = np.polyfit(np.arange(fit_start, fit_end, 1, int),
                     selected_trace[fit_start:fit_end], 1)

    # Correct the trace for the sloping baseline
    l = stf.get_size_trace(trace)
    t = np.arange(0, l, 1, np.double)
    if method == 'subtract':
        corrected_trace = selected_trace - t * fit[0]
    elif method == 'scale':
        corrected_trace = selected_trace * fit[1] / (t * fit[0] + fit[1])
    else:
        raise ValueError("method must be 'scale' or 'subtract'")

    return stf.new_window_list([corrected_trace])
def plot_spectrum(self):
    import stf
    Pow, freq = mlab.psd(stf.get_trace(),
                         Fs=(1.0 / stf.get_sampling_interval()) * 1e3,
                         detrend=mlab.detrend_linear)
    self.axes.plot(freq, 10 * np.log10(Pow))
    self.axes.set_xlabel("Frequency (Hz)")
    self.axes.set_ylabel("Power spectral density (dB/Hz)")
def stf_fit(p0, lsfunc):
    """Least-squares fit of lsfunc to the data between the fit cursors,
    starting from the initial parameters p0."""
    data = stf.get_trace()[stf.get_fit_start():stf.get_fit_end()]
    dt = stf.get_sampling_interval()
    x = np.arange(0, len(data) * dt, dt)

    plsq = leastsq(leastsq_stf, p0, args=(data, lsfunc, x))

    return plsq[0]
def automated_search_triexponential(trace_region_to_search, search_period,
                                    threshold, min_btw_events, tau_rise,
                                    tau_1_decay, tau_2_decay):
    """Search a section of the trace for events matching a user-defined
    triexponential template (tau_rise, tau_1_decay, tau_2_decay)."""
    si = stf.get_sampling_interval()

    # Convert some inputs to sample points
    min_samples_btw_events = int(min_btw_events / si)

    # Pull out the region to search
    region_to_search = stf.get_trace()[
        trace_region_to_search[0]:trace_region_to_search[1]]

    # List to store detected event times
    event_times = []

    # Create a vector of time points
    t = np.linspace(0, 50, int(50 / si))

    # Create the triexponential pattern function
    p_t = [(1 - math.exp(-t_point / tau_rise)) *
           (math.exp(-t_point / tau_1_decay)) *
           (math.exp(-t_point / tau_2_decay)) for t_point in t]

    # Slide the window along
    pt = 0
    while pt < len(region_to_search) - min_samples_btw_events:
        EPSC_test = stf.get_trace()[pt:pt + int(search_period / si)]
        corr_coeff = stats.pearsonr(p_t, EPSC_test)[0]
        if corr_coeff > threshold:
            stf.set_marker(trace_region_to_search[0] + pt,
                           region_to_search[pt])
            event_times.append(pt * si)
            pt += min_samples_btw_events
        else:
            pt += 1
    return event_times
def jjm_count(start, delta, threshold=0, up=True, trace=None, mark=True):
    """
    Counts the number of events (e.g. action potentials (APs)) in the
    current trace.

    Arguments:
    start     -- starting time (in ms) to look for events.
    delta     -- time interval (in ms) to look for events.
    threshold -- (optional) detection threshold (default = 0).
    up        -- (optional) True (default) looks for upward events,
                 False for downward events.
    trace     -- (optional) zero-based index of the trace in the current
                 channel; if None, the current trace is used.
    mark      -- (optional) if True (default), set a marker at the point
                 of threshold crossing.

    Returns:
    A tuple with the number of events, the sample points of the threshold
    crossings and the corresponding time points.

    Examples:
    jjm_count(500, 1000) returns the number of events found between
    t=500 ms and t=1500 ms above 0 in the current trace and shows a
    stf marker.
    jjm_count(500, 1000, -10, False, i) returns the number of events found
    below -10 in trace i and shows the corresponding stf markers.
    """
    # Set the current trace or the one given in trace
    if trace is None:
        sweep = stf.get_trace_index()
    else:
        if type(trace) != int:
            print("trace argument admits only integers")
            return False
        sweep = trace

    # Set the trace described in sweep
    stf.set_trace(sweep)

    # Transform time into sampling points
    dt = stf.get_sampling_interval()
    pstart = int(round(start / dt))
    pdelta = int(round(delta / dt))

    # Select the section of interest within the trace
    selection = stf.get_trace()[pstart:(pstart + pdelta)]

    # Algorithm to detect events
    EventCounter, i = 0, 0  # set counter and index to zero

    # List of sample points
    sample_points = []

    # Choose the comparator according to the direction
    if up:
        comp = lambda a, b: a > b
    else:
        comp = lambda a, b: a < b

    # Run the loop
    while i < len(selection):
        if comp(selection[i], threshold):
            EventCounter += 1
            if mark:
                sample_point = pstart + i
                sample_points.append(sample_point)
                stf.set_marker(pstart + i, selection[i])
            # Skip values while in bounds AND until the value is
            # below/above the threshold again
            while i < len(selection) and comp(selection[i], threshold):
                i += 1
        else:
            i += 1

    time_points = [sample_point * dt for sample_point in sample_points]
    return (EventCounter, sample_points, time_points)
def yoffset(value):
    """
    Apply a common offset to all traces in the currently active channel.
    """
    offset_traces = [
        stf.get_trace(i) + value for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(offset_traces)
def subtract_base():
    """
    Subtract the baseline from every trace in the currently active channel
    and show the results in a new window.
    """
    subtracted_traces = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        subtracted_traces.append(stf.get_trace() - stf.get_base())
    stf.new_window_list(subtracted_traces)
    return
def subtract_trace():
    """
    Subtract the selected trace from all traces in the currently active
    channel.
    """
    # Find the index of the selected trace to subtract from all other traces
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to subtract from the others')

    # Apply the subtraction
    subtracted_traces = [
        stf.get_trace(i) - stf.get_trace(idx[0])
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(subtracted_traces)
def remove_artifacts_from_sweeps(artifact_start_time, artifact_end_time):
    """Cut the samples between artifact_start_time and artifact_end_time
    (in ms) out of every sweep in the active channel. Shows the edited
    sweeps in a new window and returns them concatenated into a single
    continuous trace."""
    sampling_interval = stf.get_sampling_interval()
    artifact_start = int(artifact_start_time / sampling_interval)
    artifact_end = int(artifact_end_time / sampling_interval)

    continuous_trace = []
    output_artifacts_removed = []
    for sweep in range(stf.get_size_channel()):
        sweep_trace_before_artifact = stf.get_trace(sweep)[0:artifact_start]
        sweep_trace_after_artifact = stf.get_trace(sweep)[artifact_end:]
        sweep_trace = np.append(sweep_trace_before_artifact,
                                sweep_trace_after_artifact)
        output_artifacts_removed.append(sweep_trace)
        continuous_trace.extend(sweep_trace)

    stf.new_window_list(output_artifacts_removed)
    return continuous_trace
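# A minimal usage sketch for remove_artifacts_from_sweeps(); the 100-102 ms
# window is an arbitrary stimulus-artifact interval used for illustration.
#
#   stitched = remove_artifacts_from_sweeps(100.0, 102.0)
#   # `stitched` holds all edited sweeps joined end to end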
def reverse():
    """
    Reverse the order of all traces.
    """
    reversed_traces = []
    n = stf.get_size_channel()
    for i in range(n):
        reversed_traces.append(stf.get_trace(n - 1 - i))
    stf.new_window_list(reversed_traces)
    return
def yvalue(origin, interval):
    """Return the y value of every trace in the active channel at the time
    point origin + interval (in ms)."""
    stf.set_fit_start(origin, True)
    stf.set_fit_end(origin + interval, True)
    stf.measure()
    x = int(stf.get_fit_end(False))
    y = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        y.append(stf.get_trace(i)[x])
    return y
def find_baseline_amplitude(sigma):
    """Estimate the baseline and peak amplitude of a downward event in the
    active trace. The baseline is taken as the mean of the 10 samples
    preceding the point of steepest negative slope of a Gaussian-filtered
    copy of the trace."""
    # Gaussian filter with the given sigma
    trace_ = stf.get_trace()
    trace_filtered = ndimage.filters.gaussian_filter(trace_, sigma)

    # Take the derivative of the filtered trace; dt is the sampling interval
    si = stf.get_sampling_interval()
    V_values = trace_filtered
    dv = [V_values[i + 1] - V_values[i] for i in range(len(V_values) - 1)]
    dv_dt = [dv[i] / si for i in range(len(dv))]

    # Find the index of the (negative) derivative peak
    deriv_max = np.argmin(dv_dt)

    # Use the derivative peak index to measure the baseline in the original
    # trace as the mean of the preceding 10 sample points
    baseline = np.mean(trace_[deriv_max - 10:deriv_max])
    peak_amplitude = np.min(stf.get_trace())
    peak_from_baseline = peak_amplitude - baseline
    return (baseline, peak_amplitude, peak_from_baseline)
def get_base(self):
    """
    Get the baseline according to the cursor position in the given
    channel/trace.
    """
    self.update()
    return stf.get_trace(
        trace=-1,
        channel=-1)[stf.get_base_start():stf.get_base_end() + 1].mean()
def multiscale_traces(multiplier_list):
    """
    Scale each trace to the respective multiplier in the list argument.
    """
    if len(multiplier_list) != stf.get_size_channel():
        raise ValueError('The number of multipliers and traces are not equal')
    scaled_traces = [
        stf.get_trace(i) * multiplier_list[i]
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
def slice_peak_region(params, trace):
    """Slice out the peak region of the given trace. The start and end times
    in params are given in ms; the function converts them to samples for
    cutting/displaying."""
    stf.set_trace(trace)
    sampling_interval = stf.get_sampling_interval()
    peak_2_start_samples = int(params[2] / sampling_interval)
    peak_2_end_samples = int(params[3] / sampling_interval)
    peak_region = stf.get_trace()[peak_2_start_samples:peak_2_end_samples]
    return peak_region
def find_ADPs(AP_peak_indicies):
    """Find the afterdepolarization (ADP) minima between successive AP peaks
    and return their values and sample indices."""
    ADP_values = []
    ADP_indicies = []
    # Slice the trace between successive AP peaks
    for peak in range(len(AP_peak_indicies) - 1):
        ADP_search = stf.get_trace()[
            AP_peak_indicies[peak]:AP_peak_indicies[peak + 1]]
        min_value = np.min(ADP_search)
        min_index = AP_peak_indicies[peak] + np.argmin(ADP_search)
        stf.set_marker(min_index, min_value)
        ADP_values.append(min_value)
        ADP_indicies.append(min_index)

    return (ADP_values, ADP_indicies)
def get_max_rise(self):
    """
    Maximum rate of rise (dV/dt) of the AP in the current trace, which
    depends on the available Na+ conductance;
    see Mainen et al., 1995; Schmidt-Hieber et al., 2008.
    """
    self.update()
    pmaxrise = stf.maxrise_index()  # in the active channel

    trace = stf.get_trace(trace=-1, channel=-1)  # current trace

    dV = trace[int(ceil(pmaxrise))] - trace[int(floor(pmaxrise))]

    return dV / self._dt
def resistance(base_start, base_end, peak_start, peak_end, amplitude):
    """Calculates the resistance from a series of voltage clamp traces.

    Keyword arguments:
    base_start -- Starting index (zero-based) of the baseline cursors.
    base_end   -- End index (zero-based) of the baseline cursors.
    peak_start -- Starting index (zero-based) of the peak cursors.
    peak_end   -- End index (zero-based) of the peak cursors.
    amplitude  -- Amplitude of the voltage command.

    Returns:
    The resistance.
    """
    if not stf.check_doc():
        print("Couldn't find an open file; aborting now.")
        return 0

    # A temporary array to calculate the average:
    array = np.empty((stf.get_size_channel(), stf.get_size_trace()))
    for n in range(0, stf.get_size_channel()):
        # Add this trace to the set:
        array[n] = stf.get_trace(n)

    # Calculate the average and create a new section from it:
    stf.new_window(np.average(array, 0))

    # Set peak cursors:
    # -1 means all points within the peak window.
    if not stf.set_peak_mean(-1):
        return 0
    if not stf.set_peak_start(peak_start):
        return 0
    if not stf.set_peak_end(peak_end):
        return 0

    # Set base cursors:
    if not stf.set_base_start(base_start):
        return 0
    if not stf.set_base_end(base_end):
        return 0

    # Measure everything:
    stf.measure()

    # Calculate the resistance and return it:
    return amplitude / (stf.get_peak() - stf.get_base())
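# A minimal usage sketch for resistance(), assuming the active channel holds
# current responses (in pA) to a voltage step; the cursor indices and the
# 5 mV amplitude below are illustrative values only.
#
#   r_input = resistance(base_start=0, base_end=999,
#                        peak_start=1500, peak_end=2500, amplitude=5.0)
#   # with current in pA and voltage in mV, r_input comes out in GOhm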
def rmean(binwidth, trace=-1, channel=-1):
    """
    Calculates a running mean of a single trace.

    Arguments:

    binwidth -- size of the bin in sampling points (pt).
                Obviously, it should be smaller than the length of the trace.

    trace    -- ZERO-BASED index of the trace within the channel.
                Note that this is one less than what is shown in the
                drop-down box. The default value of -1 returns the currently
                displayed trace.

    channel  -- ZERO-BASED index of the channel. This is independent of
                whether a channel is active or not. The default value of -1
                returns the currently active channel.

    Returns:

    The smoothed trace in a new stf window.
    """
    # Load the selected trace of the channel as a 1D NumPy array
    sweep = stf.get_trace(trace, channel)

    # Create a destination array for the smoothed data
    dsweep = np.empty(len(sweep))

    # Running mean algorithm
    for i in range(len(sweep)):
        if (len(sweep) - i) > binwidth:
            # Average the next `binwidth` values
            dsweep[i] = np.mean(sweep[i:(binwidth + i)])
        else:
            # Use all remaining points for the average
            dsweep[i] = np.mean(sweep[i:])

    stf.new_window(dsweep)
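# A minimal usage sketch for rmean(); the bin widths below are arbitrary
# illustrations.
#
#   rmean(10)            # smooth the displayed trace with a 10-point bin
#   rmean(50, trace=3)   # smooth trace 3 of the active channel with a 50-point bin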
def cut_sweeps(start, delta, sequence=None):
    """
    Cuts a sequence of traces and presents them in a new window.

    Arguments:

    start    -- starting point (in ms) to cut.
    delta    -- time interval (in ms) to cut.
    sequence -- list of indices to be cut. If None, every trace in the
                channel will be cut.

    Returns:
    A new window with the cut traces.

    Examples:
    cut_sweeps(200, 300) cuts the traces between t=200 ms and t=500 ms
        within the whole channel.
    cut_sweeps(200, 300, range(30, 60)) the same as above, but only
        between traces 30 and 60.
    cut_sweeps(200, 300, stf.get_selected_indices()) cuts between 200 ms
        and 500 ms only in the selected traces.
    """
    # Select every trace in the channel if no selection is given in sequence
    if sequence is None:
        sequence = range(stf.get_size_channel())

    # Transform time into sampling points
    dt = stf.get_sampling_interval()
    pstart = int(round(start / dt))
    pdelta = int(round(delta / dt))

    # Create a destination python list
    dlist = [stf.get_trace(i)[pstart:(pstart + pdelta)] for i in sequence]

    return stf.new_window_list(dlist)
def timeconstants(fitwindow=None, pulsewindow=None, ichannel=0, vchannel=1):
    """
    Compute and plot decay time constants

    Parameters
    ----------
    fitwindow : (float, float), optional
        Window for fitting the time constant (time in ms from beginning of
        sweep)
        None for current cursor settings. Default: None
    pulsewindow : (float, float), optional
        Window for voltage pulse measurement (time in ms from beginning of
        sweep)
        None for current cursor settings. Default: None
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1

    Returns
    -------
    v_commands : numpy.ndarray
        Command voltages
    taus : numpy.ndarray
        Time constants
    """
    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()

    v_commands = []
    taus = []

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 8)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, :4], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, :4], hasx=False, hasy=False, sharex=ax_currents)
    for ntrace in range(stf.get_size_channel()):
        stf.set_trace(ntrace)
        stf.set_channel(ichannel)
        trace = stf.get_trace()

        ax_currents.plot(np.arange(len(trace))*dt, trace)

        # Fit the decay time constant
        if fitwindow is not None:
            stf.fit.cursor_time = fitwindow
        res = stf.leastsq(0, False)
        taus.append(res['Tau_0'])

        # Measure pulse amplitude
        stf.set_channel(vchannel)
        trace = stf.get_trace()
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

        stf.set_peak_direction("up")
        stf.set_peak_mean(-1)
        if pulsewindow is not None:
            stf.peak.cursor_time = pulsewindow
        stf.measure()
        v_commands.append(stf.peak.value)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=ichannel))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=vchannel))

    v_commands = np.array(v_commands)
    taus = np.array(taus)

    ax_taus = plot_iv(
        taus, v_commands, "ms",
        stf.get_yunits(channel=vchannel), fig, 122)

    # Reset peak computation to single sampling point
    stf.set_peak_mean(1)

    # Reset active channel
    stf.set_channel(ichannel)

    # Show the results in a table:
    stf.show_table_dictlist({
        "Voltage ({0})".format(
            stf.get_yunits(channel=vchannel)): v_commands.tolist(),
        "Taus (ms)": taus.tolist(),
    })

    return v_commands, taus
def count_events(start, delta, threshold=0, up=True, trace=None, mark=True):
    """
    Counts the number of events (e.g. action potentials (APs)) in the
    current trace.

    Arguments:
    start     -- starting time (in ms) to look for events.
    delta     -- time interval (in ms) to look for events.
    threshold -- (optional) detection threshold (default = 0).
    up        -- (optional) True (default) looks for upward events,
                 False for downward events.
    trace     -- (optional) zero-based index of the trace in the current
                 channel; if None, the current trace is used.
    mark      -- (optional) if True (default), set a marker at the point
                 of threshold crossing.

    Returns:
    An integer with the number of events.

    Examples:
    count_events(500, 1000) returns the number of events found between
    t=500 ms and t=1500 ms above 0 in the current trace and shows a
    stf marker.
    count_events(500, 1000, -10, False, i) returns the number of events
    found below -10 in trace i and shows the corresponding stf markers.
    """
    # Set the current trace or the one given in trace
    if trace is None:
        sweep = stf.get_trace_index()
    else:
        if type(trace) != int:
            print('trace argument admits only integers')
            return False
        sweep = trace

    # Set the trace described in sweep
    stf.set_trace(sweep)

    # Transform time into sampling points
    dt = stf.get_sampling_interval()
    pstart = int(round(start / dt))
    pdelta = int(round(delta / dt))

    # Select the section of interest within the trace
    selection = stf.get_trace()[pstart:(pstart + pdelta)]

    # Algorithm to detect events
    event_counter, i = 0, 0  # set counter and index to zero

    # Choose the comparator according to the direction
    if up:
        comp = lambda a, b: a > b
    else:
        comp = lambda a, b: a < b

    # Run the loop
    while i < len(selection):
        if comp(selection[i], threshold):
            event_counter += 1
            if mark:
                stf.set_marker(pstart + i, selection[i])
            while i < len(selection) and comp(selection[i], threshold):
                i += 1  # skip until the value is below/above threshold again
        else:
            i += 1

    return event_counter
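# A short sketch of deriving a firing rate from count_events(), assuming a
# current-clamp trace in mV where spikes cross 0 mV; the 1000 ms window
# starting at t=100 ms is an arbitrary illustration.
#
#   n_spikes = count_events(100, 1000, threshold=0, up=True)
#   rate_hz = n_spikes / 1.0   # 1000 ms window = 1 s, so events per second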
def iv(peakwindow=None, basewindow=None, pulsewindow=None,
       erev=None, peakmode="both", ichannel=0, vchannel=1,
       exclude=None):
    """
    Compute and plot an IV curve for currents

    Parameters
    ----------
    peakwindow : (float, float), optional
        Window for peak measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    basewindow : (float, float), optional
        Window for baseline measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    pulsewindow : (float, float), optional
        Window for voltage pulse measurement (time in ms from beginning of
        sweep)
        None for current cursor settings. Default: None
    erev : float, optional
        Reversal potential used for the conductance computation, or None to
        estimate it from the zero crossing of the IV curve. Default: None
    peakmode : string, optional
        Peak direction - one of "up", "down", "both" or "mean".
        Default: "both"
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1
    exclude : list of ints, optional
        List of trace indices to be excluded from the analysis.
        Default: None

    Returns
    -------
    v_commands : numpy.ndarray
        Command voltages
    ipeaks : numpy.ndarray
        Peak currents
    gpeaks : numpy.ndarray
        Peak normalized conductances
    g_fit : numpy.ndarray
        Half-maximal voltage and slope of best-fit Boltzmann function
    """
    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()

    olddirection = stf.get_peak_direction()

    v_commands = []
    ipeaks = []
    if basewindow is not None:
        stf.base.cursor_time = basewindow

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 8)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, :4], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, :4], hasx=False, hasy=False, sharex=ax_currents)
    for ntrace in range(stf.get_size_channel()):
        if exclude is not None:
            if ntrace in exclude:
                continue

        stf.set_trace(ntrace)
        stf.set_channel(ichannel)
        trace = stf.get_trace()

        ax_currents.plot(np.arange(len(trace))*dt, trace)

        # Set up the peak measurement
        if peakmode == "mean":
            stf.set_peak_direction("up")
            stf.set_peak_mean(-1)
        else:
            stf.set_peak_direction(peakmode)
            # Set peak computation to single sampling point
            stf.set_peak_mean(1)

        if peakwindow is not None:
            stf.peak.cursor_time = peakwindow
        stf.measure()
        if basewindow is not None:
            ipeaks.append(stf.peak.value - stf.base.value)
        else:
            ipeaks.append(stf.peak.value)

        # Measure pulse amplitude
        stf.set_channel(vchannel)
        trace = stf.get_trace()
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

        stf.set_peak_direction("up")
        stf.set_peak_mean(-1)
        if pulsewindow is not None:
            stf.peak.cursor_time = pulsewindow
        stf.measure()
        v_commands.append(stf.peak.value)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=ichannel))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=vchannel))

    v_commands = np.array(v_commands)
    ipeaks = np.array(ipeaks)

    if erev is None:
        # Find the first zero crossing in ipeaks:
        for npulse in range(ipeaks.shape[0]-1):
            if np.sign(ipeaks[npulse]) != np.sign(ipeaks[npulse+1]):
                # linear interpolation
                m1 = (ipeaks[npulse+1]-ipeaks[npulse]) / (
                    v_commands[npulse+1]-v_commands[npulse])
                c1 = ipeaks[npulse] - m1*v_commands[npulse]
                erev = -c1/m1
                break
        if erev is None:
            sys.stderr.write(
                "Could not determine reversal potential. Aborting now\n")
            return None

    # Reset peak computation to single sampling point
    stf.set_peak_mean(1)
    stf.set_peak_direction(olddirection)

    # Reset active channel
    stf.set_channel(ichannel)

    # Compute conductances:
    gpeaks, g_fit = gv(ipeaks, v_commands, erev)

    ax_ipeaks = plot_iv(
        ipeaks, v_commands, stf.get_yunits(channel=ichannel),
        stf.get_yunits(channel=vchannel), fig, 222)

    ax_ipeaks.set_title("Peak current")

    ax_gpeaks = plot_gv(
        gpeaks, v_commands, stf.get_yunits(channel=vchannel),
        g_fit, fig, 224)
    ax_gpeaks.set_title("Peak conductance")

    stf.show_table_dictlist({
        "Voltage ({0})".format(
            stf.get_yunits(channel=vchannel)): v_commands.tolist(),
        "Peak current ({0})".format(
            stf.get_yunits(channel=ichannel)): ipeaks.tolist(),
        "Peak conductance (g/g_max)": gpeaks.tolist(),
    })

    return v_commands, ipeaks, gpeaks, g_fit
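# A minimal usage sketch for iv(), assuming a two-channel voltage-clamp
# recording is open (channel 0: current, channel 1: voltage); the millisecond
# windows below are only illustrative values for a typical step protocol.
#
#   v, i, g, g_fit = iv(peakwindow=(210, 350), basewindow=(0, 190),
#                       pulsewindow=(210, 350), peakmode="both")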