Example #1
def median_filter(n):
    """
    Perform median smoothing filter on the selected traces. 
    Computationally this is achieved by a central simple moving 
    median over a sliding window of n points.

    The function uses reflect (or bounce) end corrections

    """

    # Check that at least one trace was selected
    if not stf.get_selected_indices():
        raise IndexError('No traces were selected')

    # Check that the number of points in the sliding window is odd
    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply smoothing filter
    filtered_traces = []
    for i in stf.get_selected_indices():
        l = stf.get_size_trace(i)
        padded_trace = np.pad(stf.get_trace(i), (n - 1) // 2, 'reflect')
        filtered_traces.append(
            [np.median(padded_trace[j:n + j]) for j in range(l)])

    print "Window width was %g ms" % (stf.get_sampling_interval() * (n - 1))

    return stf.new_window_list(filtered_traces)
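
For reference, the same reflect-padded moving median can be reproduced outside Stimfit. Below is a minimal, self-contained sketch using only NumPy on synthetic data; the names (noisy, smoothed) and the window width are illustrative and not part of the stf API.

import numpy as np

n = 5                                             # odd window width in points
noisy = np.sin(np.linspace(0, 6.28, 100)) + 0.1 * np.random.randn(100)
padded = np.pad(noisy, (n - 1) // 2, 'reflect')
smoothed = np.array([np.median(padded[j:j + n]) for j in range(len(noisy))])

Under the same assumptions, scipy.ndimage.median_filter(noisy, size=n, mode='mirror') should give a comparable result, since ndimage's 'mirror' mode matches np.pad's 'reflect' padding.
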
Example #2
def count_aps():
    """
    Shows a result table with the number of action potentials (i.e.
    events whose potential is above 0 mV) in the selected traces. If
    no trace is selected, then the current trace is analyzed.

    Returns:
    False if document is not open.
    """
    if not stf.check_doc():
        print("Open file first")
        return False
  
    if len(stf.get_selected_indices()) == 0:
        sel_trace = [stf.get_trace_index()]
    else:
        sel_trace = stf.get_selected_indices()

    mytable = dict()
    for trace in sel_trace:
        tstart = 0
        tend = stf.get_size_trace(trace)*stf.get_sampling_interval()
        threshold = 0
        spikes = count_events(tstart, tend, threshold, True, trace, True)
        mytable["Trace %.3d" %trace] = spikes

    stf.show_table(mytable)

    return True
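
A hypothetical usage from Stimfit's embedded Python shell, assuming a file is open and count_events() is defined elsewhere in the same script (the counts shown are made up):

count_aps()   # shows a table such as {"Trace 000": 12, "Trace 001": 9} and returns True
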
Example #3
def sloping_base(trace=-1, method='scale'):
    """
    Correct for linear sloping baseline in the displayed trace of the active channel. 
    Useful for approximate correction of photobleaching during short periods of imaging.
    Available methods are 'scale' or 'subtract'.
    """

    # Get trace and trace attributes
    selected_trace = stf.get_trace(trace)
    fit_start = stf.get_base_start()
    fit_end = stf.get_base_end()

    # Linear fit to baseline region
    fit = np.polyfit(np.arange(fit_start, fit_end, 1, int),
                     selected_trace[fit_start:fit_end], 1)

    # Correct trace for sloping baseline
    l = stf.get_size_trace(trace)
    t = np.arange(0, l, 1, np.double)
    if method == 'subtract':
        corrected_trace = selected_trace - t * fit[0]
    elif method == 'scale':
        corrected_trace = selected_trace * fit[1] / (t * fit[0] + fit[1])
    else:
        raise ValueError("method must be 'scale' or 'subtract'")

    return stf.new_window_list([corrected_trace])
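
To make the two correction methods concrete, here is a self-contained sketch of the same arithmetic on a synthetic sloping trace; all names are illustrative and no stf calls are made.

import numpy as np

trace = 100.0 + 0.02 * np.arange(1000) + np.random.randn(1000)    # sloping baseline
fit_start, fit_end = 0, 200
slope, offset = np.polyfit(np.arange(fit_start, fit_end), trace[fit_start:fit_end], 1)
t = np.arange(trace.size, dtype=np.double)
subtracted = trace - t * slope                     # 'subtract' method
scaled = trace * offset / (t * slope + offset)     # 'scale' method
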
Example #4
def hpfilter(n):
    """
    Perform a median smoothing filter on the active trace.
    Computationally this is achieved by a central simple moving
    median over a sliding window of n points. The function then
    subtracts the smoothed trace from the original trace.
    The function uses reflect (or bounce) end corrections.
    """

    # Check that the number of points in the sliding window is odd
    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply smoothing filter
    l = stf.get_size_trace()
    padded_trace = np.pad(stf.get_trace(), (n - 1) // 2, 'reflect')
    filtered_trace = np.array(
        [np.median(padded_trace[j:n + j]) for j in range(l)])

    print("Window width was %g ms" % (stf.get_sampling_interval() * (n - 1)))

    # Subtract the smoothed trace from the original trace
    subtracted_trace = stf.get_trace() - filtered_trace

    return stf.new_window_list([subtracted_trace])
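
A hypothetical call from Stimfit's shell; with a wide window the smoothed trace follows only the slow drift, so the subtraction acts as a crude high-pass filter (the rank 101 is an arbitrary example).

hpfilter(101)   # prints the window width in ms and opens the subtracted trace in a new window
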
Example #5
def count_aps():
    """
    Shows a result table with the number of action potentials (i.e.
    events whose potential is above 0 mV) in the selected traces. If
    no trace is selected, then the current trace is analyzed.

    Returns:
    False if document is not open.
    """
    if not stf.check_doc():
        print("Open file first")
        return False

    if len(stf.get_selected_indices()) == 0:
        sel_trace = [stf.get_trace_index()]
    else:
        sel_trace = stf.get_selected_indices()

    mytable = dict()
    for trace in sel_trace:
        tstart = 0
        tend = stf.get_size_trace(trace) * stf.get_sampling_interval()
        threshold = 0
        spikes = count_events(tstart, tend, threshold, True, trace, True)
        mytable["Trace %.3d" % trace] = spikes

    stf.show_table(mytable)

    return True
Example #6
def monoexpfit(optimization=True, Tn=20):
    """
    Fits monoexponential function with offset to data between the fit cursors
    in the current trace of the active channel using a Chebyshev-Levenberg-
    Marquardt hybrid algorithm. Optimization requires Scipy. Setting optimization
    to False forces this function to use just the Chebyshev algorithm. The maximum
    order of the Chebyshev polynomials can be set using Tn.
    """

    # Get data
    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    y = np.double(stf.get_trace()[fit_start:fit_end])
    si = stf.get_sampling_interval()
    l = len(y)
    t = si * np.arange(0, l, 1, np.double)

    # Define monoexponential function
    def f(t, *p):
        return p[0] + p[1] * np.exp(-t / p[2])

    # Get initial values from Chebyshev transform fit
    init = chebexp(1, Tn)
    p0 = (init.get('Offset'), )
    p0 += (init.get('Amp_0'), )
    p0 += (init.get('Tau_0'), )

    # Optimize (if applicable)
    if optimization:
        # Optimize fit using Levenberg-Marquardt algorithm
        options = {"ftol": 2.22e-16, "xtol": 2.22e-16, "gtol": 2.22e-16}
        p, pcov = optimize.curve_fit(f, t, y, p0, **options)
    else:
        p = list(p0)
    fit = f(t, *p)

    # Calculate SSE
    SSE = np.sum((y - fit)**2)

    # Plot fit in a new window
    matrix = np.zeros((2, stf.get_size_trace())) * np.nan
    matrix[0, :] = stf.get_trace()
    matrix[1, fit_start:fit_end] = fit
    stf.new_window_matrix(matrix)

    # Create table of results
    retval = [("p0_Offset", p[0])]
    retval += [("p1_Amp_0", p[1])]
    retval += [("p2_Tau_0", p[2])]
    retval += [("SSE", SSE)]
    retval += [("dSSE", 1.0 - np.sum((y - f(t, *p0))**2) / SSE)]
    retval += [("Time fit begins", fit_start * si)]
    retval += [("Time fit ends", fit_end * si)]
    retval = dict(retval)
    stf.show_table(
        retval, "monoexpfit, Section #%i" % float(stf.get_trace_index() + 1))

    return
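
The fitting step itself can be exercised without Stimfit. Below is a minimal, self-contained sketch of the same monoexponential model fitted to synthetic data with SciPy; here the initial guess p0 is hand-picked rather than taken from chebexp, and all numbers are illustrative.

import numpy as np
from scipy import optimize

def f(t, offset, amp, tau):
    return offset + amp * np.exp(-t / tau)

t = np.arange(0, 100, 0.1)                         # time in ms
y = f(t, 5.0, -20.0, 12.0) + 0.5 * np.random.randn(t.size)
p, pcov = optimize.curve_fit(f, t, y, p0=(0.0, -10.0, 5.0))
sse = np.sum((y - f(t, *p)) ** 2)
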
Example #7
def upsample_flex():
    """
    Upsample to a sampling interval of 1 ms using cubic spline interpolation.
    """

    old_time = [
        i * stf.get_sampling_interval() for i in range(stf.get_size_trace())
    ]
    new_time = range(
        int(np.fix((stf.get_size_trace() - 1) * stf.get_sampling_interval())))
    new_traces = []
    for i in range(stf.get_size_channel()):
        f = interpolate.interp1d(old_time, stf.get_trace(i), 'cubic')
        new_traces.append(f(new_time))
    stf.new_window_list(new_traces)
    stf.set_sampling_interval(1)

    return
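
A self-contained sketch of the resampling step, assuming SciPy is available; the sampling interval and trace are synthetic, and the 1 ms target grid mirrors the function above.

import numpy as np
from scipy import interpolate

si = 0.05                                          # original sampling interval in ms
old_time = np.arange(200) * si
trace = np.sin(old_time)
new_time = np.arange(int((len(trace) - 1) * si))   # 1 ms grid
resampled = interpolate.interp1d(old_time, trace, 'cubic')(new_time)
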
Example #8
def resistance(base_start, base_end, peak_start, peak_end, amplitude):
    """Calculates the resistance from a series of voltage clamp traces.

    Keyword arguments:
    base_start -- Starting index (zero-based) of the baseline cursors.
    base_end   -- End index (zero-based) of the baseline cursors.
    peak_start -- Starting index (zero-based) of the peak cursors.
    peak_end   -- End index (zero-based) of the peak cursors.
    amplitude  -- Amplitude of the voltage command.

    Returns:
    The resistance.
    """

    if not stf.check_doc():
        print('Couldn\'t find an open file; aborting now.')
        return 0

    # A temporary array to calculate the average:
    array = np.empty((stf.get_size_channel(), stf.get_size_trace()))
    for n in range(stf.get_size_channel()):
        # Add this trace to the set:
        array[n] = stf.get_trace(n)

    # calculate average and create a new section from it:
    stf.new_window(np.average(array, 0))

    # set peak cursors:
    # -1 means all points within peak window.
    if not stf.set_peak_mean(-1): 
        return 0 
    if not stf.set_peak_start(peak_start): 
        return 0
    if not stf.set_peak_end(peak_end): 
        return 0

    # set base cursors:
    if not stf.set_base_start(base_start): 
        return 0
    if not stf.set_base_end(base_end): 
        return 0

    # measure everything:
    stf.measure()

    # calculate r_seal and return:
    return amplitude / (stf.get_peak()-stf.get_base())
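
The final line is just Ohm's law applied to the averaged trace. As an illustrative calculation with made-up numbers in SI units, a -5 mV step that evokes a -250 pA plateau relative to baseline gives:

amplitude = -5e-3            # V
delta_i = -250e-12           # A (peak minus base)
r = amplitude / delta_i      # 2e7 ohm, i.e. 20 MOhm
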
Example #9
def resistance(base_start, base_end, peak_start, peak_end, amplitude):
    """Calculates the resistance from a series of voltage clamp traces.

    Keyword arguments:
    base_start -- Starting index (zero-based) of the baseline cursors.
    base_end   -- End index (zero-based) of the baseline cursors.
    peak_start -- Starting index (zero-based) of the peak cursors.
    peak_end   -- End index (zero-based) of the peak cursors.
    amplitude  -- Amplitude of the voltage command.

    Returns:
    The resistance.
    """

    if not stf.check_doc():
        print('Couldn\'t find an open file; aborting now.')
        return 0

    #A temporary array to calculate the average:
    array = np.empty((stf.get_size_channel(), stf.get_size_trace()))
    for n in range(0, stf.get_size_channel()):
        # Add this trace to set:
        array[n] = stf.get_trace(n)

    # calculate average and create a new section from it:
    stf.new_window(np.average(array, 0))

    # set peak cursors:
    # -1 means all points within peak window.
    if not stf.set_peak_mean(-1):
        return 0
    if not stf.set_peak_start(peak_start):
        return 0
    if not stf.set_peak_end(peak_end):
        return 0

    # set base cursors:
    if not stf.set_base_start(base_start):
        return 0
    if not stf.set_base_end(base_end):
        return 0

    # measure everything:
    stf.measure()

    # calculate r_seal and return:
    return amplitude / (stf.get_peak() - stf.get_base())
Example #10
def interpstim():
    """
    Interpolate values between fit cursors in all traces in the active channel.
    Typically used to remove stimulus artifacts.
    """

    x = np.array(
        [i * stf.get_sampling_interval() for i in range(stf.get_size_trace())])
    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    interp_traces = []
    for i in range(stf.get_size_channel()):
        tmp = stf.get_trace(i)
        tmp[fit_start:fit_end] = np.interp(x[fit_start:fit_end],
                                           [x[fit_start], x[fit_end]],
                                           [tmp[fit_start], tmp[fit_end]])
        interp_traces.append(tmp)
    stf.new_window_list(interp_traces)

    return
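
The core of the artifact removal is a two-point linear bridge computed with np.interp. A self-contained sketch with illustrative names and no stf calls:

import numpy as np

si = 0.1                                           # sampling interval in ms
trace = np.random.randn(500)
x = np.arange(trace.size) * si
fit_start, fit_end = 200, 240                      # artifact window in points
trace[fit_start:fit_end] = np.interp(x[fit_start:fit_end],
                                     [x[fit_start], x[fit_end]],
                                     [trace[fit_start], trace[fit_end]])
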
Example #11
def analyze_iv(pulses, trace_start=0, factor=1.0):
    """Creates an IV for the currently active channel.

    Keyword arguments:
    pulses --      Number of pulses for the IV.
    trace_start -- ZERO-BASED index of the first trace to be
                   used for the IV. Note that this is one less
                   than what is displayed in the drop-down box.
    factor --      Multiply result with an optional factor, typically
                   from some external scaling.
    Returns:
    True upon success, False otherwise.
    """

    if not stf.check_doc():
        print("Couldn't find an open file; aborting now.")
        return False

    if pulses < 1:
        print("Number of pulses has to be greater than or equal to 1.")
        return False

    # create an empty list to hold the averaged pulses
    channel = list()
    for m in range(pulses):
        # A temporary array to calculate the average:
        pulse_set = np.empty((int(
            (stf.get_size_channel() - m - 1 - trace_start) / pulses) + 1,
            stf.get_size_trace(trace_start + m)))
        n_set = 0
        for n in range(trace_start + m, stf.get_size_channel(), pulses):
            # Add this trace to the set:
            pulse_set[n_set, :] = stf.get_trace(n)
            n_set = n_set + 1

        # calculate average and create a new section from it, multiply:
        channel.append(np.average(pulse_set, 0) * factor)

    stf.new_window_list(channel)

    return True
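
A hypothetical call from Stimfit's shell: for a recording in which ten different pulses were cycled repeatedly, this averages every tenth trace together and opens the ten averaged pulses in a new window.

analyze_iv(10, trace_start=0, factor=1.0)
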
Example #12
def Train10AP():
    """
    An example function to perform peak measurements of a train of
    evoked fluorescence signals in the active window
    """

    # Setup
    offset = 40
    stf.set_base_start(0)
    stf.set_peak_start(offset - 2)
    stf.measure()
    base = stf.get_base()
    stf.set_peak_mean(1)
    stf.set_peak_direction("up")
    peak = []

    # Get peak measurements
    for i in range(10):
        stf.set_peak_start(offset + (i * 4) - 2)
        stf.set_peak_end(offset + (i * 4) + 2)
        stf.measure()
        peak.append(stf.get_peak())

    # Plot fit in a new window
    matrix = np.zeros((2, stf.get_size_trace())) * np.nan
    matrix[0, :] = stf.get_trace()
    for i in range(10):
        matrix[1, offset + (i * 4) - 1:offset + (i * 4) + 2] = peak[i]
    stf.new_window_matrix(matrix)

    # Create table of results
    retval = []
    for i in range(10):
        retval += [("Peak %d" % (i), peak[i] - base)]
    retval = dict(retval)
    stf.show_table(retval,
                   "Train10AP, Section #%i" % float(stf.get_trace_index() + 1))

    return
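
A hypothetical call; note that the hard-coded cursors assume ten stimuli spaced four samples apart starting near sample 40 of the active trace, so the offsets usually need editing for other protocols.

Train10AP()   # opens the peak overlay in a new window and shows a table of baseline-subtracted peaks
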
Example #13
def EPSPtrains(latency=200,
               numStim=4,
               intvlList=[1, 0.8, 0.6, 0.4, 0.2, 0.1, 0.08, 0.06, 0.04, 0.02]):
    """
    Measure evoked EPSP trains in the active channel, one trace per
    inter-stimulus interval in intvlList (in seconds). Each trace is
    background subtracted and the area under the train (units in V.s)
    is returned in a list.
    """

    # Initialize
    numTrains = len(intvlList)  # Number of trains
    intvlArray = np.array(intvlList) * 1000  # Units in ms
    si = stf.get_sampling_interval()  # Units in ms

    # Background subtraction
    traceBaselines = []
    subtractedTraces = []
    k = 1e-4
    x = [i * stf.get_sampling_interval() for i in range(stf.get_size_trace())]
    for i in range(numTrains):
        stf.set_trace(i)
        z = x
        y = stf.get_trace()
        traceBaselines.append(y)
        ridx = []
        if intvlArray[i] > 500:
            for j in range(numStim):
                ridx += range(
                    int(round(((intvlArray[i] * j) + latency - 1) / si)),
                    int(round(
                        ((intvlArray[i] * (j + 1)) + latency - 1) / si)) - 1)
        else:
            ridx += range(
                int(round((latency - 1) / si)),
                int(
                    round(((intvlArray[i] *
                            (numStim - 1)) + latency + 500) / si)) - 1)
        ridx += range(int(round(4999 / si)), int(round(5199 / si)))
        z = np.delete(z, ridx, 0)
        y = np.delete(y, ridx, 0)
        yi = np.interp(x, z, y)
        yf = signal.symiirorder1(yi, (k**2), 1 - k)
        traceBaselines.append(yf)
        subtractedTraces.append(stf.get_trace() - yf)
    stf.new_window_list(traceBaselines)
    stf.new_window_list(subtractedTraces)

    # Measure depolarization
    # Initialize variables
    a = []
    b = []

    # Set baseline start and end cursors
    stf.set_base_start(np.round(
        (latency - 50) / si))  # Average during 50 ms period before stimulus
    stf.set_base_end(np.round(latency / si))

    # Set fit start cursor
    stf.set_fit_start(np.round(latency / si))
    stf.set_fit_end(
        np.round(((intvlArray[1] * (numStim - 1)) + latency + 1000) /
                 si))  # Include a 1 second window after last stimulus

    # Start AUC calculations
    for i in range(numTrains):
        stf.set_trace(i)
        stf.measure()
        b.append(stf.get_base())
        n = int(stf.get_fit_end() + 1 - stf.get_fit_start())
        x = np.array([k * stf.get_sampling_interval() for k in range(n)])
        y = stf.get_trace()[int(stf.get_fit_start()):int(stf.get_fit_end() +
                                                         1)]
        a.append(np.trapz(y - b[i], x))  # Units in V.s

    return a
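
A hypothetical call, assuming the active channel holds one trace per entry of intvlList in the same order:

areas = EPSPtrains(latency=200, numStim=4)   # list of areas under each train, in V.s
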
Example #14
def rscomp(V_hold=-60,
           V_reversal=0,
           R_s_final=2,
           V_step=-5,
           step_start=10,
           step_duration=20):
    """
    Function for offline series resistance compensation on current trace in the active channel
    
    This script is my adaptation of Erwin Neher's Igor functions:
       SeriesresistanceComp
    These were freely available in in Proc02_Apr4Web.ipf from Erwin Neher's webpage:
     http://www3.mpibpc.mpg.de/groups/neher/index.php?page=software
     (last accessed: 01 July 2014)
     
    Instead of setting the fraction compensation, this function asks for the desired
    uncompensated series resistance. Whole-cell properties are calcuated using the wcp
    function.
    
    rsomp replaces current traces by their series-compensated version;
    the value at i is replaced by the average at i and i+1
    R_s is in ohms, C_m in Farads, fraction is the fraction to be compensated
    if R_s was 5 MOhm in the experiment and if it was 50% hardware compensated
    then R_s = 2.5e6 has to be entered and f=1 for a complete overall compensation
    The routine, similarly to that published by Traynelis J. Neurosc. Meth. 86:25,
    compensates the frequency response, assuming a single R_s*C_m - time constant
    (at constant V-hold) and a three component equivalent circuit for the pipette cell
    assembly with  R_s, C_m, R_m
    
    Theory: V_h = R_s*I+U_m;
    I_r is membrane resistive current,
    I is total current
    I_r = I-I_c = I-C_m*dU_m/dt = I+C_m*R_s*dI/dt (because R_s*I+U_m = const.)
    G_m=I_r/ U_m = (I+C_m*R_s*dI/dt)/ (V_h-R_s*I)
    For complete correction (fraction = 1) : I_corr = V_h*(I+C_m*R_s*dI/dt)/ (V_h-R_s*I)
    
    """

    # Get Whole cell properties
    wcp_stats = wcp(V_step, step_start, step_duration)

    # sampInt is sampling interval in seconds
    sampInt = stf.get_sampling_interval() * 1e-3

    # R_c is the cell membrane resistance in ohms
    R_c = wcp_stats["Cell resistance (Mohm)"] * 1e+6

    # R_s is the (initial) series resistance in ohms
    R_s = wcp_stats["Series resistance (Mohm)"] * 1e+6

    # C_m is the cell capacitance in farads
    C_m = wcp_stats["Cell capacitance (pF)"] * 1e-12

    # R_s_final is the final (uncompensated) series resistance in ohms
    R_s_final *= 1e+6

    # fraction is the amount of compensation
    fraction = 1 - (R_s_final / R_s)

    # Convert unit of holding and reversal potentials to volts
    V_hold *= 1e-3
    V_reversal *= 1e-3

    # Calculate voltage difference
    voltage = V_hold - V_reversal

    # Calculate cell time constants
    #tau = R_s * C_m
    tau = (R_s * R_c * C_m) / (R_s + R_c)  # more accurate
    tau_corr = tau * (1 - fraction)

    # Assign current recording trace to I_wave and convert to amps
    I_wave = stf.get_trace() * 1e-12

    # First point: (we have to calculate this separately, because we need the value at i-1 below)
    denominator = voltage - R_s * fraction * I_wave[0]
    if denominator != 0:
        I_wave[0] = I_wave[0] * (voltage / denominator)

    # this is the loop doing the correction for all other points
    for i in range(1, stf.get_size_trace() - 1):
        # first calculate R_m for zero series resistance under the assumptions
        # that  U_m + U_Rs = const = voltage
        current = (I_wave[i + 1] + I_wave[i]) / 2  # The in between(mean) value
        derivative = (I_wave[i + 1] - I_wave[i]) / sampInt
        denominator = current + tau * derivative
        if denominator != 0:
            R_m = (voltage -
                   R_s * current) / denominator  # calculate the true R_m
        # Now calculate current for new series resistance
        denominator = (R_m + (1 - fraction) * R_s) * (1 + tau_corr / sampInt)
        if denominator != 0:
            I_wave[i] = tau_corr / (
                tau_corr + sampInt) * I_wave[i - 1] + voltage / denominator
        else:
            I_wave[i] = I_wave[i - 1]  # old value

    # Convert units of I_wave to pA
    I_wave *= 1e+12

    # Print information about the compensation performed
    print "Percentage series resistance compensation was %.1f%%" % (fraction *
                                                                    100)
    print "Residual (uncompensated) series resistance is %.2f Mohm" % (
        R_s_final * 1e-6)

    return stf.new_window_list([I_wave])
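
The compensation fraction used above is simple arithmetic on the measured and the desired residual series resistance; an illustrative calculation with made-up numbers:

R_s = 5e6                            # measured series resistance, ohms
R_s_final = 2e6                      # desired residual series resistance, ohms
fraction = 1 - (R_s_final / R_s)     # 0.6, i.e. 60% compensated offline
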