Example 1
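The examples below are extension functions for the Python shell embedded in Stimfit, and they call the stf module that Stimfit exposes. As a minimal sketch, the preamble assumed throughout (the scipy imports are only needed by the interpolation and filtering examples) is:

import numpy as np
from scipy import interpolate, signal
import stf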
def subtract_base():
    """
    """
    subtracted_traces = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        subtracted_traces.append(stf.get_trace() - stf.get_base())
    stf.new_window_list(subtracted_traces)

    return
Example 2
def reverse():
    """
    Reverse the order of all traces
    """

    reversed_traces = []
    n = stf.get_size_channel()
    for i in range(n):
        reversed_traces.append(stf.get_trace(n - 1 - i))
    stf.new_window_list(reversed_traces)

    return
Example 3
def sloping_base(trace=-1, method='scale'):
    """
    Correct for linear sloping baseline in the displayed trace of the active channel. 
    Useful for approximate correction of photobleaching during short periods of imaging.
    Available methods are 'scale' or 'subtract'.
    """

    # Get trace and trace attributes
    selected_trace = stf.get_trace(trace)
    fit_start = int(stf.get_base_start())
    fit_end = int(stf.get_base_end())

    # Linear fit to baseline region
    fit = np.polyfit(np.arange(fit_start, fit_end, 1, int),
                     selected_trace[fit_start:fit_end], 1)

    # Correct trace for sloping baseline
    l = stf.get_size_trace(trace)
    t = np.arange(0, l, 1, np.double)
    if method == 'subtract':
        corrected_trace = selected_trace - t * fit[0]
    elif method == 'scale':
        corrected_trace = selected_trace * fit[1] / (t * fit[0] + fit[1])
    else:
        raise ValueError("method must be 'scale' or 'subtract'")

    return stf.new_window_list([corrected_trace])
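A hypothetical call, assuming a trace is displayed and the baseline cursors bracket the sloping region to fit:

sloping_base(method='subtract')   # or method='scale'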
Example 4
def blankstim():
    """
    Blank values between fit cursors in all traces in the active channel.
    Typically used to blank stimulus artifacts.
    """

    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    blanked_traces = []
    for i in range(stf.get_size_channel()):
        tmp = stf.get_trace(i)
        tmp[fit_start:fit_end] = np.nan
        blanked_traces.append(tmp)
    stf.new_window_list(blanked_traces)

    return
Example 5
def median_filter(n):
    """
    Perform median smoothing filter on the selected traces. 
    Computationally this is achieved by a central simple moving 
    median over a sliding window of n points.

    The function uses reflect (or bounce) end corrections

    """

    # Check that at least one trace was selected
    if not stf.get_selected_indices():
        raise IndexError('No traces were selected')

    # Check that the number of points in the sliding window is odd
    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply smoothing filter
    filtered_traces = []
    for i in stf.get_selected_indices():
        l = stf.get_size_trace(i)
        padded_trace = np.pad(stf.get_trace(i), (n - 1) // 2, 'reflect')
        filtered_traces.append(
            [np.median(padded_trace[j:n + j]) for j in range(l)])

    print("Window width was %g ms" % (stf.get_sampling_interval() * (n - 1)))

    return stf.new_window_list(filtered_traces)
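Example call (the rank is illustrative; traces must be selected first):

median_filter(9)   # 9-point central moving median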
Example 6
def normalize():
    """
    Normalize to the peak amplitude of the selected trace and 
    scale all other traces in the currently active channel by 
    the same factor. 

    Ensure that you subtract the baseline before normalizing
    """

    # Find index of the selected trace
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to normalize to')

    # Measure peak amplitude in the selected trace
    stf.set_trace(idx[0])
    refval = np.abs(stf.get_peak())

    # Apply normalization
    scaled_traces = [
        stf.get_trace(i) / refval for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
Example 7
def risealign():
    """
    Shift the selected traces in the currently active channel to align to the rise. 

    """

    # Measure rise-time indices in the selected traces
    rtidx = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        rtidx.append(stf.rtlow_index())

    # Find the earliest rise
    rtref = min(rtidx)

    # Align the traces
    j = 0
    shifted_traces = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        shift = int(round(rtref - rtidx[j]))
        shifted_traces.append(np.roll(stf.get_trace(), shift))
        j += 1

    return stf.new_window_list(shifted_traces)
Example 8
def peakalign():
    """
    Shift the selected traces in the currently active channel to align the peaks. 

    """

    # Measure peak indices in the selected traces
    pidx = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        pidx.append(stf.peak_index())

    # Find the earliest peak
    pref = min(pidx)

    # Align the traces
    j = 0
    shifted_traces = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        shift = int(pref - pidx[j])
        shifted_traces.append(np.roll(stf.get_trace(), shift))
        j += 1

    return stf.new_window_list(shifted_traces)
Example 9
def peakscale():
    """
    Scale the selected traces in the currently active channel to their mean peak amplitude. 

    """

    # Measure baseline in selected traces
    base = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        base.append(stf.get_base())

    # Subtract baseline from selected traces
    stf.subtract_base()

    # Measure peak amplitudes in baseline-subtracted traces
    stf.select_all()
    peak = []
    for i in stf.get_selected_indices():
        stf.set_trace(i)
        peak.append(stf.get_peak())

    # Calculate scale factors that make each peak equal to the mean peak amplitude
    scale_factor = np.array(peak) / np.mean(peak)

    # Scale the traces and apply offset equal to the mean baseline
    scaled_traces = [
        stf.get_trace(i) / scale_factor[i] + np.mean(base)
        for i in stf.get_selected_indices()
    ]

    # Close window of baseline-subtracted traces
    stf.close_this()

    return stf.new_window_list(scaled_traces)
Example 10
def hpfilter(n):
    """
    Perform median smoothing filter on the active trace.
    Computationally this is achieved by a central simple moving
    median over a sliding window of n points. The function then
    subtracts the smoothed trace from the original trace.
    The function uses reflect (or bounce) end corrections
    """

    # Check that the number of points in the sliding window is odd

    n = int(n)
    if n % 2 != 1:
        raise ValueError('The filter rank must be an odd integer')
    elif n <= 1:
        raise ValueError('The filter rank must be > 1')

    # Apply smoothing filter
    l = stf.get_size_trace()
    padded_trace = np.pad(stf.get_trace(), (n - 1) // 2, 'reflect')
    filtered_trace = np.array(
        [np.median(padded_trace[j:n + j]) for j in range(l)])

    print("Window width was %g ms" % (stf.get_sampling_interval() * (n - 1)))

    # Apply subtraction
    subtracted_trace = stf.get_trace() - filtered_trace

    return stf.new_window_list([subtracted_trace])
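Example call (illustrative rank):

hpfilter(101)   # subtract a 101-point moving median from the active trace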
Example 11
def SBR():
    """
    Calculate signal-to-baseline ratio (SBR) or delta F / F0 for
    traces in the active window. The result is expressed as a %.
    Useful for imaging data.

    Ensure that the baseline cursors are positioned appropriately.
    """

    SBR_traces = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        base = stf.get_base()
        SBR_traces.append(100 * (stf.get_trace(i) - base) / base)
    stf.new_window_list(SBR_traces)
    stf.set_yunits('%')

    return
Example 12
def upsample_flex():
    """
    Upsample to sampling interval of 1 ms using cubic spline interpolation
    """

    old_time = [
        i * stf.get_sampling_interval() for i in range(stf.get_size_trace())
    ]
    new_time = range(
        int(np.fix((stf.get_size_trace() - 1) * stf.get_sampling_interval())))
    new_traces = []
    for i in range(stf.get_size_channel()):
        f = interpolate.interp1d(old_time, stf.get_trace(i), 'cubic')
        new_traces.append(f(new_time))
    stf.new_window_list(new_traces)
    stf.set_sampling_interval(1)

    return
Example 13
def yoffset(value):
    """
    Apply a common offset to all traces in the currently active channel.
    """

    offset_traces = [
        stf.get_trace(i) + value for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(offset_traces)
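Example call (illustrative value):

yoffset(-10.0)   # shift every trace down by 10 y-units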
Example 14
def remove_artifacts_from_sweeps(artifact_start_time, artifact_end_time):
    """
    Cut the samples between artifact_start_time and artifact_end_time
    (both in ms) out of every sweep in the active channel. The shortened
    sweeps are shown in a new window and are also returned concatenated
    into a single continuous trace.
    """

    sampling_interval = stf.get_sampling_interval()
    artifact_start = int(artifact_start_time / sampling_interval)
    artifact_end = int(artifact_end_time / sampling_interval)

    continuous_trace = []
    output_artifacts_removed = []

    for sweep in range(stf.get_size_channel()):
        sweep_trace_before_artifact = stf.get_trace(sweep)[0:artifact_start]
        sweep_trace_after_artifact = stf.get_trace(sweep)[artifact_end:]
        sweep_trace = np.append(sweep_trace_before_artifact,
                                sweep_trace_after_artifact)
        output_artifacts_removed.append(sweep_trace)
        continuous_trace.extend(sweep_trace)

    stf.new_window_list(output_artifacts_removed)

    return continuous_trace
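A hypothetical call that cuts out the samples between 100 ms and 102 ms from every sweep:

remove_artifacts_from_sweeps(100.0, 102.0)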
Example 15
def interpstim():
    """
    Interpolate values between fit cursors in all traces in the active channel.
    Typically used to remove stimulus artifacts.
    """

    x = np.array(
        [i * stf.get_sampling_interval() for i in range(stf.get_size_trace())])
    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    interp_traces = []
    for i in range(stf.get_size_channel()):
        tmp = stf.get_trace(i)
        tmp[fit_start:fit_end] = np.interp(x[fit_start:fit_end],
                                           [x[fit_start], x[fit_end]],
                                           [tmp[fit_start], tmp[fit_end]])
        interp_traces.append(tmp)
    stf.new_window_list(interp_traces)

    return
Example 16
def analyze_iv(pulses, trace_start=0, factor=1.0):
    """Creates an IV for the currently active channel.

    Keyword arguments:
    pulses --      Number of pulses for the IV.
    trace_start -- ZERO-BASED index of the first trace to be
                   used for the IV. Note that this is one less
                   than what is displayed in the drop-down box.
    factor --      Multiply result with an optional factor, typically
                   from some external scaling.
    Returns:
    True upon success, False otherwise.
    """

    if not stf.check_doc():
        print("Couldn't find an open file; aborting now.")
        return False

    if pulses < 1:
        print("Number of pulses has to be greater than or equal to 1.")
        return False

    # Create an empty list to collect the averaged pulse responses
    channel = list()
    for m in range(pulses):
        # A temporary array to calculate the average
        # (np.empty leaves its contents uninitialized):
        trace_set = np.empty(
            (int((stf.get_size_channel() - m - 1 - trace_start) / pulses) + 1,
             stf.get_size_trace(trace_start + m)))
        n_set = 0
        for n in range(trace_start + m, stf.get_size_channel(), pulses):
            # Add this trace to the set:
            trace_set[n_set, :] = stf.get_trace(n)
            n_set = n_set + 1

        # calculate the average, scale it, and append it as a new section:
        channel.append(np.average(trace_set, 0) * factor)

    stf.new_window_list(channel)

    return True
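A hypothetical call for a protocol that cycles through 3 pulses, starting at the first trace:

analyze_iv(3)   # equivalent to analyze_iv(3, trace_start=0, factor=1.0)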
Example 17
File: heka.py Project: nrsc/nphys
def read_heka_stf(filename):
    channels, channelnames, channelunits, channeldt = read_heka(filename)
    for nc, channel in enumerate(channels):
        if channelunits[nc] == "V":
            for ns, sweep in enumerate(channel):
                channels[nc][ns] = np.array(channels[nc][ns])
                channels[nc][ns] *= 1.0e3
            channelunits[nc] = "mV"
        if channelunits[nc] == "A":
            for ns, sweep in enumerate(channel):
                channels[nc][ns] = np.array(channels[nc][ns])
                channels[nc][ns] *= 1.0e12
            channelunits[nc] = "pA"

    import stf
    stf.new_window_list(channels)
    for nc, name in enumerate(channelnames):
        stf.set_channel_name(name, nc)
    for nc, units in enumerate(channelunits):
        for ns in range(stf.get_size_channel()):
            stf.set_yunits(units, ns, nc)
    stf.set_sampling_interval(channeldt[0] * 1e3)
Example 18
def multiscale_traces(multiplier_list):
    """
    Scale each trace to the respective multiplier in the list argument
    """

    if len(multiplier_list) != stf.get_size_channel():
        raise ValueError('The number of multipliers and traces are not equal')
    scaled_traces = [
        stf.get_trace(i) * multiplier_list[i]
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
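Example call for a channel containing exactly three traces (illustrative multipliers):

multiscale_traces([1.0, -1.0, 0.5])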
Example 19
def mean_every_Nth(N):
    """
    Perform mean of the first and every Nth trace
    """

    m = stf.get_size_channel() / (N - 1)
    if np.fix(m) != m:
        raise ValueError('The number of traces is not divisible by N - 1')
    m = int(m)

    # loop index calculations: [[i*n+j for j in range(n)] for i in range(m)]
    binned_traces = [[
        stf.get_trace((i + 1) + j * (N - 1) - 1) for j in range(m)
    ] for i in range(N - 1)]
    mean_traces = [np.mean(binned_traces[i], 0) for i in range(N - 1)]

    return stf.new_window_list(mean_traces)
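For example, with 12 traces in the channel, mean_every_Nth(4) averages traces {0, 3, 6, 9}, {1, 4, 7, 10} and {2, 5, 8, 11} (zero-based), returning a window with 3 mean traces:

mean_every_Nth(4)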
Example 20
def rmeantraces(binwidth):
    """
    Perform running mean of all traces in the active channel. 
    The number of traces averaged is defined by binwidth. 
    """

    n = binwidth
    N = stf.get_size_channel()
    m = N / n
    if np.fix(m) != m:
        raise ValueError('The number of traces is not divisible by the binwidth')
    m = int(m)

    # loop index calculations: [[i*n+j for j in range(n)] for i in range(m)]
    binned_traces = [[stf.get_trace(i * n + j) for j in range(n)]
                     for i in range(m)]
    mean_traces = [np.mean(binned_traces[i], 0) for i in range(m)]

    return stf.new_window_list(mean_traces)
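For example, with 12 traces in the channel, rmeantraces(4) averages traces {0-3}, {4-7} and {8-11} into 3 mean traces:

rmeantraces(4)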
Example 21
def subtract_trace():
    """
    Subtract the selected trace from all traces in the currently active channel

    """

    # Find index of the selected trace to subtract from all the other traces
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to subtract from the others')

    # Apply subtraction
    subtracted_traces = [
        stf.get_trace(i) - stf.get_trace(idx[0])
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(subtracted_traces)
Example 22
def cut_sweeps(start, delta, sequence=None):
    """
    Cuts a sequence of traces and present
    them in a new window.

    Arguments:

    start       -- starting point (in ms) to cut.
    delta       -- time interval (in ms) to cut
    sequence    -- list of indices to be cut. If None, every trace in the
                    channel will be cut.

    Returns:
    A new window with the traced cut.

    Examples:
    cut_sweeps(200,300) cut the traces between t=200 ms and t=500 ms 
        within the whole channel.
    cut_sweeps(200,300,range(30,60)) the same as above, but only between 
        traces 30 and 60.
    cut_sweeps(200,300,stf.get_selected_indices()) cut between 200 ms               and 500 ms only in the selected traces.

    """

    # select every trace in the channel if no selection is given in sequence
    if sequence is None:
        sequence = range(stf.get_size_channel())

    # transform time into sampling points
    dt = stf.get_sampling_interval()

    pstart = int(round(start / dt))
    pdelta = int(round(delta / dt))

    # creates a destination python list
    dlist = [stf.get_trace(i)[pstart:(pstart + pdelta)] for i in sequence]

    return stf.new_window_list(dlist)
Example 23
def rscomp(V_hold=-60,
           V_reversal=0,
           R_s_final=2,
           V_step=-5,
           step_start=10,
           step_duration=20):
    """
    Function for offline series resistance compensation on current trace in the active channel
    
    This script is my adaptation of Erwin Neher's Igor functions:
       SeriesresistanceComp
    These were freely available in in Proc02_Apr4Web.ipf from Erwin Neher's webpage:
     http://www3.mpibpc.mpg.de/groups/neher/index.php?page=software
     (last accessed: 01 July 2014)
     
    Instead of setting the fraction compensation, this function asks for the desired
    uncompensated series resistance. Whole-cell properties are calcuated using the wcp
    function.
    
    rsomp replaces current traces by their series-compensated version;
    the value at i is replaced by the average at i and i+1
    R_s is in ohms, C_m in Farads, fraction is the fraction to be compensated
    if R_s was 5 MOhm in the experiment and if it was 50% hardware compensated
    then R_s = 2.5e6 has to be entered and f=1 for a complete overall compensation
    The routine, similarly to that published by Traynelis J. Neurosc. Meth. 86:25,
    compensates the frequency response, assuming a single R_s*C_m - time constant
    (at constant V-hold) and a three component equivalent circuit for the pipette cell
    assembly with  R_s, C_m, R_m
    
    Theory: V_h = R_s*I+U_m;
    I_r is membrane resistive current,
    I is total current
    I_r = I-I_c = I-C_m*dU_m/dt = I+C_m*R_s*dI/dt (because R_s*I+U_m = const.)
    G_m=I_r/ U_m = (I+C_m*R_s*dI/dt)/ (V_h-R_s*I)
    For complete correction (fraction = 1) : I_corr = V_h*(I+C_m*R_s*dI/dt)/ (V_h-R_s*I)
    
    """

    # Get Whole cell properties
    wcp_stats = wcp(V_step, step_start, step_duration)

    # sampInt is sampling interval in seconds
    sampInt = stf.get_sampling_interval() * 1e-3

    # R_c is the cell membrane resistance in ohms
    R_c = wcp_stats["Cell resistance (Mohm)"] * 1e+6

    # R_s is the (initial) series resistance in ohms
    R_s = wcp_stats["Series resistance (Mohm)"] * 1e+6

    # C_m is the cell capacitance in farads
    C_m = wcp_stats["Cell capacitance (pF)"] * 1e-12

    # R_s_final is the final (uncompensated) series resistance in ohms
    R_s_final *= 1e+6

    # fraction is the amount of compensation
    fraction = 1 - (R_s_final / R_s)

    # Convert unit of holding and reversal potentials to volts
    V_hold *= 1e-3
    V_reversal *= 1e-3

    # Calculate voltage difference
    voltage = V_hold - V_reversal

    # Calculate cell time constants
    #tau = R_s * C_m
    tau = (R_s * R_c * C_m) / (R_s + R_c)  # more accurate
    tau_corr = tau * (1 - fraction)

    # Assign current recording trace to I_wave and convert to amps
    I_wave = stf.get_trace() * 1e-12

    # First point: (we have to calculate this separately, because we need the value at i-1 below)
    denominator = voltage - R_s * fraction * I_wave[0]
    if denominator != 0:
        I_wave[0] = I_wave[0] * (voltage / denominator)

    for i in range(1, stf.get_size_trace() - 1):
        # this is the loop doing the correction for all other points
        # first calculate R_m for zero series resistance under the assumptions
        # that  U_m + U_Rs = const = voltage
        current = (I_wave[i + 1] + I_wave[i]) / 2  # the in-between (mean) value
        derivative = (I_wave[i + 1] - I_wave[i]) / sampInt
        denominator = current + tau * derivative
        if denominator != 0:
            R_m = (voltage -
                   R_s * current) / denominator  # calculate the true R_m
        # Now calculate the current for the new series resistance
        denominator = (R_m + (1 - fraction) * R_s) * (1 + tau_corr / sampInt)
        if denominator != 0:
            I_wave[i] = tau_corr / (
                tau_corr + sampInt) * I_wave[i - 1] + voltage / denominator
        else:
            I_wave[i] = I_wave[i - 1]  # old value

    # Convert units of I_wave to pA
    I_wave *= 1e+12

    # Print information about the compensation performed
    print("Percentage series resistance compensation was %.1f%%" %
          (fraction * 100))
    print("Residual (uncompensated) series resistance is %.2f Mohm" %
          (R_s_final * 1e-6))

    return stf.new_window_list([I_wave])
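A hypothetical call targeting 2 Mohm of residual (uncompensated) series resistance; note that rscomp depends on a wcp function that is not shown in this collection:

rscomp(V_hold=-60, V_reversal=0, R_s_final=2)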
Example 24
def combiRec(offset):
    """
    Load a series of ephysIO (.phy) recordings selected via a file dialog,
    display them in a new window, and derive the mixed conductance, the
    NMDA-mediated conductance and the NMDA-mediated current using the
    holding potentials recorded in the file notes. Returns the list of
    holding potentials.
    """
    import os
    import ephysIO

    # Import required modules for file IO
    from tkinter import Tk
    from tkinter import filedialog as tkFileDialog
    from gc import collect

    # Use file open dialog to obtain file path
    root = Tk()
    opt = dict(defaultextension='.phy',
               filetypes=[('ephysIO (HDF5) file', '*.phy'),
                          ('All files', '*.*')])
    if 'loadcwd' not in globals():
        global loadcwd
    else:
        opt['initialdir'] = loadcwd
    filepath = tkFileDialog.askopenfilename(**opt)
    root.withdraw()

    # Set this to file name prefix (i.e. the protocol name)
    filename = filepath.rsplit('/', 1)[-1]  # e.g. "1.phy"
    dirpath = filepath.rsplit(
        '/', 1)[0]  # e.g. "<path>/pair_000/dual_mixed_eEPSC_000"
    protocol = (dirpath.rsplit('/',
                               1)[1]).rsplit('_',
                                             1)[0]  # e.g. "dual_mixed_eEPSC"
    rootdir = dirpath.rsplit('/', 1)[0]  # e.g. "<path>/pair_000/"

    # Load data from channel 1
    os.chdir(rootdir)
    count = 0
    allwaves = []
    notes = ''
    holding = []
    while True:
        wavename = protocol + "_" + ("000" + str(count))[-3::]
        if os.path.isdir(wavename):
            os.chdir(wavename)
            data = ephysIO.PHYload(filename)
            allwaves.append(1.0e+12 * data.get("array")[1])
            notes += 'Wave %d\n' % (count) + '\n'.join(
                data['notes']) + '\n\n'
            # print(data['notes'][9][10::])
            holding.append(float(data['notes'][9][10::]))
            count += 1
            os.chdir("..")
        else:
            break
    stf.new_window_list(allwaves)
    stf.set_xunits('m' + data.get('xunit'))
    stf.set_yunits('p' + data.get('yunit'))
    stf.set_sampling_interval(1.0e+3 * data.get('xdiff'))
    stf.set_recording_comment(notes)
    gwaves = [stf.get_trace(i) / (holding[i] - offset) for i in range(count)]
    stf.new_window_list(gwaves)
    stf.set_recording_comment('Mixed AMPA/NMDA-mediated conductance')
    gnmda = [stf.get_trace(i) - stf.get_trace(0) for i in range(count)]
    stf.new_window_list(gnmda)
    stf.set_recording_comment('NMDA-mediated conductance')
    ivnmda = [stf.get_trace(i) * (holding[i] - offset) for i in range(count)]
    stf.new_window_list(ivnmda)
    stf.set_recording_comment('NMDA-mediated current')

    return holding
Example 25
def EPSPtrains(latency=200,
               numStim=4,
               intvlList=[1, 0.8, 0.6, 0.4, 0.2, 0.1, 0.08, 0.06, 0.04, 0.02]):

    # Initialize
    numTrains = len(intvlList)  # Number of trains
    intvlArray = np.array(intvlList) * 1000  # Units in ms
    si = stf.get_sampling_interval()  # Units in ms

    # Background subtraction
    traceBaselines = []
    subtractedTraces = []
    k = 1e-4
    x = [i * stf.get_sampling_interval() for i in range(stf.get_size_trace())]
    for i in range(numTrains):
        stf.set_trace(i)
        z = x
        y = stf.get_trace()
        traceBaselines.append(y)
        ridx = []
        if intvlArray[i] > 500:
            for j in range(numStim):
                ridx += range(
                    int(round(((intvlArray[i] * j) + latency - 1) / si)),
                    int(round(
                        ((intvlArray[i] * (j + 1)) + latency - 1) / si)) - 1)
        else:
            ridx += range(
                int(round((latency - 1) / si)),
                int(
                    round(((intvlArray[i] *
                            (numStim - 1)) + latency + 500) / si)) - 1)
        ridx += range(int(round(4999 / si)), int(round(5199 / si)))
        z = np.delete(z, ridx, 0)
        y = np.delete(y, ridx, 0)
        yi = np.interp(x, z, y)
        yf = signal.symiirorder1(yi, (k**2), 1 - k)
        traceBaselines.append(yf)
        subtractedTraces.append(stf.get_trace() - yf)
    stf.new_window_list(traceBaselines)
    stf.new_window_list(subtractedTraces)

    # Measure depolarization
    # Initialize variables
    a = []
    b = []

    # Set baseline start and end cursors
    stf.set_base_start(int(round(
        (latency - 50) / si)))  # Average during 50 ms period before stimulus
    stf.set_base_end(int(round(latency / si)))

    # Set fit start and end cursors
    stf.set_fit_start(int(round(latency / si)))
    stf.set_fit_end(int(round(
        ((intvlArray[1] * (numStim - 1)) + latency + 1000) /
        si)))  # Include a 1 second window after last stimulus

    # Start AUC calculations
    for i in range(numTrains):
        stf.set_trace(i)
        stf.measure()
        b.append(stf.get_base())
        n = int(stf.get_fit_end() + 1 - stf.get_fit_start())
        x = np.array([k * stf.get_sampling_interval() for k in range(n)])
        y = stf.get_trace()[int(stf.get_fit_start()):int(stf.get_fit_end() +
                                                         1)]
        a.append(np.trapz(y - b[i], x))  # Units in V.s

    return a
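A hypothetical call with the default stimulus settings; the returned list holds one area-under-the-curve value per train:

auc = EPSPtrains(latency=200, numStim=4)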
Example 26
def loadacq4(channel=1):
    """
    Load electrophysiology recording data from acq4 hdf5 (.ma) files.
    By default the primary recording channel is loaded.  
    
    If the file is in a folder entitled 000, loadacq4 will load
    the recording traces from all sibling folders (000,001,002,...)
    """

    # Import required modules for file IO
    from tkinter import Tk
    from tkinter import filedialog as tkFileDialog
    from gc import collect

    # Use file open dialog to obtain file path
    root = Tk()
    opt = dict(defaultextension='.ma',
               filetypes=[('ACQ4 (HDF5) file', '*.ma'), ('All files', '*.*')])
    if 'loadcwd' not in globals():
        global loadcwd
    else:
        opt['initialdir'] = loadcwd
    filepath = tkFileDialog.askopenfilename(**opt)
    root.withdraw()

    if filepath != '':

        # Load data into python
        loadcwd = filepath.rsplit('/', 1)[0]
        import ephysIO
        data = ephysIO.MAload(filepath, channel)
        print(filepath)

        # Display data in Stimfit
        import stf
        if data.get('yunit') == 'A':
            stf.new_window_list(1.0e+12 * data.get('array')[1::])
            stf.set_yunits('p' + data.get('yunit'))
        elif data.get('yunit') == 'V':
            stf.new_window_list(1.0e+3 * data.get('array')[1::])
            stf.set_yunits('m' + data.get('yunit'))
        stf.set_sampling_interval(1.0e+3 * data['xdiff'])
        stf.set_xunits('m' + data.get('xunit'))
        stf.set_trace(0)

        # Import metadata into stimfit
        stf.set_recording_comment('\n'.join(data['notes']))
        date = data['saved'][0:8]
        date = tuple(map(int, (date[0:4], date[4:6], date[6:8])))
        stf.set_recording_date('%s-%s-%s' % date)
        time = data['saved'][9::]
        time = tuple(map(int, (time[0:2], time[2:4], time[4:6])))
        stf.set_recording_time('%i-%i-%i' % time)

    else:

        data = {}

    collect()

    return
Example 27
def loadflex():
    """
    Load raw traces of FlexStation data from CSV files 
    """

    # Import required modules
    from os import chdir
    import csv
    import stf
    import numpy as np
    from tkinter import Tk
    from tkinter import filedialog as tkFileDialog
    from gc import collect

    # Use file open dialog to obtain file path
    root = Tk()
    opt = dict(defaultextension='.csv',
               filetypes=[('Comma Separated Values file', '*.csv'),
                          ('All files', '*.*')])
    if 'loadcwd' not in globals():
        global loadcwd
    else:
        opt['initialdir'] = loadcwd
    filepath = tkFileDialog.askopenfilename(**opt)
    root.withdraw()

    if filepath != '':

        # Move to file directory and check file version
        loadcwd = filepath.rsplit('/', 1)[0]
        print(filepath)
        chdir(loadcwd)

        # Load data into numpy array
        with open(filepath, 'r', newline='') as csvfile:
            csvtext = csv.reader(csvfile)
            data = []
            for row in csvtext:
                data.append(row)
        data = np.array(data)
        time = data.T[0][1::].astype(float)
        sampling_interval = np.mean(np.diff(time))
        comment = 'Temperature: %d degrees Centigrade' % np.mean(
            data.T[1][1::].astype(float))

        # Plot fluorescence measurements
        well = data.T[2::, 0]
        data = data.T[2::, 1::]
        ridx = []
        idx = []
        for i in range(96):
            if np.all(data[i] == ''):
                ridx.append(i)
            else:
                idx.append(i)
        data = np.delete(data, ridx, 0)
        data[data == ''] = 'NaN'
        data[data == ' '] = 'NaN'
        didx = []
        for i in range(np.size(data, 1)):
            if np.any(data[::, i] == 'NaN', 0):
                didx.append(i)
        time = np.delete(time, didx, 0)
        data = np.delete(data, didx, 1)
        data = data.astype(float)
        stf.new_window_list(data)

        # Set x-units and sampling interval
        stf.set_xunits('ms')
        stf.set_yunits(' ')
        stf.set_sampling_interval(1000 * sampling_interval)

        # Record temperature
        comment += '\nTr\tWell'
        for i in range(len(idx)):
            comment += '\n%i\t%s' % (i + 1, well[idx[i]])
        comment += '\nInitial time point: %.3g' % time[0]
        print(comment)
        stf.set_recording_comment(comment)

    else:

        data = {}

    collect()

    return
Example 28
def loadmat():
    """
    Load electrophysiology recordings from ephysIO
    HDF5-based Matlab v7.3 (.mat) files 
    """

    # Import required modules for file IO
    from Tkinter import Tk
    import tkFileDialog
    from gc import collect

    # Use file open dialog to obtain file path
    root = Tk()
    opt = dict(defaultextension='.mat',
               filetypes=[('MATLAB v7.3 (HDF5) file', '*.mat'),
                          ('All files', '*.*')])
    if 'loadcwd' not in globals():
        global loadcwd
    else:
        opt['initialdir'] = loadcwd
    filepath = tkFileDialog.askopenfilename(**opt)
    root.withdraw()

    if filepath != '':

        # Move to file directory and check file version
        loadcwd = filepath.rsplit('/', 1)[0]
        from os import chdir
        print(filepath)
        chdir(loadcwd)

        # Load data into python
        import ephysIO
        data = ephysIO.MATload(filepath)

        # Display data in Stimfit
        import stf
        if data.get('xdiff') > 0:
            if data.get('yunit') == "V":
                stf.new_window_list(1.0e+3 * np.array(data.get('array')[1::]))
                stf.set_yunits('m' + data.get('yunit'))
            elif data.get('yunit') == "A":
                stf.new_window_list(1.0e+12 * data.get('array')[1::])
                stf.set_yunits('p' + data.get('yunit'))
            else:
                stf.new_window_list(data.get('array')[1::])
                stf.set_yunits(data.get('yunit'))
            stf.set_sampling_interval(1.0e+3 * data.get('xdiff'))
            stf.set_xunits('m' + data.get('xunit'))
            stf.set_trace(0)
            stf.set_recording_comment('\n'.join(data['notes']))
            if data['saved'] != '':
                date = data['saved'][0:8]
                date = tuple(map(int, (date[0:4], date[4:6], date[6:8])))
                stf.set_recording_date('%s-%s-%s' % date)
                time = data['saved'][9::]
                time = tuple(map(int, (time[0:2], time[2:4], time[4:6])))
                stf.set_recording_time('%i-%i-%i' % time)
        elif data.get('xdiff') == 0:
            raise ValueError("Sample interval is not constant")

    else:

        data = {}

    collect()

    return