Example #1
def find_AP_peak_ADP(start_msec, delta_msec, current_start, current_delta, threshold_value, deflection_direction, mark_option):
    """Count APs in traces with gradually increasing current-injection steps.
    Inputs: time (msec) to start search, length of search region, starting
    current value, current delta between traces, threshold value, deflection
    direction ('up'/'down'), mark traces (True/False)."""

    loaded_file = stf.get_filename()[:-4]
    event_counts = np.zeros((stf.get_size_channel(), 2))
    for trace_ in range(stf.get_size_channel()):
        # get AP counts and sample points in the current trace
        direction_input = (deflection_direction == 'up')
        [trace_count, sample_points, time_points] = jjm_count(
            start_msec, delta_msec, threshold=threshold_value,
            up=direction_input, trace=trace_, mark=mark_option)

        # get ADP values
        values, indices = find_ADPs(sample_points)
        print(values)
        out_array = np.array([values, indices])

        np.savetxt(loaded_file + 'trace' + str(trace_).zfill(3) + 'ADP_values.csv',
                   out_array, delimiter=',', newline='\n')

        event_counts[trace_][1] = trace_count
        event_counts[trace_][0] = current_start + (current_delta * trace_)

    np.savetxt(loaded_file + '_AP_counts.csv', event_counts, delimiter=',', newline='\n')
    return event_counts
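
A minimal usage sketch, assuming a current-clamp recording is open in Stimfit and that jjm_count and find_ADPs are importable; the search window, current step and threshold below are placeholders, not values from any particular protocol:

counts = find_AP_peak_ADP(500, 800, -100, 20, 0, 'up', True)  # placeholder cursors and current steps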
Example #2
def scan_through_train_expt(params_expt_input, train_increment, num_stims):

    len_peak_region_in_samples = round(
        (params_expt_input[3] - params_expt_input[2]) /
        stf.get_sampling_interval())

    expt_peaks = np.zeros((stf.get_size_channel(), num_stims))
    expt_peak_arrays = np.zeros(
        (stf.get_size_channel(), num_stims, len_peak_region_in_samples))

    trace = 0
    while trace < stf.get_size_channel():

        # binds a reference (not a copy), so the in-place window shifts
        # below accumulate from trace to trace
        params_expt = params_expt_input

        [expt_peaks[trace], expt_peak_arrays[trace]
         ] = scan_through_train(params_expt, train_increment, num_stims, trace)
        params_expt[2] = params_expt_input[2] - (train_increment * num_stims)
        params_expt[3] = params_expt_input[3] - (train_increment * num_stims)

        trace += 1

    loaded_file = stf.get_filename()[:-4]  # strip the '.abf' extension
    np.savetxt(loaded_file + '_peaks.csv',
               expt_peaks,
               delimiter=',',
               newline='\n')

    return (expt_peaks, expt_peak_arrays)
Example #3
def multiscale_traces(multiplier_list):
    """
    Scale each trace to the respective multiplier in the list argument
    """

    if len(multiplier_list) != stf.get_size_channel():
        raise ValueError('The number of multipliers and traces are not equal')
    scaled_traces = [
        stf.get_trace(i) * multiplier_list[i]
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
Example #4
def find_AP_peaks(start_msec, delta_msec, current_start, current_delta, threshold_value, deflection_direction, mark_option):
    """Count APs in traces with gradually increasing current-injection steps.
    Inputs: time (msec) to start search, length of search region, starting
    current value, current delta between traces, threshold value, deflection
    direction ('up'/'down'), mark traces (True/False)."""

    event_counts = np.zeros((stf.get_size_channel(), 2))

    for trace_ in range(stf.get_size_channel()):
        # spells.count_events expects a boolean 'up' argument, so convert the
        # 'up'/'down' string here (the raw string would always be truthy)
        event_counts[trace_][1] = spells.count_events(
            start_msec, delta_msec, threshold=threshold_value,
            up=(deflection_direction == 'up'), trace=trace_, mark=mark_option)
        event_counts[trace_][0] = current_start + (current_delta * trace_)

    loaded_file = stf.get_filename()[:-4]
    np.savetxt(loaded_file + '_AP_counts.csv', event_counts, delimiter=',', newline='\n')
    return event_counts
Example #5
def resistance( base_start, base_end, peak_start, peak_end, amplitude):
    """Calculates the resistance from a series of voltage clamp traces.

    Keyword arguments:
    base_start -- Starting index (zero-based) of the baseline cursors.
    base_end   -- End index (zero-based) of the baseline cursors.
    peak_start -- Starting index (zero-based) of the peak cursors.
    peak_end   -- End index (zero-based) of the peak cursors.
    amplitude  -- Amplitude of the voltage command.

    Returns:
    The resistance.
    """

    if not stf.check_doc():
        print('Couldn\'t find an open file; aborting now.')
        return 0

    # A temporary array to calculate the average:
    array = np.empty((stf.get_size_channel(), stf.get_size_trace()))
    for n in range(stf.get_size_channel()):
        # Add this trace to the set:
        array[n] = stf.get_trace(n)

    # calculate the average and show it in a new window:
    stf.new_window(np.average(array, 0))

    # set peak cursors:
    # -1 means all points within peak window.
    if not stf.set_peak_mean(-1): 
        return 0 
    if not stf.set_peak_start(peak_start): 
        return 0
    if not stf.set_peak_end(peak_end): 
        return 0

    # set base cursors:
    if not stf.set_base_start(base_start): 
        return 0
    if not stf.set_base_end(base_end): 
        return 0

    # measure everything:
    stf.measure()

    # calculate r_seal and return:
    return amplitude / (stf.get_peak()-stf.get_base())
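
A minimal usage sketch for resistance(); the baseline/peak sample indices and the -5 mV command amplitude are placeholder assumptions:

r_seal = resistance(0, 199, 1050, 1199, -5)  # placeholder cursor indices and amplitude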
Example #6
def plot_traces(plotwindow=None, ichannel=0, vchannel=1):
    """
    Show traces in a figure

    Parameters
    ----------
    plotwindow : (float, float), optional
        Plot window (in ms from beginning of trace)
        None for whole trace. Default: None
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1
    """

    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 1)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, 0], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, 0], hasx=False, hasy=False, sharex=ax_currents)
    if plotwindow is not None:
        istart = int(plotwindow[0]/dt)
        istop = int(plotwindow[1]/dt)
    else:
        istart = 0
        istop = None

    for ntrace in range(stf.get_size_channel()):
        stf.set_trace(ntrace)
        stf.set_channel(ichannel)
        trace = stf.get_trace()[istart:istop]

        ax_currents.plot(np.arange(len(trace))*dt, trace)

        # Plot the voltage channel
        stf.set_channel(vchannel)
        trace = stf.get_trace()[istart:istop]
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

    # Reset active channel
    stf.set_channel(ichannel)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(), yunits=stf.get_yunits(channel=0))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(), yunits=stf.get_yunits(channel=1))
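
A usage sketch, assuming a two-channel recording (current on channel 0, voltage on channel 1) is open; the 0-500 ms window is a placeholder:

plot_traces(plotwindow=(0, 500), ichannel=0, vchannel=1)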
Example #7
def normalize():
    """
    Normalize to the peak amplitude of the selected trace and 
    scale all other traces in the currently active channel by 
    the same factor. 

    Ensure that you subtract the baseline before normalizing
    """

    # Find index of the selected trace
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to normalize the others to')

    # Measure peak amplitude in the selected trace
    stf.set_trace(idx[0])
    refval = np.abs(stf.get_peak())

    # Apply normalization
    scaled_traces = [
        stf.get_trace(i) / refval for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(scaled_traces)
Example #8
def find_sample_points_of_detected_events(whole_trace_file,
                                          extracted_events_file, sweep_num):
    """Takes the window of detected events from stimfit and, for each event,
    runs through the full trace to pull out the time (in samples) of the event.
    """
    #open and load trace from whole file
    stf.file_open(whole_trace_file)
    stf.set_trace(sweep_num)
    whole_trace = stf.get_trace()
    sampling_interval = stf.get_sampling_interval()

    #open extracted events file
    stf.file_open(extracted_events_file)

    time_points = []
    for trace in range(stf.get_size_channel()):
        stf.set_trace(trace)
        trace_to_search = stf.get_trace(trace)
        # run find trace with updated search index
        # start at sample = 0 for first run through
        if len(time_points) == 0:
            sample_start = 0
        else:
            sample_start = int(time_points[len(time_points) - 1] /
                               sampling_interval)

        output_index = sub_func_find_trace(trace_to_search, whole_trace,
                                           sample_start)
        time_point = output_index * sampling_interval
        time_points.append(time_point)

    return (time_points)
Example #10
def subtract_base():
    """
    Subtract the baseline from every trace in the active channel and
    display the subtracted traces in a new window.
    """
    subtracted_traces = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        subtracted_traces.append(stf.get_trace() - stf.get_base())
    stf.new_window_list(subtracted_traces)

    return
Example #11
def yoffset(value):
    """
    Apply a common offset to all traces in the currently active channel.
    """

    offset_traces = [
        stf.get_trace(i) + value for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(offset_traces)
Example #12
def find_ADP_thresholds_for_file(current_start_file, current_delta_file,
                                 *argv):
    """Count APs, ADPs and thresholds in traces with gradually increasing
    current-injection steps.
    Inputs: starting current value, current delta between traces, and
    optionally (threshold value, deflection direction ('up'/'down'),
    mark traces (True/False), time (msec) to start search, length (msec)
    of search region); without argv, the peak cursors define the search
    region."""
    if len(argv) > 0:
        threshold_value_file = argv[0]
        deflection_direction_file = argv[1]
        mark_option_file = argv[2]
        start_msec_file = float(argv[3])
        delta_msec_file = float(argv[4])
    else:
        threshold_value_file = 0
        deflection_direction_file = 'up'
        mark_option_file = True
        start_msec_file = float(stf.get_peak_start(True))
        delta_msec_file = float(stf.get_peak_end(True) - start_msec_file)

    loaded_file = stf.get_filename()[:-4]
    event_counts = np.zeros((stf.get_size_channel(), 2))
    ADPs = np.zeros((stf.get_size_channel(), 2))
    thresholds = np.zeros((stf.get_size_channel(), 2))
    trace_df_dict = {}
    for trace_ in range(stf.get_size_channel()):
        AP_count_for_trace, df_for_trace = find_AP_peak_ADP_trace(
            trace_, threshold_value_file, deflection_direction_file,
            mark_option_file, start_msec_file, delta_msec_file)
        trace_df_dict['trace' + str(trace_).zfill(3)] = df_for_trace

        # record the counts for every trace, inside the loop
        event_counts[trace_][1] = AP_count_for_trace
        event_counts[trace_][0] = current_start_file + (current_delta_file *
                                                        trace_)

    np.savetxt(loaded_file + '_AP_counts.csv',
               event_counts,
               delimiter=',',
               newline='\n')
    output_path = loaded_file + 'ADP_thresholds.xlsx'
    xlsx_out = pd.ExcelWriter(output_path, engine='xlsxwriter')
    for trace_name, trace_df in sorted(trace_df_dict.items()):
        trace_df.to_excel(xlsx_out, sheet_name=trace_name)
    xlsx_out.save()
    return True
Example #13
def analyze_iv(pulses, trace_start=0, factor=1.0):
    """Creates an IV for the currently active channel.

    Keyword arguments:
    pulses --      Number of pulses for the IV.
    trace_start -- ZERO-BASED index of the first trace to be
                   used for the IV. Note that this is one less
                   than what is displayed in the drop-down box.
    factor --      Multiply result with an optional factor, typically
                   from some external scaling.
    Returns:
    True upon success, False otherwise.
    """

    if not stf.check_doc():
        print("Couldn't find an open file; aborting now.")
        return False

    if pulses < 1:
        print("Number of pulses has to be greater than or equal to 1.")
        return False

    channel = list()
    for m in range(pulses):
        # A temporary (uninitialized) array to accumulate the traces
        # belonging to the m-th pulse:
        pulse_set = np.empty((int(
            (stf.get_size_channel() - m - 1 - trace_start) / pulses) + 1,
                              stf.get_size_trace(trace_start + m)))
        n_set = 0
        for n in range(trace_start + m, stf.get_size_channel(), pulses):
            # Add this trace to the set:
            pulse_set[n_set, :] = stf.get_trace(n)
            n_set = n_set + 1

        # calculate the average, scale it, and append it as a new section:
        channel.append(np.average(pulse_set, 0) * factor)

    stf.new_window_list(channel)

    return True
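
A usage sketch, assuming the active channel interleaves four pulse amplitudes; the argument values are placeholders:

analyze_iv(pulses=4, trace_start=0, factor=1.0)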
Example #14
def yvalue(origin, interval):
    """Return, for every trace in the active channel, the sample value at
    the fit end cursor placed at origin + interval (both in ms)."""

    stf.set_fit_start(origin, True)
    stf.set_fit_end(origin + interval, True)
    stf.measure()
    x = int(stf.get_fit_end(False))
    y = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        y.append(stf.get_trace(i)[x])

    return y
Example #15
def reverse():
    """
    Reverse the order of all traces
    """

    reversed_traces = []
    n = stf.get_size_channel()
    for i in range(n):
        reversed_traces.append(stf.get_trace(n - 1 - i))
    stf.new_window_list(reversed_traces)

    return
Example #16
def analyze_file(baseline_start, baseline_end, cap_trans_start, cap_trans_end,
                 amplitude, EPSC1_s, EPSC1_e, EPSC2_s, EPSC2_e, sweep_start,
                 sweep_end):
    """inputs: (baseline_start, baseline_end, cap_trans_start, cap_trans_end, amplitude, EPSC1_s, EPSC1_e, EPSC2_s, EPSC2_e, sweep_start, sweep_end)
	output: numpy array where 1st column is capacitance transient amplitude, 2nd is series resistance, 3rd is 1st EPSC, 4th is 2nd EPCSC
	also writes output to .csv file"""

    num_sweeps = stf.get_size_channel()
    print('there are %d sweeps in recording' % num_sweeps)
    print('analyzing sweeps %d to %d' % (sweep_start, sweep_end))
    sweeps_to_analyze = sweep_end - sweep_start

    #create array for results
    data_array = np.zeros((sweeps_to_analyze + 1, 4))

    y = 0
    for x in range(sweep_start - 1, sweep_end):
        #moves to next trace
        stf.set_trace(x)

        [cap_trans_amplitude,
         series_resistance] = jjm_resistance(baseline_start, baseline_end,
                                             cap_trans_start, cap_trans_end,
                                             amplitude)
        data_array[y][0] = cap_trans_amplitude
        data_array[y][1] = series_resistance
        EPSC_1 = jjm_peak(baseline_start, baseline_end, EPSC1_s, EPSC1_e)
        data_array[y][2] = EPSC_1
        EPSC_2 = jjm_peak(baseline_start, baseline_end, EPSC2_s, EPSC2_e)
        data_array[y][3] = EPSC_2
        # paired-pulse ratio; computed for inspection but not stored in data_array
        pp_40 = float(EPSC_2) / float(EPSC_1)

        y += 1

    #print first few entries to check accuracy
    print(data_array[:3])

    #make csv file with data
    file_name = stf.get_filename()
    #expt = file_name[-12:].rstrip('.abf');
    np.savetxt(file_name + '_stimfitanalysis.csv',
               data_array,
               delimiter=',',
               newline='\n')

    return (data_array)
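
A usage sketch; all cursor arguments and the -5 mV test-pulse amplitude are placeholders, and jjm_resistance and jjm_peak must be importable:

results = analyze_file(0, 40, 45, 60, -5, 180, 190, 230, 240, 1, 50)  # placeholder cursors, sweeps 1-50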
Example #17
def mean_every_Nth(N):
    """
    Perform mean of the first and every Nth trace
    """

    m = stf.get_size_channel() / (N - 1)
    if np.fix(m) != m:
        raise ValueError('The number of traces is not divisible by N')
    # cast to int so that range() accepts it under Python 3
    m = int(m)

    # loop index calculations: [[i*n+j for j in range(n)] for i in range(m)]
    binned_traces = [[
        stf.get_trace((i + 1) + j * (N - 1) - 1) for j in range(m)
    ] for i in range(N - 1)]
    mean_traces = [np.mean(binned_traces[i], 0) for i in range(N - 1)]

    return stf.new_window_list(mean_traces)
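
A usage sketch: with, say, 40 traces in the channel (an assumed count for illustration), N = 5 yields four mean traces, each averaging ten traces spaced N - 1 apart:

mean_every_Nth(5)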
Example #18
def blankstim():
    """
    Blank values between fit cursors in all traces in the active channel.
    Typically used to blank stimulus artifacts.
    """

    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    blanked_traces = []
    for i in range(stf.get_size_channel()):
        tmp = stf.get_trace(i)
        tmp[fit_start:fit_end] = np.nan
        blanked_traces.append(tmp)
    stf.new_window_list(blanked_traces)

    return
Example #19
def SBR():
    """
    Calculate signal-to-baseline ratio (SBR) or delta F / F0 for
    traces in the active window. The result is expressed as a %.
    Useful for imaging data.

    Ensure that the baseline cursors are positioned appropriately.
    """

    SBR_traces = [
        100 * (stf.get_trace(i) - stf.get_base()) / stf.get_base()
        for i in range(stf.get_size_channel())
    ]
    stf.new_window_list(SBR_traces)
    stf.set_yunits('%')

    return
Example #20
def rmeantraces(binwidth):
    """
    Perform running mean of all traces in the active channel. 
    The number of traces averaged is defined by binwidth. 
    """

    n = binwidth
    N = stf.get_size_channel()
    m = N / n
    if np.fix(m) != m:
        raise ValueError('The number of traces is not divisible by n')
    # cast to int so that range() accepts it under Python 3
    m = int(m)

    # loop index calculations: [[i*n+j for j in range(n)] for i in range(m)]
    binned_traces = [[stf.get_trace(i * n + j) for j in range(n)]
                     for i in range(m)]
    mean_traces = [np.mean(binned_traces[i], 0) for i in range(m)]

    return stf.new_window_list(mean_traces)
Example #21
def find_sample_points_of_detected_events(whole_trace):
    """Takes the window of detected events from stimfit and, for each event,
    runs through the full trace to pull out the time (in samples) of the event.
    """
    trace_indices = []
    for trace in range(stf.get_size_channel()):
        trace_to_search = stf.get_trace(trace)
        # run find trace with updated search index
        # start at sample = 0 for first run through
        if len(trace_indices) == 0:
            sample_start = 0
        else:
            sample_start = trace_indices[len(trace_indices) - 1]

        output_index = sub_func_find_trace(trace_to_search, whole_trace,
                                           sample_start)
        trace_indices.append(output_index)

    return trace_indices
Example #22
def upsample_flex():
    """
    Upsample to sampling interval of 1 ms using cubic spline interpolation
    """

    old_time = [
        i * stf.get_sampling_interval() for i in range(stf.get_size_trace())
    ]
    new_time = range(
        int(np.fix((stf.get_size_trace() - 1) * stf.get_sampling_interval())))
    new_traces = []
    for i in range(stf.get_size_channel()):
        f = interpolate.interp1d(old_time, stf.get_trace(i), 'cubic')
        new_traces.append(f(new_time))
    stf.new_window_list(new_traces)
    stf.set_sampling_interval(1)

    return
Example #23
def get_amplitude_select_NMDA(amplithresh):
    """Select every trace whose peak-minus-baseline amplitude is positive
    and below amplithresh; return the number of selected traces."""
    stf.unselect_all()
    stf.set_peak_direction('both')
    # total number of traces
    traces = stf.get_size_channel()
    selectedtraces = 0

    for i in range(traces):
        stf.set_trace(i)
        amplitude = stf.get_peak() - stf.get_base()
        if 0 < amplitude < amplithresh:
            stf.select_trace(i)
            selectedtraces += 1

    return selectedtraces
Example #24
def batch_integration():
    """
    Perform batch integration between the decay/fit cursors of all traces
    in the active window
    """
    n = int(stf.get_fit_end() + 1 - stf.get_fit_start())
    x = [i * stf.get_sampling_interval() for i in range(n)]
    dictlist = []
    for i in range(stf.get_size_channel()):
        stf.set_trace(i)
        y = stf.get_trace()[int(stf.get_fit_start()):int(stf.get_fit_end() +
                                                         1)]
        auc = np.trapz(y - stf.get_base(), x)
        dictlist += [("%i" % (i + 1), auc)]
    retval = dict(dictlist)
    stf.show_table(retval, "Area Under Curve")
    stf.set_trace(0)

    return
Example #25
def remove_artifacts_from_sweeps(artifact_start_time, artifact_end_time):
    """Cut the segment between artifact_start_time and artifact_end_time
    (both in ms) out of every sweep, show the trimmed sweeps in a new
    window, and return them concatenated as one continuous trace."""

    sampling_interval = stf.get_sampling_interval()
    artifact_start = int(artifact_start_time / sampling_interval)
    artifact_end = int(artifact_end_time / sampling_interval)

    continuous_trace = []
    output_artifacts_removed = []

    for sweep in range(stf.get_size_channel()):
        sweep_trace_before_artifact = stf.get_trace(sweep)[0:artifact_start]
        sweep_trace_after_artifact = stf.get_trace(sweep)[artifact_end:]
        sweep_trace = np.append(sweep_trace_before_artifact,
                                sweep_trace_after_artifact)
        output_artifacts_removed.append(sweep_trace)
        continuous_trace.extend(sweep_trace)

    stf.new_window_list(output_artifacts_removed)

    return (continuous_trace)
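
A usage sketch; the 245-255 ms artifact window is a placeholder for wherever the stimulus artifact falls in your sweeps:

continuous = remove_artifacts_from_sweeps(245.0, 255.0)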
Example #26
def subtract_trace():
    """
    Subtract the selected trace from all traces in the currently active channel

    """

    # Find index of the selected trace to subtract from all the other traces
    idx = stf.get_selected_indices()
    if len(idx) > 1:
        raise ValueError('More than one trace was selected')
    elif len(idx) < 1:
        raise ValueError('Select one trace to subtract from the others')

    # Apply subtraction
    subtracted_traces = [
        stf.get_trace(i) - stf.get_trace(idx[0])
        for i in range(stf.get_size_channel())
    ]

    return stf.new_window_list(subtracted_traces)
Example #27
def interpstim():
    """
    Interpolate values between fit cursors in all traces in the active channel.
    Typically used to remove stimulus artifacts.
    """

    x = np.array(
        [i * stf.get_sampling_interval() for i in range(stf.get_size_trace())])
    fit_start = int(stf.get_fit_start())
    fit_end = int(stf.get_fit_end())
    interp_traces = []
    for i in range(stf.get_size_channel()):
        tmp = stf.get_trace(i)
        tmp[fit_start:fit_end] = np.interp(x[fit_start:fit_end],
                                           [x[fit_start], x[fit_end]],
                                           [tmp[fit_start], tmp[fit_end]])
        interp_traces.append(tmp)
    stf.new_window_list(interp_traces)

    return
Example #28
def compile_amplitudes_in_trace():

    # for each trace in the file run find_baseline_amplitude
    output_array = np.array(['baseline', 'peak', 'peak_from_baseline'])

    for trace in range(stf.get_size_channel()):

        stf.set_trace(trace)

        fba_output = find_baseline_amplitude(10)

        output_array = np.vstack([output_array, fba_output])

    output_df = pd.DataFrame(output_array[1:],
                             columns=output_array[0],
                             dtype=float)

    output_df.to_excel(str(stf.get_filename()[-40:-3]) + '.xlsx')

    return (output_df)
Example #29
def select_pon(pon_pulses=8):
    """Selects correction-subtracted pulses from FPulse-generated
    files.
    
    Keyword arguments:
    pon_pulses -- Number of p-over-n correction pulses.
                  This is typically 4 (for PoN=5 in the FPulse script)
                  or 8 (for PoN=9).
    Returns:
    True upon success, False otherwise.
    """

    # Zero-based indices! Hence, for P over N = 8, the first corrected
    # trace index is 9.
    for n in range(pon_pulses + 1, stf.get_size_channel(), pon_pulses + 2):
        if not stf.select_trace(n):
            # Unselect everything and break if there was an error:
            stf.unselect_all()
            return False

    return True
Example #30
def read_heka_stf(filename):
    channels, channelnames, channelunits, channeldt = read_heka(filename)
    for nc, channel in enumerate(channels):
        if channelunits[nc] == "V":
            for ns, sweep in enumerate(channel):
                channels[nc][ns] = np.array(channels[nc][ns])
                channels[nc][ns] *= 1.0e3
            channelunits[nc] = "mV"
        if channelunits[nc] == "A":
            for ns, sweep in enumerate(channel):
                channels[nc][ns] = np.array(channels[nc][ns])
                channels[nc][ns] *= 1.0e12
            channelunits[nc] = "pA"

    import stf
    stf.new_window_list(channels)
    for nc, name in enumerate(channelnames):
        stf.set_channel_name(name, nc)
    for nc, units in enumerate(channelunits):
        for ns in range(stf.get_size_channel()):
            stf.set_yunits(units, ns, nc)
    stf.set_sampling_interval(channeldt[0] * 1e3)
Example #32
def cut_sweeps(start, delta, sequence=None):
    """
    Cuts a sequence of traces and present
    them in a new window.

    Arguments:

    start       -- starting point (in ms) to cut.
    delta       -- time interval (in ms) to cut
    sequence    -- list of indices to be cut. If None, every trace in the
                    channel will be cut.

    Returns:
    A new window with the traced cut.

    Examples:
    cut_sweeps(200,300) cut the traces between t=200 ms and t=500 ms 
        within the whole channel.
    cut_sweeps(200,300,range(30,60)) the same as above, but only between 
        traces 30 and 60.
    cut_sweeps(200,300,stf.get_selected_indices()) cut between 200 ms               and 500 ms only in the selected traces.

    """

    # select every trace in the channel if not selection is given in sequence
    if sequence is None:
        sequence = range(stf.get_size_channel())

    # transform time into sampling points
    dt = stf.get_sampling_interval()

    pstart = int(round(start / dt))
    pdelta = int(round(delta / dt))

    # creates a destination python list
    dlist = [stf.get_trace(i)[pstart:(pstart + pdelta)] for i in sequence]

    return stf.new_window_list(dlist)
Example #34
def fit_experiment(params, pulse_length, function_to_fit):

    num_sweeps = stf.get_size_channel()
    stf.set_channel(0)
    stf.set_trace(0)

    fits = []
    traces = []
    for x in range(0, num_sweeps):
        stf.set_trace(x)
        jjm_analysis.set_params(params)
        stf.measure()
        #this is in samples
        peak_index = stf.peak_index()
        stf.set_fit_start(peak_index, is_time=False)
        fit_start_time = peak_index * stf.get_sampling_interval()
        stf.set_fit_end(fit_start_time + pulse_length -
                        (10 * stf.get_sampling_interval()),
                        is_time=True)
        sweep_fit = stf.leastsq(function_to_fit)
        sweep_fit['Baseline(pA)'] = stf.get_base()
        fits.append(sweep_fit)
        traces.append(x)

    fit_df = pd.DataFrame(fits)
    return (fit_df)
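
A usage sketch; params is whatever jjm_analysis.set_params expects (assumed to be prepared beforehand), and the last argument is forwarded to stf.leastsq, whose fit-function selector 0 is used here as a placeholder:

fit_df = fit_experiment(params_list, 500, 0)  # params_list, the 500 ms pulse length and selector 0 are placeholders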
Example #35
def timeconstants(fitwindow=None, pulsewindow=None, ichannel=0, vchannel=1):
    """
    Compute and plot decay time constants

    Parameters
    ----------
    fitwindow : (float, float), optional
        Window for fitting time constant (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    pulsewindow : (float, float), optional
        Window for voltage pulse measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1

    Returns
    -------
    v_commands : numpy.ndarray
        Command voltages
    taus : numpy.ndarray
        Time constants
    """

    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()

    v_commands = []
    taus = []

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 8)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, :4], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, :4], hasx=False, hasy=False, sharex=ax_currents)
    for ntrace in range(stf.get_size_channel()):
        stf.set_trace(ntrace)
        stf.set_channel(ichannel)
        trace = stf.get_trace()

        ax_currents.plot(np.arange(len(trace))*dt, trace)

        if fitwindow is not None:
            stf.fit.cursor_time = fitwindow
        res = stf.leastsq(0, False)
        taus.append(res['Tau_0'])

        # Measure pulse amplitude
        stf.set_channel(vchannel)
        trace = stf.get_trace()
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

        stf.set_peak_direction("up")
        stf.set_peak_mean(-1)
        if pulsewindow is not None:
            stf.peak.cursor_time = pulsewindow
        stf.measure()
        v_commands.append(stf.peak.value)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=ichannel))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(),
        yunits=stf.get_yunits(channel=vchannel))

    v_commands = np.array(v_commands)
    taus = np.array(taus)

    ax_taus = plot_iv(
        taus, v_commands, "ms",
        stf.get_yunits(channel=vchannel), fig, 122)

    # Reset peak computation to single sampling point
    stf.set_peak_mean(1)

    # Reset active channel
    stf.set_channel(ichannel)

    # Show the results in a table:
    stf.show_table_dictlist({
        "Voltage ({0})".format(
            stf.get_yunits(channel=vchannel)): v_commands.tolist(),
        "Taus (ms)": taus.tolist(),
    })

    return v_commands, taus
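
A usage sketch; the fit window (400-450 ms) and pulse window (100-300 ms) are placeholders:

v_commands, taus = timeconstants((400, 450), (100, 300), ichannel=0, vchannel=1)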
Example #36
def glu_iv( pulses = 13, subtract_base=True ):
    """Calculates an iv from a repeated series of fast application and
    voltage pulses. 

    Keyword arguments:
    pulses        -- Number of pulses for the iv.
    subtract_base -- If True (default), baseline will be subtracted.
    
    Returns:
    True if successful.
    """

    # Some ugly definitions for the time being
    # Cursors are in ms here.
    gFitEnd = 330.6 # fit end cursor is variable
    gFSelect  =  0 # Monoexp
    gDictSize =  stf.leastsq_param_size( gFSelect ) + 2 # Parameters, chisqr, peak value
    gBaseStart  = 220.5 # Start and end of the baseline before the control pulse, in ms
    gBaseEnd    = 223.55
    gPeakStart  = 223.55 # Start and end of the peak cursors for the control pulse, in ms
    gPeakEnd = 253.55 
    
    if ( gDictSize < 0 ):
        print('Couldn\'t retrieve function id=%d, aborting now.'%gFSelect)
        return False        
    
    if ( not(stf.check_doc()) ):
        print('Couldn\'t find an open file; aborting now.')
        return False
    
    # analyse iv, subtract baseline if requested:
    ivtools.analyze_iv( pulses )
    if ( subtract_base == True ):
        if ( not(stf.set_base_start( gBaseStart, True )) ): return False
        if ( not(stf.set_base_end( gBaseEnd, True )) ): return False
        stf.measure()
        stf.select_all()
        stf.subtract_base()
    
    # set cursors:
    if ( not(stf.set_peak_start( gPeakStart, True )) ): return False
    if ( not(stf.set_peak_end( gPeakEnd, True )) ): return False
    if ( not(stf.set_base_start( gBaseStart, True )) ): return False
    if ( not(stf.set_base_end( gBaseEnd, True )) ): return False
    if ( not(stf.set_fit_end( gFitEnd, True )) ): return False
    
    if ( not(stf.set_peak_mean( 3 )) ): return False
    if ( not(stf.set_peak_direction( "both" )) ): return False

    # A list for dictionary keys and values:
    dict_keys = []
    dict_values = np.empty( (gDictSize, stf.get_size_channel()) )
    firstpass = True
    for n in range( 0, stf.get_size_channel() ):
        if ( stf.set_trace( n ) == False ):
            print('Couldn\'t set a new trace; aborting now.')
            return False
        
        print('Analyzing trace %d of %d'%( n+1, stf.get_size_channel() ) )
        # set the fit window cursors:
        if ( not(stf.set_fit_start( stf.peak_index() )) ): return False
        
        # Least-squares fitting:
        p_dict = stf.leastsq( gFSelect )
        
        if ( p_dict == 0 ):
            print('Couldn\'t perform a fit; aborting now.')
            return False
            
        # Create an empty list:
        tempdict_entry = []
        row = 0
        for k, v in p_dict.items():
            if ( firstpass == True ):
                dict_keys.append( k )
            dict_values[row][n] = v 
            row = row+1
        
        if ( firstpass ):
            dict_keys.append( "Peak amplitude" )
        dict_values[row][n] = stf.get_peak()-stf.get_base()
        
        firstpass = False
    
    retDict = dict()
    # Create the dictionary for the table:
    entry = 0
    for elem in dict_keys:
        retDict[ elem ] = dict_values[entry].tolist()
        entry = entry+1
   
    return stf.show_table_dictlist( retDict )
Example #37
def iv(peakwindow=None, basewindow=None, pulsewindow=None,
       erev=None, peakmode="both", ichannel=0, vchannel=1,
       exclude=None):
    """
    Compute and plot an IV curve for currents

    Parameters
    ----------
    peakwindow : (float, float), optional
        Window for peak measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    basewindow : (float, float), optional
        Window for baseline measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    pulsewindow : (float, float), optional
        Window for voltage pulse measurement (time in ms from beginning of sweep)
        None for current cursor settings. Default: None
    erev : float, optional
        Reversal potential used for the conductance calculation, or None
        to estimate it from the zero crossing of the peak IV. Default: None
    peakmode : string, optional
        Peak direction - one of "up", "down", "both" or "mean". Default: "both"
    ichannel : int, optional
        current channel number. Default: 0
    vchannel : int, optional
        voltage channel number. Default: 1
    exclude : list of ints, optional
        List of trace indices to be excluded from the analysis. Default: None

    Returns
    -------
    v_commands : numpy.ndarray
        Command voltages
    ipeaks : numpy.ndarray
        Peak currents
    gpeaks : numpy.ndarray
        Peak normalized conductances
    g_fit : numpy.ndarray
        Half-maximal voltage and slope of best-fit Boltzmann function
    """

    import stf
    if not stf.check_doc():
        return None

    nchannels = stf.get_size_recording()
    if nchannels < 2:
        sys.stderr.write(
            "Function requires 2 channels (0: current; 1: voltage)\n")
        return

    dt = stf.get_sampling_interval()
    olddirection = stf.get_peak_direction()

    v_commands = []
    ipeaks = []
    if basewindow is not None:
        stf.base.cursor_time = basewindow

    fig = stf.mpl_panel(figsize=(12, 8)).fig
    fig.clear()
    gs = gridspec.GridSpec(4, 8)
    ax_currents = stfio_plot.StandardAxis(
        fig, gs[:3, :4], hasx=False, hasy=False)
    ax_voltages = stfio_plot.StandardAxis(
        fig, gs[3:, :4], hasx=False, hasy=False, sharex=ax_currents)
    for ntrace in range(stf.get_size_channel()):
        if exclude is not None:
            if ntrace in exclude:
                continue

        stf.set_trace(ntrace)
        stf.set_channel(ichannel)
        trace = stf.get_trace()

        ax_currents.plot(np.arange(len(trace))*dt, trace)

        # Configure peak measurement according to peakmode
        if peakmode == "mean":
            stf.set_peak_direction("up")
            stf.set_peak_mean(-1)
        else:
            stf.set_peak_direction(peakmode)
            # Set peak computation to single sampling point
            stf.set_peak_mean(1)

        if peakwindow is not None:
            stf.peak.cursor_time = peakwindow
        stf.measure()
        if basewindow is not None:
            ipeaks.append(stf.peak.value-stf.base.value)
        else:
            ipeaks.append(stf.peak.value)

        # Measure pulse amplitude
        stf.set_channel(vchannel)
        trace = stf.get_trace()
        ax_voltages.plot(np.arange(len(trace))*dt, trace)

        stf.set_peak_direction("up")
        stf.set_peak_mean(-1)
        if pulsewindow is not None:
            stf.peak.cursor_time = pulsewindow
        stf.measure()
        v_commands.append(stf.peak.value)

    stfio_plot.plot_scalebars(
        ax_currents, xunits=stf.get_xunits(), yunits=stf.get_yunits(channel=0))
    stfio_plot.plot_scalebars(
        ax_voltages, xunits=stf.get_xunits(), yunits=stf.get_yunits(channel=1))

    v_commands = np.array(v_commands)
    ipeaks = np.array(ipeaks)

    if erev is None:
        # Find first zero crossing in ipeaks:
        for npulse in range(ipeaks.shape[0]-1):
            if np.sign(ipeaks[npulse]) != np.sign(ipeaks[npulse+1]):
                # linear interpolation
                m1 = (ipeaks[npulse+1]-ipeaks[npulse]) / (
                    v_commands[npulse+1]-v_commands[npulse])
                c1 = ipeaks[npulse] - m1*v_commands[npulse]
                erev = -c1/m1
                break
        if erev is None:
            sys.stderr.write(
                "Could not determine reversal potential. Aborting now\n")
            return None

    # Reset peak computation to single sampling point
    stf.set_peak_mean(1)
    stf.set_peak_direction(olddirection)

    # Reset active channel
    stf.set_channel(ichannel)

    # Compute conductances:
    gpeaks, g_fit = gv(ipeaks, v_commands, erev)

    ax_ipeaks = plot_iv(
        ipeaks, v_commands, stf.get_yunits(channel=ichannel),
        stf.get_yunits(channel=1), fig, 222)

    ax_ipeaks.set_title("Peak current")

    ax_gpeaks = plot_gv(
        gpeaks, v_commands, stf.get_yunits(channel=vchannel),
        g_fit, fig, 224)
    ax_gpeaks.set_title("Peak conductance")

    stf.show_table_dictlist({
        "Voltage ({0})".format(
            stf.get_yunits(channel=vchannel)): v_commands.tolist(),
        "Peak current ({0})".format(
            stf.get_yunits(channel=ichannel)): ipeaks.tolist(),
        "Peak conductance (g/g_max)": gpeaks.tolist(),
    })

    return v_commands, ipeaks, gpeaks, g_fit
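
A usage sketch; the peak, baseline and pulse windows (in ms) are placeholders for a typical voltage-step protocol:

result = iv(peakwindow=(250, 300), basewindow=(0, 50), pulsewindow=(100, 150))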