import numpy as np
import pandas as pd

import spells  # stimfit's "book of spells" helper module
import stf


def scan_through_train_expt(params_expt_input, train_increment, num_stims):

    len_peak_region_in_samples = round(
        (params_expt_input[3] - params_expt_input[2]) /
        stf.get_sampling_interval())

    expt_peaks = np.zeros((stf.get_size_channel(), num_stims))
    expt_peak_arrays = np.zeros(
        (stf.get_size_channel(), num_stims, len_peak_region_in_samples))

    for trace in range(stf.get_size_channel()):
        # copy the parameter list so that in-place shifts made by
        # scan_through_train do not leak back into the caller's list
        params_expt = list(params_expt_input)

        (expt_peaks[trace],
         expt_peak_arrays[trace]) = scan_through_train(
             params_expt, train_increment, num_stims, trace)

    # strip the 4-character file extension (e.g. '.abf'), matching the
    # other functions in this module
    loaded_file = stf.get_filename()[:-4]
    np.savetxt(loaded_file + '_peaks.csv',
               expt_peaks,
               delimiter=',',
               newline='\n')

    return (expt_peaks, expt_peak_arrays)
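
# Hedged usage sketch for scan_through_train_expt. Assumptions (not from the
# original code): a train recording is open in Stimfit, scan_through_train is
# defined elsewhere in this module, and params_expt_input holds cursor times
# in ms with the peak window at indices 2 and 3. All numbers are placeholders.
def _demo_scan_train_expt():
    params = [100.0, 110.0, 120.0, 125.0]  # assumed [base_start, base_end, peak_start, peak_end]
    peaks, peak_arrays = scan_through_train_expt(params,
                                                 train_increment=50.0,
                                                 num_stims=5)
    print(peaks.shape)        # (n_traces, 5)
    print(peak_arrays.shape)  # (n_traces, 5, samples_per_peak_window)
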
def find_AP_peak_ADP(start_msec, delta_msec, current_start, current_delta,
                     threshold_value, deflection_direction, mark_option):
    """Count APs in traces with gradually increasing current-injection steps
    and measure the afterdepolarization (ADP) following each spike.

    inputs: (time (ms) to start search, length of search region,
    starting current value, current delta between traces, threshold value,
    deflection direction ('up'/'down'), mark traces (True/False))"""

    loaded_file = stf.get_filename()[:-4]
    event_counts = np.zeros((stf.get_size_channel(), 2))
    for trace_ in range(stf.get_size_channel()):
        # get AP counts and sample points in the current trace
        direction_input = (deflection_direction == 'up')
        [trace_count, sample_points, time_points] = jjm_count(
            start_msec, delta_msec, threshold=threshold_value,
            up=direction_input, trace=trace_, mark=mark_option)

        # get ADP values and write them out per trace
        values, indices = find_ADPs(sample_points)
        out_array = np.array([values, indices])
        np.savetxt(loaded_file + 'trace' + str(trace_).zfill(3) +
                   'ADP_values.csv', out_array, delimiter=',', newline='\n')

        # column 0: injected current for this trace; column 1: AP count
        event_counts[trace_][0] = current_start + (current_delta * trace_)
        event_counts[trace_][1] = trace_count

    np.savetxt(loaded_file + '_AP_counts.csv', event_counts,
               delimiter=',', newline='\n')
    return event_counts
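
# Hedged usage sketch for find_AP_peak_ADP. Assumes a current-step recording
# is open and that jjm_count/find_ADPs are defined elsewhere in this module;
# the step protocol below (start at -100 pA, +50 pA per trace) is a
# placeholder, not taken from the original code.
def _demo_find_AP_peak_ADP():
    counts = find_AP_peak_ADP(start_msec=500.0, delta_msec=1000.0,
                              current_start=-100.0, current_delta=50.0,
                              threshold_value=0, deflection_direction='up',
                              mark_option=True)
    print(counts)  # column 0: injected current, column 1: AP count
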
def analyze_file(baseline_start, baseline_end, cap_trans_start, cap_trans_end,
                 amplitude, EPSC1_s, EPSC1_e, EPSC2_s, EPSC2_e, sweep_start,
                 sweep_end):
    """inputs: (baseline_start, baseline_end, cap_trans_start, cap_trans_end, amplitude, EPSC1_s, EPSC1_e, EPSC2_s, EPSC2_e, sweep_start, sweep_end)
	output: numpy array where 1st column is capacitance transient amplitude, 2nd is series resistance, 3rd is 1st EPSC, 4th is 2nd EPCSC
	also writes output to .csv file"""

    num_sweeps = stf.get_size_channel()
    print('there are %d sweeps in recording' % num_sweeps)
    print('analyzing sweeps %d to %d' % (sweep_start, sweep_end))
    sweeps_to_analyze = sweep_end - sweep_start

    #create array for results
    data_array = np.zeros((sweeps_to_analyze + 1, 4))

    y = 0
    for x in range(sweep_start - 1, sweep_end):
        #moves to next trace
        stf.set_trace(x)

        [cap_trans_amplitude,
         series_resistance] = jjm_resistance(baseline_start, baseline_end,
                                             cap_trans_start, cap_trans_end,
                                             amplitude)
        # columns: 0 = cap. transient amplitude, 1 = series resistance,
        #          2 = first EPSC, 3 = second EPSC
        data_array[y][0] = cap_trans_amplitude
        data_array[y][1] = series_resistance
        EPSC_1 = jjm_peak(baseline_start, baseline_end, EPSC1_s, EPSC1_e)
        data_array[y][2] = EPSC_1
        EPSC_2 = jjm_peak(baseline_start, baseline_end, EPSC2_s, EPSC2_e)
        data_array[y][3] = EPSC_2
        # paired-pulse ratio; computed here but not stored in data_array
        pp_40 = float(EPSC_2) / float(EPSC_1)

        y += 1

    #print first few entries to check accuracy
    print(data_array[:3])

    #make csv file with data
    file_name = stf.get_filename()
    np.savetxt(file_name + '_stimfitanalysis.csv',
               data_array,
               delimiter=',',
               newline='\n')

    return (data_array)
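
# Hedged usage sketch for analyze_file on a paired-pulse experiment. All
# cursor times (ms) and the test-pulse amplitude below are placeholders;
# adjust them to the actual protocol. Assumes jjm_resistance and jjm_peak
# are defined elsewhere in this module.
def _demo_analyze_file():
    results = analyze_file(baseline_start=0.0, baseline_end=10.0,
                           cap_trans_start=10.0, cap_trans_end=20.0,
                           amplitude=-5.0,
                           EPSC1_s=100.0, EPSC1_e=120.0,
                           EPSC2_s=140.0, EPSC2_e=160.0,
                           sweep_start=1, sweep_end=stf.get_size_channel())
    # paired-pulse ratio per sweep, from the two EPSC columns
    print(results[:, 3] / results[:, 2])
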
def find_AP_peaks(start_msec, delta_msec, current_start, current_delta,
                  threshold_value, deflection_direction, mark_option):
    """Count APs in traces with gradually increasing current-injection steps.

    inputs: (time (ms) to start search, length of search region,
    starting current value, current delta between traces, threshold value,
    deflection direction ('up'/'down'), mark traces (True/False))"""

    event_counts = np.zeros((stf.get_size_channel(), 2))
    # spells.count_events expects a boolean for the search direction,
    # not the 'up'/'down' string
    direction_input = (deflection_direction == 'up')
    for trace_ in range(stf.get_size_channel()):
        event_counts[trace_][1] = spells.count_events(
            start_msec, delta_msec, threshold=threshold_value,
            up=direction_input, trace=trace_, mark=mark_option)
        event_counts[trace_][0] = current_start + (current_delta * trace_)

    loaded_file = stf.get_filename()[:-4]
    np.savetxt(loaded_file + '_AP_counts.csv', event_counts,
               delimiter=',', newline='\n')
    return event_counts
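
# Hedged follow-up example: estimate the rheobase (the first current step
# that evokes at least one AP) from the array returned by find_AP_peaks.
# This helper is an illustration only, not part of the original module.
def _rheobase_from_counts(event_counts):
    firing = event_counts[event_counts[:, 1] > 0]
    return firing[0, 0] if len(firing) > 0 else None
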
def batch_compile(file_list, summary_file_name):

    means_dict = {}

    for fname in file_list:
        stf.file_open(fname)

        file_df = compile_amplitudes_in_trace()

        means_dict[stf.get_filename()] = file_df.mean(axis=0)

    summary_df = pd.DataFrame(means_dict)
    summary_df.to_excel(str(summary_file_name) + '_amplitudes_compiled.xlsx')

    return (summary_df)
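
# Hedged usage sketch for batch_compile: build the file list with glob and
# compile one summary workbook. The '*.abf' pattern is an assumption about
# the recording format.
def _demo_batch_compile():
    import glob
    files = sorted(glob.glob('*.abf'))
    summary = batch_compile(files, 'experiment_summary')
    print(summary.head())
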
def find_ADP_thresholds_for_file(current_start_file, current_delta_file,
                                 *argv):
    """ count number of APs in traces with current injection/gradually increasing steps
	inputs: (time (msec) to start search, length of search region, starting current value, current delta between traces, threshold value, deflection direction ('up'/'down'), mark traces (True/False))"""
    if len(argv) > 0:
        threshold_value_file = argv[0]
        deflection_direction_file = argv[1]
        mark_option_file = argv[2]
        start_msec_file = float(argv[3])
        delta_msec_file = float(argv[4])
    else:
        threshold_value_file = 0
        deflection_direction_file = 'up'
        mark_option_file = True
        start_msec_file = float(stf.get_peak_start(True))
        delta_msec_file = float(stf.get_peak_end(True) - start_msec_file)

    loaded_file = stf.get_filename()[:-4]
    event_counts = np.zeros((stf.get_size_channel(), 2))
    trace_df_dict = {}
    for trace_ in range(stf.get_size_channel()):
        AP_count_for_trace, df_for_trace = find_AP_peak_ADP_trace(
            trace_, threshold_value_file, deflection_direction_file,
            mark_option_file, start_msec_file, delta_msec_file)
        trace_df_dict['trace' + str(trace_).zfill(3)] = df_for_trace

        # record the counts inside the loop so every trace is stored,
        # not just the last one
        event_counts[trace_][0] = current_start_file + (current_delta_file *
                                                        trace_)
        event_counts[trace_][1] = AP_count_for_trace

    np.savetxt(loaded_file + '_AP_counts.csv',
               event_counts,
               delimiter=',',
               newline='\n')
    output_path = loaded_file + 'ADP_thresholds.xlsx'
    xlsx_out = pd.ExcelWriter(output_path, engine='xlsxwriter')
    for trace_name, trace_df in sorted(trace_df_dict.items()):
        trace_df.to_excel(xlsx_out, sheet_name=trace_name)
    xlsx_out.save()
    return (True)
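
# Hedged usage sketch for find_ADP_thresholds_for_file. Assumes a
# current-step recording is open and find_AP_peak_ADP_trace is defined
# elsewhere in this module; the five optional arguments mirror the defaults
# and all values are placeholders.
def _demo_find_ADP_thresholds():
    find_ADP_thresholds_for_file(-100.0, 50.0,  # current start/delta
                                 0, 'up', True, 500.0, 1000.0)
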
def compile_amplitudes_in_trace():

    # for each trace in the file run find_baseline_amplitude and stack the
    # results under a header row (converted back to float in the DataFrame)
    output_array = np.array(['baseline', 'peak', 'peak_from_baseline'])

    for trace in range(stf.get_size_channel()):

        stf.set_trace(trace)

        fba_output = find_baseline_amplitude(10)

        output_array = np.vstack([output_array, fba_output])

    output_df = pd.DataFrame(output_array[1:],
                             columns=output_array[0],
                             dtype=float)

    # use at most the last 40 characters of the file name, minus the
    # 4-character extension
    output_df.to_excel(str(stf.get_filename()[-40:-4]) + '.xlsx')

    return (output_df)
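
# Hedged single-file usage for compile_amplitudes_in_trace. Assumes a
# recording is already open in Stimfit and that find_baseline_amplitude is
# defined elsewhere in this module.
def _demo_compile_current_file():
    df = compile_amplitudes_in_trace()
    print(df.mean(axis=0))  # mean baseline, peak, and peak_from_baseline
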
"""
Perform event detection as described in the Stimfit manual [1].
It creates preliminary and final templates from the file 'minis.dat'.

You can download the file here: http://stimfit.org/tutorial/minis.dat

last revision:  Wed Sep  5 09:38:41 CEST 2018

C. Schmidt-Hieber

[1] https://neurodroid.github.io/stimfit/manual/event_extraction.html
"""

import stf
from wx import MessageBox

if stf.get_filename()[-9:] != 'minis.dat':
    MessageBox('Use minis.dat for this demo.', 'Warning')


def preliminary():
    """
    Sets peak, base and fit cursors around a synaptic event
    and performs a biexponential fit to create the preliminary template
    for event detection.
    """
    stf.base.cursor_index = (209600, 209900)
    stf.peak.cursor_index = (209900, 210500)
    stf.fit.cursor_index = (209900, 210400)

    stf.set_peak_mean(3)