def abf_file_to_df(path_to_abf_file):
    """Inputs: file path to abf file
    Outputs: multidimensional data frame containing data organized by sweeps and channels
    """
    r = io.AxonIO(filename=path_to_abf_file)
    bl = r.read_block(lazy=False)
    num_channels = len(bl.segments[0].analogsignals)
    channels = []
    df_list = []
    signals = []
    sweep_list = []
    for seg_num, seg in enumerate(bl.segments):
        channels = ['channel_{0}'.format(str(i)) for i in range(num_channels)]
        signals = []
        for i in range(num_channels):
            signals.append(np.array(bl.segments[seg_num].analogsignals[i])[:, 0])
        data_dict = dict(zip(channels, signals))
        time = seg.analogsignals[0].times - seg.analogsignals[0].times[0]
        data_dict['time'] = time
        df = pd.DataFrame(data_dict)
        df_list.append(df)
        sweep_list.append('sweep' + str(seg_num + 1).zfill(3))
    df = pd.concat(df_list, keys=sweep_list, names=['sweep'])
    return df

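# Usage sketch (not from the original source): assumes numpy, pandas and neo.io
# are already imported as np, pd and io, and that 'example.abf' is a
# placeholder path to a real recording.
df = abf_file_to_df('example.abf')           # hypothetical file path
sweep1 = df.loc['sweep001']                  # rows for the first sweep
print(sweep1[['time', 'channel_0']].head())  # time plus the first channel
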
def load_abf(filename):
    ''' Use ``neo`` to load data from a .abf file '''
    assert filename.endswith('abf'), "Not an .abf file"
    from neo import io
    r = io.AxonIO(filename=filename)
    bl = r.read_block(lazy=False, cascade=True)
    return bl

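# Usage sketch (assumption, not part of the source): iterate over the sweeps
# of the returned neo Block; 'recording.abf' is a placeholder filename.
block = load_abf('recording.abf')
for segment in block.segments:
    first_channel = segment.analogsignals[0]
    print(first_channel.sampling_rate, first_channel.units)
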
def impt(link):
    read_abf = io.AxonIO(filename=link)
    read_blocks = read_abf.read_block(lazy=False, cascade=True)
    reader_data = read_blocks.segments[0].analogsignals
    print("INFO #1")
    print(read_abf.name, read_abf.description, read_abf.extensions,
          read_abf.filename, read_abf.has_header,
          read_abf.is_readable, read_abf.is_streameable,
          read_abf.is_writable, read_abf.logger, read_abf.mode)
    print("INFO #2")
    print(read_blocks.annotations, read_abf.description,
          read_blocks.file_datetime, read_blocks.file_origin,
          read_blocks.index, read_blocks.name, read_blocks.rec_datetime)
    print("INFO #3")
    print(read_blocks, read_blocks.segments,
          read_blocks.segments[0].analogsignals)
    current = np.array(reader_data[0], float)
    length = np.array(reader_data[1], float)
    voltage = np.array(reader_data[2], float)
    current = np.reshape(current, len(current))
    length = np.reshape(length, len(length))
    voltage = np.reshape(voltage, len(voltage))
    return current, length, voltage

def total_trace(f):
    global Vm_trail
    global sampling_rate
    global trace_unit
    global total_time
    f = str(f)
    Vm_trail = []
    reader = nio.AxonIO(filename=f)
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    # print(sampling_rate)
    # print(trace_unit)
    for si, segment in enumerate(segments):
        analog_signals = segment.analogsignals
        for trace in analog_signals:
            v = trace
            v = np.ravel(v)
            v = v.magnitude
            tf = trace.t_stop
            ti = trace.t_start
            t = np.linspace(0, float(tf - ti), len(v))
            m = [v, t]
            Vm_trail.append(m)
            total_time = float(tf - ti)
    return Vm_trail

def __init__(self, fname, createFolder=False):
    """
    Load an ABF and make its stats and sweeps easily available.

    Arguments:
        fname - filename of an ABF object
        createFolder - if True, the ./swhlab/ folder will be created
    """
    logging.basicConfig(format=swhlab.logFormat,
                        datefmt=swhlab.logDateFormat,
                        level=swhlab.loglevel)
    self.log = logging.getLogger("swhlab ABF")
    self.log.setLevel(swhlab.loglevel)
    if "ABF object" in str(fname):
        self.log.debug("reusing same ABF object")
        for item in sorted(dir(fname)):
            try:
                setattr(self, item, getattr(fname, item))
            except:
                pass
        return
    self.log.debug("_" * 60)
    self.log.info("SWHLab (%s) loading ABF [%s]", swhlab.__version__, str(fname))
    if not os.path.exists(str(fname)):
        self.log.error("path doesn't exist!")
        return
    # load the ABF and populate properties
    self.ABFreader = io.AxonIO(filename=fname)
    self.ABFblock = self.ABFreader.read_block(lazy=False, cascade=True)
    self.header = self.ABFreader.read_header()
    self.protocomment = abfProtocol(fname)  # get ABF file comment
    self.ID = abfIDfromFname(fname)  # filename without extension
    self.filename = os.path.abspath(fname)  # full path to file on disk
    self.fileID = os.path.abspath(os.path.splitext(self.filename)[0])  # no extension
    self.outFolder = os.path.abspath(os.path.dirname(fname) + "/swhlab/")  # save stuff here
    self.outPre = os.path.join(self.outFolder, self.ID) + '_'  # save files prefixed this
    self.sweeps = self.ABFblock.size["segments"]  # number of sweeps in ABF
    self.timestamp = self.ABFblock.rec_datetime  # when the ABF recording started
    # these I still have to read directly out of the header
    self.holding = self.header['listDACInfo'][0]['fDACHoldingLevel']  # clamp current or voltage
    # we've pulled what we can out of the header, now proceed with advanced stuff
    self.derivative = False  # whether or not to use the first derivative
    self.setsweep()  # run setsweep to populate sweep properties
    self.comments_load()  # populate comments
    self.kernel = None  # variable which may be set for convolution
    if createFolder:
        self.output_touch()  # make sure output folder exists
    # TODO: detect if invalid or corrupted ABF
    self.log.debug("ABF loaded. (protocol: %s)" % self.protocomment)

def protocol_name(f):
    f = str(f)
    reader = nio.AxonIO(f)
    protocol_name = reader._axon_info['sProtocolPath']
    protocol_name = str(protocol_name).split("\\")[-1]
    protocol_name = protocol_name.split(".")[0]
    protocol_num = protocol_name.split("_")[0]
    del reader
    return protocol_num, protocol_name

def load_episodic(filename):
    '''
    load_episodic(filename)

    Loads episodic recordings from pClamp data in 'filename'. Returns the following:

    trace: a numpy array of size [t, n, c], where t is the number of samples
        per episode, n is the number of episodes (sweeps) and c is the number
        of channels.
    cinfo: a dictionary containing lists with the names and units of each of
        the channels; keys are 'names' and 'units'.
    '''
    # open the file
    try:
        r = io.AxonIO(filename=filename)
    except IOError as e:
        print('Problem loading specified file')

    # read file into blocks
    bl = r.read_block(lazy=False, cascade=True)

    # read in the header info
    head = r.read_header()

    # determine the input channels and their info
    chans = head['listADCInfo']
    nchans = len(chans)
    cinfo = {'names': [], 'units': []}
    for c in chans:
        cinfo['names'].append(c['ADCChNames'])
        cinfo['units'].append(c['ADCChUnits'])

    # determine the number of sweeps and their length
    nsweeps = np.size(bl.segments)
    nsamples = head['protocol']['lNumSamplesPerEpisode'] // nchans

    # initialize an array to store the data
    trace = np.zeros((nsamples, nsweeps, nchans))

    # load the data into the traces
    for c in range(nchans):
        for s in range(nsweeps):
            trace[:, [s], [c]] = bl.segments[s].analogsignals[c]

    return (trace, cinfo)

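# Usage sketch (assumption): unpack the trace array and channel info returned
# by load_episodic; 'episodic.abf' is a placeholder filename.
trace, cinfo = load_episodic('episodic.abf')
n_samples, n_sweeps, n_channels = trace.shape
print(cinfo['names'], cinfo['units'])
print('samples per sweep:', n_samples, 'sweeps:', n_sweeps, 'channels:', n_channels)
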
def load_gap_free_trace(file_to_load):
    """imports abf file, must be in directory of file
    Input: abf_file_to_load"""
    filename = file_to_load
    experiment_name = filename.rstrip('.abf')
    r = io.AxonIO(filename=file_to_load)
    bl = r.read_block(lazy=False, cascade=True)
    # segments are sweeps
    print(bl.segments[0].analogsignals[0].magnitude)
    # get sampling rate
    sampling_rate = bl.segments[0].analogsignals[0].sampling_rate
    print(sampling_rate)
    # adds channel 0 from each sweep to array
    print('file has')
    print(len(bl.segments))
    print('sweeps')
    print(len(bl.segments[0].analogsignals[0].magnitude))
    print('samples')
    channel_array = np.empty(
        (len(bl.segments) + 1, len(bl.segments[0].analogsignals[0])))
    print(channel_array.shape)
    for sweep in range(len(bl.segments)):
        for data_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):
            channel_array[sweep + 1][data_point] = (
                bl.segments[sweep].analogsignals[0].magnitude[data_point])
    print(channel_array[0][0:10])
    # make additional row for time
    samplingrate_Hz = sampling_rate.magnitude
    sampling_interval_msec = 1000 / float(samplingrate_Hz)
    for time_point in range(len(bl.segments[sweep].analogsignals[0].magnitude)):
        channel_array[0][time_point] = float(time_point) * sampling_interval_msec
    # write a csv file
    np.savetxt(experiment_name + 'abf_to_csv.csv',
               np.transpose(channel_array),
               delimiter=',', newline='\n')
    return channel_array

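# Usage sketch (assumption): the returned array holds the time row at index 0
# and one row per sweep after that; 'gap_free.abf' is a placeholder filename.
channel_array = load_gap_free_trace('gap_free.abf')
time_ms = channel_array[0]       # time row, in milliseconds
first_sweep = channel_array[1]   # channel 0 of the first sweep
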
def Channel_fetcher(f):
    global seg_no
    f = str(f)
    reader = nio.AxonIO(f)
    segments = reader.read_segment()
    an_sig = segments.analogsignals
    protocol_num = str(reader._axon_info['sProtocolPath']).split('\\')[-1]
    protocol_num = int(protocol_num.split('_')[0])
    chan_info = reader.header['signal_channels']
    chan_num = len(
        reader.read_block(signal_group_mode='split-all').segments[0].analogsignals)
    chan_num = int(chan_num)
    seg_no = len(reader.read_block(signal_group_mode='split-all').segments)
    return chan_num

def protocol_class(f):
    global protocol_type
    global protocols
    global protocol_unit
    protocols = []
    reader = nio.AxonIO(f)
    protocols = reader.read_raw_protocol()
    clamp_stat = protocols[2][0]
    if clamp_stat == 'pA':
        protocol_type = 'Current_clamp'
    elif clamp_stat == 'mV':
        protocol_type = 'Voltage_clamp'
    else:
        protocol_type = 'Unknown'
    protocol_unit = clamp_stat
    # print(f'protocol type is {protocol_type}')
    return protocol_type

def data_file_filter(f_list):
    r = []
    # print(f"length of file list before {len(f_list)}")
    for f in f_list:
        p = str(f)
        reader = nio.AxonIO(p)
        prot_name = reader._axon_info['sProtocolPath']
        prot_name = str(prot_name).split("\\")[-1]
        prot_name = prot_name.split(".")[0]
        prot_num = prot_name.split("_")[0]
        if prot_num.isdigit():
            r.append(f)
    f_list = r
    f_list.sort()
    del reader
    # print(f"length of file list later {len(f_list)}")
    return f_list

def read_abf(filepath):
    """
    Imports ABF file using neo io AxonIO, breaks it down by blocks
    which are then processed into a multidimensional pandas dataframe
    where each block corresponds to a sweep and columns represent time
    and each recorded channel.

    Parameters
    ----------
    filepath: str
        Full filepath WITH '.abf' extension.

    Return
    ------
    df: DataFrame
        Pandas DataFrame broken down by sweep.

    References
    ----------
    [1] https://neo.readthedocs.org/en/latest/index.html
    """
    r = io.AxonIO(filename=filepath)
    bl = r.read_block(lazy=False, cascade=True)
    num_channels = len(bl.segments[0].analogsignals)
    df_list = []
    sweep_list = []
    for seg_num, seg in enumerate(bl.segments):
        channels = ['primary'] + [
            'channel_{0}'.format(str(i + 1)) for i in range(num_channels - 1)
        ]
        signals = []
        for i in range(num_channels):
            data = np.array(bl.segments[seg_num].analogsignals[i].data)
            signals.append(data.T[0])
        data_dict = dict(zip(channels, signals))
        time = seg.analogsignals[0].times - seg.analogsignals[0].times[0]
        data_dict['time'] = time
        df = pd.DataFrame(data_dict)
        df_list.append(df)
        sweep_list.append('sweep' + str(seg_num + 1).zfill(3))
    df = pd.concat(df_list, keys=sweep_list, names=['sweep', 'index'])
    return df

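# Usage sketch (assumption): the MultiIndex levels are ('sweep', 'index'), so
# individual sweeps can be selected by label; 'cell01.abf' is a placeholder.
df = read_abf('cell01.abf')
sweep_names = df.index.get_level_values('sweep').unique()
print(sweep_names)
print(df.loc['sweep001', ['time', 'primary']].head())
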
def __init__(self, abf):
    self.abf = abf
    abf = str(self.abf)
    reader = nio.AxonIO(abf)
    self.abf_name = abf.split("/")[-1]
    prot_name = reader._axon_info['sProtocolPath']
    prot_name = str(prot_name).split("\\")[-1]
    # prot_name = prot_name.split(".")[0]
    self.protocol_name = prot_name
    self.recording = raw_trace(abf)
    self.protocol_raw_data = reader.read_raw_protocol()
    self.protol_trace = protol_trace(self.protocol_raw_data)
    self.sampling_rate = reader._sampling_rate
    self.protol_unit = self.protocol_raw_data[2][0]
    self.supported_obj = reader.supported_objects
    print(f"++++++{self.abf_name}+++++++++")
    print(f".......{self.protocol_name}........")
    print(f"'''''''{self.sampling_rate}'''''''''''''")

def make_df(source):
    """
    DESCRIPTION
        this method reads the source abf file located at the cwd and
        creates and returns it as a pandas DataFrame
    """
    r = io.AxonIO(filename=cwd + source)
    bl = r.read_block(lazy=False, cascade=True)
    # following prints the voltage values
    # print bl.segments[0].analogsignals
    # print bl.segments[0].eventarrays
    a = np.array(bl.segments[0].analogsignals)
    df = pd.DataFrame(data={
        'time(ms)': [float(i) / 10 for i in range(len(a[0]))],
        'voltage(mV)': a[0, :, 0] * 1000
    },
                      index=range(len(a[0])),
                      columns=['time(ms)', 'voltage(mV)'])
    return df

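# Usage sketch (assumption): 'cwd' must already point at the folder holding
# the recording; 'example.abf' is a placeholder file name inside that folder.
df = make_df('example.abf')
print(df.head())  # columns: time(ms) and voltage(mV)
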
def __init__(self, abf):
    self.abf = abf  # name of the file
    reader = nio.AxonIO(abf)
    self.name = abf.split("/")[-1]
    prot_name = reader._axon_info['sProtocolPath']
    prot_name = str(prot_name).split("\\")[-1]
    prot_name = prot_name.split(".")[-2]
    self.protocol_name = prot_name
    print(prot_name)
    self.analysis_func = proto_map[prot_name][0]
    self.plot_func = proto_map[prot_name][1]
    self.plot_pos = proto_map[prot_name][2]
    self.recording = raw_trace(abf)
    self.protocol_raw_data = reader.read_raw_protocol()
    self.protol_trace = protol_trace(self.protocol_raw_data)
    self.sampling_rate = reader._sampling_rate
    self.protol_unit = self.protocol_raw_data[2][0]
    print(self.abf)
    print(self.protocol_name)

def raw_trace(f):
    Vm_trail = []
    reader = nio.AxonIO(filename=f)
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    for si, segment in enumerate(segments):
        analog_signals = segment.analogsignals
        for trace in analog_signals:
            v = trace
            v = np.ravel(v)
            v = v.magnitude
            tf = trace.t_stop
            ti = trace.t_start
            t = np.linspace(0, float(tf - ti), len(v))
            m = [t, v]
            Vm_trail.append(m)
            total_time = float(tf - ti)
    print(f"dimension of Vm_trail = {len(Vm_trail)}")
    return Vm_trail

def load_data_MIND_stepRa(cell_ind):
    i = cell_ind

    # load some Axon data from ABF files
    file_name = os.path.join(data_folder[i], stepRa_file[i])
    # r is the name bound to the object created by io.AxonIO
    r = io.AxonIO(filename=file_name)
    # bl is the object that actually has the data, created by read_block
    bl = r.read_block()

    # get list of channel names
    channel_list = []
    for asig in bl.segments[0].analogsignals:
        channel_list.append(asig.name)

    sweep_start = 0
    sweep_end = len(bl.segments)
    sweep_pts = len(bl.segments[0].analogsignals[0].times)

    full_ts = np.zeros([sweep_pts, sweep_end])
    for j in np.arange(sweep_end):
        full_ts[:, j] = np.squeeze(bl.segments[j].analogsignals[0].times)

    stepRa_ts = np.zeros([sweep_pts, (sweep_end - sweep_start)])
    for j in np.arange(sweep_start, sweep_end):
        stepRa_ts[:, j - sweep_start] = np.squeeze(
            bl.segments[j].analogsignals[0].times)

    ind = channel_list.index('Chan2')
    stepRa_Im = np.zeros([sweep_pts, (sweep_end - sweep_start)])
    for l in np.arange(sweep_start, sweep_end):
        stepRa_Im[:, l - sweep_start] = np.squeeze(
            bl.segments[l].analogsignals[ind].data)

    ind = channel_list.index('Chan2Hold')
    stepRa_V = np.zeros([sweep_pts, (sweep_end - sweep_start)])
    for f in np.arange(sweep_start, sweep_end):
        stepRa_V[:, f - sweep_start] = np.squeeze(
            bl.segments[f].analogsignals[ind].data)

    return stepRa_ts, stepRa_Im, stepRa_V

def raw_trace(f):
    # use folder path in the previous loop to make use of the cell_*** folder path
    # allocation
    # columns = int(len(Vm_trail)/3)
    f = str(f)
    Vm_trail = []
    reader = nio.AxonIO(filename=f)
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    for si, segment in enumerate(segments):
        analog_signals = segment.analogsignals
        for trace in analog_signals:
            v = trace
            v = np.ravel(v)
            v = v.magnitude
            tf = trace.t_stop
            ti = trace.t_start
            t = np.linspace(0, float(tf - ti), len(v))
            m = [v, t]
            Vm_trail.append(m)
            total_time = float(tf - ti)
    return Vm_trail

# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 10:52:17 2018

@author: imbroscb
"""
from neo import io

data = io.AxonIO('16060608.abf')
b1 = data.read()[0]
chan = {}
for ch in range(len(b1.segments[0].analogsignals)):
    signal = []
    for s in range(len(b1.segments)):
        signal.append(b1.segments[s].analogsignals[ch])
    numb = ch + 1
    chan['ch%d' % numb] = signal

del ch
del numb
del s
del signal

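# Usage sketch (assumption): the loop above leaves a dict keyed 'ch1', 'ch2',
# ... where each value is a list with one AnalogSignal per sweep.
first_sweep_ch1 = chan['ch1'][0]
print(len(chan), 'channels,', len(chan['ch1']), 'sweeps per channel')
print(first_sweep_ch1.sampling_rate, first_sweep_ch1.units)
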
def get_data(recording):
    '''Opens the file specified in 'recording' using the reader that
    corresponds to .abf files'''
    traces = io.AxonIO(filename=recording)
    data = traces.read_block()
    return data

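# Usage sketch (assumption): count sweeps and channels in the returned neo
# Block; 'recording.abf' is a placeholder filename.
data = get_data('recording.abf')
print(len(data.segments), 'sweeps')
print(len(data.segments[0].analogsignals), 'channels in the first sweep')
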
def load_h5(browser, tree, push):
    """
    Main loading function. Initially written for .hdf5 files only, but it now
    also loads .tdms files. The whole thing could use some consolidation;
    there is redundancy and some of the functionality is not necessary.
    """
    browser.ui.fileDataTree.data = []
    index = browser.ui.dirTree.selectedIndexes()[0]
    currentFile = str(index.model().filePath(index))
    browser.currentFolder = os.path.dirname(currentFile)
    browser.ui.loadFolderInput.setText(browser.currentFolder)

    if '.hdf5' in currentFile:
        if browser.db:
            browser.db.close()
        browser.db = h5py.File(currentFile, 'r+')
        browser.dbType = 'hdf5'
        tree.clear()
        # Insert groups into the tree and add data to internal data list
        for group in browser.db:
            item = h5Item([str(group)])
            item.path = '/' + str(group)
            set_attrs(browser.db[group], item)
            tree.addTopLevelItem(item)
            populate_h5tree(browser, browser.db['/' + str(group)],
                            parentWidget=item, push=push)
        # Select first item of loaded list
        tree.setCurrentItem(tree.itemAt(0, 0))
        if push:
            browser.saveFolder = browser.currentFolder
            browser.ui.saveFolderInput.setText(browser.saveFolder)
            browser.ui.workingDataTree.setSortingEnabled(True)
            browser.ui.notesWidget.clear()
            for attr in browser.db.attrs:
                browser.ui.workingDataTree.root.attrs[attr] = browser.db.attrs[attr]
                if 'Notes' in attr:
                    browser.ui.notesWidget.setText(browser.db.attrs['Notes'])
                # if 'dt' in attr: browser.ui.workingDataTree.propsDt = str(browser.db.attrs[attr])
                if 'description' in attr:
                    browser.ui.propsTableWidget.setData(
                        {'Description': [browser.db.attrs['description']]})
                    # table.update_props(browser)
            browser.currentOpenFile = currentFile
            browser.currentSaveFile = currentFile
            browser.ui.workingDataTree.setHeaderLabels(
                [os.path.split(currentFile)[1]])
            browser.ui.workingDataTree.setSortingEnabled(False)  # Otherwise it screws up drag and drop

    if '.abf' in currentFile:
        browser.db = io.AxonIO(filename=currentFile)
        browser.dbType = 'abf'
        tree.clear()

        # read data
        browser.bl = browser.db.read_block(lazy=False, cascade=True)
        nSweeps = len(browser.bl.segments)
        signal = browser.bl.segments[0].analogsignals
        signalItem = signal.pop()
        samplingRate = np.array(signalItem.sampling_rate)  # Hz
        if 'A' in str(signalItem.units):
            groupname = 'current'
        else:
            groupname = 'voltage'
        item = h5Item([groupname])
        tree.addTopLevelItem(item)
        browser.bl = browser.db.read_block(lazy=False, cascade=True)
        browser.saveFolder = browser.currentFolder
        browser.ui.saveFolderInput.setText(browser.saveFolder)
        browser.ui.workingDataTree.setSortingEnabled(True)
        browser.ui.notesWidget.clear()
        browser.currentOpenFile = currentFile
        browser.currentSaveFile = os.path.splitext(currentFile)[0] + '.hdf5'
        browser.ui.workingDataTree.setHeaderLabels(
            [os.path.split(browser.currentSaveFile)[1]])
        browser.ui.workingDataTree.setSortingEnabled(False)  # Otherwise it screws up drag and drop

        for sweep in np.arange(1, nSweeps):
            datasetname = 'sweep_' + str(sweep)
            child = h5Item([datasetname])
            child.sweep = sweep
            child.attrs['dt'] = 1. / (samplingRate / 1000.)
            item.addChild(child)
            if push:
                child.data = get_dataFromFile(browser, child)
                # Deal with strings (display in Notes and convert to ASCII)
                text = []
                if (isinstance(child.data[0], basestring)) == True:
                    browser.ui.notesWidget.append(str(channelname))
                    for d in child.data:
                        if bool(d):
                            text.append(d)  # Get rid of empty strings
                    child.data = np.string_(text)  # Convert to fixed length ASCII
                    for t in text:
                        browser.ui.notesWidget.append(t)
                    browser.ui.notesWidget.append('\r')
                child.listIndex = len(browser.ui.workingDataTree.dataItems)
                browser.ui.workingDataTree.dataItems.append(child)

    elif '.tdms' in currentFile:
        browser.db = TdmsFile(currentFile)
        browser.dbType = 'tdms'
        tree.clear()

        # Deal with properties
        for attr in browser.db.object().properties:
            if 'kHz' in attr:
                browser.ui.fileDataTree.root.attrs['sampling_rate(kHz)'] = \
                    browser.db.object().properties[attr]
                if push:
                    browser.ui.workingDataTree.root.attrs['sampling_rate(kHz)'] = \
                        browser.db.object().properties[attr]
            browser.ui.fileDataTree.root.attrs[attr] = \
                browser.db.object().properties[attr]
            if push:
                browser.ui.workingDataTree.root.attrs[attr] = \
                    browser.db.object().properties[attr]

        # Insert groups into the tree and add data to internal data list
        if push:
            try:
                browser.db.object().properties['imaging']
                imaging = True
            except KeyError:
                imaging = False
            browser.saveFolder = browser.currentFolder
            browser.ui.saveFolderInput.setText(browser.saveFolder)
            browser.ui.workingDataTree.setSortingEnabled(True)
            browser.ui.notesWidget.clear()
            browser.currentOpenFile = currentFile
            browser.currentSaveFile = os.path.splitext(currentFile)[0] + '.hdf5'
            browser.ui.workingDataTree.setHeaderLabels(
                [os.path.split(browser.currentSaveFile)[1]])
            browser.ui.workingDataTree.setSortingEnabled(False)  # Otherwise it screws up drag and drop

        for group in browser.db.groups():
            item = h5Item([str(group)])
            tree.addTopLevelItem(item)
            for channel in browser.db.group_channels(group):
                channelname = re.findall(r"'(.*?)'", channel.path, re.DOTALL)[1]
                child = h5Item([str(channelname)])
                child.group = group
                child.channel = channelname
                item.addChild(child)
                if 'kHz' in str(tree.root.attrs):
                    child.attrs['dt'] = 1. / tree.root.attrs['sampling_rate(kHz)']
                if push:
                    child.data = get_dataFromFile(browser, child)
                    if imaging:
                        pixels = float(browser.db.object().properties['pixels_per_line'])
                        lines = float(browser.db.object().properties['lines_per_frame'])
                        imageArray = imagefun.array2image(child.data, (pixels, lines))
                        child.data = imageArray
                    # Deal with strings (display in Notes and convert to ASCII)
                    text = []
                    if (isinstance(child.data[0], basestring)) == True:
                        browser.ui.notesWidget.append(str(channelname))
                        for d in child.data:
                            if bool(d):
                                text.append(d)  # Get rid of empty strings
                        child.data = np.string_(text)  # Convert to fixed length ASCII
                        for t in text:
                            browser.ui.notesWidget.append(t)
                        browser.ui.notesWidget.append('\r')
                    child.listIndex = len(browser.ui.workingDataTree.dataItems)
                    browser.ui.workingDataTree.dataItems.append(child)
                    # if 'kHz' in str(browser.ui.workingDataTree.root.attrs):
                    #     child.attrs['dt'] = 1./browser.ui.workingDataTree.root.attrs['sampling_rate(kHz)']

    elif '.mp4' in currentFile:
        browser.dbType = 'video'
        tree.clear()
        item = h5Item(['Video stream'])
        item.attrs['mrl'] = currentFile
        item.attrs['video'] = 'True'
        tree.addTopLevelItem(item)
        # Read and show some properties
        clip = VideoFileClip(currentFile)
        resolution = h5Item(
            ['Resolution: ' + str(clip.size[0]) + 'x' + str(clip.size[1])])
        item.addChild(resolution)
        frameRate = h5Item(['Frame rate: ' + str(clip.fps)])
        item.addChild(frameRate)
        duration = h5Item(['Duration: ' + str(clip.duration) + ' sec'])
        item.addChild(duration)

def threshold_protocol(Vm_trail, prot, f, outdir):
    f_str = str(f)
    reader = nio.AxonIO(f_str)
    protocols = reader.read_raw_protocol()
    protocol_unit = clamp_stat = protocols[2][0]
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    global Threshold_voltage
    fig = plt.figure(figsize=(16, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    thresh_state = 0
    iter_num = 0
    trace_num = 0
    for vi, v in enumerate(Vm_trail):
        iter_num += 1
        trace = v[0]
        time = v[1]
        v = np.copy(trace)
        t = np.copy(time)
        print(f"iterating through loop = {iter_num}")
        print(f"v trace = {v}, t trace = {t}")
        if thresh_state == 0:
            print(f"passed threshstate {thresh_state}")
            Vm = str(np.around(np.mean(v[0:299]), decimals=2))
            del_Vm = str(np.around(
                (np.mean(v[len(v) - 300:len(v)]) - np.mean(v[0:299])),
                decimals=2))
            print(f"got the del vm value = {del_Vm}")
            v_lowercut = np.copy(v)
            t = time
            v_lowercut[v_lowercut < -50] = -50
            print(f"lower cut variable assigned = {v_lowercut}")
            v_smooth = butter_bandpass_filter(v_lowercut, 1, 500,
                                              sampling_rate, order=1)
            print(f"band pass applied {v_smooth}")
            peaks, peak_dict = signal.find_peaks(x=v_smooth,
                                                 height=None,
                                                 threshold=None,
                                                 distance=None,
                                                 prominence=5,
                                                 width=None,
                                                 wlen=None,
                                                 rel_height=0.5,
                                                 plateau_size=None)
            # v_cut = butter_bandpass_filter(v_smooth, 50, 500, sampling_rate, order=1)
            v_peaks = v_smooth
            print(f" value of v peaks = {v_peaks}")
            t_peaks = t
            thr_peak = 3
            # print(f"peak t = {t_peaks}")
            # print(f"peak value comparison {trace[peaks[0]]}")
            if len(peaks) != 0:
                print("passed to plotting")
                thresh_state = 1
                dv = np.diff(v_smooth)
                dt = np.diff(t)
                dv_dt = dv / dt
                dv_dt_max = np.max(dv / dt)
                v_dt_max = np.where(dv_dt == dv_dt_max)[0] - 20
                t_dt_max = np.where(dv_dt == dv_dt_max)[0] - 20
                print(f" peak index on time axis = {time[peaks[0]]}")
                ax1.scatter(time[peaks[0] - 10], trace[peaks[0] - 10],
                            color='r', label='spike')
                ax1.plot(t, v, alpha=0.5, label='smoothened')
                ax1.plot(time, trace, alpha=0.5,
                         label=f'raw trace no. {iter_num}')
                ax1.scatter(time[t_dt_max], trace[v_dt_max],
                            label="threshold", color='k')
                Threshold_voltage = ("firing threshold = " +
                                     str(np.around(trace[v_dt_max][0], decimals=2)))
                trace_num = iter_num
    plt.figtext(0.10, 0.0, Threshold_voltage + "mV",
                fontsize=12, va="top", ha="left")
    plt.figtext(0.10, -0.05, f"membrane voltage = {Vm} mV",
                fontsize=12, va="top", ha="left")
    plt.figtext(0.10, -0.10, f"membrane voltage difference = {del_Vm}mV",
                fontsize=12, va="top", ha="left")
    P_traces = protocols[0]
    iter_num_p = 0
    Threshold_injection = "NA"
    for p in P_traces:
        iter_num_p += 1
        if iter_num_p == 1:
            for i in p:
                t_ = len(i) / sampling_rate
                t = np.linspace(0, float(t_), len(i))
                ax2.plot(i, color='g')
        elif iter_num_p == trace_num:
            c_inj = []
            for i in p:
                t_ = len(i) / sampling_rate
                t = np.linspace(0, float(t_), len(i))
                ax2.plot(i, color='k')
                c_inj.append(i)
            Threshold_injection = (f"Injected current at threshold = "
                                   f"{str(np.max(c_inj))}")
        elif iter_num_p == len(P_traces):
            for i in p:
                t_ = len(i) / sampling_rate
                t = np.linspace(0, float(t_), len(i))
                ax2.plot(i, color='r')
    First_inj = mpatches.Patch(color='green', label='First injection')
    Thres_inj = mpatches.Patch(color='black', label='Threshold injection')
    Last_inj = mpatches.Patch(color='red', label='Final injection')
    ax2.legend(handles=[First_inj, Thres_inj, Last_inj])
    plt.figtext(0.55, 0.0, Threshold_injection + "pA",
                fontsize=12, va="top", ha="left")
    ax1.set_title('Recording')
    ax1.set_ylabel(trace_unit)
    ax1.set_xlabel('time(s)')
    ax1.legend()
    ax2.set_title('Protocol trace')
    ax2.set_ylabel(protocol_unit)
    ax2.set_xlabel('time(s)')
    # ax2.legend()
    plt.suptitle(f'Protocol type: {prot}', fontsize=15)
    plt.figtext(0.10, -0.15, f"sampling rate = {sampling_rate}",
                fontsize=12, va="top", ha="left")
    # plt.figtext(0.10, -0.20, f"total recording time = {total_time}",
    #             fontsize=12, va="top", ha="left")
    outfile = str(outdir) + "/" + str(f.stem) + f" {prot}_{vi}.png"
    plt.savefig(outfile, bbox_inches='tight')
    print("-----> Saved to %s" % outfile)
    fig = plt.close()

def load_data_MIND(cell_ind):
    i = cell_ind

    # load some Axon data from ABF files
    file_name = os.path.join(data_folder[i], ephy_file[i])
    # r is the name bound to the object created by io.AxonIO
    r = io.AxonIO(filename=file_name)
    # bl is the object that actually has the data, created by read_block
    bl = r.read_block()

    # get list of channel names
    channel_list = []
    for asig in bl.segments[0].analogsignals:
        channel_list.append(asig.name)

    if np.isnan(sweep_lenght[i]):
        full_ts = np.copy(bl.segments[0].analogsignals[0].times)
        lfp_raw = np.copy(bl.segments[0].analogsignals[1].data)
        lfp_raw = lfp_raw[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        lfp_raw = np.squeeze(lfp_raw)
        Vm = np.copy(bl.segments[0].analogsignals[0].data)
        Vm = Vm[(full_ts >= good_seconds_start[i]) &
                (full_ts < good_seconds_stop[i])]
        Vm = np.squeeze(Vm)
        Vm_ts = full_ts[(full_ts >= good_seconds_start[i]) &
                        (full_ts < good_seconds_stop[i])]
    else:
        sweep_end = len(bl.segments)
        sweep_pts = len(bl.segments[0].analogsignals[0].times)
        full_ts = np.zeros(sweep_pts * sweep_end)
        for j in np.arange(sweep_end):
            start_ind = j * sweep_pts
            a = np.squeeze(bl.segments[j].analogsignals[0].times)
            full_ts[start_ind:start_ind + sweep_pts] = a
        lfp_raw = np.zeros(sweep_pts * sweep_end)
        for k in np.arange(sweep_end):
            start_ind = k * sweep_pts
            a = np.squeeze(bl.segments[k].analogsignals[1].data)
            lfp_raw[start_ind:start_ind + sweep_pts] = a
        Vm = np.zeros(sweep_pts * sweep_end)
        for l in np.arange(sweep_end):
            start_ind = l * sweep_pts
            a = np.squeeze(bl.segments[l].analogsignals[0].data)
            Vm[start_ind:start_ind + sweep_pts] = a
        # remove the times that we don't want
        lfp_raw = lfp_raw[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        Vm = Vm[(full_ts >= good_seconds_start[i]) &
                (full_ts < good_seconds_stop[i])]
        Vm_ts = full_ts[(full_ts >= good_seconds_start[i]) &
                        (full_ts < good_seconds_stop[i])]

    # downsample lfp 16 times to a final frequency of 1250 Hz
    ds_factor = 16
    lfp_ts, lfp_ds = ds(Vm_ts, lfp_raw, ds_factor)

    # find the sampling frequency and nyquist of the downsampled LFP
    samp_freq = 1 / (lfp_ts[1] - lfp_ts[0])
    nyq = samp_freq / 2

    # filter the lfp between 0.2 Hz and 300 Hz
    # this algorithm seems to cause no time shift
    # high pass filter
    b, a = signal.butter(4, 0.2 / nyq, "high", analog=False)
    lfp_highpass = signal.filtfilt(b, a, lfp_ds)
    # low pass filter
    b, a = signal.butter(4, 300 / nyq, "low", analog=False)
    lfp = signal.filtfilt(b, a, lfp_highpass)

    # if the file has 'Chan2Hold', load it, if not, create a nan vector
    ds_factor = 10000
    if 'Chan2Hold' in channel_list:
        ind = channel_list.index('Chan2Hold')
        if np.isnan(sweep_lenght[i]):
            Vm_Ih = np.squeeze(np.copy(bl.segments[0].analogsignals[ind].data))
            Vm_Ih = Vm_Ih[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        else:
            Vm_Ih = np.zeros(sweep_pts * sweep_end)
            for f in np.arange(sweep_end):
                start_ind = f * sweep_pts
                a = np.squeeze(bl.segments[f].analogsignals[ind].data)
                Vm_Ih[start_ind:start_ind + sweep_pts] = a
            # keep only the good seconds
            Vm_Ih = Vm_Ih[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        # downsample Vm_Ih to 2 Hz (factor of 10000)
        Vm_Ih = np.mean(
            np.resize(Vm_Ih, (int(np.floor(Vm_Ih.size / ds_factor)), ds_factor)), 1)
        Vm_Ih_ts = Vm_ts[np.arange(0, Vm_ts.size, ds_factor)]
        # trim off last time stamp if necessary
        Vm_Ih_ts = Vm_Ih_ts[0:Vm_Ih.size]
    else:
        Vm_Ih = np.empty(int(Vm_ts.size / ds_factor))
        Vm_Ih[:] = np.nan
        Vm_Ih_ts = Vm_ts[np.arange(0, Vm_ts.size, ds_factor)]
        # trim off last time stamp if necessary
        Vm_Ih_ts = Vm_Ih_ts[0:Vm_Ih.size]

    return Vm_ts, Vm, lfp, lfp_ts, Vm_Ih_ts, Vm_Ih

def process(config=None,
            filename=None,
            cellname=None,
            expname=None,
            stim_feats=None,
            idx_file=None,
            ljp=0,
            v_corr=0):

    path = config['path']
    cells = config['cells']
    # features = config['features']
    # options = config['options']

    data = OrderedDict()
    data['voltage'] = []
    data['current'] = []
    data['dt'] = []
    data['t'] = []
    data['ton'] = []
    data['toff'] = []
    data['tend'] = []
    data['amp'] = []
    data['hypamp'] = []
    data['filename'] = []

    logger.debug(" Adding axon file %s", filename)

    f = os.path.join(path, cellname, filename + '.abf')
    r = io.AxonIO(filename=f)

    # read header
    # Below line doesn't work anymore due to api change
    # Now using rawio
    # header = r.read_header()
    header = neo.rawio.axonrawio.parse_axon_soup(f)

    # read sampling rate
    sampling_rate = 1.e6 / header['protocol']['fADCSequenceInterval']
    dt = 1. / int(sampling_rate) * 1e3
    # version = header['fFileVersionNumber']  # read file version

    bl = r.read_block(lazy=False)

    stim_info = None
    if 'stim_info' in cells[cellname]['experiments'][expname]:
        stim_info = cells[cellname]['experiments'][expname]['stim_info']
    else:
        # read stimulus features if present
        stim_feats = []
        if 'stim_feats' in cells[cellname]['experiments'][expname]:
            stim_feats = cells[cellname]['experiments'][expname]['stim_feats']

        all_stims = []
        if stim_feats:
            res = stim_feats_from_meta(stim_feats, len(bl.segments), idx_file)
            if res[0]:
                all_stims = res[1]
            else:
                print(res[1])
        if not all_stims:
            res = stim_feats_from_header(header)
            if res[0]:
                all_stims = res[1]
            else:
                pprint.pprint("No valid stimulus was found in metadata or files. "
                              "Skipping current file")
                return

    # for all segments in file
    for i_seg, seg in enumerate(bl.segments):
        # dt = 1./int(seg.analogsignals[0].sampling_rate) * 1e3
        if stim_info is not None:
            voltage = numpy.array(seg.analogsignals[0]).astype(numpy.float64).flatten()
            current = numpy.array(seg.analogsignals[1]).astype(numpy.float64).flatten()
            t = numpy.arange(len(voltage)) * dt
            ton = stim_info['ton']
            toff = stim_info['toff']
            ion = int(ton / dt)
            ioff = int(toff / dt)
            if 'tamp' in stim_info:
                tamp = [int(stim_info['tamp'][0] / dt),
                        int(stim_info['tamp'][1] / dt)]
            else:
                tamp = [ion, ioff]
            i_unit = stim_info['i_unit']
            if i_unit == 'A':
                current = current * 1e9  # nA
            elif i_unit == 'pA':
                current = current * 1e-3  # nA
            else:
                raise Exception("Unit current not configured!")
            amp = numpy.nanmean(current[tamp[0]:tamp[1]])
            hypamp = numpy.nanmean(current[0:ion])
        else:
            voltage = numpy.array(seg.analogsignals[0]).astype(numpy.float64)
            t = numpy.arange(len(voltage)) * dt

            ton = all_stims[i_seg][1]
            toff = all_stims[i_seg][2]
            amp = numpy.float64(all_stims[i_seg][3])
            ion = int(ton / dt)
            ioff = int(toff / dt)

            current = []
            current = numpy.zeros(len(voltage))
            current[ion:ioff] = amp

            # estimate hyperpolarization current
            hypamp = numpy.mean(current[0:ion])

            # clean voltage from transients
            voltage[ion:ion + int(numpy.ceil(0.4 / dt))] = \
                voltage[ion + int(numpy.ceil(0.4 / dt))]
            voltage[ioff:ioff + int(numpy.ceil(0.4 / dt))] = \
                voltage[ioff + int(numpy.ceil(0.4 / dt))]

        # normalize membrane potential to known value (given in UCL excel sheet)
        if v_corr:
            if len(v_corr) == 1 and v_corr[0] != 0.0:
                voltage = voltage - numpy.mean(voltage[0:ion]) + v_corr[0]
            elif len(v_corr) - 1 >= idx_file and v_corr[idx_file] != 0.0:
                voltage = voltage - numpy.mean(voltage[0:ion]) + v_corr[idx_file]

        voltage = voltage - ljp

        # clip spikes after stimulus so they are not analysed
        voltage[ioff:] = numpy.clip(voltage[ioff:], -300, -40)

        if ('exclude' in cells[cellname] and
                any(abs(cells[cellname]['exclude'][idx_file] - amp) < 1e-4)):
            continue  # llb
        else:
            data['voltage'].append(voltage)
            data['current'].append(current)
            data['dt'].append(dt)
            data['t'].append(t)
            data['tend'].append(t[-1])
            data['ton'].append(ton)
            data['toff'].append(toff)
            data['amp'].append(amp)
            data['hypamp'].append(hypamp)
            data['filename'].append(filename)

    return data

    pass
try:
    os.mkdir(folder_to_read + '/Results/Voltage_clamp')
except:
    pass

# make the file path
results_folder = str(folder_to_read + '/Results/Voltage_clamp/')

# list out all the files with .abf extension for plotting
for root, dirs, files in os.walk(folder_to_read):
    for file in files:
        if file.endswith(".abf"):
            # print(file)
            file_name = str(file).split(".")[0]
            print(file_name)
            # import the file of interest
            file_to_read = nio.AxonIO(root + file)
            segments = file_to_read.read_block().segments
            # segments = ()
            iteration_number = 0
            for segment in segments:
                # print(segment)
                analog_signals = segment.analogsignals
                # print(analog_signals)
                for trace in analog_signals:
                    iteration_number += 1
                    # print(trace)
                    v = trace
                    v = np.ravel(v)
                    # print(v)
                    if '1.0 mV' == str(v.units):
                        continue

def Base_line_protocol(Vm_trail, prot, f, outdir):
    f_str = str(f)
    reader = nio.AxonIO(f_str)
    protocols = reader.read_raw_protocol()
    protocol_unit = clamp_stat = protocols[2][0]
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    fig = plt.figure(figsize=(16, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    RMP = []
    iter_num = 0
    for v in enumerate(Vm_trail):
        trace = v[1][0]
        time = v[1][1]
        mean_RMP = np.mean(trace)
        cv_rmp = float((np.std(trace)) / mean_RMP)
        if mean_RMP < -50:
            iter_num += 1
            ax1.plot(time, trace, label=f'trace no. {iter_num}', alpha=0.5)
    P_traces = protocols[0]
    RMP = mean_RMP
    for p in P_traces:
        for i in p:
            t_ = len(i) / sampling_rate
            t = np.linspace(0, float(t_), len(i))
            ax2.plot(t, i)
    # First_inj = mpatches.Patch(color='green', label='First injection')
    # Thres_inj = mpatches.Patch(color='black', label='Threshold injection')
    # Last_inj = mpatches.Patch(color='red', label='Final injection')
    # ax2.legend(handles=[First_inj, Thres_inj, Last_inj])
    ax1.set_title('Recording')
    ax1.set_ylabel(trace_unit)
    ax1.set_xlabel('time(s)')
    ax1.legend()
    ax1.set_ylim(-90, -20)
    ax2.set_title('Protocol trace')
    ax2.set_ylabel(protocol_unit)
    ax2.set_xlabel('time(s)')
    # ax2.legend()
    plt.figtext(0.10, -0.05,
                "Resting membrane potential average from"
                f" {iter_num} traces = " + str(np.around(RMP, decimals=2)) + " mV",
                fontsize=12, va="top", ha="left")
    plt.suptitle(f'Protocol type: {prot}', fontsize=15)
    plt.figtext(0.10, -0.10, f"sampling rate = {sampling_rate}",
                fontsize=12, va="top", ha="left")
    plt.figtext(0.10, -0.15,
                f"cv of trace = {np.around(cv_rmp, decimals=3)}",
                fontsize=12, va="top", ha="left")
    outfile = str(outdir) + "/" + str(f.stem) + f" {prot}.png"
    plt.savefig(outfile, bbox_inches='tight')
    print("-----> Saved to %s" % outfile)
    fig = plt.close()

def loadFile(filename):
    r = io.AxonIO(filename)
    bl = r.read_block(lazy=False, cascade=True)
    return bl

def sampling_rate(f):
    reader = nio.AxonIO(filename=f)
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    return sampling_rate

def load_data_MIND(cell_ind):
    i = cell_ind

    # load some Axon data from ABF files
    file_name = os.path.join(data_folder[i], ephy_file[i])
    # r is the name bound to the object created by io.AxonIO
    r = io.AxonIO(filename=file_name)
    # bl is the object that actually has the data, created by read_block
    bl = r.read_block()

    # get list of channel names
    channel_list = []
    for asig in bl.segments[0].analogsignals:
        channel_list.append(asig.name)

    if np.isnan(sweep_lenght[i]):
        full_ts = np.copy(bl.segments[0].analogsignals[0].times)
        lfp_raw = np.copy(bl.segments[0].analogsignals[1].data)
        lfp_raw = lfp_raw[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        lfp_raw = np.squeeze(lfp_raw)
        Vm = np.copy(bl.segments[0].analogsignals[0].data)
        Vm = Vm[(full_ts >= good_seconds_start[i]) &
                (full_ts < good_seconds_stop[i])]
        Vm = np.squeeze(Vm)
        Vm_ts = full_ts[(full_ts >= good_seconds_start[i]) &
                        (full_ts < good_seconds_stop[i])]
    else:
        sweep_end = len(bl.segments)
        sweep_pts = len(bl.segments[0].analogsignals[0].times)
        full_ts = np.zeros(sweep_pts * sweep_end)
        for j in np.arange(sweep_end):
            start_ind = j * sweep_pts
            a = np.squeeze(bl.segments[j].analogsignals[0].times)
            full_ts[start_ind:start_ind + sweep_pts] = a
        lfp_raw = np.zeros(sweep_pts * sweep_end)
        for k in np.arange(sweep_end):
            start_ind = k * sweep_pts
            a = np.squeeze(bl.segments[k].analogsignals[1].data)
            lfp_raw[start_ind:start_ind + sweep_pts] = a
        Vm = np.zeros(sweep_pts * sweep_end)
        for l in np.arange(sweep_end):
            start_ind = l * sweep_pts
            a = np.squeeze(bl.segments[l].analogsignals[0].data)
            Vm[start_ind:start_ind + sweep_pts] = a
        # remove the times that we don't want
        lfp_raw = lfp_raw[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        Vm = Vm[(full_ts >= good_seconds_start[i]) &
                (full_ts < good_seconds_stop[i])]
        Vm_ts = full_ts[(full_ts >= good_seconds_start[i]) &
                        (full_ts < good_seconds_stop[i])]

    # downsample lfp to 2000 Hz (factor of 10)
    ds_factor = 10
    lfp_ds_ts_10, lfp_ds_10 = ds(Vm_ts, lfp_raw, ds_factor)
    samp_freq = 1 / (lfp_ds_ts_10[1] - lfp_ds_ts_10[0])
    nyq = samp_freq / 2

    # filter the lfp between 0.2 Hz and 100 Hz
    # this algorithm seems to cause no time shift
    # high pass filter
    b, a = signal.butter(4, 0.2 / nyq, "high", analog=False)
    lfp_highpass = signal.filtfilt(b, a, lfp_ds_10)
    # low pass filter
    wp = 80   # Hz, passband
    ws = 120  # Hz, stopband
    N, Wn = signal.buttord(wp / nyq, ws / nyq, 3, 40)
    b, a = signal.butter(N, Wn, "low", analog=False)
    lfp_f = signal.filtfilt(b, a, lfp_highpass)

    # downsample lfp a second time to 200 Hz (factor of 10)
    ds_factor = 10
    lfp_ts, lfp = ds(lfp_ds_ts_10, lfp_f, ds_factor)

    # if the file has synchronization info for the wheel, use it.
    # If not, keep the original timestamps.
    # keep only the selection of wheel data according to the good seconds
    if 'IN7' in channel_list:
        ind = channel_list.index('IN7')
        if np.isnan(sweep_lenght[i]):
            TTL = np.squeeze(np.copy(bl.segments[0].analogsignals[ind].data))
        else:
            TTL = np.zeros(sweep_pts * sweep_end)
            for l in np.arange(sweep_end):
                start_ind = l * sweep_pts
                a = np.squeeze(bl.segments[l].analogsignals[ind].data)
                TTL[start_ind:start_ind + sweep_pts] = a
        # find the axon times where the 32 Hz goes from high V to low
        wh_ts = full_ts[np.ediff1d(1 * (TTL < 1), to_begin=0) > 0]
        # something is weird - I would have thought it should be < 0
        # load the corresponding wheel file (ignore imtrk timestamps)
        file_name = os.path.join(data_folder[i], wh_file[i])
        imtrk = pd.read_excel(file_name)
        wh_speed = imtrk.values[:, 1]  # as calculated by imetronic
        # if wheel file is longer than ephy, trim off the end
        wh_speed = wh_speed[0:wh_ts.size]
        # save only the good seconds according to the excel file
        wh_speed = wh_speed[(wh_ts >= good_seconds_start[i]) &
                            (wh_ts < good_seconds_stop[i])]
        wh_ts = wh_ts[(wh_ts >= good_seconds_start[i]) &
                      (wh_ts < good_seconds_stop[i])]
    else:
        file_name = os.path.join(data_folder[i], wh_file[i])
        imtrk = pd.read_excel(file_name)
        wh_ts = imtrk.values[:, 0] / 1000  # in seconds, sampled at 32Hz
        wh_speed = imtrk.values[:, 1]  # as calculated by imetronic
        wh_speed = wh_speed[(wh_ts >= good_seconds_start[i]) &
                            (wh_ts < good_seconds_stop[i])]
        wh_ts = wh_ts[(wh_ts >= good_seconds_start[i]) &
                      (wh_ts < good_seconds_stop[i])]

    # load the extracted pupil diameters, use synchronization timestamps
    if 'IN5' in channel_list:
        ind = channel_list.index('IN5')
        if isinstance(eye_track[i], str):
            TTL = np.zeros(sweep_pts * (sweep_end))
            for j in np.arange(sweep_end):
                start_ind = j * sweep_pts
                a = np.squeeze(bl.segments[j].analogsignals[ind].data)
                TTL[start_ind:start_ind + sweep_pts] = a
            pupil_ts = full_ts[np.ediff1d(1 * (TTL < 1), to_begin=0) > 0]
            file_name = os.path.join(data_folder[i], eye_track[i])
            pupil_excel = pd.read_excel(file_name)
            radii = pupil_excel.iloc[:, 1].values
            radii = radii[0:pupil_ts.size]
            radii = radii[(pupil_ts >= good_seconds_start[i]) &
                          (pupil_ts < good_seconds_stop[i])]
            pupil_ts = pupil_ts[(pupil_ts >= good_seconds_start[i]) &
                                (pupil_ts < good_seconds_stop[i])]
            radii_nozero = np.copy(radii)
            for j in np.arange(radii.size):
                if radii[j] == 0:
                    radii_nozero[j] = radii_nozero[j - 1]
            # low pass filter
            c, d = signal.butter(4, 0.1, "low", analog=False)
            # 4 poles, 0.5 Hz normalized by nyquist of 5 is 0.1
            pupil = signal.filtfilt(c, d, radii_nozero)
        else:
            pupil = np.empty(0)
            pupil_ts = np.empty(0)
    else:
        pupil = np.empty(0)
        pupil_ts = np.empty(0)

    # if the file has 'Chan2Hold', load it, if not, create a nan vector
    ds_factor = 10000
    if 'Chan2Hold' in channel_list:
        ind = channel_list.index('Chan2Hold')
        if np.isnan(sweep_lenght[i]):
            Vm_Ih = np.squeeze(np.copy(bl.segments[0].analogsignals[ind].data))
            Vm_Ih = Vm_Ih[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        else:
            Vm_Ih = np.zeros(sweep_pts * sweep_end)
            for f in np.arange(sweep_end):
                start_ind = f * sweep_pts
                a = np.squeeze(bl.segments[f].analogsignals[ind].data)
                Vm_Ih[start_ind:start_ind + sweep_pts] = a
            # keep only the good seconds
            Vm_Ih = Vm_Ih[(full_ts >= good_seconds_start[i]) &
                          (full_ts < good_seconds_stop[i])]
        # downsample Vm_Ih to 2 Hz (factor of 10000)
        Vm_Ih = np.mean(
            np.resize(Vm_Ih, (int(np.floor(Vm_Ih.size / ds_factor)), ds_factor)), 1)
        Vm_Ih_ts = Vm_ts[np.arange(0, Vm_ts.size, ds_factor)]
        # trim off last time stamp if necessary
        Vm_Ih_ts = Vm_Ih_ts[0:Vm_Ih.size]
    else:
        Vm_Ih = np.empty(int(Vm_ts.size / ds_factor))
        Vm_Ih[:] = np.nan
        Vm_Ih_ts = Vm_ts[np.arange(0, Vm_ts.size, ds_factor)]
        # trim off last time stamp if necessary
        Vm_Ih_ts = Vm_Ih_ts[0:Vm_Ih.size]

    return Vm_ts, Vm, lfp_ts, lfp, wh_ts, wh_speed, pupil_ts, pupil, Vm_Ih_ts, Vm_Ih

def series_res_check(Vm_trail, prot, f, outdir):
    f_str = str(f)
    reader = nio.AxonIO(f_str)
    protocols = reader.read_raw_protocol()
    protocol_unit = clamp_stat = protocols[2][0]
    segments = reader.read_block().segments
    sample_trace = segments[0].analogsignals[0]
    sampling_rate = sample_trace.sampling_rate
    trace_unit = str(sample_trace.units).split()[1]
    fig = plt.figure(figsize=(16, 5))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    iter_num = 0
    mean_R = []
    for v in enumerate(Vm_trail):
        iter_num += 1
        trace = v[1][0]
        time = v[1][1]  # time axis of the trace
        Vb = np.mean(trace[int(sampling_rate * 0.35):int(sampling_rate * 0.38)])
        Vl = np.mean(trace[int(sampling_rate * 0.15):int(sampling_rate * 0.20)])
        input_R = (np.around((Vb - Vl), decimals=2) * 1000) / 50
        mean_R.append(input_R)
        if iter_num == 2:
            ax1.plot(time, trace, label=f'trace no. {iter_num}', alpha=0.7)
            ax1.scatter(time[int(sampling_rate * 0.35)], Vb,
                        color='r', label='baseline')
            ax1.scatter(time[int(sampling_rate * 0.20)], Vl,
                        color='k', label='input_V')
    P_traces = protocols[0]
    for p in P_traces:
        for i in p:
            t_ = len(i) / sampling_rate
            t = np.linspace(0, float(t_), len(i))
            ax2.plot(t, i)
    mean_R = np.mean(mean_R)
    ax1.set_title('Recording')
    ax1.set_ylabel(trace_unit)
    ax1.set_xlabel('time(s)')
    ax1.set_ylim(-90, -40)
    ax1.legend()
    ax2.set_title('Protocol trace')
    ax2.set_ylabel(protocol_unit)
    ax2.set_xlabel('time(s)')
    # ax2.legend()
    plt.figtext(0.10, -0.05,
                f"Input resistance averaged from {iter_num} traces = "
                f"{str(np.around(mean_R, decimals=2))} MOhm",
                fontsize=12, va="top", ha="left")
    plt.suptitle(f'Protocol type: {prot}', fontsize=15)
    plt.figtext(0.10, -0.10, f"sampling rate = {sampling_rate}",
                fontsize=12, va="top", ha="left")
    # plt.figtext(0.10, -0.15, f"total recording time ="
    #             f" {np.around(total_time, decimals=2)} s",
    #             fontsize=12, va="top", ha="left")
    outfile = outdir + "/" + str(f.stem) + " input_R_check_.png"
    plt.savefig(outfile, bbox_inches='tight')
    print("-----> Saved to %s" % outfile)
    fig = plt.close()
    del reader