def get_voltage(self, neuron_config, stim_name):
    ephys_sweeps = self.cfg.ephys_sweeps
    ephys_sweep = next(s for s in ephys_sweeps
                       if s['stimulus_name'] == stim_name)

    ds = NwbDataSet(self.ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']
    stimulus = stimulus[stimulus != 0]
    stimulus = stimulus[:self.cfg.stimulus_allow]

    # initialize the neuron
    neuron = GlifNeuron.from_dict(neuron_config)

    # set dt
    neuron.dt = 1.0 / data['sampling_rate']

    # simulate the neuron
    output = neuron.run(stimulus)

    voltage = output['voltage'] * 1e3
    voltage = voltage[~np.isnan(voltage)]
    voltage = voltage[:self.cfg.signal_allow]

    return output, voltage, neuron, stimulus
def test_spike_times(self):
    expected = [
        2.937305, 3.16453, 3.24271, 4.1622, 4.24182, 10.0898, 10.132545,
        10.176095, 10.2361, 10.660655, 10.762125, 10.863465, 10.93833,
        11.140815, 11.19246, 11.24553, 11.696305, 11.812655, 11.90469,
        12.056755, 12.15794, 12.233905, 12.47577, 12.741295, 12.82861,
        12.923175, 18.05068, 18.139875, 18.17693, 18.221485, 18.24337,
        18.39981, 18.470705, 18.759675, 18.82183, 18.877475, 18.91033,
        18.941195, 19.050515, 19.12557, 19.15963, 19.188655, 19.226205,
        19.29813, 19.420665, 19.47627, 19.763365, 19.824225, 19.897995,
        19.93155, 20.04916, 20.11832, 20.148755, 20.18004, 20.22173,
        20.2433, 20.40018, 20.470915, 20.759715, 20.82156, 20.866465,
        20.90807, 20.939175]

    bp = BiophysicalPerisomaticApi('http://api.brain-map.org')
    bp.cache_stimulus = True  # change to False to not download the large stimulus NWB file
    neuronal_model_id = 472451419  # get this from the web site as above
    bp.cache_data(neuronal_model_id, working_directory='neuronal_model')
    cwd = os.path.realpath(os.curdir)
    print(cwd)
    os.chdir(os.path.join(cwd, 'neuronal_model'))

    manifest = ju.read('manifest.json')
    manifest['biophys'][0]['model_file'][0] = 'manifest_51.json'
    manifest['runs'][0]['sweeps'] = [51]
    ju.write('manifest_51.json', manifest)

    subprocess.call(['nrnivmodl', './modfiles'])
    run(Config().load('manifest_51.json'))
    #os.chdir(cwd)

    nwb_out = NwbDataSet('work/386049444.nwb')
    spikes = nwb_out.get_spike_times(51)

    numpy.testing.assert_array_almost_equal(spikes, expected)
def calc_ev(ew, folder, s, sweeps, stim_len, data_spike_times, dt):
    '''Compute explained variance between data and model spike trains.'''
    print(ew, folder)

    # convert data spike times to indices
    data_spike_ind = []
    for d in data_spike_times:
        data_spike_ind.append((d / dt).astype(int))

    # get model data
    path = get_model_nwb_path_from_folder(ew, folder, s)  # get nwb file path
    if isinstance(path, str):
        model = NwbDataSet(path)
        model_spike_ind = []
        for sw in sweeps:
            spikes = (model.get_spike_times(sw) / dt).astype(int)
            model_spike_ind.append(spikes)

        # check to make sure all spike time arrays are the same for the model
        for ii in range(1, len(model_spike_ind)):
            if not np.array_equal(model_spike_ind[ii], model_spike_ind[ii - 1]):
                print('MODEL SPIKE TIMES SHOULD BE THE SAME AND THEY ARE NOT!',
                      os.path.basename(folder)[:9])
                print(len(model_spike_ind), model_spike_ind)
                # raise Exception('model spike times should be the same and they are not')

        # sigma is assumed to be defined at module level
        return exVar(data_spike_ind, [model_spike_ind[0]], sigma, dt, stim_len)
    else:
        return np.nan
def set_up_objective(self, measure='spike frequency'):
    '''Prepares the model for parameter optimization by assigning the
    output measure to be used in the cost function.

    Parameters
    ----------
    measure : string
        Name of the output measure to be used in optimization.
        Currently only 'spike frequency' is implemented.
    '''
    if measure == 'spike frequency':
        # get the experimental data from the NWB file
        data_set = NwbDataSet(
            os.path.join(
                self.model_dir,
                self.description.manifest.get_path('stimulus_path')))
        spike_times = data_set.get_spike_times(self.reference_sweep)

        # calculate firing frequency for the NWB data: number of spikes
        # divided by the summed inter-spike interval time
        sum_intervals = 0.0
        for i in range(len(spike_times) - 1):
            sum_intervals += (spike_times[i + 1] - spike_times[i])
        self.reference_output = len(spike_times) / sum_intervals
    else:
        print("Model fitting using the output measure", measure,
              "has not been implemented yet.")
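# Sanity check of the spike-frequency measure above (hypothetical numbers):
# three spikes at 0.1, 0.2 and 0.4 s give sum_intervals = 0.1 + 0.2 = 0.3 s,
# so reference_output = 3 / 0.3 = 10.0 (note the numerator counts all spikes,
# not the number of inter-spike intervals).
spike_times = [0.1, 0.2, 0.4]
sum_intervals = sum(b - a for a, b in zip(spike_times, spike_times[1:]))
assert abs(len(spike_times) / sum_intervals - 10.0) < 1e-9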
def read_stimulus(self, stimulus_path, sweep=0):
    '''Load current values for a specific experiment sweep and set up the
    simulation and stimulus sampling rates.

    NOTE: NEURON only allows simulation sampling rates that are multiples
    of 40 kHz. To avoid aliasing, the simulation sampling rate is set to
    the least common multiple of the stimulus sampling rate and 40 kHz.

    Parameters
    ----------
    stimulus_path : string
        NWB file name
    sweep : integer, optional
        sweep index
    '''
    Utils._log.info("reading stimulus path: %s, sweep %s",
                    stimulus_path, sweep)
    stimulus_data = NwbDataSet(stimulus_path)
    sweep_data = stimulus_data.get_sweep(sweep)

    # convert to nA for NEURON
    self.stim_curr = sweep_data['stimulus'] * 1.0e9

    # convert from Hz
    hz = int(sweep_data['sampling_rate'])
    neuron_hz = Utils.nearest_neuron_sampling_rate(hz)

    self.simulation_sampling_rate = neuron_hz
    self.stimulus_sampling_rate = hz

    if hz != neuron_hz:
        Utils._log.debug(
            "changing sampling rate from %d to %d to avoid NEURON aliasing",
            hz, neuron_hz)
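# The docstring above describes the resampling rule but not its implementation.
# A minimal sketch of what a helper like Utils.nearest_neuron_sampling_rate
# might compute, assuming the least-common-multiple behavior described there
# (hypothetical code, not the actual implementation):
import math

def nearest_neuron_sampling_rate(hz, base_hz=40000):
    # least common multiple of the stimulus rate and NEURON's 40 kHz base rate
    return hz * base_hz // math.gcd(hz, base_hz)

# e.g. nearest_neuron_sampling_rate(50000) -> 200000,
# while a 40 kHz or 200 kHz stimulus is left unchanged.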
def save_nwb(output_path, v, sweep, sweeps_by_type):
    '''Save a single voltage output result into an existing sweep in a NWB file.
    This is intended to overwrite a recorded trace with a simulated voltage.

    Parameters
    ----------
    output_path : string
        file name of a pre-existing NWB file.
    v : numpy array
        voltage
    sweep : integer
        which entry to overwrite in the file.
    sweeps_by_type : dict
        maps stimulus type names to lists of sweep numbers.
    '''
    output = NwbDataSet(output_path)
    output.set_sweep(sweep, None, v)

    sweep_by_type = {t: [sweep]
                     for t, ss in sweeps_by_type.items() if sweep in ss}
    sweep_features = extract_cell_features.extract_sweep_features(output,
                                                                  sweep_by_type)
    try:
        spikes = sweep_features[sweep]['spikes']
        spike_times = [s['threshold_t'] for s in spikes]
        output.set_spike_times(sweep, spike_times)
    except Exception as e:
        logging.info("sweep %d has no sweep features. %s" % (sweep, e))
def ve_tau(specimen_id, ve_path):
    #print(chr(27) + "[2J")  # to clear terminal screen
    print("START VE_TAU " + str(specimen_id) + " " + str(ve_path))
    expt_taus = []
    data_set = NwbDataSet(ve_path)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    print("ve specimen id= " + str(specimen_id))
    for sweep in long_square_sweeps:
        metadata = data_set.get_sweep_metadata(sweep)
        if "aibs_stimulus_amplitude_pa" not in metadata:
            continue
        if metadata["aibs_stimulus_amplitude_pa"] < 0:
            v, i, t = lims_utils.get_sweep_v_i_t_from_set(data_set, sweep)
            # get time and voltage of each hyperpolarizing sweep
            sweep_feat = EphysSweepFeatureExtractor(t, v)
            tau = sweep_feat.estimate_time_constant()
            if not np.isnan(tau):
                # append the time constant of each sweep to the list
                expt_taus.append(tau)
    mean_expt_tau = np.nanmean(expt_taus)  # mean time constant for this cell
    print("mean_ve_tau= " + str(mean_expt_tau))
    return mean_expt_tau
def ve_ramp_latency(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    ramp_sweeps = lims_utils.get_sweeps_of_type("C1RP25PR1S",
                                                specimen_id,
                                                passed_only=True)
    if len(ramp_sweeps) == 0:
        return np.nan

    spike_times = data_set.get_spike_times(ramp_sweeps[0])
    if len(spike_times) > 0:
        return spike_times[0]
    else:
        return np.nan
class Nwb1Appender(NwbAppender):

    def __init__(self, nwb_file_name):
        NwbAppender.__init__(self, nwb_file_name)
        self.nwbfile = NwbDataSet(self.nwb_file_name)

    def add_spike_times(self, sweep_spike_times):
        for sweep_num, spike_times in sweep_spike_times.items():
            self.nwbfile.set_spike_times(sweep_num, spike_times)
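# A hedged usage sketch for Nwb1Appender above, assuming an existing
# output.nwb file; the file name and spike times are placeholders:
appender = Nwb1Appender('output.nwb')
appender.add_spike_times({35: [1.0215, 1.0873],  # sweep number -> spike times (s)
                          36: []})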
def load_sweep(file_name, sweep_number, desired_dt=None, cut=0, bessel=False):
    '''Load a data sweep and do specified data processing.

    Inputs:
        file_name: string
            name of .nwb data file
        sweep_number:
            number specifying the sweep to be loaded
        desired_dt:
            the size of the time step the data should be subsampled to
        cut:
            index at which to start reporting data (i.e. cut off data before this index)
        bessel: dictionary
            contains parameters 'N' and 'freq' to implement standard python
            bessel filtering

    Returns:
        dictionary containing
            voltage: array
            current: array
            dt: time step of the returned data
            start_idx: the index at which the first stimulus starts (excluding the test pulse)
    '''
    ds = NwbDataSet(file_name)
    data = ds.get_sweep(sweep_number)

    data["dt"] = 1.0 / data["sampling_rate"]
    if cut > 0:
        data["response"] = data["response"][cut:]
        data["stimulus"] = data["stimulus"][cut:]

    if bessel:
        sample_freq = 1. / data["dt"]
        filt_coeff = (bessel["freq"]) / (sample_freq / 2.)  # filter fraction of Nyquist frequency
        b, a = signal.bessel(bessel["N"], filt_coeff, "low")
        data['response'] = signal.filtfilt(b, a, data['response'], axis=0)

    if desired_dt is not None:
        if data["dt"] != desired_dt:
            data["response"] = subsample_data(data["response"], "mean",
                                              data["dt"], desired_dt)
            data["stimulus"] = subsample_data(data["stimulus"], "mean",
                                              data["dt"], desired_dt)
            data["start_idx"] = int(data["index_range"][0] /
                                    (desired_dt / data["dt"]))
            data["dt"] = desired_dt

    if "start_idx" not in data:
        data["start_idx"] = data["index_range"][0]

    return {
        "voltage": data["response"],
        "current": data["stimulus"],
        "dt": data["dt"],
        "start_idx": data["start_idx"]
    }
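# A hedged usage sketch for load_sweep above; the file name, sweep number,
# and filter settings are hypothetical placeholders:
sweep = load_sweep('ephys.nwb', 35,
                   desired_dt=5e-5,                 # subsample to 20 kHz
                   cut=0,
                   bessel={'N': 4, 'freq': 10000})  # 4th-order low-pass at 10 kHz
voltage, current = sweep['voltage'], sweep['current']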
def stimulus(neuron_config_file, ephys_sweeps_file):
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']
    return stimulus
def load_experiment(file_name, sweep_number):
    ds = NwbDataSet(file_name)
    sweep = ds.get_sweep(sweep_number)

    r = sweep['index_range']
    v = sweep['response'] * 1e3   # convert to mV
    i = sweep['stimulus'] * 1e12  # convert to pA
    dt = 1.0 / sweep['sampling_rate']
    t = np.arange(0, len(v)) * dt

    return (v, i, t, r, dt)
def write_sweep_response(file_name, sweep_number, response, spike_times):
    '''Overwrite the response in a file.'''
    logging.debug("writing sweep")

    write_start_time = time.time()
    ephds = NwbDataSet(file_name)
    ephds.set_sweep(sweep_number, stimulus=None, response=response)
    ephds.set_spike_times(sweep_number, spike_times)
    logging.debug("write time %f" % (time.time() - write_start_time))
def stimulus(neuron_config_file, ephys_sweeps_file):
    neuron_config = json_utilities.read(neuron_config_file)
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']
    return stimulus
def from_electrophysiology(
        cell_id: int,
        ephys: NwbDataSet,
        duration=2.0) -> 'ProcessedAllenNeuronElectrophysiology':
    current_list = []
    voltage_list = []
    time_list = []
    stim_amp_list = []
    n_spikes_list = []
    spike_features_list = []

    for sweep_number in ephys.get_sweep_numbers():
        sweep_metadata = ephys.get_sweep_metadata(sweep_number)
        if sweep_metadata['aibs_stimulus_name'] == 'Long Square':
            sweep_data = ephys.get_sweep(sweep_number)
            amp = sweep_metadata['aibs_stimulus_amplitude_pa']
            index_range = sweep_data["index_range"]
            sampling_rate = sweep_data["sampling_rate"]
            current = sweep_data["stimulus"][index_range[0]:index_range[1] + 1]
            voltage = sweep_data["response"][index_range[0]:index_range[1] + 1]

            # truncate
            max_frames = int(duration * sampling_rate)
            assert max_frames < len(voltage)
            current = current[:max_frames] * 1e12  # in pA
            voltage = voltage[:max_frames] * 1e3   # in mV

            # extract features
            time = np.arange(0, max_frames, dtype=float) / sampling_rate  # in seconds
            ext = EphysSweepFeatureExtractor(t=time, v=voltage, i=current)
            ext.process_spikes()
            spike_features = ext.spikes()
            n_spikes = len(spike_features)

            current_list.append(current)
            voltage_list.append(voltage)
            time_list.append(time)
            stim_amp_list.append(amp)
            n_spikes_list.append(n_spikes)
            spike_features_list.append(spike_features)

    return ProcessedAllenNeuronElectrophysiology(
        cell_id=cell_id,
        current_list=current_list,
        voltage_list=voltage_list,
        time_list=time_list,
        stim_amp_list=stim_amp_list,
        n_spikes_list=n_spikes_list,
        spike_features_list=spike_features_list)
def get_sweep_from_nwb(nwb_file, sweep_num):
    '''Read a sweep from an NWB file and convert Volts -> mV and Amps -> pA.'''
    ds = NwbDataSet(nwb_file)
    data = ds.get_sweep(sweep_num)

    v = data['response'] * 1e3   # convert to mV
    i = data['stimulus'] * 1e12  # convert to pA
    dt = 1.0 / data['sampling_rate']
    t = np.arange(0, len(v)) * dt

    return (v, i, t)
def read_stimulus(self, stimulus_path, sweep=0):
    """Load current values for a specific experiment sweep.

    Parameters
    ----------
    stimulus_path : string
        NWB file name
    sweep : integer, optional
        sweep index
    """
    Utils._log.info("reading stimulus path: %s, sweep %s" %
                    (stimulus_path, sweep))
    stimulus_data = NwbDataSet(stimulus_path)
    sweep_data = stimulus_data.get_sweep(sweep)
    self.stim_curr = sweep_data["stimulus"] * 1.0e9  # convert to nA for NEURON
    self.sampling_rate = 1.0e3 / sweep_data["sampling_rate"]  # convert Hz to a time step in ms
def save_nwb(output_path, v, sweep):
    """Save a single voltage output result into an existing sweep in a NWB file.
    This is intended to overwrite a recorded trace with a simulated voltage.

    Parameters
    ----------
    output_path : string
        file name of a pre-existing NWB file.
    v : numpy array
        voltage
    sweep : integer
        which entry to overwrite in the file.
    """
    output = NwbDataSet(output_path)
    output.set_sweep(sweep, None, v)
def get_ephys_data(self, specimen_id, file_name=None):
    """Download electrophysiology traces for a single cell in the database.

    Parameters
    ----------
    specimen_id : int
        The ID of a cell specimen to download.
    file_name : string
        File name to save/read the ephys data as NWB.
        If file_name is None, the file_name will be pulled out of the
        manifest. If caching is disabled, no file will be saved.
        Default is None.

    Returns
    -------
    NwbDataSet
        A class instance with helper methods for retrieving stimulus
        and response traces out of an NWB file.
    """
    file_name = self.get_cache_path(file_name, self.EPHYS_DATA_KEY,
                                    specimen_id)

    if not os.path.exists(file_name):
        self.api.save_ephys_data(specimen_id, file_name)

    return NwbDataSet(file_name)
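# A hedged usage sketch for get_ephys_data above, assuming it is called via
# the AllenSDK CellTypesCache; the specimen ID is a placeholder:
from allensdk.core.cell_types_cache import CellTypesCache

ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
data_set = ctc.get_ephys_data(485909730)  # downloads the NWB file on first call
print(data_set.get_sweep_numbers()[:5])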
def get_model_spike_times_from_nwb(ends_with, specimen_id_directory,
                                   model_string, sweeps, where_running):
    '''Gets the spike times from the model nwb file.

    inputs
        ends_with: string
            end of file searched for: options "_GLIF1_neuron_config.json",
            "_GLIF2_neuron_config.json", etc.
        specimen_id_directory: string
            path to structured data directory containing neuron_config,
            preprocessor, etc., files.
        model_string: string
            string searched for in the model name: options '(LIF)', '(LIF-R)',
            '(LIF-ASC)', '(LIF-R_ASC)', '(LIF-R_ASC_A)'
        sweeps: list of integers
            integers refer to the sweep number in the electrophysiology .nwb data file
        where_running: string
            options are
            'internal': the code is being run within the Institute and can
                therefore access the internal file system
            'external': the code is being run outside the Institute and requires
                the use of the api to download the model nwb files
        Note that although ends_with and model_string should be appropriately
        paired, there is no check within this module to make sure that they are.

    outputs
        returns either a nan, if there is no model in the structured data
        directory corresponding to the requested ends_with variable, or
        model_spike_times: list of numpy arrays
            each array contains the times of the spikes in each sweep
    '''
    if where_running == 'internal':
        path = get_model_nwb_path_from_folder(
            ends_with, specimen_id_directory, model_string)  # get nwb file path
    elif where_running == 'external':
        path = download_model_nwb_if_model_exists_in_SDD(
            ends_with, specimen_id_directory, model_string)  # get nwb file path
    else:
        raise Exception(
            'specify whether the code is being run internally or externally')

    if isinstance(path, str):
        model = NwbDataSet(path)
        model_spike_times = []
        if sweeps == []:
            raise Exception('There are no sweeps to look at')
        for sw in sweeps:
            model_spike_times.append(model.get_spike_times(sw))
        return model_spike_times
    else:
        return np.nan
def load_sweep(file_name, sweep_number):
    '''Load the stimulus for a sweep from file.'''
    logging.debug("loading sweep %d" % sweep_number)

    load_start_time = time.time()
    data = NwbDataSet(file_name).get_sweep(sweep_number)
    logging.debug("load time %f" % (time.time() - load_start_time))

    return data
def get_sweep_data(nwb_file, sweep_number, time_scale=1e3, voltage_scale=1e3,
                   stim_scale=1e12):
    """Extract data and stim characteristics for a specific DC sweep from an
    nwb file.

    Parameters
    ----------
    nwb_file : string
        File name of a pre-existing NWB file.
    sweep_number : integer
    time_scale : float
        Convert to ms scale
    voltage_scale : float
        Convert to mV scale
    stim_scale : float
        Convert to pA scale

    Returns
    -------
    t : numpy array
        Sampled time points in ms
    v : numpy array
        Recorded voltage at the sampled time points in mV
    stim_start_time : float
        Stimulus start time in ms
    stim_end_time : float
        Stimulus end time in ms
    """
    nwb = NwbDataSet(nwb_file)
    sweep = nwb.get_sweep(sweep_number)

    stim = sweep['stimulus'] * stim_scale  # in pA
    stim_diff = np.diff(stim)
    # the last two nonzero steps of the stimulus derivative mark the
    # start and end of the DC step
    stim_start = np.where(stim_diff != 0)[0][-2]
    stim_end = np.where(stim_diff != 0)[0][-1]

    # read v and t as numpy arrays
    v = sweep['response'] * voltage_scale  # in mV
    dt = time_scale / sweep['sampling_rate']  # in ms
    num_samples = len(v)
    t = np.arange(num_samples) * dt
    stim_start_time = t[stim_start]
    stim_end_time = t[stim_end]

    return t, v, stim_start_time, stim_end_time
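# A hedged usage sketch for get_sweep_data above; the file name and sweep
# number are placeholders:
t, v, stim_start, stim_end = get_sweep_data('ephys.nwb', 35)
print("DC step from %.1f ms to %.1f ms" % (stim_start, stim_end))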
def expt_data_set(specimen_id):
    sql = """
        select wkf.storage_directory || wkf.filename
        from well_known_files wkf
        join specimens sp on sp.ephys_roi_result_id = wkf.attachable_id
        where sp.id = %s
        and wkf.well_known_file_type_id = %s
        """
    results = lims_utils.query(sql, (specimen_id, NWB_DOWNLOAD_TYPE_ID))
    nwb_path = results[0][0]
    return NwbDataSet(nwb_path)
def ve_fi_curve(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    expt_set = expt_data_set(specimen_id)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    fi_curve_data = dict([amp_and_spike_count(data_set, sweep, expt_set)
                          for sweep in long_square_sweeps])
    return fi_curve_stats(fi_curve_data)
def extract_single_sweep_features(features, nwb_file, sweep_number):
    '''Run feature extraction on a single sweep.

    Parameters
    ----------
    features : EphysFeatureExtractor instance
    nwb_file : string
        File name of an NWB file
    sweep_number : int
        Sweep number in the NWB file
    '''
    nwb = NwbDataSet(nwb_file)
    data = nwb.get_sweep(sweep_number)

    v = data['response']
    curr = data['stimulus']

    idx0 = data['index_range'][0]
    idx1 = data['index_range'][1]
    if idx0 >= idx1:
        logging.warning("Sweep %s stop index precedes start index, "
                        "skipping spike identification" % sweep_number)
        return

    hz = data['sampling_rate']
    dt = 1.0 / hz
    t = np.arange(0, len(v)) * dt

    features.process_instance(sweep_number, v * 1e3, curr * 1e12, t,
                              dt * idx0, dt * (idx1 - idx0 - 2), None)

    results = {}
    results["mean"] = features.feature_list[-1].mean
    results["stdev"] = features.feature_list[-1].stdev
    return results
def output():
    neuron_config = json_utilities.read('neuron_config.json')
    ephys_sweeps = json_utilities.read('ephys_sweeps.json')
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    # initialize the neuron
    # important! update the neuron's dt for your stimulus
    neuron = GlifNeuron.from_dict(neuron_config)
    neuron.dt = 1.0 / data['sampling_rate']

    # simulate the neuron
    truncate = 56041
    output = neuron.run(stimulus[0:truncate])
    return output
def save_nwb(output_path, v, sweep):
    '''Save a single voltage output result into an existing sweep in a NWB file.
    This is intended to overwrite a recorded trace with a simulated voltage.

    Parameters
    ----------
    output_path : string
        file name of a pre-existing NWB file.
    v : numpy array
        voltage
    sweep : integer
        which entry to overwrite in the file.
    '''
    output = NwbDataSet(output_path)
    output.set_sweep(sweep, None, v)

    sweep_features = extract_cell_features.extract_sweep_features(output_path,
                                                                  [sweep])
    spikes = sweep_features[sweep]['mean']['spikes']
    spike_times = [s['t'] for s in spikes]
    output.set_spike_times(sweep, spike_times)
def output(neuron_config_file, ephys_sweeps_file):
    neuron_config = json_utilities.read(neuron_config_file)
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    # initialize the neuron
    # important! update the neuron's dt for your stimulus
    neuron = GlifNeuron.from_dict(neuron_config)
    neuron.dt = 1.0 / data['sampling_rate']

    # simulate the neuron
    truncate = 56041
    output = neuron.run(stimulus[0:truncate])
    return output
def get_sweep_data(sweep_name):
    """Input: sweep name (string)

    Output: sweep data dict returned by NwbDataSet.get_sweep()
    """
    global nwb_file_name
    try:
        num = int(sweep_name.split('_')[-1])
    except:
        print("Unable to parse sweep number from '%s'" % str(sweep_name))
        raise
    return NwbDataSet(nwb_file_name).get_sweep(num)
def read_stimulus(self, stimulus_path, sweep=0):
    '''Load current values for a specific experiment sweep.

    Parameters
    ----------
    stimulus_path : string
        NWB file name
    sweep : integer, optional
        sweep index
    '''
    Utils._log.info("reading stimulus path: %s, sweep %s",
                    stimulus_path, sweep)
    stimulus_data = NwbDataSet(stimulus_path)
    sweep_data = stimulus_data.get_sweep(sweep)

    # convert to nA for NEURON
    self.stim_curr = sweep_data['stimulus'] * 1.0e9

    # convert sampling rate in Hz to a time step in ms
    self.sampling_rate = 1.0e3 / sweep_data['sampling_rate']
def save_nwb(output_path, v, sweep, sweeps_by_type=None):
    '''Save a single voltage output result into an existing sweep in a NWB file.
    This is intended to overwrite a recorded trace with a simulated voltage.

    Parameters
    ----------
    output_path : string
        file name of a pre-existing NWB file.
    v : numpy array
        voltage
    sweep : integer
        which entry to overwrite in the file.
    sweeps_by_type : dict, optional
        maps stimulus type names to lists of sweep numbers.
    '''
    output = NwbDataSet(output_path)
    output.set_sweep(sweep, None, v)

    if sweeps_by_type is not None:
        sweep_by_type = {t: [sweep]
                         for t, ss in sweeps_by_type.items() if sweep in ss}
        sweep_features = extract_cell_features.extract_sweep_features(
            output, sweep_by_type)
        try:
            spikes = sweep_features[sweep]['spikes']
            spike_times = [s['threshold_t'] for s in spikes]
            output.set_spike_times(sweep, spike_times)
        except Exception as e:
            logging.info("sweep %d has no sweep features. %s" % (sweep, e.args))
def get_data_sets_from_remote(upper_bound=2, lower_bound=None):
    try:
        with open('all_allen_cells.p', 'rb') as f:
            cells = pickle.load(f)
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
    except:
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
        cells = ctc.get_cells()
        with open('all_allen_cells.p', 'wb') as f:
            pickle.dump(cells, f)

    data = []
    data_sets = []
    path_name = 'data_nwbs'
    try:
        os.mkdir(path_name)
    except:
        print('directory already made.')

    ids = [c['id'] for c in cells]
    if upper_bound is None and lower_bound is None:
        limited_range = ids[0:-1]
    elif upper_bound is not None and lower_bound is not None:
        limited_range = ids[lower_bound:upper_bound]

    # count how many specimens are already cached locally
    cnt = 0
    for specimen_id in limited_range:
        temp_path = str(path_name) + str('/') + str(specimen_id) + '.p'
        if os.path.exists(temp_path):
            cnt += 1

    for specimen_id in limited_range:
        temp_path = str(path_name) + str('/') + str(specimen_id) + '.p'
        if os.path.exists(temp_path):
            with open(temp_path, 'rb') as f:
                (data_set_nwb, sweeps, specimen_id) = pickle.load(f)
            data_sets.append((data_set_nwb, sweeps, specimen_id))
        else:
            data_set = ctc.get_ephys_data(specimen_id)
            sweeps = ctc.get_ephys_sweeps(specimen_id)
            file_name = 'cell_types/specimen_' + str(specimen_id) + '/ephys.nwb'
            data_set_nwb = NwbDataSet(file_name)
            data_sets.append((data_set_nwb, sweeps, specimen_id))
            with open(temp_path, 'wb') as f:
                pickle.dump((data_set_nwb, sweeps, specimen_id), f)

    return data_sets
def embed_spike_times(input_nwb_file, output_nwb_file, sweep_features):
    # embed spike times in NWB file
    tmp_nwb_file = output_nwb_file + ".tmp"
    shutil.copy(input_nwb_file, tmp_nwb_file)

    for sweep_num in sweep_features:
        spikes = sweep_features[sweep_num]['spikes']
        spike_times = [s['threshold_t'] for s in spikes]
        NwbDataSet(tmp_nwb_file).set_spike_times(sweep_num, spike_times)

    try:
        shutil.move(tmp_nwb_file, output_nwb_file)
    except OSError as e:
        logging.error("Problem renaming file: %s -> %s" %
                      (tmp_nwb_file, output_nwb_file))
        raise e

    logging.debug("Embedded spike times into output.nwb file")
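# A hedged usage sketch for embed_spike_times above; the feature dict layout
# (sweep number -> {'spikes': [{'threshold_t': ...}, ...]}) is inferred from
# the function body, and the file names and times are placeholders:
sweep_features = {
    35: {'spikes': [{'threshold_t': 1.0215}, {'threshold_t': 1.0873}]},
}
embed_spike_times('input.nwb', 'output.nwb', sweep_features)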
def load_experiment(specimen):
    path, bath, breakin, end, giga, last_bias, mid_bias, SS_amp, spec_name = \
        Find_Critical_Sweeps(specimen)
    ds = NwbDataSet(path)

    if bath == 'error':
        bath_peak = 9999999
        bath_ss = 9999999
    else:
        bath_sweep = ds.get_sweep(bath)
        bath_i = bath_sweep['response'] * 1e12
        bath_peak, bath_ss = get_Reses(70, bath_i)

    if breakin == 'error':
        breakin_peak = 9999999
        breakin_ss = 9999999
    else:
        breakin_sweep = ds.get_sweep(breakin)
        breakin_i = breakin_sweep['response'] * 1e12
        breakin_peak, breakin_ss = get_Reses(70, breakin_i)

    if end == 'error':
        end_peak = 9999999
        end_ss = 9999999
        end_leak = 9999999
    else:
        end_sweep = ds.get_sweep(end)
        end_i = end_sweep['response'] * 1e12
        end_peak, end_ss = get_Reses(70, end_i)
        end_leak = np.mean(end_i[0:100])

    if giga == 'error':
        giga_peak = 9999999
        giga_ss = 9999999
    else:
        giga_sweep = ds.get_sweep(giga)
        giga_i = giga_sweep['response'] * 1e12
        giga_peak, giga_ss = get_Reses(70, giga_i)

    features = [spec_name, bath_ss, breakin_peak, breakin_ss, end_peak,
                end_ss, end_leak, giga_ss, last_bias, mid_bias, SS_amp]
    return features
def main():
    """Main sequence of pre-processing and passive fitting"""
    # This argschema package reads arguments from a JSON file
    module = ags.ArgSchemaParser(schema_type=PreprocessorParameters,
                                 logger_name=None)

    nwb_path = module.args["paths"]["nwb"]  # nwb - Neurodata Without Borders (ephys data)
    swc_path = module.args["paths"]["swc"]  # swc - morphology data
    storage_directory = module.args["paths"]["storage_directory"]

    try:
        paths, results, passive_info, s1_tasks, s2_tasks = \
            preprocess(data_set=NwbDataSet(nwb_path),
                       swc_data=pd.read_table(swc_path, sep=r'\s+',
                                              comment='#', header=None),
                       dendrite_type_tag=module.args["dendrite_type_tag"],
                       sweeps=module.args["sweeps"],
                       bridge_avg=module.args["bridge_avg"],
                       storage_directory=storage_directory)
    except NoUsableSweepsException as e:
        ju.write(module.args["output_json"], {'error': str(e)})
        return

    preprocess_results_path = os.path.join(storage_directory,
                                           "preprocess_results.json")
    ju.write(preprocess_results_path, results)

    passive_info_path = os.path.join(storage_directory, "passive_info.json")
    ju.write(passive_info_path, passive_info)

    paths.update({
        "swc": swc_path,
        "nwb": nwb_path,
        "storage_directory": storage_directory,
        "preprocess_results": preprocess_results_path,
        "passive_info": passive_info_path,
    })

    output = {
        "paths": paths,
        "stage_1_task_list": s1_tasks,
        "stage_2_task_list": s2_tasks,
    }

    ju.write(module.args["output_json"], output)
def prepare_nwb_output(nwb_stimulus_path, nwb_result_path):
    """Copy the stimulus file, zero out the recorded voltages and spike times.

    Parameters
    ----------
    nwb_stimulus_path : string
        NWB file name
    nwb_result_path : string
        NWB file name
    """
    copy(nwb_stimulus_path, nwb_result_path)
    data_set = NwbDataSet(nwb_result_path)
    data_set.fill_sweep_responses(0.0)
    for sweep in data_set.get_sweep_numbers():
        data_set.set_spike_times(sweep, [])
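# A hedged usage sketch tying prepare_nwb_output to the save_nwb variants
# above; the file names, sweep number, and simulated trace are placeholders:
import numpy as np

prepare_nwb_output('stimulus.nwb', 'simulated.nwb')  # blanked copy of the recording
v_sim = np.zeros(200000)                              # simulated voltage in volts
save_nwb('simulated.nwb', v_sim, 35)                  # overwrite sweep 35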
def ve_ap_dim(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    expt_set = expt_data_set(specimen_id)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    fi_curve_data = dict([amp_and_spike_count(data_set, sweep, expt_set)
                          for sweep in long_square_sweeps])
    sweeps_by_amp = {amp_and_spike_count(data_set, sweep, expt_set)[0]: sweep
                     for sweep in long_square_sweeps}
    fi_arr = np.array([(amp, fi_curve_data[amp])
                       for amp in sorted(fi_curve_data.keys())])
    spiking_sweeps = np.flatnonzero(fi_arr[:, 1])
    if len(spiking_sweeps) == 0:
        return np.nan, np.nan

    rheo_sweep = sweeps_by_amp[fi_arr[spiking_sweeps[0], 0]]
    # print(specimen_id, rheo_sweep)
    v, i, t = lims_utils.get_sweep_v_i_t_from_set(data_set, rheo_sweep)
    swp_ext = EphysSweepFeatureExtractor(t, v, start=1.02, end=2.02,
                                         filter=None)
    swp_ext.process_spikes()
    if len(swp_ext.spike_feature("width")) == 0:
        print("NO SPIKES FOR {:d} ON SWEEP {:d}".format(
            specimen_id, sweeps_by_amp[fi_arr[spiking_sweeps[0], 0]]))
        print(fi_arr)
        print(sweeps_by_amp)
        return np.nan, np.nan

    return_vals = (swp_ext.spike_feature("width")[0] * 1e3,
                   swp_ext.spike_feature("peak_v")[0] -
                   swp_ext.spike_feature("trough_v")[0])
    return return_vals
def prepare_nwb_output(nwb_stimulus_path, nwb_result_path):
    '''Copy the stimulus file, zero out the recorded voltages and spike times.

    Parameters
    ----------
    nwb_stimulus_path : string
        NWB file name
    nwb_result_path : string
        NWB file name
    '''
    output_dir = os.path.dirname(nwb_result_path)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    copy(nwb_stimulus_path, nwb_result_path)
    data_set = NwbDataSet(nwb_result_path)
    data_set.fill_sweep_responses(0.0, extend_experiment=True)
    for sweep in data_set.get_sweep_numbers():
        data_set.set_spike_times(sweep, [])
def prepare_nwb_output(nwb_stimulus_path, nwb_result_path):
    '''Copy the stimulus file, zero out the recorded voltages and spike times.

    Parameters
    ----------
    nwb_stimulus_path : string
        NWB file name
    nwb_result_path : string
        NWB file name
    '''
    output_dir = os.path.dirname(nwb_result_path)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    copy(nwb_stimulus_path, nwb_result_path)
    data_set = NwbDataSet(nwb_result_path)
    data_set.fill_sweep_responses(0.0)
    for sweep in data_set.get_sweep_numbers():
        data_set.set_spike_times(sweep, [])
soma = morphology.soma

# all compartments are dictionaries of compartment properties
# compartments also keep track of the ids of their children
for child in morphology.children_of(soma):
    print(child['x'], child['y'], child['z'], child['radius'])

#===============================================================================
# example 4
#===============================================================================

from allensdk.core.nwb_data_set import NwbDataSet

# if you ran the examples above, you will have a NWB file here
file_name = 'cell_types/specimen_485909730/ephys.nwb'
data_set = NwbDataSet(file_name)

sweep_numbers = data_set.get_sweep_numbers()
sweep_number = sweep_numbers[0]
sweep_data = data_set.get_sweep(sweep_number)

# spike times are in seconds relative to the start of the sweep
spike_times = data_set.get_spike_times(sweep_number)

# stimulus is a numpy array in amps
stimulus = sweep_data['stimulus']

# response is a numpy array in volts
response = sweep_data['response']

# sampling rate is in Hz
sampling_rate = sweep_data['sampling_rate']
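# A short, hedged continuation of example 4: plotting the sweep with
# matplotlib (assumed available; not part of the original example).
import numpy as np
import matplotlib.pyplot as plt

t = np.arange(len(response)) / sampling_rate
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(t, response * 1e3)   # mV
axes[0].set_ylabel('voltage (mV)')
axes[1].plot(t, stimulus * 1e12)  # pA
axes[1].set_ylabel('stimulus (pA)')
axes[1].set_xlabel('time (s)')
plt.show()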
def prepare_stage_1(description, passive_fit_data):
    output_directory = description.manifest.get_path('WORKDIR')
    neuronal_model_data = ju.read(
        description.manifest.get_path('neuronal_model_data'))
    specimen_data = neuronal_model_data['specimen']
    specimen_id = neuronal_model_data['specimen_id']
    is_spiny = not any(t['name'] == u'dendrite type - aspiny'
                       for t in specimen_data['specimen_tags'])
    all_sweeps = specimen_data['ephys_sweeps']
    data_set = NwbDataSet(description.manifest.get_path('stimulus_path'))
    swc_path = description.manifest.get_path('MORPHOLOGY')

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    ra = passive_fit_data['ra']
    cm1 = passive_fit_data['cm1']
    cm2 = passive_fit_data['cm2']

    # Check for fi curve shift to decide to use core1 or core2
    fi_shift, n_core2 = check_fi_shift.estimate_fi_shift(data_set, all_sweeps)
    fi_shift_threshold = 30.0
    sweeps_to_fit = []
    if abs(fi_shift) > fi_shift_threshold:
        _fit_stage_1_log.info("FI curve shifted; using Core 1")
        sweeps_to_fit = find_core1_trace(data_set, all_sweeps)
    else:
        sweeps_to_fit = find_core2_trace(data_set, all_sweeps)
        if sweeps_to_fit == []:
            _fit_stage_1_log.info("Not enough good Core 2 traces; using Core 1")
            sweeps_to_fit = find_core1_trace(data_set, all_sweeps)

    _fit_stage_1_log.debug("will use sweeps: " + str(sweeps_to_fit))

    jxn = -14.0

    t_set = []
    v_set = []
    i_set = []
    for s in sweeps_to_fit:
        v, i, t = ephys_utils.get_sweep_v_i_t_from_set(data_set, s)
        v += jxn
        stim_start, stim_dur, stim_amp, start_idx, end_idx = \
            ephys_utils.get_step_stim_characteristics(i, t)
        t_set.append(t)
        v_set.append(v)
        i_set.append(i)

    ext = EphysSweepSetFeatureExtractor(t_set, v_set, i_set, start=stim_start,
                                        end=(stim_start + stim_dur))
    ext.process_spikes()

    ft = {}
    blacklist = ["isi_type"]
    for k in ext.sweeps()[0].spike_feature_keys():
        if k in blacklist:
            continue
        pair = {}
        pair["mean"] = float(ext.spike_feature_averages(k).mean())
        pair["stdev"] = float(ext.spike_feature_averages(k).std())
        ft[k] = pair

    # "Delta" features
    sweep_avg_slow_trough_delta_time = []
    sweep_avg_slow_trough_delta_v = []
    sweep_avg_peak_trough_delta_time = []
    for swp in ext.sweeps():
        threshold_t = swp.spike_feature("threshold_t")
        fast_trough_t = swp.spike_feature("fast_trough_t")
        slow_trough_t = swp.spike_feature("slow_trough_t")
        delta_t = slow_trough_t - fast_trough_t
        delta_t[np.isnan(delta_t)] = 0.
        sweep_avg_slow_trough_delta_time.append(
            np.mean(delta_t[:-1] / np.diff(threshold_t)))

        fast_trough_v = swp.spike_feature("fast_trough_v")
        slow_trough_v = swp.spike_feature("slow_trough_v")
        delta_v = fast_trough_v - slow_trough_v
        delta_v[np.isnan(delta_v)] = 0.
        sweep_avg_slow_trough_delta_v.append(delta_v.mean())

    ft["slow_trough_delta_time"] = {
        "mean": float(np.mean(sweep_avg_slow_trough_delta_time)),
        "stdev": float(np.std(sweep_avg_slow_trough_delta_time))}
    ft["slow_trough_delta_v"] = {
        "mean": float(np.mean(sweep_avg_slow_trough_delta_v)),
        "stdev": float(np.std(sweep_avg_slow_trough_delta_v))}

    baseline_v = float(ext.sweep_features("v_baseline").mean())
    passive_fit_data["e_pas"] = baseline_v

    for k in ext.sweeps()[0].sweep_feature_keys():
        pair = {}
        pair["mean"] = float(ext.sweep_features(k).mean())
        pair["stdev"] = float(ext.sweep_features(k).std())
        ft[k] = pair

    # Determine highest step to check for depolarization block
    noise_1_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1NSSEED_1",
                                                          all_sweeps)
    noise_2_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1NSSEED_2",
                                                          all_sweeps)
    step_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       all_sweeps)
    all_sweeps = noise_1_sweeps + noise_2_sweeps + step_sweeps
    max_i = 0
    for s in all_sweeps:
        try:
            v, i, t = ephys_utils.get_sweep_v_i_t_from_set(
                data_set, s['sweep_number'])
        except:
            pass
        if np.max(i) > max_i:
            max_i = np.max(i)
    max_i += 10    # add 10 pA
    max_i *= 1e-3  # convert to nA

    # ----------- Generate output and submit jobs ---------------

    # Set up directories
    # Decide which fit(s) we are doing
    if (is_spiny and ft["width"]["mean"] < 0.8) or \
            (not is_spiny and ft["width"]["mean"] > 0.8):
        fit_types = ["f6", "f12"]
    elif is_spiny:
        fit_types = ["f6"]
    else:
        fit_types = ["f12"]

    for fit_type in fit_types:
        fit_type_dir = os.path.join(output_directory, fit_type)
        if not os.path.exists(fit_type_dir):
            os.makedirs(fit_type_dir)
        for seed in SEEDS:
            seed_dir = "{:s}/s{:d}".format(fit_type_dir, seed)
            if not os.path.exists(seed_dir):
                os.makedirs(seed_dir)

    # Collect and save data for target.json file
    target_dict = {}
    target_dict["passive"] = [{
        "ra": ra,
        "cm": {"soma": cm1, "axon": cm1, "dend": cm2},
        "e_pas": baseline_v
    }]

    swc_data = pd.read_table(swc_path, sep=r'\s', comment='#', header=None)
    has_apic = False
    if APICAL_DENDRITE_TYPE in pd.unique(swc_data[1]):
        has_apic = True
        _fit_stage_1_log.info("Has apical dendrite")
    else:
        _fit_stage_1_log.info("Does not have apical dendrite")

    if has_apic:
        target_dict["passive"][0]["cm"]["apic"] = cm2

    target_dict["fitting"] = [{
        "junction_potential": jxn,
        "sweeps": sweeps_to_fit,
        "passive_fit_info": passive_fit_data,
        "max_stim_test_na": max_i,
    }]

    target_dict["stimulus"] = [{
        "amplitude": 1e-3 * stim_amp,
        "delay": 1000.0,
        "duration": 1e3 * stim_dur
    }]

    target_dict["manifest"] = []
    target_dict["manifest"].append({"type": "file", "spec": swc_path,
                                    "key": "MORPHOLOGY"})
    target_dict["target_features"] = collect_target_features(ft)

    target_file = os.path.join(output_directory, 'target.json')
    ju.write(target_file, target_dict)

    # Create config.json for each fit type
    config_base_data = ju.read(os.path.join(FIT_BASE_DIR, 'config_base.json'))
    jobs = []
    for fit_type in fit_types:
        config = config_base_data.copy()
        fit_type_dir = os.path.join(output_directory, fit_type)
        config_path = os.path.join(fit_type_dir, "config.json")
        config["biophys"][0]["model_file"] = [target_file, config_path]
        if has_apic:
            fit_style_file = os.path.join(
                FIT_BASE_DIR, 'fit_styles', '%s_fit_style.json' % (fit_type))
        else:
            fit_style_file = os.path.join(
                FIT_BASE_DIR, "fit_styles",
                "%s_noapic_fit_style.json" % (fit_type))
        config["biophys"][0]["model_file"].append(fit_style_file)
        config["manifest"].append({"type": "dir", "spec": fit_type_dir,
                                   "key": "FITDIR"})
        ju.write(config_path, config)

        for seed in SEEDS:
            logfile = os.path.join(output_directory, fit_type,
                                   's%d' % seed, 'stage_1.log')
            jobs.append({
                'config_path': os.path.abspath(config_path),
                'fit_type': fit_type,
                'log': os.path.abspath(logfile),
                'seed': seed,
                'num_processes': DEFAULT_NUM_PROCESSES
            })
    return jobs
#===============================================================================
# example 4
#===============================================================================

import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron
from allensdk.core.nwb_data_set import NwbDataSet

neuron_config = json_utilities.read('neuron_config.json')['566302806']
ephys_sweeps = json_utilities.read('ephys_sweeps.json')
ephys_file_name = 'stimulus.nwb'

# pull out the stimulus for the first current-clamp sweep
ephys_sweep = next(s for s in ephys_sweeps
                   if s['stimulus_units'] == 'Amps')

ds = NwbDataSet(ephys_file_name)
data = ds.get_sweep(ephys_sweep['sweep_number'])
stimulus = data['stimulus']

# initialize the neuron
# important! update the neuron's dt for your stimulus
neuron = GlifNeuron.from_dict(neuron_config)
neuron.dt = 1.0 / data['sampling_rate']

# simulate the neuron
output = neuron.run(stimulus)

voltage = output['voltage']
threshold = output['threshold']
spike_times = output['interpolated_spike_times']
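# A hedged follow-up to the GLIF example above: plotting the simulated trace
# with matplotlib (assumed available; not part of the original snippet).
import numpy as np
import matplotlib.pyplot as plt

t = np.arange(len(voltage)) * neuron.dt
plt.plot(t, voltage * 1e3, label='voltage (mV)')
plt.plot(t, threshold * 1e3, label='threshold (mV)')
plt.xlabel('time (s)')
plt.legend()
plt.show()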
raw_ephys_file_name = '%d_raw_data.nwb' % dataset_id

if not os.path.isfile(raw_ephys_file_name):
    print('Downloading data: %s' % raw_ephys_file_name)
    ct.save_ephys_data(dataset_id, raw_ephys_file_name)
    print('Saved: %s' % raw_ephys_file_name)
else:
    print('File: %s already present...' % raw_ephys_file_name)

print('Loading data from: %s' % raw_ephys_file_name)

from allensdk.core.nwb_data_set import NwbDataSet
data_set = NwbDataSet(raw_ephys_file_name)

import matplotlib.pyplot as plt
import numpy as np

plt.figure()

sweep_numbers = sweep_numbers_for_data[dataset_id]

subset = {}

for sweep_number in sweep_numbers:
    sweep_data = data_set.get_sweep(sweep_number)

    # start/stop indices that exclude the experimental test pulse (if applicable)
    index_range = sweep_data['index_range']
raw_ephys_file_name = '%d_raw_data.nwb' % dataset_id

info = {}

import h5py
import numpy as np

h5f = h5py.File(raw_ephys_file_name, "r")
metas = ['aibs_cre_line',
         'aibs_dendrite_type',
         'intracellular_ephys/Electrode 1/location']
for m in metas:
    d = h5f.get('/general/%s' % m)
    print("%s = \t%s" % (m, d[()]))  # Dataset.value was removed in h5py 3.0
    info[m.split('/')[-1]] = str(d[()])
h5f.close()

from allensdk.core.nwb_data_set import NwbDataSet
data_set = NwbDataSet(raw_ephys_file_name)

sweep_numbers = data_set.get_experiment_sweep_numbers()
#sweep_numbers = [33,45]
sweep_numbers.sort()

print("All sweeps for %s: %s" % (dataset_id, sweep_numbers))

subthreshs = {}
spikings = {}
spike_count = {}
chosen = {}
stimuli = {}
import allensdk.core.json_utilities as json_utilities
from allensdk.model.glif.glif_neuron import GlifNeuron
from allensdk.core.nwb_data_set import NwbDataSet

neuron_config = json_utilities.read("neuron_config.json")
ephys_sweeps = json_utilities.read("ephys_sweeps.json")
ephys_file_name = "stimulus.nwb"

# pull out the stimulus for the first sweep
ephys_sweep = ephys_sweeps[0]
ds = NwbDataSet(ephys_file_name)
data = ds.get_sweep(ephys_sweep["sweep_number"])
stimulus = data["stimulus"]

# initialize the neuron
# important! update the neuron's dt for your stimulus
neuron = GlifNeuron.from_dict(neuron_config)
neuron.dt = 1.0 / data["sampling_rate"]

# simulate the neuron
output = neuron.run(stimulus)

voltage = output["voltage"]
threshold = output["threshold"]
spike_times = output["interpolated_spike_times"]
def save_cell_data_web(self, acceptable_stimtypes, non_standard_nwb=False,
                       ephys_dir='preprocessed', **kwargs):
    bpopt_stimtype_map = utility.bpopt_stimtype_map
    distinct_id_map = utility.aibs_stimname_map
    nwb_file = NwbDataSet(self.nwb_path)

    stim_map = defaultdict(list)
    stim_sweep_map = {}
    output_dir = os.path.join(os.getcwd(), ephys_dir)
    utility.create_dirpath(output_dir)

    sweep_numbers = kwargs.get('sweep_numbers') or nwb_file.get_sweep_numbers()
    for sweep_number in sweep_numbers:
        sweep_data = nwb_file.get_sweep_metadata(sweep_number)
        stim_type = sweep_data['aibs_stimulus_name']

        try:
            stim_type = stim_type.decode('UTF-8')
        except:
            pass

        if stim_type in acceptable_stimtypes:
            sweep = nwb_file.get_sweep(sweep_number)

            start_idx, stop_idx = sweep['index_range']

            stimulus_trace = sweep['stimulus'][start_idx:stop_idx]
            response_trace = sweep['response'][start_idx:stop_idx]

            sampling_rate = sweep['sampling_rate']

            time = np.arange(0, len(stimulus_trace)) / sampling_rate
            trace_name = '%s_%d' % (distinct_id_map[stim_type], sweep_number)

            if non_standard_nwb:
                calc_stimparams_func = self.calc_stimparams_nonstandard
            else:
                calc_stimparams_func = self.calc_stimparams

            stim_start, stim_stop, stim_amp_start, stim_amp_end, \
                tot_duration, hold_curr = calc_stimparams_func(
                    time, stimulus_trace, trace_name)

            response_trace_short_filename = '%s.%s' % (trace_name, 'txt')
            response_trace_filename = os.path.join(
                output_dir, response_trace_short_filename)

            time *= 1e3            # in ms
            response_trace *= 1e3  # in mV
            response_trace = utility.correct_junction_potential(
                response_trace, self.junction_potential)
            stimulus_trace *= 1e9

            # downsampling
            time, stimulus_trace, response_trace = \
                utility.downsample_ephys_data(time, stimulus_trace,
                                              response_trace)

            if stim_type in utility.bpopt_current_play_stimtypes:
                with open(response_trace_filename, 'wb') as response_trace_file:
                    np.savetxt(response_trace_file,
                               np.transpose([time, response_trace,
                                             stimulus_trace]))
            else:
                with open(response_trace_filename, 'wb') as response_trace_file:
                    np.savetxt(response_trace_file,
                               np.transpose([time, response_trace]))

            holding_current = hold_curr  # sweep['bias_current']

            stim_map[distinct_id_map[stim_type]].append([
                trace_name,
                bpopt_stimtype_map[stim_type],
                holding_current / 1e12,
                stim_amp_start / 1e12,
                stim_amp_end / 1e12,
                stim_start * 1e3,
                stim_stop * 1e3,
                tot_duration * 1e3,
                response_trace_short_filename])

            stim_sweep_map[trace_name] = sweep_number

    logger.debug('Writing stimmap.csv ...')
    stim_reps_sweep_map, stimmap_filename = self.write_stimmap_csv(
        stim_map, output_dir, stim_sweep_map)

    self.write_provenance(output_dir, self.nwb_path, stim_sweep_map,
                          stim_reps_sweep_map)

    return output_dir, stimmap_filename
def extract_info_from_nwb_file(dataset_id, raw_ephys_file_name):
    info = {}

    import h5py
    import numpy as np

    h5f = h5py.File(raw_ephys_file_name, "r")
    metas = ['aibs_cre_line',
             'aibs_dendrite_type',
             'intracellular_ephys/Electrode 1/location']
    for m in metas:
        d = h5f.get('/general/%s' % m)
        print("%s = \t%s" % (m, d[()]))  # Dataset.value was removed in h5py 3.0
        info[m.split('/')[-1]] = str(d[()])
    h5f.close()

    from allensdk.core.nwb_data_set import NwbDataSet
    data_set = NwbDataSet(raw_ephys_file_name)

    sweep_numbers = data_set.get_experiment_sweep_numbers()
    if test:
        sweep_numbers = [33, 45]
    sweep_numbers.sort()

    info[DH.DATASET] = dataset_id
    info[DH.COMMENT] = 'Data analysed on %s' % (time.ctime())
    info[DH.PYELECTRO_VERSION] = pyel_ver
    info[DH.ALLENSDK_VERSION] = allensdk_ver
    info[DH.SWEEPS] = {}

    for sweep_number in sweep_numbers:
        sweep_data = data_set.get_sweep(sweep_number)
        if data_set.get_sweep_metadata(sweep_number)['aibs_stimulus_name'] == "Long Square":
            sweep_info = {}
            sweep_info[DH.METADATA] = data_set.get_sweep_metadata(sweep_number)
            info[DH.SWEEPS]['%i' % sweep_number] = sweep_info
            sweep_info[DH.SWEEP] = sweep_number

            # start/stop indices that exclude the experimental test pulse (if applicable)
            index_range = sweep_data['index_range']

            # stimulus is a numpy array in amps
            stimulus = sweep_data['stimulus'][index_range[0]:index_range[-1]]

            # response is a numpy array in volts
            response = sweep_data['response'][index_range[0]:index_range[-1]] * 1000

            # sampling rate is in Hz
            sampling_rate = sweep_data['sampling_rate']

            # define some time points in seconds (i.e., convert to absolute time)
            time_pts = np.arange(0, len(stimulus) / sampling_rate,
                                 1. / sampling_rate) * 1000

            comment = 'Sweep: %i in %i; %sms -> %sms; %sA -> %sA; %smV -> %smV' % (
                sweep_number, dataset_id, time_pts[0], time_pts[-1],
                np.amin(stimulus), np.amax(stimulus),
                np.amin(response), np.amax(response))
            print(comment)
            sweep_info[DH.COMMENT] = comment

            analysis = utils.simple_network_analysis(
                {sweep_number: response},
                time_pts,
                extra_targets=['%s:value_280' % sweep_number,
                               '%s:average_1000_1200' % sweep_number,
                               '%s:average_100_200' % sweep_number],
                end_analysis=1500,
                plot=plot,
                show_plot_already=False,
                verbose=True)

            sweep_info[DH.ICLAMP_ANALYSIS] = analysis

    analysis_file_name = '%s_analysis.json' % (dataset_id)
    analysis_file = open(analysis_file_name, 'w')
    pretty = pp.pformat(info)
    pretty = pretty.replace('\'', '"')
    pretty = pretty.replace('u"', '"')
    analysis_file.write(pretty)
    analysis_file.close()

    print('Written info to %s' % analysis_file_name)