def test_spike_times(self):
    """Regression test for the biophysical perisomatic model.

    Downloads model 472451419 via the API, patches the manifest to run
    only sweep 51, compiles the NEURON mechanisms, runs the simulation,
    and checks the simulated spike times against a frozen reference list.
    """
    expected = [
        2.937305, 3.16453, 3.24271, 4.1622, 4.24182, 10.0898, 10.132545,
        10.176095, 10.2361, 10.660655, 10.762125, 10.863465, 10.93833,
        11.140815, 11.19246, 11.24553, 11.696305, 11.812655, 11.90469,
        12.056755, 12.15794, 12.233905, 12.47577, 12.741295, 12.82861,
        12.923175, 18.05068, 18.139875, 18.17693, 18.221485, 18.24337,
        18.39981, 18.470705, 18.759675, 18.82183, 18.877475, 18.91033,
        18.941195, 19.050515, 19.12557, 19.15963, 19.188655, 19.226205,
        19.29813, 19.420665, 19.47627, 19.763365, 19.824225, 19.897995,
        19.93155, 20.04916, 20.11832, 20.148755, 20.18004, 20.22173,
        20.2433, 20.40018, 20.470915, 20.759715, 20.82156, 20.866465,
        20.90807, 20.939175]

    bp = BiophysicalPerisomaticApi('http://api.brain-map.org')
    bp.cache_stimulus = True  # change to False to not download the large stimulus NWB file
    neuronal_model_id = 472451419  # get this from the web site as above
    bp.cache_data(neuronal_model_id, working_directory='neuronal_model')

    cwd = os.path.realpath(os.curdir)
    print(cwd)
    os.chdir(os.path.join(cwd, 'neuronal_model'))
    try:
        # run only sweep 51 through a patched copy of the manifest
        manifest = ju.read('manifest.json')
        manifest['biophys'][0]['model_file'][0] = 'manifest_51.json'
        manifest['runs'][0]['sweeps'] = [51]
        ju.write('manifest_51.json', manifest)

        subprocess.call(['nrnivmodl', './modfiles'])  # compile NEURON mechanisms
        run(Config().load('manifest_51.json'))

        nwb_out = NwbDataSet('work/386049444.nwb')
        spikes = nwb_out.get_spike_times(51)
    finally:
        # FIX: the restore was previously commented out, leaving the
        # process CWD changed for every later test; restore it even on
        # failure.
        os.chdir(cwd)

    numpy.testing.assert_array_almost_equal(spikes, expected)
def set_up_objective(self, measure='spike frequency'):
    '''Prepares the model for parameter optimization by assigning the
    output measure to be used in the cost function.

    Parameters
    ----------
    measure: string
        Name of the output measure to be used in optimization.
        Currently only 'spike frequency' is implemented.
    '''
    if measure == 'spike frequency':
        # get the experimental data from the NWB file
        data_set = NwbDataSet(
            os.path.join(
                self.model_dir,
                self.description.manifest.get_path('stimulus_path')))
        spike_times = data_set.get_spike_times(self.reference_sweep)

        # calculate firing frequency for the NWB data as
        # spike count / summed inter-spike intervals
        sum_intervals = 0.0
        for i in range(len(spike_times) - 1):
            sum_intervals += spike_times[i + 1] - spike_times[i]

        # FIX: fewer than two spikes yields no intervals, which
        # previously raised ZeroDivisionError; report 0 Hz instead.
        if sum_intervals > 0.0:
            self.reference_output = len(spike_times) / sum_intervals
        else:
            self.reference_output = 0.0
    else:
        # FIX: was a Python 2 print statement; the file otherwise
        # uses the print() function.
        print("Model fitting using the output measure", measure,
              "has not been implemented yet.")
def calc_ev(ew, folder, s, sweeps, stim_len, data_spike_times, dt):
    '''Compute explained variance of a model against data spike times.

    Parameters
    ----------
    ew: string
        file-name suffix identifying the model configuration file
    folder: string
        structured data directory for the specimen
    s: string
        model-name search string
    sweeps: list of integers
        sweep numbers to pull model spike times from
    stim_len: integer
        stimulus length in time steps
    data_spike_times: list of numpy arrays
        spike times (seconds) for each data sweep
    dt: float
        time step (seconds) used to convert times to indices

    Returns
    -------
    Result of exVar, or np.nan when no model NWB file exists.
    '''
    # FIX: converted Python 2 print statements to print() calls for
    # consistency with the rest of the file.
    print(ew, folder)

    # convert data spike times to integer time-step indices
    data_spike_ind = [(d / dt).astype(int) for d in data_spike_times]

    # get model data
    path = get_model_nwb_path_from_folder(ew, folder, s)  # get nwb file path
    if not isinstance(path, str):  # FIX: basestring is Python 2-only
        return np.nan

    model = NwbDataSet(path)
    model_spike_ind = [(model.get_spike_times(sw) / dt).astype(int)
                       for sw in sweeps]

    # the model sweeps are deterministic repeats, so their spike trains
    # should all be identical; warn loudly when they are not
    for ii in range(1, len(model_spike_ind)):
        if not np.array_equal(model_spike_ind[ii], model_spike_ind[ii - 1]):
            print('MODEL SPIKE TIMES SHOULD BE THE SAME AND THEY ARE NOT!',
                  os.path.basename(folder)[:9])
            print(len(model_spike_ind), model_spike_ind)
            # raise Exception('model spike times should be the same and they are not')

    # NOTE(review): `sigma` is not defined in this function; it must come
    # from module scope -- confirm before refactoring further.
    return exVar(data_spike_ind, [model_spike_ind[0]], sigma, dt, stim_len)
def ve_ramp_latency(specimen_id, ve_path):
    """Latency (seconds) to the first spike on the specimen's first
    passed ramp sweep ("C1RP25PR1S") in the virtual-experiment NWB file
    at `ve_path`; NaN when there is no passed ramp sweep or no spike."""
    nwb = NwbDataSet(ve_path)
    passed_ramps = lims_utils.get_sweeps_of_type(
        "C1RP25PR1S", specimen_id, passed_only=True)

    # guard clauses instead of nested if/else
    if len(passed_ramps) == 0:
        return np.nan

    times = nwb.get_spike_times(passed_ramps[0])
    if len(times) == 0:
        return np.nan
    return times[0]
def get_model_spike_times_from_nwb(ends_with, specimen_id_directory,
                                   model_string, sweeps, where_running):
    '''Gets the times of spikes from the model nwb file.

    Parameters
    ----------
    ends_with: string
        end of file searching for: options "_GLIF1_neuron_config.json",
        "_GLIF2_neuron_config.json" etc.
    specimen_id_directory: string
        path to structured data directory containing neuron_config,
        preprocessor, etc., files.
    model_string: string
        string searching for in model name: options '(LIF)', '(LIF-R)',
        '(LIF-ASC)', '(LIF-R_ASC)', '(LIF-R_ASC_A)'
    sweeps: list of integers
        integers refer to the sweep number in the electrophysiology .nwb
        data file
    where_running: string
        'internal': run within the Institute, can access the internal
            file system
        'external': run outside the Institute, uses the api to download
            the model nwb files

    Note that although ends_with and model_string should be appropriately
    paired, there is no check within this module to make sure they are.

    Returns
    -------
    np.nan if there is no model in the structured data directory
    corresponding to the requested ends_with variable, otherwise
    model_spike_times: list of numpy arrays, one array of spike times
    per sweep.
    '''
    if where_running == 'internal':
        path = get_model_nwb_path_from_folder(
            ends_with, specimen_id_directory, model_string)  # get nwb file path
    elif where_running == 'external':
        path = download_model_nwb_if_model_exists_in_SDD(
            ends_with, specimen_id_directory, model_string)  # get nwb file path
    else:
        raise Exception(
            'specify whether the code is being run internally or externally')

    # FIX: `basestring` is Python 2-only; the file otherwise targets
    # Python 3, where `str` covers all string paths.
    if isinstance(path, str):
        if sweeps == []:
            raise Exception('There are no sweeps to look at')
        model = NwbDataSet(path)
        model_spike_times = []
        for sw in sweeps:
            model_spike_times.append(model.get_spike_times(sw))
        return model_spike_times
    else:
        return np.nan
print(child['x'], child['y'], child['z'], child['radius'])

#===============================================================================
# example 4
#===============================================================================

from allensdk.core.nwb_data_set import NwbDataSet

# if you ran the examples above, you will have a NWB file here
file_name = 'cell_types/specimen_485909730/ephys.nwb'
data_set = NwbDataSet(file_name)

sweep_numbers = data_set.get_sweep_numbers()
sweep_number = sweep_numbers[0]

sweep_data = data_set.get_sweep(sweep_number)

# spike times are in seconds relative to the start of the sweep
spike_times = data_set.get_spike_times(sweep_number)

# stimulus is a numpy array in amps
stimulus = sweep_data['stimulus']

# response is a numpy array in volts
response = sweep_data['response']  # FIX: variable was misspelled `reponse`

# sampling rate is in Hz
sampling_rate = sweep_data['sampling_rate']

# start/stop indices that exclude the experimental test pulse (if applicable)
index_range = sweep_data['index_range']
from allensdk.core.nwb_data_set import NwbDataSet

file_name = 'example.nwb'
data_set = NwbDataSet(file_name)

sweep_numbers = data_set.get_sweep_numbers()
sweep_number = sweep_numbers[0]

sweep_data = data_set.get_sweep(sweep_number)

# spike times are in seconds relative to the start of the sweep
spike_times = data_set.get_spike_times(sweep_number)

# stimulus is a numpy array in amps
stimulus = sweep_data['stimulus']

# response is a numpy array in volts
response = sweep_data['response']  # FIX: variable was misspelled `reponse`

# sampling rate is in Hz
sampling_rate = sweep_data['sampling_rate']

# start/stop indices that exclude the experimental test pulse (if applicable)
index_range = sweep_data['index_range']
def noise1_response_comparison(ids, ve_paths, file_prefix):
    """For each specimen that has every model type available, plot the
    experimental noise sweep (spike raster, voltage, stimulus) and
    overlay a spike raster row for each model's virtual-experiment NWB.

    Saves one figure per specimen named
    "<file_prefix>_Noise2_<specimen_id>".

    NOTE(review): the function name says "noise1" but the stimulus code
    ("C1NSSEED_2") and the filename label ("Noise2") are noise 2 --
    confirm which is intended.
    """
    filename = 'Noise2'
    ids1 = find_cells_with_all_models(ids)
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    for specimen_id in ids1:
        data_set = expt_data_set(specimen_id)
        exp_sweeps = lims_utils.get_sweeps_of_type(
            "C1NSSEED_2", specimen_id, passed_only=True)
        if len(exp_sweeps) == 0:
            continue

        fig, axes = plt.subplots(3, 1, sharex=True)
        plt.subplots_adjust(hspace=0.1, wspace=0.1)
        axes[2].set_xlim([0, 25])

        # experimental trace: raster row, voltage, and stimulus
        sweep_data = data_set.get_sweep(exp_sweeps[0])
        spike_times = data_set.get_spike_times(exp_sweeps[0])
        index_range = sweep_data["index_range"]
        i = sweep_data["stimulus"][0:index_range[1] + 1]  # in A
        v = sweep_data["response"][0:index_range[1] + 1]  # in V
        i *= 1e12  # to pA
        v *= 1e3  # to mV
        sampling_rate = sweep_data["sampling_rate"]  # in Hz
        t = np.arange(0, len(v)) * (1.0 / sampling_rate)

        axes[1].plot(t, v, color='k', linewidth=0.3)
        axes[0].scatter(spike_times, [10] * len(spike_times), color='k',
                        s=100, marker="|", linewidth=0.3)
        axes[2].plot(t, i, color='k', linewidth=0.3)
        axes[1].set_ylabel("mV")
        axes[2].set_ylabel("pA")
        axes[2].set_xlabel("seconds")

        # model rasters: one row per model type.
        # FIX: the original re-issued the identical (loop-invariant) LIMS
        # sweep query on every iteration and also read/scaled the full
        # model voltage/stimulus traces without ever plotting them; only
        # the spike times are used, so only they are fetched here.
        # `label`/`color` from zip are intentionally unused (rows are
        # drawn in black); kept for parity with LABELS ordering.
        for model, label, color in zip(BASE_ORDER, LABELS, colors):
            ve_path = ve_paths[model][specimen_id]
            model_data_set = NwbDataSet(ve_path)
            model_spikes = model_data_set.get_spike_times(exp_sweeps[0])
            axes[0].scatter(
                model_spikes,
                [80 - (10 * BASE_ORDER.index(model))] * len(model_spikes),
                color='k', s=100, marker="|", linewidth=0.3)

        axes[0].set_yticklabels([
            "", 'Experimental Data', LABELS[6], LABELS[5], LABELS[4],
            LABELS[3], LABELS[2], LABELS[1], LABELS[0]
        ])
        axes[0].set_ylabel("model type")
        axes[0].set_title("Spike Times")
        plt.subplots_adjust(hspace=0.1, wspace=0.1)
        plt.savefig(str(file_prefix + "_" + filename + "_" + str(specimen_id)),
                    bbox_inches="tight")
        # plt.show()
        plt.close()