Example #1
    def get_voltage(self, neuron_config, stim_name):

        ephys_sweeps = self.cfg.ephys_sweeps

        ephys_sweep = next(s for s in ephys_sweeps
                           if s['stimulus_name'] == stim_name)

        ds = NwbDataSet(self.ephys_file_name)
        data = ds.get_sweep(ephys_sweep['sweep_number'])
        stimulus = data['stimulus']
        stimulus = stimulus[stimulus != 0]
        stimulus = stimulus[:self.cfg.stimulus_allow]

        # initialize the neuron
        neuron = GlifNeuron.from_dict(neuron_config)

        # Set dt
        neuron.dt = 1.0 / data['sampling_rate']

        # simulate the neuron
        output = neuron.run(stimulus)

        voltage = output['voltage'] * 1e3  # convert V to mV

        voltage = voltage[~np.isnan(voltage)]
        voltage = voltage[:self.cfg.signal_allow]

        return output, voltage, neuron, stimulus
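A minimal usage sketch for the method above. The `model` instance and the stimulus name are hypothetical, and matplotlib is assumed; the returned trace is plotted against a time base built from `neuron.dt`:

import matplotlib.pyplot as plt
import numpy as np

neuron_config = json_utilities.read('neuron_config.json')  # as in Example #25
output, voltage, neuron, stimulus = model.get_voltage(neuron_config, 'Noise 1')
t = np.arange(len(voltage)) * neuron.dt  # time base from the simulation step, in s
plt.plot(t, voltage)
plt.xlabel('time (s)')
plt.ylabel('membrane voltage (mV)')
plt.show()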
Example #2
    def read_stimulus(self, stimulus_path, sweep=0):
        '''Load current values for a specific experiment sweep and set up the
        simulation and stimulus sampling rates.

        NOTE: NEURON only allows simulation sampling rates that are multiples
        of 40 kHz.  To avoid aliasing, we set the simulation sampling rate to
        the least common multiple of the stimulus sampling rate and 40 kHz.

        Parameters
        ----------
        stimulus_path : string
            NWB file name
        sweep : integer, optional
            sweep index
        '''
        Utils._log.info("reading stimulus path: %s, sweep %s", stimulus_path,
                        sweep)

        stimulus_data = NwbDataSet(stimulus_path)
        sweep_data = stimulus_data.get_sweep(sweep)

        # convert to nA for NEURON
        self.stim_curr = sweep_data['stimulus'] * 1.0e9

        # sampling rate in Hz (truncated to an integer)
        hz = int(sweep_data['sampling_rate'])
        neuron_hz = Utils.nearest_neuron_sampling_rate(hz)

        self.simulation_sampling_rate = neuron_hz
        self.stimulus_sampling_rate = hz

        if hz != neuron_hz:
            Utils._log.debug(
                "changing sampling rate from %d to %d to avoid NEURON aliasing",
                hz, neuron_hz)
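For reference, `Utils.nearest_neuron_sampling_rate` is assumed to implement the least-common-multiple rule described in the docstring. A minimal sketch, not the AllenSDK source:

from math import gcd

def nearest_neuron_sampling_rate(hz, target_hz=40000):
    # least common multiple of the stimulus rate and NEURON's 40 kHz base rate
    return hz * target_hz // gcd(hz, target_hz)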
Example #3
def calc_ev(ew, folder, s, sweeps, stim_len, data_spike_times, dt):
    '''Compute the explained variance between data and model spike trains.'''
    print(ew, folder)
    # convert data spike times to indices
    data_spike_ind = []
    for d in data_spike_times:
        data_spike_ind.append((d / dt).astype(int))

    # get model data
    path = get_model_nwb_path_from_folder(ew, folder, s)  # get nwb file path
    if isinstance(path, str):
        model = NwbDataSet(path)
        model_spike_ind = []
        for sw in sweeps:
            spikes = (model.get_spike_times(sw) / dt).astype(int)
            model_spike_ind.append(spikes)
        # check to make sure all spike time arrays are the same for the model
        for ii in range(1, len(model_spike_ind)):
            if not np.array_equal(model_spike_ind[ii],
                                  model_spike_ind[ii - 1]):
                print('MODEL SPIKE TIMES SHOULD BE THE SAME AND THEY ARE NOT!',
                      os.path.basename(folder)[:9])
                print(len(model_spike_ind), model_spike_ind)
                # raise Exception('model spike times should be the same and they are not')
        # sigma is assumed to be defined at module level
        return exVar(data_spike_ind, [model_spike_ind[0]], sigma, dt, stim_len)
    else:
        return np.nan
Example #4
    def set_up_objective(self, measure='spike frequency'):
        '''
        Prepares the model for parameter optimization by assigning the output measure to be used in the cost function.
        
        Parameters
        ----------
        measure: string
            Name of the output measure to be used in optimization. Currently only 'spike frequency' is implemented.
        '''
        if (measure == 'spike frequency'):
            # get the experimental data from the NWB file
            data_set = NwbDataSet(
                os.path.join(
                    self.model_dir,
                    self.description.manifest.get_path('stimulus_path')))
            spike_times = data_set.get_spike_times(self.reference_sweep)

            # calculate firing frequency for the NWB data
            sum_intervals = 0.0
            for i in range(len(spike_times) - 1):
                sum_intervals += (spike_times[i + 1] - spike_times[i])

            self.reference_output = len(spike_times) / sum_intervals
        else:
            print "Model fitting using the output measure", measure, "has not been implemented yet."
Example #5
def ve_tau(specimen_id, ve_path):
    print("START VE_TAU " + str(specimen_id) + " " + str(ve_path))
    expt_taus = []
    data_set = NwbDataSet(ve_path)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    print("ve specimen id= " + str(specimen_id))
    for sweep in long_square_sweeps:
        try:
            amplitude = data_set.get_sweep_metadata(sweep)["aibs_stimulus_amplitude_pa"]
        except KeyError:
            continue
        if amplitude < 0:
            v, i, t = lims_utils.get_sweep_v_i_t_from_set(data_set, sweep)
            # time and voltage of each hyperpolarizing sweep
            sweep_feat = EphysSweepFeatureExtractor(t, v)
            tau = sweep_feat.estimate_time_constant()
            if not np.isnan(tau):
                expt_taus.append(tau)  # append time constant of each sweep to list
    mean_expt_tau = np.nanmean(expt_taus)  # mean time constant for this cell
    print("mean_ve_tau= " + str(mean_expt_tau))
    return mean_expt_tau
Example #6
def save_nwb(output_path, v, sweep, sweep_by_type=None):
    '''Save a single voltage output result into an existing sweep in a NWB file.
    This is intended to overwrite a recorded trace with a simulated voltage.

    Parameters
    ----------
    output_path : string
        file name of a pre-existing NWB file.
    v : numpy array
        voltage
    sweep : integer
        which entry to overwrite in the file.
    sweep_by_type : dict, optional
        mapping from stimulus type names to lists of sweep numbers.
    '''
    output = NwbDataSet(output_path)
    output.set_sweep(sweep, None, v)

    sweep_features = {}
    if sweep_by_type is not None:
        sweep_by_type = {t: [sweep]
                         for t, ss in sweep_by_type.items() if sweep in ss}
        sweep_features = extract_cell_features.extract_sweep_features(
            output, sweep_by_type)
    try:
        spikes = sweep_features[sweep]['spikes']
        spike_times = [s['threshold_t'] for s in spikes]
        output.set_spike_times(sweep, spike_times)
    except Exception as e:
        logging.info("sweep %d has no sweep features. %s" % (sweep, e.args))
Example #7
    def get_ephys_data(self, specimen_id, file_name=None):
        """
        Download electrophysiology traces for a single cell in the database.

        Parameters
        ----------
        
        specimen_id: int
            The ID of a cell specimen to download.

        file_name: string
            File name to save/read the ephys NWB data.
            If file_name is None, the file_name will be pulled out of the
            manifest.  If caching is disabled, no file will be saved.
            Default is None.

        Returns
        -------
        NwbDataSet
            A class instance with helper methods for retrieving stimulus
            and response traces out of an NWB file.
        """

        file_name = self.get_cache_path(file_name, self.EPHYS_DATA_KEY, specimen_id)

        if not os.path.exists(file_name):
            self.api.save_ephys_data(specimen_id, file_name)

        return NwbDataSet(file_name)
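A usage sketch, assuming this method lives on a cache class such as CellTypesCache (the specimen ID and sweep number are hypothetical example values):

ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
data_set = ctc.get_ephys_data(464212183)  # hypothetical specimen ID
sweep_data = data_set.get_sweep(35)       # dict with stimulus, response, sampling_rate, ...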
Example #8
def load_sweep(file_name, sweep_number):
    ''' Load the stimulus for a sweep from file. '''
    logging.debug("loading sweep %d" % sweep_number)

    load_start_time = time.time()
    data = NwbDataSet(file_name).get_sweep(sweep_number)

    logging.debug("load time %f" % (time.time() - load_start_time))

    return data
Example #9
def ve_ramp_latency(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    ramp_sweeps = lims_utils.get_sweeps_of_type("C1RP25PR1S", specimen_id, passed_only=True)
    if len(ramp_sweeps) == 0:
        return np.nan
    spike_times = data_set.get_spike_times(ramp_sweeps[0])
    if len(spike_times) > 0:
        return spike_times[0]
    else:
        return np.nan
Example #10
def load_experiment(specimen):

    path, bath, breakin, end, giga, last_bias, mid_bias, SS_amp, spec_name = Find_Critical_Sweeps(
        specimen)

    ds = NwbDataSet(path)

    if bath == 'error':
        bath_peak = 9999999
        bath_ss = 9999999
    else:
        bath_sweep = ds.get_sweep(bath)
        bath_i = bath_sweep['response'] * 1e12
        bath_peak, bath_ss = get_Reses(70, bath_i)

    if breakin == 'error':
        breakin_peak = 9999999
        breakin_ss = 9999999
    else:
        breakin_sweep = ds.get_sweep(breakin)
        breakin_i = breakin_sweep['response'] * 1e12
        breakin_peak, breakin_ss = get_Reses(70, breakin_i)

    if end == 'error':
        end_peak = 9999999
        end_ss = 9999999
        end_leak = 9999999
    else:
        end_sweep = ds.get_sweep(end)
        end_i = end_sweep['response'] * 1e12
        end_peak, end_ss = get_Reses(70, end_i)
        end_leak = np.mean(end_i[0:100])

    if giga == 'error':
        giga_peak = 9999999
        giga_ss = 9999999
    else:
        giga_sweep = ds.get_sweep(giga)
        giga_i = giga_sweep['response'] * 1e12
        giga_peak, giga_ss = get_Reses(70, giga_i)

    features = []
    features.append(spec_name)
    features.append(bath_ss)
    features.append(breakin_peak)
    features.append(breakin_ss)
    features.append(end_peak)
    features.append(end_ss)
    features.append(end_leak)
    features.append(giga_ss)
    features.append(last_bias)
    features.append(mid_bias)
    features.append(SS_amp)

    return features
Example #11
def load_sweep(file_name, sweep_number, desired_dt=None, cut=0, bessel=False):
    '''Load a data sweep and do specified data processing.
    Inputs:
        file_name: string
            name of .nwb data file
        sweep_number: integer
            number specifying the sweep to be loaded
        desired_dt: float
            the size of the time step the data should be subsampled to
        cut: integer
            index at which to start reporting data (i.e. cut off data before this index)
        bessel: dictionary
            contains parameters 'N' and 'freq' to implement standard scipy Bessel filtering
    Returns:
        dictionary containing
            voltage: array
            current: array
            dt: time step of the returned data
            start_idx: the index at which the first stimulus starts (excluding the test pulse)
    '''
    ds = NwbDataSet(file_name)
    data = ds.get_sweep(sweep_number)

    data["dt"] = 1.0 / data["sampling_rate"]

    if cut > 0:
        data["response"] = data["response"][cut:]
        data["stimulus"] = data["stimulus"][cut:]

    if bessel:
        sample_freq = 1. / data["dt"]
        filt_coeff = (bessel["freq"]) / (
            sample_freq / 2.)  # filter fraction of Nyquist frequency
        b, a = signal.bessel(bessel["N"], filt_coeff, "low")
        data['response'] = signal.filtfilt(b, a, data['response'], axis=0)

    if desired_dt is not None:
        if data["dt"] != desired_dt:
            data["response"] = subsample_data(data["response"], "mean",
                                              data["dt"], desired_dt)
            data["stimulus"] = subsample_data(data["stimulus"], "mean",
                                              data["dt"], desired_dt)
            data["start_idx"] = int(data["index_range"][0] /
                                    (desired_dt / data["dt"]))
            data["dt"] = desired_dt

    if "start_idx" not in data:
        data["start_idx"] = data["index_range"][0]

    return {
        "voltage": data["response"],
        "current": data["stimulus"],
        "dt": data["dt"],
        "start_idx": data["start_idx"]
    }
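The `subsample_data` helper used above is assumed; a minimal sketch of its "mean" method, assuming `desired_dt` is an integer multiple of `dt`:

import numpy as np

def subsample_data(data, method, dt, desired_dt):
    n = int(desired_dt / dt)          # samples per output block
    end = (len(data) // n) * n        # trim a ragged tail
    if method == "mean":
        return data[:end].reshape(-1, n).mean(axis=1)
    raise ValueError("unknown subsampling method: %s" % method)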
Example #12
def stimulus(neuron_config_file, ephys_sweeps_file):
    ephys_sweeps = json_utilities.read(ephys_sweeps_file)
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    return stimulus
Example #13
def ve_fi_curve(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    expt_set = expt_data_set(specimen_id)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    fi_curve_data = dict([
        amp_and_spike_count(data_set, sweep, expt_set)
        for sweep in long_square_sweeps
    ])
    return fi_curve_stats(fi_curve_data)
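The `amp_and_spike_count` helper is not shown; judging from its use here and in Example #26, it pairs each sweep's stimulus amplitude with a spike count. A hypothetical sketch:

def amp_and_spike_count(data_set, sweep, expt_set):
    # amplitude from the experimental set's metadata, spikes from the model data set
    amp = expt_set.get_sweep_metadata(sweep)["aibs_stimulus_amplitude_pa"]
    spike_count = len(data_set.get_spike_times(sweep))
    return amp, spike_count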
Example #14
def expt_data_set(specimen_id):
    sql = """
        select wkf.storage_directory || wkf.filename from well_known_files wkf
        join specimens sp on sp.ephys_roi_result_id = wkf.attachable_id
        where sp.id = %s
        and wkf.well_known_file_type_id = %s
    """

    results = lims_utils.query(sql, (specimen_id, NWB_DOWNLOAD_TYPE_ID))
    nwb_path = results[0][0]
    return NwbDataSet(nwb_path)
Example #15
def load_experiment(file_name, sweep_number):
    ds = NwbDataSet(file_name)
    sweep = ds.get_sweep(sweep_number)

    r = sweep['index_range']
    v = sweep['response'] * 1e3   # to mV
    i = sweep['stimulus'] * 1e12  # to pA
    dt = 1.0 / sweep['sampling_rate']
    t = np.arange(0, len(v)) * dt

    return (v, i, t, r, dt)
Example #16
def get_sweep_data(sweep_name):
    """ Input: sweep name (string)

        Output: sweep data dictionary for that sweep, as returned by
        NwbDataSet.get_sweep()
    """
    global nwb_file_name
    try:
        num = int(sweep_name.split('_')[-1])
    except ValueError:
        print("Unable to parse sweep number from '%s'" % str(sweep_name))
        raise
    return NwbDataSet(nwb_file_name).get_sweep(num)
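A usage sketch (the sweep name and file are hypothetical; `nwb_file_name` must already be set): the sweep number is parsed as the integer after the last underscore.

nwb_file_name = 'ephys.nwb'        # hypothetical file
sweep = get_sweep_data('Sweep_45')
stimulus = sweep['stimulus']
response = sweep['response']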
Example #17
def write_sweep_response(file_name, sweep_number, response, spike_times):
    ''' Overwrite the response in a file. '''

    logging.debug("writing sweep")

    write_start_time = time.time()
    ephds = NwbDataSet(file_name)

    ephds.set_sweep(sweep_number, stimulus=None, response=response)
    ephds.set_spike_times(sweep_number, spike_times)

    logging.debug("write time %f" % (time.time() - write_start_time))
Example #18
def get_data_sets_from_remote(upper_bound=2, lower_bound=None):
    try:
        with open('all_allen_cells.p', 'rb') as f:
            cells = pickle.load(f)
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
    except Exception:
        ctc = CellTypesCache(manifest_file='cell_types/manifest.json')
        cells = ctc.get_cells()
        with open('all_allen_cells.p', 'wb') as f:
            pickle.dump(cells, f)

    data_sets = []
    path_name = 'data_nwbs'

    try:
        os.mkdir(path_name)
    except FileExistsError:
        print('directory already made.')

    ids = [c['id'] for c in cells]
    if upper_bound is None and lower_bound is None:
        limited_range = ids[0:-1]
    elif upper_bound is not None and lower_bound is not None:
        limited_range = ids[lower_bound:upper_bound]
    else:
        limited_range = ids[:upper_bound]  # only one bound given

    for specimen_id in limited_range:
        temp_path = os.path.join(path_name, str(specimen_id) + '.p')
        if os.path.exists(temp_path):
            with open(temp_path, 'rb') as f:
                (data_set_nwb, sweeps, specimen_id) = pickle.load(f)
            data_sets.append((data_set_nwb, sweeps, specimen_id))
        else:
            # triggers download of the NWB file into the local cache
            data_set = ctc.get_ephys_data(specimen_id)
            sweeps = ctc.get_ephys_sweeps(specimen_id)

            file_name = 'cell_types/specimen_' + str(specimen_id) + '/ephys.nwb'
            data_set_nwb = NwbDataSet(file_name)

            data_sets.append((data_set_nwb, sweeps, specimen_id))

            with open(temp_path, 'wb') as f:
                pickle.dump((data_set_nwb, sweeps, specimen_id), f)
    return data_sets
Example #19
def embed_spike_times(input_nwb_file, output_nwb_file, sweep_features):
    # embed spike times in NWB file
    tmp_nwb_file = output_nwb_file + ".tmp"

    shutil.copy(input_nwb_file, tmp_nwb_file)
    for sweep_num in sweep_features:
        spikes = sweep_features[sweep_num]['spikes']
        spike_times = [ s['threshold_t'] for s in spikes ]
        NwbDataSet(tmp_nwb_file).set_spike_times(sweep_num, spike_times)

    try:
        shutil.move(tmp_nwb_file, output_nwb_file)
    except OSError as e:
        logging.error("Problem renaming file: %s -> %s" % (tmp_nwb_file, output_nwb_file))
        raise e
    logging.debug("Embedded spike times into output.nwb file")
Example #20
def main():
    """Main sequence of pre-processing and passive fitting"""

    # This argschema package reads arguments from a JSON file
    module = ags.ArgSchemaParser(schema_type=PreprocessorParameters,
                                 logger_name=None)

    nwb_path = module.args["paths"][
        "nwb"]  # nwb - neurodata without borders (ephys data)
    swc_path = module.args["paths"]["swc"]  # swc - morphology data
    storage_directory = module.args["paths"]["storage_directory"]

    try:
        paths, results, passive_info, s1_tasks, s2_tasks = \
            preprocess(data_set=NwbDataSet(nwb_path),
                       swc_data=pd.read_table(swc_path, sep=r'\s+', comment='#', header=None),
                       dendrite_type_tag=module.args["dendrite_type_tag"],
                       sweeps=module.args["sweeps"],
                       bridge_avg=module.args["bridge_avg"],
                       storage_directory=storage_directory)
    except NoUsableSweepsException as e:
        ju.write(module.args["output_json"], {'error': str(e)})
        return
        return

    preprocess_results_path = os.path.join(storage_directory,
                                           "preprocess_results.json")
    ju.write(preprocess_results_path, results)

    passive_info_path = os.path.join(storage_directory, "passive_info.json")
    ju.write(passive_info_path, passive_info)

    paths.update({
        "swc": swc_path,
        "nwb": nwb_path,
        "storage_directory": storage_directory,
        "preprocess_results": preprocess_results_path,
        "passive_info": passive_info_path,
    })

    output = {
        "paths": paths,
        "stage_1_task_list": s1_tasks,
        "stage_2_task_list": s2_tasks,
    }

    ju.write(module.args["output_json"], output)
Example #21
def get_model_spike_times_from_nwb(ends_with, specimen_id_directory,
                                   model_string, sweeps, where_running):
    '''Get the spike times from the model nwb file.
    inputs
        ends_with: string
            end of the file name being searched for: options are "_GLIF1_neuron_config.json", "_GLIF2_neuron_config.json", etc.
        specimen_id_directory: string
            path to the structured data directory containing the neuron_config, preprocessor, etc. files.
        model_string: string
            string searched for in the model name: options are '(LIF)', '(LIF-R)', '(LIF-ASC)', '(LIF-R_ASC)', '(LIF-R_ASC_A)'
        sweeps: list of integers
            integers refer to sweep numbers in the electrophysiology .nwb data file
        where_running: string
            'internal': the code is being run within the Institute and can therefore access the internal file system
            'external': the code is being run outside the Institute and requires the API to download the model nwb files
        Note that although ends_with and model_string should be appropriately paired, there is no check
        within this module to make sure that they are.
    outputs: returns either
        nan, if there is no model in the structured data directory corresponding to the requested ends_with variable,
        or
        model_spike_times: list of numpy arrays
            each array contains the times of the spikes in one sweep
    '''
    if where_running == 'internal':
        path = get_model_nwb_path_from_folder(ends_with, specimen_id_directory,
                                              model_string)  #get nwb file path
    elif where_running == 'external':
        path = download_model_nwb_if_model_exists_in_SDD(
            ends_with, specimen_id_directory, model_string)  #get nwb file path
    else:
        raise Exception(
            'specify whether the code is being run internally or externally')
    if isinstance(path, str):
        model = NwbDataSet(path)
        model_spike_times = []
        if sweeps == []:
            raise Exception('There are no sweeps to look at')
        for sw in sweeps:
            model_spike_times.append(model.get_spike_times(sw))
        return model_spike_times
    else:
        return np.nan
Example #22
def get_sweep_data(nwb_file, sweep_number, time_scale=1e3, voltage_scale=1e3, stim_scale=1e12):
    """
    Extract data and stim characteristics for a specific DC sweep from nwb file
    Parameters
    ----------
    nwb_file : string
        File name of a pre-existing NWB file.
    sweep_number : integer
        The sweep to extract.
    time_scale : float
        Convert to ms scale
    voltage_scale : float
        Convert to mV scale
    stim_scale : float
        Convert to pA scale

    Returns
    -------
    t : numpy array
        Sampled time points in ms
    v : numpy array
        Recorded voltage at the sampled time points in mV
    stim_start_time : float
        Stimulus start time in ms
    stim_end_time : float
        Stimulus end time in ms
    """
    nwb = NwbDataSet(nwb_file)
    sweep = nwb.get_sweep(sweep_number)
    stim = sweep['stimulus'] * stim_scale  # in pA
    stim_diff = np.diff(stim)
    stim_start = np.where(stim_diff != 0)[0][-2]
    stim_end = np.where(stim_diff != 0)[0][-1]
    
    # read v and t as numpy arrays
    v = sweep['response'] * voltage_scale  # in mV
    dt = time_scale / sweep['sampling_rate']  # in ms
    num_samples = len(v)
    t = np.arange(num_samples) * dt
    stim_start_time = t[stim_start]
    stim_end_time = t[stim_end]
    return t, v, stim_start_time, stim_end_time
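A usage sketch (hypothetical file name and sweep number; matplotlib assumed):

import matplotlib.pyplot as plt

t, v, stim_start_time, stim_end_time = get_sweep_data('ephys.nwb', 45)
plt.plot(t, v)
plt.axvspan(stim_start_time, stim_end_time, alpha=0.1)  # shade the stimulus window
plt.xlabel('time (ms)')
plt.ylabel('voltage (mV)')
plt.show()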
Example #23
def prepare_nwb_output(nwb_stimulus_path, nwb_result_path):
    '''Copy the stimulus file, zero out the recorded voltages and spike times.

    Parameters
    ----------
    nwb_stimulus_path : string
        NWB file name
    nwb_result_path : string
        NWB file name
    '''

    output_dir = os.path.dirname(nwb_result_path)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    copy(nwb_stimulus_path, nwb_result_path)
    data_set = NwbDataSet(nwb_result_path)
    data_set.fill_sweep_responses(0.0, extend_experiment=True)
    for sweep in data_set.get_sweep_numbers():
        data_set.set_spike_times(sweep, [])
Example #24
    def read_stimulus(self, stimulus_path, sweep=0):
        '''Load current values for a specific experiment sweep.
        
        Parameters
        ----------
        stimulus_path : string
            NWB file name
        sweep : integer, optional
            sweep index
        '''
        Utils._log.info("reading stimulus path: %s, sweep %s", stimulus_path,
                        sweep)

        stimulus_data = NwbDataSet(stimulus_path)
        sweep_data = stimulus_data.get_sweep(sweep)

        # convert to nA for NEURON
        self.stim_curr = sweep_data['stimulus'] * 1.0e9

        # convert the sampling rate in Hz to a time step in ms
        self.sampling_rate = 1.0e3 / sweep_data['sampling_rate']
Example #25
def output():
    neuron_config = json_utilities.read('neuron_config.json')
    ephys_sweeps = json_utilities.read('ephys_sweeps.json')
    ephys_file_name = 'stimulus.nwb'

    # pull out the stimulus for the first sweep
    ephys_sweep = ephys_sweeps[0]
    ds = NwbDataSet(ephys_file_name)
    data = ds.get_sweep(ephys_sweep['sweep_number'])
    stimulus = data['stimulus']

    # initialize the neuron
    # important! update the neuron's dt for your stimulus
    neuron = GlifNeuron.from_dict(neuron_config)
    neuron.dt = 1.0 / data['sampling_rate']

    # simulate the neuron
    truncate = 56041
    output = neuron.run(stimulus[0:truncate])

    return output
Example #26
def ve_ap_dim(specimen_id, ve_path):
    data_set = NwbDataSet(ve_path)
    expt_set = expt_data_set(specimen_id)
    long_square_sweeps = lims_utils.get_sweeps_of_type("C1LSCOARSE",
                                                       specimen_id,
                                                       passed_only=True)
    fi_curve_data = dict([
        amp_and_spike_count(data_set, sweep, expt_set)
        for sweep in long_square_sweeps
    ])
    sweeps_by_amp = {
        amp_and_spike_count(data_set, sweep, expt_set)[0]: sweep
        for sweep in long_square_sweeps
    }
    fi_arr = np.array([(amp, fi_curve_data[amp])
                       for amp in sorted(fi_curve_data.keys())])

    spiking_sweeps = np.flatnonzero(fi_arr[:, 1])
    if len(spiking_sweeps) == 0:
        return np.nan, np.nan
    rheo_sweep = sweeps_by_amp[fi_arr[spiking_sweeps[0], 0]]
    #     print specimen_id, rheo_sweep

    v, i, t = lims_utils.get_sweep_v_i_t_from_set(data_set, rheo_sweep)
    swp_ext = EphysSweepFeatureExtractor(t,
                                         v,
                                         start=1.02,
                                         end=2.02,
                                         filter=None)
    swp_ext.process_spikes()
    if len(swp_ext.spike_feature("width")) == 0:
        print "NO SPIKES FOR {:d} ON SWEEP {:d}".format(
            specimen_id, sweeps_by_amp[fi_arr[spiking_sweeps[0], 0]])
        print fi_arr
        print sweeps_by_amp
        return np.nan, np.nan
    return_vals = (swp_ext.spike_feature("width")[0] * 1e3,
                   swp_ext.spike_feature("peak_v")[0] -
                   swp_ext.spike_feature("trough_v")[0])
    return return_vals
Example #27
    def save_cell_data_web(self, acceptable_stimtypes, non_standard_nwb=False,
                           ephys_dir='preprocessed', **kwargs):

        bpopt_stimtype_map = utility.bpopt_stimtype_map
        distinct_id_map = utility.aibs_stimname_map
        nwb_file = NwbDataSet(self.nwb_path)

        stim_map = defaultdict(list)
        stim_sweep_map = {}
        output_dir = os.path.join(os.getcwd(), ephys_dir)
        utility.create_dirpath(output_dir)

        sweep_numbers = kwargs.get('sweep_numbers') or nwb_file.get_sweep_numbers()
        for sweep_number in sweep_numbers:
            sweep_data = nwb_file.get_sweep_metadata(sweep_number)
            stim_type = sweep_data['aibs_stimulus_name']

            try:
                stim_type = stim_type.decode('UTF-8')
            except (UnicodeDecodeError, AttributeError):
                pass  # already a str

            if stim_type in acceptable_stimtypes:
                sweep = nwb_file.get_sweep(sweep_number)

                start_idx, stop_idx = sweep['index_range']

                stimulus_trace = sweep['stimulus'][start_idx:stop_idx]
                response_trace = sweep['response'][start_idx:stop_idx]

                sampling_rate = sweep['sampling_rate']

                time = np.arange(0, len(stimulus_trace)) / sampling_rate
                trace_name = '%s_%d' % (
                    distinct_id_map[stim_type], sweep_number)

                if non_standard_nwb:
                    calc_stimparams_func = self.calc_stimparams_nonstandard
                else:
                    calc_stimparams_func = self.calc_stimparams

                stim_start, stim_stop, stim_amp_start, stim_amp_end, \
                    tot_duration, hold_curr = calc_stimparams_func(
                        time, stimulus_trace, trace_name)

                response_trace_short_filename = '%s.%s' % (trace_name, 'txt')
                response_trace_filename = os.path.join(
                    output_dir, response_trace_short_filename)

                time *= 1e3  # in ms
                response_trace *= 1e3  # in mV
                response_trace = utility.correct_junction_potential(response_trace,
                                                                    self.junction_potential)
                stimulus_trace *= 1e9

                # downsampling
                time, stimulus_trace, response_trace = utility.downsample_ephys_data(
                    time, stimulus_trace, response_trace)

                if stim_type in utility.bpopt_current_play_stimtypes:
                    with open(response_trace_filename, 'wb') as response_trace_file:
                        np.savetxt(response_trace_file,
                                   np.transpose([time, response_trace, stimulus_trace]))

                else:
                    with open(response_trace_filename, 'wb') as response_trace_file:
                        np.savetxt(response_trace_file,
                                   np.transpose([time, response_trace]))

                holding_current = hold_curr  # sweep['bias_current']

                stim_map[distinct_id_map[stim_type]].append([
                    trace_name,
                    bpopt_stimtype_map[stim_type],
                    holding_current/1e12,
                    stim_amp_start / 1e12,
                    stim_amp_end/1e12,
                    stim_start * 1e3,
                    stim_stop * 1e3,
                    tot_duration * 1e3,
                    response_trace_short_filename])

                stim_sweep_map[trace_name] = sweep_number

        logger.debug('Writing stimmap.csv ...')
        stim_reps_sweep_map, stimmap_filename = self.write_stimmap_csv(stim_map,
                                                                       output_dir, stim_sweep_map)

        self.write_provenance(
            output_dir,
            self.nwb_path,
            stim_sweep_map,
            stim_reps_sweep_map)

        return output_dir, stimmap_filename
Example #28
def prepare_stage_1(description, passive_fit_data):
    output_directory = description.manifest.get_path('WORKDIR')
    neuronal_model_data = ju.read(description.manifest.get_path('neuronal_model_data'))
    specimen_data = neuronal_model_data['specimen']
    specimen_id = neuronal_model_data['specimen_id']
    is_spiny = not any(t['name'] == u'dendrite type - aspiny' for t in specimen_data['specimen_tags'])
    all_sweeps = specimen_data['ephys_sweeps']
    data_set = NwbDataSet(description.manifest.get_path('stimulus_path'))
    swc_path = description.manifest.get_path('MORPHOLOGY')
    
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    ra = passive_fit_data['ra']
    cm1 = passive_fit_data['cm1']
    cm2 = passive_fit_data['cm2']

    # Check for fi curve shift to decide to use core1 or core2
    fi_shift, n_core2 = check_fi_shift.estimate_fi_shift(data_set, all_sweeps)
    fi_shift_threshold = 30.0
    sweeps_to_fit = []
    if abs(fi_shift) > fi_shift_threshold:
        _fit_stage_1_log.info("FI curve shifted; using Core 1")
        sweeps_to_fit = find_core1_trace(data_set, all_sweeps)
    else:
        sweeps_to_fit = find_core2_trace(data_set, all_sweeps)

        if sweeps_to_fit == []:
            _fit_stage_1_log.info("Not enough good Core 2 traces; using Core 1")
            sweeps_to_fit = find_core1_trace(data_set, all_sweeps)

    _fit_stage_1_log.debug("will use sweeps: " + str(sweeps_to_fit))

    jxn = -14.0

    t_set = []
    v_set = []
    i_set = []
    for s in sweeps_to_fit:
        v, i, t = ephys_utils.get_sweep_v_i_t_from_set(data_set, s)
        v += jxn
        stim_start, stim_dur, stim_amp, start_idx, end_idx = ephys_utils.get_step_stim_characteristics(i, t)
        t_set.append(t)
        v_set.append(v)
        i_set.append(i)
    ext = EphysSweepSetFeatureExtractor(t_set, v_set, i_set, start=stim_start, end=(stim_start + stim_dur))
    ext.process_spikes()

    ft = {}
    blacklist = ["isi_type"]
    for k in ext.sweeps()[0].spike_feature_keys():
        if k in blacklist:
            continue
        pair = {}
        pair["mean"] = float(ext.spike_feature_averages(k).mean())
        pair["stdev"] = float(ext.spike_feature_averages(k).std())
        ft[k] = pair

    # "Delta" features
    sweep_avg_slow_trough_delta_time = []
    sweep_avg_slow_trough_delta_v = []
    sweep_avg_peak_trough_delta_time = []
    for swp in ext.sweeps():
        threshold_t = swp.spike_feature("threshold_t")
        fast_trough_t = swp.spike_feature("fast_trough_t")
        slow_trough_t = swp.spike_feature("slow_trough_t")

        delta_t = slow_trough_t - fast_trough_t
        delta_t[np.isnan(delta_t)] = 0.
        sweep_avg_slow_trough_delta_time.append(np.mean(delta_t[:-1] / np.diff(threshold_t)))

        fast_trough_v = swp.spike_feature("fast_trough_v")
        slow_trough_v = swp.spike_feature("slow_trough_v")
        delta_v = fast_trough_v - slow_trough_v
        delta_v[np.isnan(delta_v)] = 0.
        sweep_avg_slow_trough_delta_v.append(delta_v.mean())

    ft["slow_trough_delta_time"] = {"mean": float(np.mean(sweep_avg_slow_trough_delta_time)),
                                    "stdev": float(np.std(sweep_avg_slow_trough_delta_time))}
    ft["slow_trough_delta_v"] = {"mean": float(np.mean(sweep_avg_slow_trough_delta_v)),
                                 "stdev": float(np.std(sweep_avg_slow_trough_delta_v))}

    baseline_v = float(ext.sweep_features("v_baseline").mean())
    passive_fit_data["e_pas"] = baseline_v
    for k in ext.sweeps()[0].sweep_feature_keys():
        pair = {}
        pair["mean"] = float(ext.sweep_features(k).mean())
        pair["stdev"] = float(ext.sweep_features(k).std())
        ft[k] = pair

    # Determine highest step to check for depolarization block
    noise_1_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1NSSEED_1", all_sweeps)
    noise_2_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1NSSEED_2", all_sweeps)
    step_sweeps, _, _ = ephys_utils.get_sweeps_of_type("C1LSCOARSE", all_sweeps)
    all_sweeps = noise_1_sweeps + noise_2_sweeps + step_sweeps
    max_i = 0
    for s in all_sweeps:
        try:
            v, i, t = ephys_utils.get_sweep_v_i_t_from_set(data_set, s['sweep_number'])
        except Exception:
            continue
        if np.max(i) > max_i:
            max_i = np.max(i)
    max_i += 10 # add 10 pA
    max_i *= 1e-3 # convert to nA

    # ----------- Generate output and submit jobs ---------------

    # Set up directories
    # Decide which fit(s) we are doing
    if (is_spiny and ft["width"]["mean"] < 0.8) or (not is_spiny and ft["width"]["mean"] > 0.8):
        fit_types = ["f6", "f12"]
    elif is_spiny:
        fit_types = ["f6"]
    else:
        fit_types = ["f12"]

    for fit_type in fit_types:
        fit_type_dir = os.path.join(output_directory, fit_type)
        if not os.path.exists(fit_type_dir):
            os.makedirs(fit_type_dir)
        for seed in SEEDS:
            seed_dir = "{:s}/s{:d}".format(fit_type_dir, seed)
            if not os.path.exists(seed_dir):
                os.makedirs(seed_dir)

    # Collect and save data for target.json file
    target_dict = {}
    target_dict["passive"] = [{
        "ra": ra,
        "cm": { "soma": cm1, "axon": cm1, "dend": cm2 },
        "e_pas": baseline_v
    }]

    swc_data = pd.read_table(swc_path, sep=r'\s+', comment='#', header=None)
    has_apic = False
    if APICAL_DENDRITE_TYPE in pd.unique(swc_data[1]):
        has_apic = True
        _fit_stage_1_log.info("Has apical dendrite")
    else:
        _fit_stage_1_log.info("Does not have apical dendrite")

    if has_apic:
        target_dict["passive"][0]["cm"]["apic"] = cm2

    target_dict["fitting"] = [{
        "junction_potential": jxn,
        "sweeps": sweeps_to_fit,
        "passive_fit_info": passive_fit_data,
        "max_stim_test_na": max_i,        
    }]

    target_dict["stimulus"] = [{
        "amplitude": 1e-3 * stim_amp,
        "delay": 1000.0,
        "duration": 1e3 * stim_dur
    }]

    target_dict["manifest"] = []
    target_dict["manifest"].append({"type": "file", "spec": swc_path, "key": "MORPHOLOGY"})

    target_dict["target_features"] = collect_target_features(ft)

    target_file = os.path.join(output_directory, 'target.json')
    ju.write(target_file, target_dict)

    # Create config.json for each fit type
    config_base_data = ju.read(os.path.join(FIT_BASE_DIR,
                                            'config_base.json'))


    jobs = []
    for fit_type in fit_types:
        config = config_base_data.copy()
        fit_type_dir = os.path.join(output_directory, fit_type)
        config_path = os.path.join(fit_type_dir, "config.json")

        config["biophys"][0]["model_file"] = [ target_file, config_path]
        if has_apic:
            fit_style_file = os.path.join(FIT_BASE_DIR, 'fit_styles', '%s_fit_style.json' % (fit_type))
        else:
            fit_style_file = os.path.join(FIT_BASE_DIR, "fit_styles", "%s_noapic_fit_style.json" % (fit_type))

        config["biophys"][0]["model_file"].append(fit_style_file)
        config["manifest"].append({"type": "dir", "spec": fit_type_dir, "key": "FITDIR"})
        ju.write(config_path, config)

        for seed in SEEDS:
            logfile = os.path.join(output_directory, fit_type, 's%d' % seed, 'stage_1.log')
            jobs.append({
                    'config_path': os.path.abspath(config_path),
                    'fit_type': fit_type,
                    'log': os.path.abspath(logfile),
                    'seed': seed,
                    'num_processes': DEFAULT_NUM_PROCESSES
                    })
    return jobs
Example #29
def select_model(fit_results,
                 path_info,
                 passive,
                 v_init,
                 noise_1_sweeps,
                 noise_2_sweeps,
                 max_attempts=20):
    """Choose model with best error that does not exhibit depolarization block
    on noise sweeps
    """

    errs = np.array([d["err"] for d in fit_results])
    sorted_order = np.argsort(errs)
    if len(noise_1_sweeps) == 0 and len(noise_2_sweeps) == 0:
        print "No noise stimulus available to test - selecting the model with lowest error"
        return fit_results[sorted_order[0]]

    nwb_path = path_info["nwb"]
    swc_path = path_info["swc"]

    fit_style_data = {}
    for fit_type in path_info["fit_styles"]:
        with open(path_info["fit_styles"][fit_type], "r") as f:
            fit_style_data[fit_type] = json.load(f)

    data_set = NwbDataSet(nwb_path)
    noise_stim = []
    max_t = 0
    dt = 0
    if len(noise_1_sweeps) > 0:
        v, i, t = sf.get_sweep_v_i_t_from_set(data_set, noise_1_sweeps[-1])
        i *= 1e-3  # to nA
        noise_stim.append(i)
        if np.max(t) > max_t:
            max_t = np.max(t)
        dt = t[1] - t[0]

    if len(noise_2_sweeps) > 0:
        v, i, t = sf.get_sweep_v_i_t_from_set(data_set, noise_2_sweeps[-1])
        i *= 1e-3  # to nA
        noise_stim.append(i)
        if np.max(t) > max_t:
            max_t = np.max(t)
        dt = t[1] - t[0]
    max_t *= 1e3  # to ms
    dt *= 1e3  # to ms
    print "Max t = ", max_t

    # Set up
    if max_attempts > len(sorted_order):
        max_attempts = len(sorted_order)

    for ind in sorted_order[:max_attempts]:
        print "Testing model ", ind

        fit = fit_results[ind]
        depol_okay = True

        utils = Utils(path_info["hoc_files"],
                      path_info["compiled_mod_library"])
        h = utils.h
        utils.generate_morphology(swc_path)
        utils.load_cell_parameters(
            passive, fit_style_data[fit["fit_type"]]["conditions"],
            fit_style_data[fit["fit_type"]]["channels"],
            fit_style_data[fit["fit_type"]]["addl_params"])
        utils.insert_iclamp()
        utils.set_iclamp_params(0, 0, 1e12)

        h.tstop = max_t
        h.celsius = fit_style_data[fit["fit_type"]]["conditions"]["celsius"]
        h.v_init = v_init
        h.dt = dt
        h.cvode.atolscale("cai", 1e-4)
        h.cvode.maxstep(10)
        v_vec, i_vec, t_vec = utils.record_values()

        for i in noise_stim:
            i_stim_vec = h.Vector(i)
            i_stim_vec.play(utils.stim._ref_amp, dt)
            utils.set_actual_parameters(fit["params"])
            print "Starting run"
            h.finitialize()
            h.run()
            print "Finished run"
            i_stim_vec.play_remove()
            if has_noise_block(v_vec.as_numpy(), t_vec.as_numpy()):
                depol_okay = False

        if depol_okay:
            print "Did not detect depolarization block on noise traces"
            return fit

    print "Failed to find model after looking at best {:d} organisms".format(
        max_attempts)
    return None
Example #30
from allensdk.core.nwb_data_set import NwbDataSet
import matplotlib.pyplot as plt
import numpy as np

for dataset_id in dataset_ids:

    raw_ephys_file_name = '%d_raw_data.nwb' % dataset_id

    if not os.path.isfile(raw_ephys_file_name):
        print('Downloading data: %s' % raw_ephys_file_name)
        ct.save_ephys_data(dataset_id, raw_ephys_file_name)

        print('Saved: %s' % raw_ephys_file_name)
    else:
        print('File: %s already present...' % raw_ephys_file_name)

    print('Loading data from: %s' % raw_ephys_file_name)

    data_set = NwbDataSet(raw_ephys_file_name)

    fig = plt.figure()

    sweep_numbers = sweep_numbers_for_data[dataset_id]

    subset = {}

    for sweep_number in sweep_numbers:
        sweep_data = data_set.get_sweep(sweep_number)

        # start/stop indices that exclude the experimental test pulse (if applicable)
        index_range = sweep_data['index_range']