Example #1
def create_data_set(sweep_info=None, nwb_file=None, ontology=None, api_sweeps=True, h5_file=None, validate_stim=True):
    """Create an appropriate EphysDataSet derived class for the given nwb_file

    Parameters
    ----------
    nwb_file: str file name

    Returns
    -------
    EphysDataSet derived object
    """

    if nwb_file is None:
        raise ValueError("Can not decide which EphysDataSet class to create without nwb_file")

    nwb_version = get_nwb_version(nwb_file)

    if nwb_version["major"] == 2:
        return HBGDataSet(sweep_info=sweep_info,
                          nwb_file=nwb_file,
                          ontology=ontology,
                          api_sweeps=api_sweeps,
                          validate_stim=validate_stim)

    elif nwb_version["major"] == 1 or nwb_version["major"] == 0:
        return AibsDataSet(sweep_info=sweep_info,
                           nwb_file=nwb_file,
                           ontology=ontology,
                           api_sweeps=api_sweeps,
                           h5_file=h5_file,
                           validate_stim=validate_stim)
    else:
        raise ValueError("Unsupported or unknown NWB major" +
                         "version {} ({})".format(nwb_version["major"], nwb_version["full"]))
Example #2
def test_main_abf(ontology, NWB_file):

    dataset = HBGDataSet(nwb_file=NWB_file, ontology=ontology)

    expected = {
        'stimulus_units': {
            0: 'A'
        },
        'clamp_mode': {
            0: 'CurrentClamp'
        },
        'sweep_number': {
            0: 0
        },
        'leak_pa': {
            0: None
        },
        'stimulus_code_ext': {
            0: None
        },
        'stimulus_scale_factor': {
            0: 1.0
        },
        'stimulus_code': {
            0: u'RAMP1'
        },
        'stimulus_name': {
            0: u'ramp stimulus'
        },
        'bridge_balance_mohm': {
            0: None
        }
    }

    compare_dicts(expected, dataset.sweep_table.to_dict())
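Note that dataset.sweep_table is a pandas DataFrame, so to_dict() produces a mapping of column name to {row index: value}; that is why every entry in expected is keyed by row 0, the single sweep in this ABF-derived file.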
Example #3
def test_main_dat(ontology, NWB_file):

    dataset = HBGDataSet(nwb_file=NWB_file, ontology=ontology)

    expected = {'stimulus_units': 'Volts',
                'clamp_mode': 'VoltageClamp',
                'sweep_number': 10101,
                'leak_pa': None,
                'stimulus_code_ext': u'extpinbath',
                'stimulus_scale_factor': 5000000.0,
                'stimulus_code': u'extpinbath',
                'stimulus_name': u'extpinbath stimulus',
                'bridge_balance_mohm': None
                }

    # only compare one sweep
    sweep_record = dataset.get_sweep_record(10101)
    compare_dicts(expected, sweep_record)
Example #4
def data_for_specimen_id(specimen_id,
                         sweep_qc_option,
                         data_source,
                         ap_window_length=0.006,
                         target_sampling_rate=10000,
                         nfiles=None):
    logging.debug("specimen_id: {}".format(specimen_id))

    lsq_fail = False
    ssq_fail = False
    ramp_fail = False
    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    if data_source == "local":

        nwb_path = nfiles[specimen_id]
        if isinstance(nwb_path, dict) and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d}".format(
                    specimen_id))
            return nwb_path

        data_set = HBGDataSet(nwb_file=nwb_path, ontology=ontology)

    else:
        logging.error("invalid data source specified ({})".format(data_source))
        # Bail out here; otherwise data_set would be unbound below
        return {
            "error": {
                "type": "dataset",
                "details": "invalid data source specified ({})".format(data_source)
            }
        }

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, lsq_start, lsq_end,
         lsq_spx) = preprocess_long_square_sweeps(data_set, lsq_sweep_numbers)
    except Exception as detail:
        lsq_fail = True
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        ssq_fail = True
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        # Do not return here: ssq_fail triggers a fallback to the
        # long-square AP vectors further down

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features = preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        ramp_fail = True
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        # Do not return here: ramp_fail triggers a fallback to the
        # long-square AP vectors further down

    # Calculate desired feature vectors
    result = {}
    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict, lsq_start, lsq_end)
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        if not ssq_fail:
            # Calculate waveforms from each type of sweep
            spiking_ssq_sweep_list = [
                ssq_sweeps.sweeps[swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            spiking_ssq_info_list = [
                ssq_features["spikes_set"][swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
                spiking_ssq_sweep_list,
                spiking_ssq_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # Placeholder; replaced by the long-square fallback below
            ssq_ap_v, ssq_ap_dv = np.nan, np.nan

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        if not ramp_fail:

            spiking_ramp_sweep_list = [
                ramp_sweeps.sweeps[swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            spiking_ramp_info_list = [
                ramp_features["spikes_set"][swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
                spiking_ramp_sweep_list,
                spiking_ramp_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # No usable ramp sweeps; fall back to the long-square AP vectors
            ramp_ap_v = np.copy(lsq_ap_v)
            ramp_ap_dv = np.copy(lsq_ap_dv)

        if ssq_fail:
            # Likewise substitute the long-square AP vectors
            ssq_ap_v = np.copy(lsq_ap_v)
            ssq_ap_dv = np.copy(lsq_ap_dv)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
            if feature == 'width':
                result["spiking_width"] = result["spiking_width"] / 2
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result
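A minimal sketch of driving this function with the "local" data source; the specimen id, NWB path, and QC option below are hypothetical:

# Hypothetical id/path pair; nfiles maps specimen ids to NWB files
nfiles = {12345: "/path/to/specimen_12345.nwb"}
result = data_for_specimen_id(12345,
                              sweep_qc_option="none",
                              data_source="local",
                              nfiles=nfiles)
if "error" in result:
    print(result["error"]["type"])   # e.g. "sweep_table" or "processing"
else:
    print(sorted(result.keys()))     # feature vector names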
Example #5
def test_get_clamp_mode(ontology, NWB_file):

    dataset = HBGDataSet(nwb_file=NWB_file, ontology=ontology)
    assert dataset.get_clamp_mode(10101) == dataset.VOLTAGE_CLAMP
Example #6
def test_get_stimulus_code(ontology, NWB_file):

    dataset = HBGDataSet(nwb_file=NWB_file, ontology=ontology)
    assert dataset.get_stimulus_code(10101) == u'extpinbath'
Example #7
def test_get_stimulus_units(ontology, NWB_file):

    dataset = HBGDataSet(nwb_file=NWB_file, ontology=ontology)
    assert dataset.get_stimulus_units(10101) == "V"
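The tests in Examples 2, 3, and 5 through 7 all assume pytest fixtures named ontology and NWB_file. A minimal sketch of what those fixtures could look like, reusing the ontology construction from Example 4 (the NWB path is hypothetical):

import pytest

@pytest.fixture
def ontology():
    # Same default stimulus ontology used in data_for_specimen_id above
    return StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))

@pytest.fixture
def NWB_file():
    # Hypothetical path; the real fixture points at a checked-in test file
    return "/path/to/test_recording.nwb"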