Example #1
def preprocess_short_square_sweeps(data_set,
                                   sweep_numbers,
                                   extra_dur=0.2,
                                   spike_window=0.05):
    if len(sweep_numbers) == 0:
        raise er.FeatureError(
            "No short square sweeps available for feature extraction")

    good_sweep_numbers, ssq_start, ssq_end = validate_sweeps(
        data_set, sweep_numbers, extra_dur=extra_dur)
    if len(good_sweep_numbers) == 0:
        raise er.FeatureError(
            "No short square sweeps were long enough or did not end early")
    ssq_sweeps = data_set.sweep_set(good_sweep_numbers)

    ssq_spx, ssq_spfx = dsf.extractors_for_sweeps(
        ssq_sweeps,
        est_window=[ssq_start, ssq_start + 0.001],
        start=ssq_start,
        end=ssq_end + spike_window,
        reject_at_stim_start_interval=0.0002,
        **dsf.detection_parameters(data_set.SHORT_SQUARE))
    ssq_an = spa.ShortSquareAnalysis(ssq_spx, ssq_spfx)
    ssq_features = ssq_an.analyze(ssq_sweeps)

    return ssq_sweeps, ssq_features
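A minimal usage sketch for the function above. It is hypothetical: `data_set` is assumed to have been built elsewhere (e.g. as in Example #3), `ssq_numbers` is a placeholder for the QC-passed short square sweep numbers, and `er` is the error module imported by the surrounding file.

import logging

# Hypothetical usage; data_set and ssq_numbers are placeholders supplied by
# the surrounding pipeline, and er comes from the file's imports.
try:
    ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
        data_set, ssq_numbers, extra_dur=0.2, spike_window=0.05)
except er.FeatureError as e:
    logging.warning("Short square preprocessing failed: %s", e)
else:
    logging.info("Extracted features from %d sweeps", len(ssq_sweeps.sweeps))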
Example #2
def preprocess_long_square_sweeps(data_set,
                                  sweep_numbers,
                                  extra_dur=0.2,
                                  subthresh_min_amp=-100.):
    if len(sweep_numbers) == 0:
        raise er.FeatureError(
            "No long square sweeps available for feature extraction")

    good_sweep_numbers, lsq_start, lsq_end = validate_sweeps(
        data_set, sweep_numbers, extra_dur=extra_dur)
    if len(good_sweep_numbers) == 0:
        raise er.FeatureError(
            "No long square sweeps were long enough or did not end early")
    lsq_sweeps = data_set.sweep_set(good_sweep_numbers)

    lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(
        lsq_sweeps,
        start=lsq_start,
        end=lsq_end,
        min_peak=-25,
        **dsf.detection_parameters(data_set.LONG_SQUARE))
    lsq_an = spa.LongSquareAnalysis(lsq_spx,
                                    lsq_spfx,
                                    subthresh_min_amp=subthresh_min_amp)
    lsq_features = lsq_an.analyze(lsq_sweeps)

    return lsq_sweeps, lsq_features, lsq_start, lsq_end, lsq_spx
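The same pattern applies here; note the five return values, which include the stimulus window and the spike extractor, presumably for reuse downstream. A hedged sketch, with `data_set` and `lsq_numbers` as placeholders:

# Hypothetical usage; data_set and lsq_numbers are placeholders.
(lsq_sweeps, lsq_features, lsq_start, lsq_end,
 lsq_spx) = preprocess_long_square_sweeps(
     data_set, lsq_numbers, subthresh_min_amp=-100.)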
Example #3
def run_feature_extraction(input_nwb_file, stimulus_ontology_file,
                           output_nwb_file, qc_fig_dir, sweep_info, cell_info):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            f"Ontology not provided; using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_data_set(sweep_info=sweep_info,
                               nwb_file=input_nwb_file,
                               ontology=ont,
                               api_sweeps=False)

    try:
        cell_features, sweep_features, cell_record, sweep_records = dsft.extract_data_set_features(
            data_set)

        if cell_info:
            cell_record.update(cell_info)

        cell_state = {"failed_fx": False, "fail_fx_message": None}

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state
        }

    except (er.FeatureError, IndexError) as e:
        cell_state = {"failed_fx": True, "fail_fx_message": str(e)}
        logging.warning(e)
        feature_data = {'cell_state': cell_state}

    if not cell_state["failed_fx"]:
        sweep_spike_times = collect_spike_times(sweep_features)
        embed_spike_times(input_nwb_file, output_nwb_file, sweep_spike_times)

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows, json.dump fails on the numpy int64 sweep-number keys,
        # so we convert them to str here:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data
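The str-key conversion at the end is needed because dicts keyed by numpy integers are not JSON-serializable. A self-contained illustration of the failure and the fix (assuming the sweep numbers arrive as numpy int64, as the comment in the function suggests):

import json
import numpy as np

sweep_features = {np.int64(4): {"avg_rate": 2.0}}
try:
    json.dumps(sweep_features)
except TypeError as e:
    print("serialization failed:", e)

# Converting the keys to str, as the function above does, fixes this:
print(json.dumps({str(k): v for k, v in sweep_features.items()}))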
Example #4
def preprocess_ramp_sweeps(data_set, sweep_numbers):
    if len(sweep_numbers) == 0:
        raise er.FeatureError("No ramp sweeps available for feature extraction")

    ramp_sweeps = data_set.sweep_set(sweep_numbers)
    ramp_sweeps.select_epoch("recording")

    ramp_start, ramp_dur, _, _, _ = stf.get_stim_characteristics(
        ramp_sweeps.sweeps[0].i, ramp_sweeps.sweeps[0].t)
    ramp_spx, ramp_spfx = dsf.extractors_for_sweeps(
        ramp_sweeps,
        start=ramp_start,
        **dsf.detection_parameters(data_set.RAMP))
    ramp_an = spa.RampAnalysis(ramp_spx, ramp_spfx)
    ramp_features = ramp_an.analyze(ramp_sweeps)

    return ramp_sweeps, ramp_features, ramp_an
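A hedged usage sketch; `data_set` and `ramp_numbers` are placeholders. Unlike the square-pulse helpers, this function also returns the RampAnalysis object itself.

# Hypothetical usage; data_set and ramp_numbers are placeholders.
ramp_sweeps, ramp_features, ramp_an = preprocess_ramp_sweeps(
    data_set, ramp_numbers)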
Example #5
def run_feature_extraction(input_nwb_file,
                           stimulus_ontology_file,
                           output_nwb_file,
                           qc_fig_dir,
                           sweep_info,
                           cell_info,
                           write_spikes=True):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            f"Ontology not provided; using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_ephys_data_set(sweep_info=sweep_info,
                                     nwb_file=input_nwb_file,
                                     ontology=ont)

    (cell_features, sweep_features, cell_record, sweep_records, cell_state,
     feature_states) = dsft.extract_data_set_features(data_set)

    if cell_state['failed_fx']:
        feature_data = {'cell_state': cell_state}
    else:
        if cell_info:
            cell_record.update(cell_info)

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state,
            'feature_states': feature_states
        }

        if write_spikes:
            if not feature_states['sweep_features_state']['failed_fx']:
                sweep_spike_times = collect_spike_times(sweep_features)
                append_spike_times(input_nwb_file,
                                   sweep_spike_times,
                                   output_nwb_path=output_nwb_file)
            else:
                logging.warning("extract_sweep_features failed, "
                                "unable to write spikes")

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows, json.dump fails on the numpy int64 sweep-number keys,
        # so we convert them to str here:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data
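A hedged driver sketch for the variant above. Every path and payload below is a placeholder; in the real pipeline, `sweep_info` comes from the upstream QC stage.

import logging

# Hypothetical driver; all values below are placeholders.
feature_data = run_feature_extraction(
    input_nwb_file="cell.nwb",
    stimulus_ontology_file=None,   # fall back to the default ontology
    output_nwb_file="cell_with_spikes.nwb",
    qc_fig_dir=None,               # skip QC figures
    sweep_info=sweep_info,         # QC-passed sweep records from upstream
    cell_info=None,
    write_spikes=True)

if feature_data["cell_state"]["failed_fx"]:
    logging.warning(feature_data["cell_state"]["fail_fx_message"])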