Code example #1
def run_feature_extraction(input_nwb_file, stimulus_ontology_file,
                           output_nwb_file, qc_fig_dir, sweep_info, cell_info):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_data_set(sweep_info=sweep_info,
                               nwb_file=input_nwb_file,
                               ontology=ont,
                               api_sweeps=False)

    try:
        cell_features, sweep_features, cell_record, sweep_records = dsft.extract_data_set_features(
            data_set)

        if cell_info:
            cell_record.update(cell_info)

        cell_state = {"failed_fx": False, "fail_fx_message": None}

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state
        }

    except (er.FeatureError, IndexError) as e:
        cell_state = {"failed_fx": True, "fail_fx_message": str(e)}
        logging.warning(e)
        feature_data = {'cell_state': cell_state}

    if not cell_state["failed_fx"]:
        sweep_spike_times = collect_spike_times(sweep_features)
        embed_spike_times(input_nwb_file, output_nwb_file, sweep_spike_times)

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows, the int64 sweep-number keys cannot be converted to str
        # by json.dump during serialization, so convert them to str here:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data
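
The sweep-number key conversion at the end of this function exists because json.dump refuses dictionary keys that are NumPy integer types (np.int64 is not a subclass of Python int). Below is a minimal standalone sketch of the failure and of the same str(k) workaround, using made-up sweep data:

import json

import numpy as np

# Made-up sweep features keyed by NumPy int64 sweep numbers, as produced
# when sweep numbers come out of a NumPy/pandas pipeline.
sweep_features = {np.int64(5): {"peak_deflection": -65.2}}

try:
    json.dumps(sweep_features)
except TypeError as err:
    # np.int64 is not a Python int, so json cannot use it as a key.
    print(f"Serialization failed: {err}")

# The same workaround as above: stringify the keys before serializing.
serializable = {str(k): v for k, v in sweep_features.items()}
print(json.dumps(serializable))  # {"5": {"peak_deflection": -65.2}}
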
Code example #2
File: run_qc.py  Project: smestern/ipfx
def run_qc(stimulus_ontology_file, cell_features, sweep_features, qc_criteria):
    """

    Parameters
    ----------
    stimulus_ontology_file : str
        ontology file name
    cell_features: dict
        cell features
    sweep_features : list of dicts
        sweep features
    qc_criteria: dict
        qc criteria

    Returns
    -------
    dict
        containing state of the cell and sweeps
    """

    lu.log_pretty_header("Perform QC checks", level=1)

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    cell_state, sweep_states = qcp.qc_experiment(ont, cell_features,
                                                 sweep_features, qc_criteria)

    qc_summary(sweep_features, sweep_states, cell_features, cell_state)

    return dict(cell_state=cell_state, sweep_states=sweep_states)
Code example #3
def run_sweep_extraction(input_nwb_file,
                         input_h5_file,
                         stimulus_ontology_file,
                         input_manual_values=None):
    """
    Parameters
    ----------
    input_nwb_file
    input_h5_file
    stimulus_ontology_file
    input_manual_values

    Returns
    -------

    """
    lu.log_pretty_header("Extract QC features", level=1)

    if input_manual_values is None:
        input_manual_values = {}

    manual_values = {}
    for mk in MANUAL_KEYS:
        if mk in input_manual_values:
            manual_values[mk] = input_manual_values[mk]

    if stimulus_ontology_file:
        mso.make_stimulus_ontology_from_lims(stimulus_ontology_file)
    else:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )

    ont = StimulusOntology(ju.read(stimulus_ontology_file))
    ds = create_data_set(nwb_file=input_nwb_file,
                         h5_file=input_h5_file,
                         ontology=ont)

    cell_features, cell_tags = qcfe.cell_qc_features(ds, manual_values)

    for tag in cell_tags:
        logging.warning(tag)

    sweep_features = qcfe.sweep_qc_features(ds)

    return dict(
        cell_features=cell_features,
        cell_tags=cell_tags,
        sweep_features=sweep_features,
    )
Code example #4
def run_sweep_extraction(input_nwb_file,
                         stimulus_ontology_file=None,
                         input_manual_values=None,
                         update_ontology=True,
                         **unused_args):
    """
    Parameters
    ----------
    input_nwb_file
    stimulus_ontology_file
    input_manual_values

    Returns
    -------
    """
    log_pretty_header("Extract QC features", level=1)

    if input_manual_values is None:
        input_manual_values = {}

    manual_values = {}
    for mk in MANUAL_KEYS:
        if mk in input_manual_values:
            manual_values[mk] = input_manual_values[mk]

    if stimulus_ontology_file and update_ontology:
        make_stimulus_ontology_from_lims(stimulus_ontology_file)
    if stimulus_ontology_file is None:
        stimulus_ontology_file = \
            StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(f"Ontology is not provided, using default "
                     f"{StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}")

    ont = StimulusOntology(json_utilities.read(stimulus_ontology_file))
    ds = create_ephys_data_set(nwb_file=input_nwb_file, ontology=ont)

    cell_features, cell_tags = cell_qc_features(ds, manual_values)

    for tag in cell_tags:
        logging.warning(tag)

    sweep_features = sweep_qc_features(ds)

    return {
        "cell_features": cell_features,
        "cell_tags": cell_tags,
        "sweep_features": sweep_features,
    }
Code example #5
def main():
    """
    Usage:
    python run_pipeline.py --input_json INPUT_JSON --output_json OUTPUT_JSON

    """

    module = ags.ArgSchemaParser(schema_type=PipelineParameters)

    output = run_pipeline(module.args["input_nwb_file"],
                          module.args.get("input_h5_file", None),
                          module.args["output_nwb_file"],
                          module.args.get("stimulus_ontology_file", None),
                          module.args.get("qc_fig_dir", None),
                          module.args.get("qc_criteria", None),
                          module.args.get("manual_sweep_states", None))

    ju.write(module.args["output_json"], output)

    lu.log_pretty_header("Analysis completed!", level=1)
Code example #6
File: run_pipeline.py  Project: tmchartrand/ipfx
def run_pipeline(input_nwb_file,
                 output_nwb_file,
                 stimulus_ontology_file,
                 qc_fig_dir,
                 qc_criteria,
                 manual_sweep_states,
                 write_spikes=True,
                 update_ontology=True):

    se_output = run_sweep_extraction(input_nwb_file,
                                     stimulus_ontology_file,
                                     update_ontology=update_ontology)

    sweep_props.drop_tagged_sweeps(se_output["sweep_features"])
    sweep_props.remove_sweep_feature("tags", se_output["sweep_features"])

    qc_output = run_qc(stimulus_ontology_file, se_output["cell_features"],
                       se_output["sweep_features"], qc_criteria)

    sweep_props.override_auto_sweep_states(manual_sweep_states,
                                           qc_output["sweep_states"])
    sweep_props.assign_sweep_states(qc_output["sweep_states"],
                                    se_output["sweep_features"])

    fx_output = run_feature_extraction(
        input_nwb_file,
        stimulus_ontology_file,
        output_nwb_file,
        qc_fig_dir,
        se_output['sweep_features'],
        se_output['cell_features'],
        write_spikes,
    )

    log_pretty_header("Analysis completed!", level=1)

    return {
        "sweep_extraction": se_output,
        "qc": qc_output,
        "feature_extraction": fx_output
    }
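
For completeness, a direct (non-CLI) call of the run_pipeline variant above could look like the sketch below. This is only an illustration: the paths are placeholders, the criteria file is assumed to be a JSON dict of QC thresholds, and an empty manual_sweep_states list is assumed to mean "no manual overrides".

import json

# Placeholder criteria file; its keys are defined by the QC criteria JSON
# shipped with ipfx and are not shown in this excerpt.
with open("qc_criteria.json") as f:
    qc_criteria = json.load(f)

results = run_pipeline(
    input_nwb_file="cell_000.nwb",
    output_nwb_file="cell_000_with_spikes.nwb",
    stimulus_ontology_file=None,   # fall back to the default ontology
    qc_fig_dir=None,               # skip QC figures
    qc_criteria=qc_criteria,
    manual_sweep_states=[],        # assumed: no manual overrides
)

print(results["qc"]["cell_state"])
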
Code example #7
File: run_qc.py  Project: smestern/ipfx
def qc_summary(sweep_features, sweep_states, cell_features, cell_state):
    """
    Output QC summary

    Parameters
    ----------
    sweep_features: list of dicts
    sweep_states: list of dicts
    cell_features: dict
    cell_state: dict

    Returns
    -------

    """
    lu.log_pretty_header("QC Summary:", level=2)

    logging.info("Cell State:")
    for k, v in cell_state.items():
        logging.info("%s:%s" % (k, v))

    logging.info("Sweep States:")

    sp.assign_sweep_states(sweep_states, sweep_features)
    sweep_table = pd.DataFrame(sweep_features)

    if sweep_features:
        for stimulus_name, sg_table in sweep_table.groupby("stimulus_name"):
            passed_sweep_numbers = sg_table[
                sg_table.passed == True].sweep_number.sort_values().values
            failed_sweep_numbers = sg_table[
                sg_table.passed == False].sweep_number.sort_values().values

            logging.info("{} sweeps passed: {}, failed {}".format(
                stimulus_name, passed_sweep_numbers, failed_sweep_numbers))
    else:
        logging.warning("No current clamp sweeps available for QC")
Code example #8
File: run_feature_extraction.py  Project: ww2470/ipfx
def run_feature_extraction(input_nwb_file,
                           stimulus_ontology_file,
                           output_nwb_file,
                           qc_fig_dir,
                           sweep_info,
                           cell_info,
                           write_spikes=True):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            F"Ontology is not provided, using default {StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_ephys_data_set(sweep_info=sweep_info,
                                     nwb_file=input_nwb_file,
                                     ontology=ont)

    (cell_features, sweep_features, cell_record, sweep_records, cell_state,
     feature_states) = dsft.extract_data_set_features(data_set)

    if cell_state['failed_fx']:
        feature_data = {'cell_state': cell_state}
    else:
        if cell_info:
            cell_record.update(cell_info)

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state,
            'feature_states': feature_states
        }

        if write_spikes:
            if not feature_states['sweep_features_state']['failed_fx']:
                sweep_spike_times = collect_spike_times(sweep_features)
                append_spike_times(input_nwb_file,
                                   sweep_spike_times,
                                   output_nwb_path=output_nwb_file)
            else:
                logging.warn("extract_sweep_features failed, "
                             "unable to write spikes")

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows, the int64 sweep-number keys cannot be converted to str
        # by json.dump during serialization, so convert them to str here:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data