Example #1
import sys

import matplotlib.pyplot as plt
import allensdk.core.json_utilities as ju

from ipfx.dataset.create import create_ephys_data_set
from ipfx.stimulus import StimulusOntology

# plot_data_set is defined elsewhere in the original script.


def main():
    """
    Plot sweeps of a given ephys NWB file.

    Usage:
    $ python plot_ephys_nwb_file.py NWB_FILE_NAME
    """

    nwb_file = sys.argv[1]
    print("plotting file: %s" % nwb_file)

    stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_ephys_data_set(nwb_file=nwb_file, ontology=ont)
    vclamp_sweep_table = data_set.filtered_sweep_table(
        clamp_mode=data_set.VOLTAGE_CLAMP)
    plot_data_set(data_set, vclamp_sweep_table, nwb_file)

    # Reuse the same data set for the current-clamp sweeps
    iclamp_sweep_table = data_set.filtered_sweep_table(
        clamp_mode=data_set.CURRENT_CLAMP)
    plot_data_set(data_set, iclamp_sweep_table, nwb_file)

    plt.show()
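
As listed, main() is defined but never called; the usual entry-point guard (nothing beyond the code above is assumed) makes the script runnable from the command line:

if __name__ == "__main__":
    main()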
Example #2
    def run_feature_extraction(self):
        self.status_message.emit("Computing features, please wait.")
        drop_failed_sweeps(self.sweep_info)
        data_set = create_ephys_data_set(sweep_info=self.sweep_info,
                                         nwb_file=self.input_nwb_file,
                                         ontology=self.ontology)
        try:
            cell_features, sweep_features, cell_record, sweep_records,\
                cell_state, feature_states = extract_data_set_features(data_set)

            self.feature_data = {'cell_features': cell_features,
                                 'sweep_features': sweep_features,
                                 'cell_record': cell_record,
                                 'sweep_records': sweep_records,
                                 'cell_state': cell_state,
                                 'feature_states': feature_states
                                }

            self.new_state()
            self.status_message.emit("Done computing features!")

        except (FeatureError, IndexError) as ferr:
            exception_message("Feature extraction error",
                              "Failed feature extraction",
                              ferr)
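
For context, status_message and the other emitters used here behave like Qt signals on a worker object. A minimal sketch of how such signals are typically declared, assuming PyQt5; the class name is hypothetical and not part of the original code:

from PyQt5.QtCore import QObject, pyqtSignal

class FxWorker(QObject):
    # Hypothetical host for the run_feature_extraction method above
    status_message = pyqtSignal(str)  # progress text shown in the UI

    def new_state(self):
        """Placeholder: notify listeners that feature_data changed."""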
Example #3
def run_sweep_extraction(input_nwb_file,
                         stimulus_ontology_file=None,
                         input_manual_values=None,
                         update_ontology=True,
                         **unused_args):
    """
    Parameters
    ----------
    input_nwb_file
    stimulus_ontology_file
    input_manual_values

    Returns
    -------
    """
    log_pretty_header("Extract QC features", level=1)

    if input_manual_values is None:
        input_manual_values = {}

    manual_values = {}
    for mk in MANUAL_KEYS:
        if mk in input_manual_values:
            manual_values[mk] = input_manual_values[mk]

    if stimulus_ontology_file and update_ontology:
        make_stimulus_ontology_from_lims(stimulus_ontology_file)
    if stimulus_ontology_file is None:
        stimulus_ontology_file = \
            StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(f"Ontology is not provided, using default "
                     f"{StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}")

    ont = StimulusOntology(json_utilities.read(stimulus_ontology_file))
    ds = create_ephys_data_set(nwb_file=input_nwb_file, ontology=ont)

    cell_features, cell_tags = cell_qc_features(ds, manual_values)

    for tag in cell_tags:
        logging.warning(tag)

    sweep_features = sweep_qc_features(ds)

    return {
        "cell_features": cell_features,
        "cell_tags": cell_tags,
        "sweep_features": sweep_features,
    }
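
A hypothetical invocation with a placeholder file path; the returned dict mirrors the keys assembled above:

qc = run_sweep_extraction("cell.nwb")
print(sorted(qc.keys()))  # ['cell_features', 'cell_tags', 'sweep_features']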
Example #4
    def run_extraction_and_auto_qc(
            self, nwb_path, stimulus_ontology, qc_criteria, commit=True):
        data_set = create_ephys_data_set(
            sweep_info=None,
            nwb_file=nwb_path,
            ontology=stimulus_ontology
        )

        cell_features, cell_tags, sweep_features = extract_qc_features(data_set)

        sweep_props.drop_tagged_sweeps(sweep_features)
        cell_state, cell_features, sweep_states, sweep_features = run_qc(
            stimulus_ontology, cell_features, sweep_features, qc_criteria
        )

        if commit:
            self.begin_commit_calculated.emit()

            self.stimulus_ontology = stimulus_ontology
            self.qc_criteria = qc_criteria
            self.nwb_path = nwb_path

            self.data_set = data_set
            self.cell_features = cell_features
            self.cell_tags = cell_tags
            self.cell_state = cell_state

            self.sweep_features = sweep_features
            self.sweep_states = sweep_states
            self.manual_qc_states = {sweep["sweep_number"]: "default" for sweep in self.sweep_features}

            self.end_commit_calculated.emit(
                self.sweep_features, self.sweep_states, self.manual_qc_states, self.data_set
            )

        self.data_changed.emit(self.nwb_path,
                               self.stimulus_ontology,
                               self.sweep_features,
                               self.cell_features)
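
manual_qc_states starts every sweep at "default" and is only changed by hand afterwards. A standalone sketch of that bookkeeping (the sweep numbers are placeholders):

sweep_features = [{"sweep_number": 12}, {"sweep_number": 40}]
manual_qc_states = {s["sweep_number"]: "default" for s in sweep_features}
manual_qc_states[12] = "failed"  # a reviewer rejects sweep 12 by hand
print(manual_qc_states)          # {12: 'failed', 40: 'default'}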
Example #5
import os
import matplotlib.pyplot as plt
from ipfx.dataset.create import create_ephys_data_set
from ipfx.feature_extractor import SpikeFeatureExtractor

# Download and access the experimental data from the DANDI archive per the
# instructions in the documentation.
# The example below uses an NWB file provided with the package.
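# For DANDI-hosted data, the download step typically looks like this
# (shell commands; the dandiset id below is only a placeholder):
#   pip install dandi
#   dandi download DANDI:000020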

nwb_file = os.path.join(
    os.path.dirname(os.getcwd()),
    "data",
    "nwb2_H17.03.008.11.03.05.nwb"
)

# Create a data set from the NWB file and choose a sweep
dataset = create_ephys_data_set(nwb_file=nwb_file)
sweep = dataset.sweep(sweep_number=39)

# Configure the extractor to detect spikes only in the middle of the step
ext = SpikeFeatureExtractor(start=1.25, end=1.75)
results = ext.process(t=sweep.t, v=sweep.v, i=sweep.i)

# Plot the results, showing two features of the detected spikes
plt.plot(sweep.t, sweep.v)
plt.plot(results["peak_t"], results["peak_v"], 'r.')
plt.plot(results["threshold_t"], results["threshold_v"], 'k.')

# Set the plot limits to highlight where the spikes are, and label the axes
plt.xlim(0.5, 2.5)
plt.xlabel("Time (s)")
plt.ylabel("Membrane potential (mV)")
plt.show()
Example #6
def run_feature_extraction(input_nwb_file,
                           stimulus_ontology_file,
                           output_nwb_file,
                           qc_fig_dir,
                           sweep_info,
                           cell_info,
                           write_spikes=True):

    lu.log_pretty_header("Extract ephys features", level=1)

    sp.drop_failed_sweeps(sweep_info)
    if len(sweep_info) == 0:
        raise er.FeatureError(
            "There are no QC-passed sweeps available to analyze")

    if not stimulus_ontology_file:
        stimulus_ontology_file = StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE
        logging.info(
            f"Ontology is not provided, using default "
            f"{StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE}"
        )
    ont = StimulusOntology(ju.read(stimulus_ontology_file))

    data_set = create_ephys_data_set(sweep_info=sweep_info,
                                     nwb_file=input_nwb_file,
                                     ontology=ont)

    (cell_features, sweep_features, cell_record, sweep_records, cell_state,
     feature_states) = dsft.extract_data_set_features(data_set)

    if cell_state['failed_fx']:
        feature_data = {'cell_state': cell_state}
    else:
        if cell_info:
            cell_record.update(cell_info)

        feature_data = {
            'cell_features': cell_features,
            'sweep_features': sweep_features,
            'cell_record': cell_record,
            'sweep_records': sweep_records,
            'cell_state': cell_state,
            'feature_states': feature_states
        }

        if write_spikes:
            if not feature_states['sweep_features_state']['failed_fx']:
                sweep_spike_times = collect_spike_times(sweep_features)
                append_spike_times(input_nwb_file,
                                   sweep_spike_times,
                                   output_nwb_path=output_nwb_file)
            else:
                logging.warn("extract_sweep_features failed, "
                             "unable to write spikes")

        if qc_fig_dir is None:
            logging.info("qc_fig_dir is not provided, will not save figures")
        else:
            plotqc.display_features(qc_fig_dir, data_set, feature_data)

        # On Windows, numpy int64 sweep-number keys are not a subclass of
        # int and cannot be serialized by json.dump, so convert them to str:
        feature_data["sweep_features"] = {
            str(k): v
            for k, v in feature_data["sweep_features"].items()
        }

    return feature_data
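
The str-key conversion above guards against a platform quirk: on Windows, numpy.int64 is not a subclass of int, so json.dump rejects it as a dict key. A standalone demonstration (output differs by platform):

import json

import numpy as np

d = {np.int64(3): "sweep"}
try:
    json.dumps(d)  # raises TypeError on Windows: keys must be str, int, ...
except TypeError as err:
    print("unserializable:", err)
print(json.dumps({str(k): v for k, v in d.items()}))  # works everywhere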
Example #7
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

import ipfx.stimulus_protocol_analysis as spa
from ipfx.data_set_features import extract_data_set_features
from ipfx.dataset.create import create_ephys_data_set
from ipfx.epochs import get_stim_epoch
from ipfx.feature_extractor import (SpikeFeatureExtractor,
                                    SpikeTrainFeatureExtractor)
from ipfx.stimulus_protocol_analysis import RampAnalysis
from ipfx.utilities import drop_failed_sweeps

nwb_file = os.path.join("../data", "sub-599387254_ses-601506492_icephys.nwb")
data_set = create_ephys_data_set(nwb_file)
drop_failed_sweeps(data_set)
long_square_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.long_square_names)

# All features
cell_features, sweep_features, cell_record, sweep_records, _, _ = \
    extract_data_set_features(data_set, subthresh_min_amp=-100.0)
print(cell_record)

ID = []
title = cell_record.keys()
res = pd.DataFrame(columns=title)
path_out = "../data/00020"
files_out = os.listdir(path_out)
for file_out in files_out:
Example #8
def dataset_for_specimen_id(specimen_id,
                            data_source,
                            ontology,
                            file_list=None):
    if data_source == "lims":
        nwb_path, h5_path = lims_nwb_information(specimen_id)
        if isinstance(nwb_path, dict) and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d} from LIMS".format(
                    specimen_id))
            return nwb_path

        try:
            data_set = create_ephys_data_set(nwb_file=nwb_path,
                                             ontology=ontology)
        except Exception as detail:
            logging.warning(
                "Exception when loading specimen {:d} from LIMS".format(
                    specimen_id))
            logging.warning(detail)
            return {
                "error": {
                    "type": "dataset",
                    "details": traceback.format_exc(limit=None)
                }
            }
    elif data_source == "sdk":
        nwb_path, sweep_info = sdk_nwb_information(specimen_id)
        try:
            data_set = create_ephys_data_set(nwb_file=nwb_path,
                                             sweep_info=sweep_info,
                                             ontology=ontology)
        except Exception as detail:
            logging.warning(
                "Exception when loading specimen {:d} via Allen SDK".format(
                    specimen_id))
            logging.warning(detail)
            return {
                "error": {
                    "type": "dataset",
                    "details": traceback.format_exc(limit=None)
                }
            }
    elif data_source == "filesystem":
        nwb_path = file_list[specimen_id]
        try:
            data_set = create_ephys_data_set(nwb_file=nwb_path)
        except Exception as detail:
            logging.warning(
                "Exception when loading specimen {:d} via file system".format(
                    specimen_id))
            logging.warning(detail)
            return {
                "error": {
                    "type": "dataset",
                    "details": traceback.format_exc(limit=None)
                }
            }
    else:
        logging.error("Invalid data source specified ({})".format(data_source))
        return {
            "error": {
                "type": "dataset",
                "details": "invalid data source: {}".format(data_source)
            }
        }

    return data_set
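
A hypothetical call against a local file (the specimen id and path are placeholders); failures come back as error dicts rather than exceptions:

result = dataset_for_specimen_id(
    12345,                                   # placeholder specimen id
    data_source="filesystem",
    ontology=None,
    file_list={12345: "/path/to/cell.nwb"},  # placeholder path
)
if isinstance(result, dict) and "error" in result:
    print(result["error"]["details"])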