Example #1
def feature_vector_input():

    TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')

    nwb_file_name = "Pvalb-IRES-Cre;Ai14-415796.02.01.01.nwb"
    nwb_file_full_path = os.path.join(TEST_DATA_PATH, nwb_file_name)

    if not os.path.exists(nwb_file_full_path):
        download_file(nwb_file_name, nwb_file_full_path)

    data_set = AibsDataSet(nwb_file=nwb_file_full_path, ontology=ontology)

    lsq_sweep_numbers = [4, 5, 6, 16, 17, 18, 19, 20, 21]

    lsq_sweeps = data_set.sweep_set(lsq_sweep_numbers)
    lsq_sweeps.select_epoch("recording")
    lsq_sweeps.align_to_start_of_epoch("experiment")
    lsq_start, lsq_dur, _, _, _ = stf.get_stim_characteristics(lsq_sweeps.sweeps[0].i,
                                                               lsq_sweeps.sweeps[0].t)

    lsq_end = lsq_start + lsq_dur
    lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(lsq_sweeps,
                                                  start=lsq_start,
                                                  end=lsq_end,
                                                  **dsf.detection_parameters(data_set.LONG_SQUARE))
    lsq_an = spa.LongSquareAnalysis(lsq_spx, lsq_spfx, subthresh_min_amp=-100.)

    lsq_features = lsq_an.analyze(lsq_sweeps)

    return lsq_sweeps, lsq_features, lsq_start, lsq_end
Example #2
def test_get_stimulus_code_ext(NWB_file):
    default_ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    dataset = AibsDataSet(nwb_file=NWB_file, ontology=default_ontology)

    assert dataset.get_stimulus_code_ext("EXTPSMOKET180424",
                                         0) == "EXTPSMOKET180424[0]"
Example #3
def dataset_for_specimen_id(specimen_id, data_source, ontology):
    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    if data_source == "lims":
        nwb_path, h5_path = lims_nwb_information(specimen_id)
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning("Problem getting NWB file for specimen {:d} from LIMS".format(specimen_id))
            return nwb_path

        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, h5_file=h5_path, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} from LIMS".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    elif data_source == "sdk":
        nwb_path, sweep_info = sdk_nwb_information(specimen_id)
        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, sweep_info=sweep_info, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} via Allen SDK".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    else:
        logging.error("invalid data source specified ({})".format(data_source))

    return data_set
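A minimal usage sketch for the helper above; the specimen ID is the one used elsewhere on this page, and the caller is assumed to have already built a StimulusOntology as in the other examples:

# Hypothetical call: returns an AibsDataSet on success, or an error dict on failure
result = dataset_for_specimen_id(595570553, "sdk", ontology)
if isinstance(result, dict) and "error" in result:
    logging.warning("Could not build data set: %s", result["error"]["type"])
else:
    print(result.sweep_table.head())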
Example #4
def feature_vector_input():

    TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')

    nwb_file_name = "Pvalb-IRES-Cre;Ai14-415796.02.01.01.nwb"
    nwb_file_full_path = os.path.join(TEST_DATA_PATH, nwb_file_name)

    if not os.path.exists(nwb_file_full_path):
        download_file(nwb_file_name, nwb_file_full_path)

    data_set = AibsDataSet(nwb_file=nwb_file_full_path, ontology=ontology)

    lsq_sweep_numbers = data_set.filtered_sweep_table(
        clamp_mode=data_set.CURRENT_CLAMP,
        stimuli=ontology.long_square_names).sweep_number.sort_values().values

    lsq_sweeps = data_set.sweep_set(lsq_sweep_numbers)
    lsq_start, lsq_dur, _, _, _ = stf.get_stim_characteristics(
        lsq_sweeps.sweeps[0].i, lsq_sweeps.sweeps[0].t)

    lsq_end = lsq_start + lsq_dur
    lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(lsq_sweeps,
                                                  start=lsq_start,
                                                  end=lsq_end,
                                                  **dsf.detection_parameters(
                                                      data_set.LONG_SQUARE))
    lsq_an = spa.LongSquareAnalysis(lsq_spx, lsq_spfx, subthresh_min_amp=-100.)

    lsq_features = lsq_an.analyze(lsq_sweeps)

    return lsq_sweeps, lsq_features, lsq_start, lsq_end
Example #5
def test_get_clamp_mode(NWB_file):

    default_ontology = StimulusOntology(ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))

    dataset = AibsDataSet(nwb_file=NWB_file, ontology=default_ontology)

    assert dataset.get_clamp_mode(0) == dataset.VOLTAGE_CLAMP
Example #6
def create_data_set(sweep_info=None, nwb_file=None, ontology=None, api_sweeps=True, h5_file=None, validate_stim=True):
    """Create an appropriate EphysDataSet derived class for the given nwb_file

    Parameters
    ----------
    nwb_file: str file name

    Returns
    -------
    EphysDataSet derived object
    """

    if nwb_file is None:
        raise ValueError("Can not decide which EphysDataSet class to create without nwb_file")

    nwb_version = get_nwb_version(nwb_file)

    if nwb_version["major"] == 2:
        return HBGDataSet(sweep_info=sweep_info,
                          nwb_file=nwb_file,
                          ontology=ontology,
                          api_sweeps=api_sweeps,
                          validate_stim=validate_stim)

    elif nwb_version["major"] == 1 or nwb_version["major"] == 0:
        return AibsDataSet(sweep_info=sweep_info,
                           nwb_file=nwb_file,
                           ontology=ontology,
                           api_sweeps=api_sweeps,
                           h5_file=h5_file,
                           validate_stim=validate_stim)
    else:
        raise ValueError("Unsupported or unknown NWB major "
                         "version {} ({})".format(nwb_version["major"], nwb_version["full"]))
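A minimal usage sketch for create_data_set; the file path is a placeholder, and the returned class depends on the NWB major version as described in the docstring:

# "experiment.nwb" is a placeholder path
data_set = create_data_set(nwb_file="experiment.nwb")
print(type(data_set).__name__)  # HBGDataSet for NWB 2.x, AibsDataSet for NWB 0.x/1.x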
Example #7
def data_for_specimen_id(specimen_id, passed_only):
    name, roi_id, specimen_id = lq.get_specimen_info_from_lims_by_id(specimen_id)
    nwb_path = lq.get_nwb_path_from_lims(roi_id)
    if len(nwb_path) == 0: # could not find an NWB file
        logging.debug("No NWB file for {:d}".format(specimen_id))
        return {"error": {"type": "no_nwb", "details": ""}}

    # Check if NWB has lab notebook information, or if additional hdf5 file is needed
    ontology = StimulusOntology(ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    h5_path = None
    with h5py.File(nwb_path, "r") as h5:
        if "general/labnotebook" not in h5:
            h5_path = lq.get_igorh5_path_from_lims(roi_id)

    try:
        data_set = AibsDataSet(nwb_file=nwb_path, h5_file=h5_path, ontology=ontology)
    except Exception as detail:
        logging.warning("Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
#         return {"error": {"type": "dataset", "details": traceback.format_exc(limit=1)}}
        return {}

    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(data_set, ontology.long_square_names)
        ssq_sweep_numbers = categorize_iclamp_sweeps(data_set, ontology.short_square_names)
        ramp_sweep_numbers = categorize_iclamp_sweeps(data_set, ontology.ramp_names)
    except Exception as detail:
        logging.warning("Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
#         return {"error": {"type": "sweep_table", "details": traceback.format_exc(limit=1)}}
        return {}

    try:
        result = extract_features(data_set, ramp_sweep_numbers, ssq_sweep_numbers, lsq_sweep_numbers)
    except Exception as detail:
        logging.warning("Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
#         return {"error": {"type": "processing", "details": traceback.format_exc(limit=1)}}
        return {}

    result["specimen_id"] = specimen_id
    return result
Example #8
    def save_cell_data(self, acceptable_stimtypes, non_standard_nwb=False,
                       ephys_dir='preprocessed'):

        bpopt_stimtype_map = utility.bpopt_stimtype_map
        distinct_id_map = utility.aibs_stimname_map
        # Note: may also need to provide h5 "lab notebok" and/or ontology
        from ipfx.stimulus import StimulusOntology
        from ipfx.epochs import get_recording_epoch
        import allensdk.core.json_utilities as ju
        ontology = StimulusOntology(
            ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
        dataset = AibsDataSet(nwb_file=self.nwb_path, ontology=ontology)

        stim_map = defaultdict(list)
        stim_sweep_map = {}
        output_dir = os.path.join(os.getcwd(), ephys_dir)
        utility.create_dirpath(output_dir)

        # Note: are QC criteria appropriate for ramps + other stim?
        passed_sweep_nums = get_passed_sweeps(dataset, self.cell_id)
        for sweep_num in passed_sweep_nums:
            record = dataset.get_sweep_record(sweep_num)
            sweep_number = record[AibsDataSet.SWEEP_NUMBER]
            stim_type = record[AibsDataSet.STIMULUS_NAME]

            if stim_type in acceptable_stimtypes:
                # TODO: use dataset.sweep to get full object, epochs
                sweep = dataset.get_sweep_data(sweep_number)

                stimulus_trace = sweep['stimulus']
                response_trace = sweep['response']
                sampling_rate = sweep['sampling_rate']

                # remove missing data
                # start, end = get_recording_epoch(stimulus_trace)
                # stimulus_trace = stimulus_trace[:end]
                # response_trace = response_trace[:end]
                time = np.arange(0, len(stimulus_trace)) / sampling_rate

                trace_name = '%s_%d' % (
                    distinct_id_map[stim_type], sweep_number)

                if non_standard_nwb:
                    calc_stimparams_func = self.calc_stimparams_nonstandard
                else:
                    calc_stimparams_func = self.calc_stimparams_ipfx

                stim_start, stim_stop, stim_amp_start, stim_amp_end, \
                    tot_duration, hold_curr = calc_stimparams_func(
                        time, stimulus_trace, trace_name)

                response_trace_short_filename = '%s.%s' % (trace_name, 'txt')
                response_trace_filename = os.path.join(
                    output_dir, response_trace_short_filename)

                time *= 1e3  # in ms
                response_trace *= 1e3  # in mV
                response_trace = utility.correct_junction_potential(response_trace,
                                                                    self.junction_potential)
                stimulus_trace *= 1e9

                # downsampling
                time, stimulus_trace, response_trace = utility.downsample_ephys_data(
                    time, stimulus_trace, response_trace)

                # save current timeseries only when needed
                if stim_type in utility.bpopt_current_play_stimtypes:
                    with open(response_trace_filename, 'wb') as response_trace_file:
                        np.savetxt(response_trace_file,
                                   np.transpose([time, response_trace, stimulus_trace]))

                else:
                    with open(response_trace_filename, 'wb') as response_trace_file:
                        np.savetxt(response_trace_file,
                                   np.transpose([time, response_trace]))

                stim_map[distinct_id_map[stim_type]].append([
                    trace_name,
                    bpopt_stimtype_map[stim_type],
                    hold_curr / 1e12,
                    stim_amp_start / 1e12,
                    stim_amp_end / 1e12,
                    stim_start * 1e3,
                    stim_stop * 1e3,
                    tot_duration * 1e3,
                    response_trace_short_filename])

                stim_sweep_map[trace_name] = sweep_number

        logger.debug('Writing stimmap.csv ...')
        stim_reps_sweep_map, stimmap_filename = self.write_stimmap_csv(stim_map,
                                                                       output_dir, stim_sweep_map)

        self.write_provenance(
            output_dir,
            self.nwb_path,
            stim_sweep_map,
            stim_reps_sweep_map)

        return output_dir, stimmap_filename
Example #9
import os

import matplotlib.pyplot as plt
from allensdk.api.queries.cell_types_api import CellTypesApi
from ipfx.aibs_data_set import AibsDataSet
from ipfx.ephys_extractor import SpikeExtractor

# Download and access the experimental data
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
sweep_info = ct.get_ephys_sweeps(specimen_id)

if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)

# Get the data for the sweep into a format we can use
dataset = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
sweep_number = 39
sweep = dataset.sweep(sweep_number)

# Extract information about the spikes
ext = SpikeExtractor()
results = ext.process(t=sweep.t, v=sweep.v, i=sweep.i)

# Plot the results, showing two features of the detected spikes
plt.plot(sweep.t, sweep.v)
plt.plot(results["peak_t"], results["peak_v"], 'r.')
plt.plot(results["threshold_t"], results["threshold_v"], 'k.')

# Set the plot limits to highlight where spikes are and set axis labels
plt.xlim(0.5, 2.5)
plt.xlabel("Time (s)")
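Run as a standalone script, the figure still needs a y-axis label and an explicit call to display it; a minimal addition, assuming the response trace is in millivolts as in the other examples:

plt.ylabel("Membrane potential (mV)")
plt.show()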
Example #10
import os

from allensdk.api.queries.cell_types_api import CellTypesApi
from ipfx.aibs_data_set import AibsDataSet
from ipfx.ephys_extractor import SpikeExtractor, SpikeTrainFeatureExtractor
from ipfx.stimulus_protocol_analysis import ShortSquareAnalysis
import ipfx.ephys_features as ft

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

# build a data set and find the short squares
data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
shsq_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.short_square_names)
shsq_sweep_set = data_set.sweep_set(shsq_table.sweep_number)

# Estimate the dv cutoff and threshold fraction (we know stimulus starts at 0.27s)
dv_cutoff, thresh_frac = ft.estimate_adjusted_detection_parameters(
    shsq_sweep_set.v, shsq_sweep_set.t, 0.27, 0.271)
# Build the extractors
start = 0.27
spx = SpikeExtractor(start=start, dv_cutoff=dv_cutoff, thresh_frac=thresh_frac)
sptrx = SpikeTrainFeatureExtractor(start=start, end=None)

# Run the analysis
shsq_analysis = ShortSquareAnalysis(spx, sptrx)
results = shsq_analysis.analyze(shsq_sweep_set)
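The analysis returns a dictionary of short-square features whose exact keys depend on the ipfx version in use, so a quick way to inspect the output is simply:

print(list(results.keys()))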
Example #11
def test_get_stimulus_units(NWB_file):

    default_ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    dataset = AibsDataSet(nwb_file=NWB_file, ontology=default_ontology)
    assert dataset.get_stimulus_units(0) == "Volts"
Example #12
import os

import seaborn as sns
from allensdk.api.queries.cell_types_api import CellTypesApi
from ipfx.aibs_data_set import AibsDataSet
from ipfx.ephys_extractor import SpikeExtractor, SpikeTrainFeatureExtractor
from ipfx.stimulus_protocol_analysis import RampAnalysis

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

# Build the data set and find the ramp sweeps
data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
ramp_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.ramp_names)
ramp_sweep_set = data_set.sweep_set(ramp_table.sweep_number)

# Build the extractors (we know stimulus starts at 0.27 s)
start = 0.27
spx = SpikeExtractor(start=start, end=None)
sptrx = SpikeTrainFeatureExtractor(start=start, end=None)

# Run the analysis
ramp_analysis = RampAnalysis(spx, sptrx)
results = ramp_analysis.analyze(ramp_sweep_set)

# Plot the sweeps and the latency to the first spike of each
sns.set_style("white")
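The plotting referenced in the final comment is not shown above; a minimal sketch, assuming each sweep exposes .t and .v arrays as in the spike-extraction example earlier on this page:

import matplotlib.pyplot as plt

for swp in ramp_sweep_set.sweeps:
    plt.plot(swp.t, swp.v, linewidth=0.5)
plt.xlabel("Time (s)")
plt.ylabel("Membrane potential (mV)")
plt.show()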
Example #13
def data_for_specimen_id(specimen_id,
                         sweep_qc_option,
                         data_source,
                         ap_window_length=0.005,
                         target_sampling_rate=50000):
    logging.debug("specimen_id: {}".format(specimen_id))

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    if data_source == "lims":
        nwb_path, h5_path = lims_nwb_information(specimen_id)
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d} from LIMS".format(
                    specimen_id))
            return nwb_path

        try:
            data_set = AibsDataSet(nwb_file=nwb_path,
                                   h5_file=h5_path,
                                   ontology=ontology)
        except Exception as detail:
            logging.warning(
                "Exception when loading specimen {:d} from LIMS".format(
                    specimen_id))
            logging.warning(detail)
            return {
                "error": {
                    "type": "dataset",
                    "details": traceback.format_exc(limit=None)
                }
            }
    elif data_source == "sdk":
        ctc = CellTypesCache()

        morph = ctc.get_reconstruction(specimen_id)
        morph_table = ctc.get_morphology_features(specimen_id)
        morph_table.to_csv('cell_types\\specimen_' + str(specimen_id) + '\\' +
                           'morphology_features.csv')

        print("morph dl failed")
        nwb_path, sweep_info = sdk_nwb_information(specimen_id)
        try:
            data_set = AibsDataSet(nwb_file=nwb_path,
                                   sweep_info=sweep_info,
                                   ontology=ontology)
        except Exception as detail:
            logging.warning(
                "Exception when loading specimen {:d} via Allen SDK".format(
                    specimen_id))
            logging.warning(detail)
            return {
                "error": {
                    "type": "dataset",
                    "details": traceback.format_exc(limit=None)
                }
            }
    else:
        logging.error("invalid data source specified ({})".format(data_source))

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, lsq_start, lsq_end,
         lsq_spx) = preprocess_long_square_sweeps(data_set, lsq_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features = preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Calculate desired feature vectors
    result = {}

    (subthresh_hyperpol_dict, hyperpol_deflect_dict
     ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
         lsq_features, lsq_sweeps)
    target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
    result["step_subthresh"] = fv.step_subthreshold(
        subthresh_hyperpol_dict,
        target_amps_for_step_subthresh,
        lsq_start,
        lsq_end,
        amp_tolerance=5)
    result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                 hyperpol_deflect_dict,
                                                 lsq_start, lsq_end)
    (subthresh_depol_dict,
     depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
         lsq_features, lsq_sweeps)
    result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
        subthresh_depol_dict, depol_deflect_dict, lsq_start, lsq_end)
    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        lsq_sweeps, lsq_features, lsq_end - lsq_start)
    result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                       lsq_end)

    # Calculate waveforms from each type of sweep
    spiking_ssq_sweep_list = [
        ssq_sweeps.sweeps[swp_ind]
        for swp_ind in ssq_features["common_amp_sweeps"].index
    ]
    spiking_ssq_info_list = [
        ssq_features["spikes_set"][swp_ind]
        for swp_ind in ssq_features["common_amp_sweeps"].index
    ]
    ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
        spiking_ssq_sweep_list,
        spiking_ssq_info_list,
        target_sampling_rate=target_sampling_rate,
        window_length=ap_window_length,
        skip_clipped=True)

    rheo_ind = lsq_features["rheobase_sweep"].name
    sweep = lsq_sweeps.sweeps[rheo_ind]
    lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
        [sweep], [lsq_features["spikes_set"][rheo_ind]],
        target_sampling_rate=target_sampling_rate,
        window_length=ap_window_length)

    spiking_ramp_sweep_list = [
        ramp_sweeps.sweeps[swp_ind]
        for swp_ind in ramp_features["spiking_sweeps"].index
    ]
    spiking_ramp_info_list = [
        ramp_features["spikes_set"][swp_ind]
        for swp_ind in ramp_features["spiking_sweeps"].index
    ]
    ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
        spiking_ramp_sweep_list,
        spiking_ramp_info_list,
        target_sampling_rate=target_sampling_rate,
        window_length=ap_window_length,
        skip_clipped=True)

    # Combine so that differences can be assessed by analyses like sPCA
    result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
    result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

    target_amplitudes = np.arange(0, 120, 20)
    supra_info_list = fv.identify_suprathreshold_spike_info(lsq_features,
                                                            target_amplitudes,
                                                            shift=10)
    result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
    result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                              lsq_end)

    spike_feature_list = [
        "upstroke_downstroke_ratio",
        "peak_v",
        "fast_trough_v",
        "threshold_v",
        "width",
    ]
    for feature in spike_feature_list:
        result["spiking_" + feature] = fv.spike_feature_vector(
            feature, supra_info_list, lsq_start, lsq_end)

    return result
Example #14
"""
Run all analyses on NWB file
"""
from __future__ import print_function

import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from allensdk.api.queries.cell_types_api import CellTypesApi
from ipfx.aibs_data_set import AibsDataSet
from ipfx.data_set_features import extract_data_set_features

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

data_set = AibsDataSet(
    sweep_info=sweep_info,
    nwb_file=nwb_file)  # Download and access the experimental data

cell_features, sweep_features, cell_record, sweep_records = \
    extract_data_set_features(data_set, subthresh_min_amp=-100.0)

print(cell_record)
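As an optional follow-up, the per-sweep records can be viewed as a table; this assumes pandas is installed and that sweep_records is a list of per-sweep dictionaries:

import pandas as pd

print(pd.DataFrame.from_records(sweep_records).head())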
Example #15
def test_validate_required_sweep_info(NWB_file):

    sweep_info = [{"sweep_number": 0}]
    dataset = AibsDataSet(sweep_info, nwb_file=NWB_file, api_sweeps=False)

    assert sorted(dataset.sweep_table) == sorted(dataset.COLUMN_NAMES)
Example #16
def data_for_specimen_id(specimen_id, data_source, chirp_stimulus_codes):
    logging.debug("specimen_id: {}".format(specimen_id))

    # Manual edit ontology to identify chirp sweeps
    ontology_data = ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE)
    mask = []
    for od in ontology_data:
        mask_val = True
        for tagset in od:
            for c in chirp_stimulus_codes:
                if c in tagset and "code" in tagset:
                    mask_val = False
                    break
        mask.append(mask_val)
    ontology_data = [od for od, m in zip(ontology_data, mask) if m is True]
    ontology_data.append([
        ["code"] + chirp_stimulus_codes,
        [
          "name",
          "Chirp",
        ],
        [
          "core",
          "Core 2"
        ]
    ])

    ontology = StimulusOntology(ontology_data)

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    if data_source == "lims":
        nwb_path, h5_path = lims_nwb_information(specimen_id)
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning("Problem getting NWB file for specimen {:d} from LIMS".format(specimen_id))
            return nwb_path

        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, h5_file=h5_path, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} from LIMS".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    elif data_source == "sdk":
        nwb_path, sweep_info = sdk_nwb_information(specimen_id)
        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, sweep_info=sweep_info, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} via Allen SDK".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    else:
        logging.error("invalid data source specified ({})".format(data_source))


    # Identify chirp sweeps

    try:
        iclamp_st = data_set.filtered_sweep_table(clamp_mode=data_set.CURRENT_CLAMP, stimuli=["Chirp"])
        chirp_sweep_numbers = iclamp_st["sweep_number"].sort_values().values
    except Exception as detail:
        logging.warning("Exception when identifying sweeps from specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {"error": {"type": "sweep_table", "details": traceback.format_exc(limit=1)}}

    if len(chirp_sweep_numbers) == 0:
        logging.info("No chirp sweeps for {:d}".format(specimen_id))
        return {"error": {"type": "processing", "details": "no available chirp sweeps"}}

    try:
        result = chirp.extract_chirp_feature_vector(data_set, chirp_sweep_numbers)
    except Exception as detail:
        logging.warning("Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {"error": {"type": "processing", "details": traceback.format_exc(limit=1)}}

    return result
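A minimal usage sketch for the chirp helper above; the stimulus codes are placeholders, since the actual codes depend on the stimulus set used during the recordings:

# Placeholder chirp stimulus codes, supplied by the caller in practice
codes = ["CHIRP_CODE_A", "CHIRP_CODE_B"]
fv = data_for_specimen_id(595570553, "sdk", codes)
if isinstance(fv, dict) and "error" in fv:
    logging.warning("Chirp extraction failed: %s", fv["error"]["type"])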
Example #17
"""
Sweep QC Features
=================

Estimate sweep QC features
"""
from __future__ import print_function

import os
import pandas as pd
from ipfx.aibs_data_set import AibsDataSet
import ipfx.qc_features as qcf
from allensdk.api.queries.cell_types_api import CellTypesApi

specimen_id = 595570553
nwb_file = '%d.nwb' % specimen_id

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)

# run sweep QC
sweep_features = qcf.sweep_qc_features(data_set)

print(pd.DataFrame(sweep_features).head())
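Optionally, the QC features can be written out for later review (the output filename is illustrative):

pd.DataFrame(sweep_features).to_csv("%d_sweep_qc_features.csv" % specimen_id, index=False)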