Example 1
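The test snippets in these examples are excerpts and omit their imports. A minimal set that makes them runnable is sketched below, assuming the routines come from the Allen Institute ipfx package (the module paths are an assumption based on the names used here; TEST_OUTPUT_DIR in Example 2 is a module-level constant defined elsewhere in the test file):

import os

import numpy as np
import pandas as pd

import ipfx.feature_vectors as fv       # feature-vector routines used as fv.*
from ipfx.sweep import Sweep, SweepSet  # sweep containers used by the tests
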
def test_identify_isi_shape_one_spike():
    min_spike = 5

    test_input_amplitudes = [10, 20, 30, 40]
    test_avg_rate = [0, 1, 1, 1]
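    # "test_features" stands in for the long-square analysis output used in the
    # later examples: a per-sweep summary DataFrame plus a "spikes_set" entry per sweep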
    test_features = {
        "sweeps": pd.DataFrame({
            "stim_amp": test_input_amplitudes,
            "avg_rate": test_avg_rate,
        }),
        "spikes_set": [None] * len(test_avg_rate),
    }

    # Random test sweeps
    np.random.seed(42)
    n_points = 100
    t = np.arange(n_points)
    i = np.zeros_like(t)
    epochs = {"sweep": (0, n_points - 1), "test": None, "recording": None, "experiment": None, "stim": None}
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    sweep_list = []
    for _ in test_input_amplitudes:
        v = np.random.randn(n_points)
        test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
        sweep_list.append(test_sweep)
    test_sweep_set = SweepSet(sweep_list)

    selected_sweep, _ = fv.identify_sweep_for_isi_shape(
        test_sweep_set, test_features, duration=1, min_spike=min_spike)

    assert np.array_equal(selected_sweep.v, sweep_list[1].v)
Example 2
def test_isi_shape(feature_vector_input):

    sweeps, features, start, end = feature_vector_input

    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        sweeps, features, end - start)
    temp_data = fv.isi_shape(isi_sweep, isi_sweep_spike_info, end)

    test_data = np.load(os.path.join(TEST_OUTPUT_DIR, "isi_shape.npy"))

    assert np.array_equal(test_data, temp_data)
Example 3
def data_for_specimen_id(specimen_id,
                         sweep_qc_option,
                         data_source,
                         ap_window_length=0.006,
                         target_sampling_rate=10000,
                         nfiles=None):
    logging.debug("specimen_id: {}".format(specimen_id))

    lsq_fail = False
    ssq_fail = False
    ramp_fail = False
    # Find or retrieve the NWB file and ancillary info and construct a data set object
    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    if data_source == "local":
        nwb_path = nfiles[specimen_id]
        if isinstance(nwb_path, dict) and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d}".format(
                    specimen_id))
            return nwb_path

        data_set = HBGDataSet(nwb_file=nwb_path, ontology=ontology)

    else:
        # Without a valid data source there is no data set to analyze, so
        # return an error record instead of falling through with data_set unbound
        logging.error("invalid data source specified ({})".format(data_source))
        return {
            "error": {
                "type": "dataset",
                "details": "invalid data source specified ({})".format(data_source)
            }
        }

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, lsq_start, lsq_end,
         lsq_spx) = preprocess_long_square_sweeps(data_set, lsq_sweep_numbers)
    except Exception as detail:
        lsq_fail = True
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        ssq_fail = True
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        # Not fatal: ssq_fail is checked below and the long-square AP waveform
        # is substituted when short-square preprocessing fails

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features = preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        ramp_fail = True
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        # Not fatal: ramp_fail is checked below and the long-square AP waveform
        # is substituted when ramp preprocessing fails

    # Calculate desired feature vectors
    result = {}
    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict, lsq_start, lsq_end)
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        if not ssq_fail:
            # Calculate waveforms from each type of sweep
            spiking_ssq_sweep_list = [
                ssq_sweeps.sweeps[swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            spiking_ssq_info_list = [
                ssq_features["spikes_set"][swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
                spiking_ssq_sweep_list,
                spiking_ssq_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # Placeholders; overwritten with the long-square AP waveform below
            ssq_ap_v, ssq_ap_dv = np.nan, np.nan

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        if not ramp_fail:
            spiking_ramp_sweep_list = [
                ramp_sweeps.sweeps[swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            spiking_ramp_info_list = [
                ramp_features["spikes_set"][swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
                spiking_ramp_sweep_list,
                spiking_ramp_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # Fall back to the long-square AP waveform when ramp preprocessing failed
            ramp_ap_v = np.copy(lsq_ap_v)
            ramp_ap_dv = np.copy(lsq_ap_dv)

        if ssq_fail:
            # Same fallback for short squares; done here because lsq_ap_v and
            # lsq_ap_dv are only available after the long-square block above
            ssq_ap_v = np.copy(lsq_ap_v)
            ssq_ap_dv = np.copy(lsq_ap_dv)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

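        # Spiking responses at target amplitudes 0-100 (in steps of 20) feed the
        # PSTH, instantaneous-frequency, and per-spike feature vectors below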
        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
            if feature == "width":
                result["spiking_width"] = result["spiking_width"] / 2
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result
Example 4
def main(nwb_file, output_dir, project, **kwargs):
    nwb = MiesNwb(nwb_file)

    # SPECIFICS FOR EXAMPLE NWB =========

    # Only analyze one channel at a time
    channel = 0

    # We can work out code to automatically extract these based on stimulus names later.
    if_sweep_inds = [39, 45]
    targetv_sweep_inds = [15, 21]

    # END SPECIFICS =====================

    # Assemble all Recordings and convert to Sweeps
    supra_sweep_ids = list(range(*if_sweep_inds))
    sub_sweep_ids = list(range(*targetv_sweep_inds))

    supra_recs = [nwb.contents[i][channel] for i in supra_sweep_ids]
    sub_recs = [nwb.contents[i][channel] for i in sub_sweep_ids]

    # Build sweep sets
    lsq_supra_sweep_list, lsq_supra_dur = recs_to_sweeps(supra_recs)
    lsq_sub_sweep_list, lsq_sub_dur = recs_to_sweeps(sub_recs)
    lsq_supra_sweeps = SweepSet(lsq_supra_sweep_list)
    lsq_sub_sweeps = SweepSet(lsq_sub_sweep_list)

    lsq_supra_start = 0
    lsq_supra_end = lsq_supra_dur
    lsq_sub_start = 0
    lsq_sub_end = lsq_sub_dur

    # Pre-process sweeps
    lsq_supra_spx, lsq_supra_spfx = dsf.extractors_for_sweeps(
        lsq_supra_sweeps, start=lsq_supra_start, end=lsq_supra_end)
    lsq_supra_an = spa.LongSquareAnalysis(lsq_supra_spx,
                                          lsq_supra_spfx,
                                          subthresh_min_amp=-100.,
                                          require_subthreshold=False)
    lsq_supra_features = lsq_supra_an.analyze(lsq_supra_sweeps)

    lsq_sub_spx, lsq_sub_spfx = dsf.extractors_for_sweeps(lsq_sub_sweeps,
                                                          start=lsq_sub_start,
                                                          end=lsq_sub_end)
    lsq_sub_an = spa.LongSquareAnalysis(lsq_sub_spx,
                                        lsq_sub_spfx,
                                        subthresh_min_amp=-100.,
                                        require_suprathreshold=False)
    lsq_sub_features = lsq_sub_an.analyze(lsq_sub_sweeps)

    # Calculate feature vectors
    result = {}
    (subthresh_hyperpol_dict, hyperpol_deflect_dict
     ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
         lsq_sub_features, lsq_sub_sweeps)
    target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
    result["step_subthresh"] = fv.step_subthreshold(
        subthresh_hyperpol_dict,
        target_amps_for_step_subthresh,
        lsq_sub_start,
        lsq_sub_end,
        amp_tolerance=5)
    result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                 hyperpol_deflect_dict,
                                                 lsq_sub_start, lsq_sub_end)

    (subthresh_depol_dict,
     depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
         lsq_supra_features, lsq_supra_sweeps)
    result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
        subthresh_depol_dict, depol_deflect_dict, lsq_supra_start,
        lsq_supra_end)
    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        lsq_supra_sweeps, lsq_supra_features, lsq_supra_end - lsq_supra_start)
    result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                       lsq_supra_end)

    # Calculate AP waveform from long squares
    # ap_window_length is not defined anywhere in this script; take it from
    # kwargs with an assumed default so the call below works
    ap_window_length = kwargs.get("ap_window_length", 0.005)
    rheo_ind = lsq_supra_features["rheobase_sweep"].name
    sweep = lsq_supra_sweeps.sweeps[rheo_ind]
    lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
        [sweep], [lsq_supra_features["spikes_set"][rheo_ind]],
        window_length=ap_window_length)

    result["first_ap_v"] = lsq_ap_v
    result["first_ap_dv"] = lsq_ap_dv

    target_amplitudes = np.arange(0, 120, 20)
    supra_info_list = fv.identify_suprathreshold_sweep_sequence(
        lsq_supra_features, target_amplitudes, shift=10)
    result["psth"] = fv.psth_vector(supra_info_list, lsq_supra_start,
                                    lsq_supra_end)
    result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_supra_start,
                                              lsq_supra_end)
    spike_feature_list = [
        "upstroke_downstroke_ratio",
        "peak_v",
        "fast_trough_v",
        "threshold_v",
        "width",
    ]
    for feature in spike_feature_list:
        result["spiking_" + feature] = fv.spike_feature_vector(
            feature, supra_info_list, lsq_supra_start, lsq_supra_end)

    # Save the results
    specimen_ids = [0]
    results = [result]

    filtered_set = [(i, r) for i, r in zip(specimen_ids, results)
                    if "error" not in r]
    error_set = [{
        "id": i,
        "error": d
    } for i, d in zip(specimen_ids, results) if "error" in d]
    if len(filtered_set) == 0:
        logging.info("No specimens had results")
        return

    with open(os.path.join(output_dir, "fv_errors_{:s}.json".format(project)),
              "w") as f:
        json.dump(error_set, f, indent=4)

    used_ids, results = zip(*filtered_set)
    logging.info("Finished with {:d} processed specimens".format(
        len(used_ids)))

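    # Stack each feature vector across specimens and write one .npy file per key;
    # specimens missing a key are padded with a NaN vector of the expected length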
    k_sizes = {}
    for k in results[0].keys():
        if k not in k_sizes and results[0][k] is not None:
            k_sizes[k] = len(results[0][k])
        data = np.array([
            r[k] if k in r else np.nan * np.zeros(k_sizes[k]) for r in results
        ])
        if len(data.shape) == 1:  # it'll be 1D if there's just one specimen
            data = np.reshape(data, (1, -1))
        if data.shape[0] < len(used_ids):
            logging.warning("Missing data!")
            missing = np.array([k not in r for r in results])
            print(k, np.array(used_ids)[missing])
        np.save(
            os.path.join(output_dir, "fv_{:s}_{:s}.npy".format(k, project)),
            data)

    np.save(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)),
            used_ids)
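The arrays written by main can be read back with numpy. A minimal sketch, assuming the same output directory and project name passed to the script (the values below are placeholders, not taken from the code above):

import os
import numpy as np

output_dir, project = "output", "demo"  # placeholders
ids = np.load(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)))
psth = np.load(os.path.join(output_dir, "fv_psth_{:s}.npy".format(project)))
assert psth.shape[0] == len(ids)  # one row per processed specimen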
Example 5
def test_identify_sweep_for_isi_shape():
    sweeps = {
        "index": [0, 1, 2, 3, 4, 5, 6, 7, 8],
        "columns": [
            "avg_rate",
            "peak_deflect",
            "stim_amp",
            "v_baseline",
            "sag",
            "adapt",
            "latency",
            "isi_cv",
            "mean_isi",
            "median_isi",
            "first_isi",
        ],
        "data": [
            [
                0.0,
                (-70.65, 35892),
                -29.999998092651367,
                -68.25825500488281,
                0.054702017456293106,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
            ],
            [
                0.0,
                (-72.2, 33470),
                -50.0,
                -68.39005279541016,
                0.01751580648124218,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
            ],
            [
                0.0,
                (-73.66875, 79087),
                -70.0,
                -68.42821502685547,
                0.004760173615068197,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
            ],
            [
                0.0,
                (-30.550001, 31322),
                469.9999694824219,
                -68.29730987548828,
                -8.956585884094238,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
            ],
            [
                0.0,
                (-28.54375, 31625),
                479.9999694824219,
                -68.26968383789062,
                -9.093791961669922,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
            ],
            [
                5.000000000000001,
                (14.98125, 30920),
                489.9999694824219,
                -67.90441131591797,
                -8.981210708618164,
                0.00839006531426236,
                0.018079999999999985,
                0.026166652896782595,
                0.016109999999999985,
                0.01608000000000004,
                0.015880000000000005,
            ],
            [
                30.000000000000004,
                (15.19375, 32169),
                520.0,
                -67.4559097290039,
                -8.814754486083984,
                0.00801722722893242,
                0.01963999999999999,
                0.1548801847128805,
                0.015706896551724133,
                0.0156400000000001,
                0.011479999999999935,
            ],
            [
                72.00000000000001,
                (15.31875, 31168),
                540.0,
                -67.43457794189453,
                -6.910084247589111,
                0.001789278456852954,
                0.012699999999999934,
                0.09433347064888947,
                0.013758591549295774,
                0.013900000000000023,
                0.010319999999999996,
            ],
            [
                81.00000000000001,
                (15.7125, 52887),
                560.0,
                -67.80497741699219,
                -6.805542945861816,
                0.002374929272995164,
                0.011459999999999915,
                0.0921604402099859,
                0.0123335,
                0.012390000000000012,
                0.009160000000000057,
            ],
        ],
    }
    start = 0.5
    end = 1.5

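    # Minimal stand-in for a sweep set: .sweeps returns an (n_sweeps x n_columns)
    # integer array, so each "sweep" is just a distinguishable row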
    class SomeSweeps:
        @property
        def sweeps(self):
            ndata = len(sweeps["data"][0])
            nsweeps = len(sweeps["index"])
            return np.arange(nsweeps * ndata).reshape(nsweeps, ndata)

    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        SomeSweeps(),
        {
            "sweeps": pd.DataFrame(**sweeps),
            "spikes_set": {
                5: "foo"
            }
        },
        end - start,
    )

    assert isi_sweep_spike_info == "foo"
    assert np.allclose(isi_sweep, np.arange(55, 66))
Example 6
def data_for_specimen_id(
    specimen_id,
    sweep_qc_option,
    data_source,
    ontology,
    ap_window_length=0.005,
    target_sampling_rate=50000,
    file_list=None,
):
    """
    Extract feature vector from given cell identified by the specimen_id
    Parameters
    ----------
    specimen_id : int
        cell identified
    sweep_qc_option : str
        see CollectFeatureVectorParameters input schema for details
    data_source: str
        see CollectFeatureVectorParameters input schema for details
    ontology : stimulus.StimulusOntology
        mapping of stimuli names to stimulus codes
    ap_window_length : float
        see CollectFeatureVectorParameters input schema for details
    target_sampling_rate : float
        sampling rate
    file_list : list of str
        nwbfile names
    Returns
    -------
    dict :
        features for a given cell specimen_id

    """
    logging.debug("specimen_id: {}".format(specimen_id))

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    data_set = su.dataset_for_specimen_id(specimen_id, data_source, ontology,
                                          file_list)
    if isinstance(data_set, dict) and "error" in data_set:
        logging.warning(
            "Problem getting AibsDataSet for specimen {:d} from LIMS".format(
                specimen_id))
        return data_set

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, _, lsq_start,
         lsq_end) = su.preprocess_long_square_sweeps(data_set,
                                                     lsq_sweep_numbers)

    except Exception as detail:
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features, _ = su.preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features, _ = su.preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Calculate desired feature vectors
    result = {}

    if data_source == "filesystem":
        result["id"] = [specimen_id]

    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict,
            np.round(lsq_start, decimals=3), np.round(lsq_end, decimals=3))
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        # Calculate waveforms from each type of sweep
        spiking_ssq_sweep_list = [
            ssq_sweeps.sweeps[swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        spiking_ssq_info_list = [
            ssq_features["spikes_set"][swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
            spiking_ssq_sweep_list,
            spiking_ssq_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        spiking_ramp_sweep_list = [
            ramp_sweeps.sweeps[swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        spiking_ramp_info_list = [
            ramp_features["spikes_set"][swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
            spiking_ramp_sweep_list,
            spiking_ramp_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result