Example #1
def test_isi_shape_one_spike():
    # Test sweep
    np.random.seed(42)
    v = np.zeros(1000)
    v[100:400] = np.linspace(-30, 0, 300)
    t = np.arange(len(v))
    i = np.zeros_like(t)
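    # Minimal epochs dict: only the full-sweep epoch is defined; the rest are left as None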
    epochs = {"sweep": (0, len(v) - 1), "test": None, "recording": None, "experiment": None, "stim": None}
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    end = t[-100]

    test_threshold_index = [80]
    test_fast_trough_index = [100]
    test_threshold_v = [0]

    test_spike_info = pd.DataFrame({
        "threshold_index": test_threshold_index,
        "fast_trough_index": test_fast_trough_index,
        "threshold_v": test_threshold_v,
        "fast_trough_t": test_fast_trough_index,
    })

    n_points = 100
    isi_norm = fv.isi_shape(test_sweep, test_spike_info, end, n_points=n_points,
        steady_state_interval=10, single_max_duration=500)
    assert len(isi_norm) == n_points

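    # With a single spike, the shape is taken from the window after the spike (capped by
    # single_max_duration); v ramps from -30 mV toward the 0 mV threshold, so the first
    # value is negative but no lower than -30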
    assert isi_norm[0] < 0
    assert isi_norm[0] >= -30
Example #2
def test_isi_shape_skip_short():
    # Random test sweep
    np.random.seed(42)
    v = np.random.randn(1000)
    t = np.arange(len(v))
    i = np.zeros_like(t)
    epochs = {"sweep": (0, len(v) - 1), "test": None, "recording": None, "experiment": None, "stim": None}
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    end = t[-100]

    test_subsample = 3
    test_threshold_index = np.array([100, 130, 150 + 100 * test_subsample])
    test_fast_trough_index = test_threshold_index + 20
    test_threshold_v = np.random.randint(-100, -20, size=len(test_threshold_index))

    test_spike_info = pd.DataFrame({
        "threshold_index": test_threshold_index,
        "fast_trough_index": test_fast_trough_index,
        "threshold_v": test_threshold_v,
        "fast_trough_t": test_fast_trough_index,
    })

    n_points = 100
    isi_norm = fv.isi_shape(test_sweep, test_spike_info, end, n_points=n_points)
    assert len(isi_norm) == n_points

    # Should only use second ISI
    assert isi_norm[0] == (test_sweep.v[test_fast_trough_index[1]:test_fast_trough_index[1] + test_subsample].mean()
        - test_threshold_v[1])
Example #3
def test_isi_shape(feature_vector_input):

    sweeps, features, start, end = feature_vector_input

    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        sweeps, features, end - start)
    temp_data = fv.isi_shape(isi_sweep, isi_sweep_spike_info, end)

    test_data = np.load(os.path.join(TEST_OUTPUT_DIR, "isi_shape.npy"))

    assert np.array_equal(test_data, temp_data)
Example #4
def test_isi_shape_aligned():
    # Random test sweep
    np.random.seed(42)
    v = np.random.randn(1000)
    t = np.arange(len(v))
    i = np.zeros_like(t)
    epochs = {
        "sweep": (0, len(v) - 1),
        "test": None,
        "recording": None,
        "experiment": None,
        "stim": None,
    }
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    end = t[-100]

    test_threshold_index = np.array([100, 220, 340])
    test_fast_trough_index = test_threshold_index + 20
    test_threshold_v = np.random.randint(
        -100, -20, size=len(test_threshold_index)
    )

    test_spike_info = pd.DataFrame(
        {
            "threshold_index": test_threshold_index,
            "fast_trough_index": test_fast_trough_index,
            "threshold_v": test_threshold_v,
            "fast_trough_t": test_fast_trough_index,
        }
    )

    n_points = 100
    isi_norm = fv.isi_shape(
        test_sweep, test_spike_info, end, n_points=n_points
    )
    assert len(isi_norm) == n_points
    assert isi_norm[0] == np.mean(
        test_sweep.v[test_fast_trough_index[:-1]] - test_threshold_v[:-1]
    )
Example #5
def test_isi_shape():

    sweep_spike_info = {
        "fast_trough_index": [0, 10, -10000],
        "threshold_index": [-10000, 10, 20],
        "threshold_v": [1, 2, -10000],
    }

    class Sweep:
        @property
        def v(self):
            return np.arange(20)

        @property
        def t(self):
            return np.arange(20)

    obtained = fv.isi_shape(Sweep(),
                            pd.DataFrame(sweep_spike_info),
                            50,
                            n_points=10)
    assert np.allclose(np.arange(3.5, 13.5, 1.0), obtained)
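
The expected values above can be reproduced with a simplified sketch of the behavior these tests exercise (an illustration only, not the ipfx implementation): each ISI segment from one spike's fast trough to the next spike's threshold is offset by that spike's threshold_v, resampled to n_points, and the resampled segments are averaged.

import numpy as np

def isi_shape_sketch(v, fast_trough_index, threshold_index, threshold_v, n_points=10):
    # Hypothetical helper, not part of ipfx
    segments = []
    for i in range(len(fast_trough_index) - 1):
        start, stop = fast_trough_index[i], threshold_index[i + 1]
        if stop <= start:
            continue  # skip placeholder / degenerate indices
        seg = v[start:stop] - threshold_v[i]
        # Resample to n_points by averaging equal-width bins (assumes len(seg) >= n_points)
        seg = np.array([chunk.mean() for chunk in np.array_split(seg, n_points)])
        segments.append(seg)
    return np.vstack(segments).mean(axis=0)

# Matches the assertion above: [3.5, 4.5, ..., 12.5]
isi_shape_sketch(np.arange(20), [0, 10, -10000], [-10000, 10, 20], [1, 2, -10000])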
Example #6
def data_for_specimen_id(specimen_id,
                         sweep_qc_option,
                         data_source,
                         ap_window_length=0.006,
                         target_sampling_rate=10000,
                         nfiles=None):
    logging.debug("specimen_id: {}".format(specimen_id))

    lsq_fail = False
    ssq_fail = False
    ramp_fail = False
    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    if data_source == "local":

        nwb_path = nfiles[specimen_id]
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d}".format(
                    specimen_id))
            return nwb_path

        data_set = HBGDataSet(nwb_file=nwb_path, ontology=ontology)

    else:
        logging.error("invalid data source specified ({})".format(data_source))
        # data_set would be undefined below; fail early with the usual error format
        return {"error": {"type": "dataset", "details": "invalid data source"}}

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, lsq_start, lsq_end,
         lsq_spx) = preprocess_long_square_sweeps(data_set, lsq_sweep_numbers)
    except Exception as detail:
        lsq_fail = True
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        ssq_fail = True
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features = preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        ramp_fail = True
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)

    # Calculate desired feature vectors
    result = {}
    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict, lsq_start, lsq_end)
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        if not ssq_fail:
            # Calculate waveforms from each type of sweep
            spiking_ssq_sweep_list = [
                ssq_sweeps.sweeps[swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            spiking_ssq_info_list = [
                ssq_features["spikes_set"][swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
                spiking_ssq_sweep_list,
                spiking_ssq_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            ssq_ap_v, ssq_ap_dv = np.nan, np.nan

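        # AP waveform from the rheobase long square sweep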
        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        if not ramp_fail:

            spiking_ramp_sweep_list = [
                ramp_sweeps.sweeps[swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            spiking_ramp_info_list = [
                ramp_features["spikes_set"][swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
                spiking_ramp_sweep_list,
                spiking_ramp_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            ramp_ap_v, ramp_ap_dv = np.nan, np.nan

        if ramp_fail:
            ramp_ap_dv = np.copy(lsq_ap_dv)
            ramp_ap_v = np.copy(lsq_ap_v)

        if ssq_fail:
            ssq_ap_dv = np.copy(lsq_ap_dv)
            ssq_ap_v = np.copy(lsq_ap_v)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

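        # Spike-train feature vectors (PSTH, instantaneous frequency, per-spike features)
        # from the suprathreshold long square sweeps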
        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
            if feature == 'width':
                result["spiking_width"] = result["spiking_width"] / 2
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result
Example #7
def main(nwb_file, output_dir, project, **kwargs):
    nwb = MiesNwb(nwb_file)

    # SPECIFICS FOR EXAMPLE NWB =========

    # Only analyze one channel at a time
    channel = 0

    # We can work out code to automatically extract these based on stimulus names later.
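    # Each pair is a half-open [start, stop) sweep index range, expanded via range(*inds) below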
    if_sweep_inds = [39, 45]
    targetv_sweep_inds = [15, 21]

    # END SPECIFICS =====================

    # Assemble all Recordings and convert to Sweeps
    supra_sweep_ids = list(range(*if_sweep_inds))
    sub_sweep_ids = list(range(*targetv_sweep_inds))

    supra_recs = [nwb.contents[i][channel] for i in supra_sweep_ids]
    sub_recs = [nwb.contents[i][channel] for i in sub_sweep_ids]

    # Build sweep sets
    lsq_supra_sweep_list, lsq_supra_dur = recs_to_sweeps(supra_recs)
    lsq_sub_sweep_list, lsq_sub_dur = recs_to_sweeps(sub_recs)
    lsq_supra_sweeps = SweepSet(lsq_supra_sweep_list)
    lsq_sub_sweeps = SweepSet(lsq_sub_sweep_list)

    lsq_supra_start = 0
    lsq_supra_end = lsq_supra_dur
    lsq_sub_start = 0
    lsq_sub_end = lsq_sub_dur

    # Pre-process sweeps
    lsq_supra_spx, lsq_supra_spfx = dsf.extractors_for_sweeps(
        lsq_supra_sweeps, start=lsq_supra_start, end=lsq_supra_end)
    lsq_supra_an = spa.LongSquareAnalysis(lsq_supra_spx,
                                          lsq_supra_spfx,
                                          subthresh_min_amp=-100.,
                                          require_subthreshold=False)
    lsq_supra_features = lsq_supra_an.analyze(lsq_supra_sweeps)

    lsq_sub_spx, lsq_sub_spfx = dsf.extractors_for_sweeps(lsq_sub_sweeps,
                                                          start=lsq_sub_start,
                                                          end=lsq_sub_end)
    lsq_sub_an = spa.LongSquareAnalysis(lsq_sub_spx,
                                        lsq_sub_spfx,
                                        subthresh_min_amp=-100.,
                                        require_suprathreshold=False)
    lsq_sub_features = lsq_sub_an.analyze(lsq_sub_sweeps)

    # Calculate feature vectors
    result = {}
    (subthresh_hyperpol_dict, hyperpol_deflect_dict
     ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
         lsq_sub_features, lsq_sub_sweeps)
    target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
    result["step_subthresh"] = fv.step_subthreshold(
        subthresh_hyperpol_dict,
        target_amps_for_step_subthresh,
        lsq_sub_start,
        lsq_sub_end,
        amp_tolerance=5)
    result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                 hyperpol_deflect_dict,
                                                 lsq_sub_start, lsq_sub_end)

    (subthresh_depol_dict,
     depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
         lsq_supra_features, lsq_supra_sweeps)
    result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
        subthresh_depol_dict, depol_deflect_dict, lsq_supra_start,
        lsq_supra_end)
    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        lsq_supra_sweeps, lsq_supra_features, lsq_supra_end - lsq_supra_start)
    result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                       lsq_supra_end)

    # Calculate AP waveform from long squares
    rheo_ind = lsq_supra_features["rheobase_sweep"].name
    sweep = lsq_supra_sweeps.sweeps[rheo_ind]
    # ap_window_length is undefined here; assume the 0.005 s default (see Example #8)
    ap_window_length = kwargs.get("ap_window_length", 0.005)
    lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
        [sweep], [lsq_supra_features["spikes_set"][rheo_ind]],
        window_length=ap_window_length)

    result["first_ap_v"] = lsq_ap_v
    result["first_ap_dv"] = lsq_ap_dv

    target_amplitudes = np.arange(0, 120, 20)
    supra_info_list = fv.identify_suprathreshold_sweep_sequence(
        lsq_supra_features, target_amplitudes, shift=10)
    result["psth"] = fv.psth_vector(supra_info_list, lsq_supra_start,
                                    lsq_supra_end)
    result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_supra_start,
                                              lsq_supra_end)
    spike_feature_list = [
        "upstroke_downstroke_ratio",
        "peak_v",
        "fast_trough_v",
        "threshold_v",
        "width",
    ]
    for feature in spike_feature_list:
        result["spiking_" + feature] = fv.spike_feature_vector(
            feature, supra_info_list, lsq_supra_start, lsq_supra_end)

    # Save the results
    specimen_ids = [0]
    results = [result]

    filtered_set = [(i, r) for i, r in zip(specimen_ids, results)
                    if "error" not in r]
    error_set = [{
        "id": i,
        "error": d
    } for i, d in zip(specimen_ids, results) if "error" in d]
    if len(filtered_set) == 0:
        logging.info("No specimens had results")
        return

    with open(os.path.join(output_dir, "fv_errors_{:s}.json".format(project)),
              "w") as f:
        json.dump(error_set, f, indent=4)

    used_ids, results = zip(*filtered_set)
    logging.info("Finished with {:d} processed specimens".format(
        len(used_ids)))

    k_sizes = {}
    for k in results[0].keys():
        if k not in k_sizes and results[0][k] is not None:
            k_sizes[k] = len(results[0][k])
        data = np.array([
            r[k] if k in r else np.nan * np.zeros(k_sizes[k]) for r in results
        ])
        if len(data.shape) == 1:  # it'll be 1D if there's just one specimen
            data = np.reshape(data, (1, -1))
        if data.shape[0] < len(used_ids):
            logging.warning("Missing data!")
            missing = np.array([k not in r for r in results])
            print(k, np.array(used_ids)[missing])
        np.save(
            os.path.join(output_dir, "fv_{:s}_{:s}.npy".format(k, project)),
            data)

    np.save(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)),
            used_ids)
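
A small follow-up sketch (assuming the same output_dir and project passed to main above) showing how the saved arrays line up by specimen id:

# Hypothetical check of the saved outputs; file names follow the patterns used in main
ids = np.load(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)))
isi = np.load(os.path.join(output_dir, "fv_isi_shape_{:s}.npy".format(project)))
assert isi.shape[0] == len(ids)  # one feature-vector row per processed specimen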
Example #8
def data_for_specimen_id(
    specimen_id,
    sweep_qc_option,
    data_source,
    ontology,
    ap_window_length=0.005,
    target_sampling_rate=50000,
    file_list=None,
):
    """
    Extract feature vector from given cell identified by the specimen_id
    Parameters
    ----------
    specimen_id : int
        cell identified
    sweep_qc_option : str
        see CollectFeatureVectorParameters input schema for details
    data_source: str
        see CollectFeatureVectorParameters input schema for details
    ontology : stimulus.StimulusOntology
        mapping of stimuli names to stimulus codes
    ap_window_length : float
        see CollectFeatureVectorParameters input schema for details
    target_sampling_rate : float
        sampling rate
    file_list : list of str
        nwbfile names
    Returns
    -------
    dict :
        features for a given cell specimen_id

    """
    logging.debug("specimen_id: {}".format(specimen_id))

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    data_set = su.dataset_for_specimen_id(specimen_id, data_source, ontology,
                                          file_list)
    if type(data_set) is dict and "error" in data_set:
        logging.warning(
            "Problem getting AibsDataSet for specimen {:d} from LIMS".format(
                specimen_id))
        return data_set

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, _, lsq_start,
         lsq_end) = su.preprocess_long_square_sweeps(data_set,
                                                     lsq_sweep_numbers)

    except Exception as detail:
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features, _ = su.preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features, _ = su.preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Calculate desired feature vectors
    result = {}

    if data_source == "filesystem":
        result["id"] = [specimen_id]

    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict,
            np.round(lsq_start, decimals=3), np.round(lsq_end, decimals=3))
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        # Calculate waveforms from each type of sweep
        spiking_ssq_sweep_list = [
            ssq_sweeps.sweeps[swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        spiking_ssq_info_list = [
            ssq_features["spikes_set"][swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
            spiking_ssq_sweep_list,
            spiking_ssq_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        spiking_ramp_sweep_list = [
            ramp_sweeps.sweeps[swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        spiking_ramp_info_list = [
            ramp_features["spikes_set"][swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
            spiking_ramp_sweep_list,
            spiking_ramp_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result
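
A minimal usage sketch of this version of data_for_specimen_id, assuming an NWB file reachable through the "filesystem" data source and an ontology built as in Example #6; the specimen id, qc option, and file name below are placeholders:

# Hypothetical call; all values are placeholders, not real specimen data
ontology = StimulusOntology(ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
features = data_for_specimen_id(
    specimen_id=12345,
    sweep_qc_option="none",
    data_source="filesystem",
    ontology=ontology,
    file_list=["/path/to/specimen_12345.nwb"],
)
if "error" not in features:
    print(sorted(features.keys()))  # e.g. "first_ap_v", "isi_shape", "psth", "step_subthresh", ...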