Example #1
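The test snippets in Examples #1–#5 use np, pd, and fv without showing their imports. A minimal setup sketch, assuming fv is the feature-vector module under test; the import path shown (ipfx.feature_vectors) is an assumption and may differ in this repository:

import numpy as np
import pandas as pd

# Assumed import path for the module under test
import ipfx.feature_vectors as fv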
def test_psth_between_sweep_interpolation():
    test_spike_times = ([0.25, 0.75], [0.2, 0.5, 0.6, 0.7])
    available_list = []
    for tst in test_spike_times:
        spike_info = pd.DataFrame({
            "threshold_t": tst,
        })
        available_list.append(spike_info)

    start = 0
    end = 1
    width = 20
    n_bins = int((end - start) / (width * 0.001))

    si_list = [None, available_list[0], None, available_list[1], None]
    output = fv.psth_vector(si_list, start=start, end=end, width=width)

    # Leading/trailing missing sweeps should copy the nearest available sweep
    assert np.array_equal(output[:n_bins], output[n_bins:2 * n_bins])
    assert np.array_equal(output[3 * n_bins:4 * n_bins],
                          output[4 * n_bins:5 * n_bins])
    # A missing sweep between two available sweeps should be their bin-wise mean
    assert np.array_equal(
        output[2 * n_bins:3 * n_bins],
        np.vstack([output[n_bins:2 * n_bins],
                   output[4 * n_bins:5 * n_bins]]).mean(axis=0))
Example #2
def test_psth_compressed_firing():
    test_spike_times = [0.2, 0.202, 0.204]
    spike_info = pd.DataFrame({"threshold_t": test_spike_times})
    start = 0
    end = 1
    width = 50

    # All spikes are within one bin
    output = fv.psth_vector([spike_info], start=start, end=end, width=width)
    assert np.sum(output > 0) == 1
    # 3 spikes in one 50 ms bin -> 3 / 0.05 s = 60 spikes/s
    assert np.max(output) == len(test_spike_times) / (width * 0.001)
Example #3
def test_psth_number_of_spikes():
    np.random.seed(42)
    n_spikes = np.random.randint(0, 100)
    start = 1.02
    end = 2.02
    width = 50
    test_spike_times = np.random.random(n_spikes) * (end - start) + start
    spike_info = pd.DataFrame({"threshold_t": test_spike_times})

    output = fv.psth_vector([spike_info], start=start, end=end, width=width)
    # Over a 1 s window, the mean rate (in spikes/s) equals the total spike count
    assert np.isclose(output.mean(), n_spikes)
Example #4
def test_psth_sparse_firing():
    test_spike_times = [0.2, 0.5]
    spike_info = pd.DataFrame({"threshold_t": test_spike_times})
    start = 0
    end = 1
    width = 50

    # Each spike falls in its own bin
    output = fv.psth_vector([spike_info], start=start, end=end, width=width)
    assert np.sum(output > 0) == len(test_spike_times)
    assert np.max(output) == 1 / (width * 0.001)
Example #5
def test_psth_duration_rounding():
    # end_b differs from end_a only by floating-point error; both windows
    # should yield the same number of PSTH bins
    start_a, end_a = 1.02, 2.02
    start_b, end_b = 1.02, 2.0199999999999996

    np.random.seed(42)
    n_spikes = np.random.randint(0, 100)

    test_spike_times = np.random.random(n_spikes) * (end_a - start_a) + start_a
    spike_info = pd.DataFrame({"threshold_t": test_spike_times})

    width = 50

    output_a = fv.psth_vector([spike_info],
                              start=start_a,
                              end=end_a,
                              width=width)
    output_b = fv.psth_vector([spike_info],
                              start=start_b,
                              end=end_b,
                              width=width)

    assert output_a.shape == output_b.shape
Example #6
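The pipeline snippets in Examples #6–#8 rely on logging, traceback, numpy, fv, ju, StimulusOntology, and several dataset helpers whose imports are not shown. A minimal setup sketch; the module paths are assumptions based on the Allen Institute ipfx package and may differ in this repository:

import json
import logging
import os
import traceback

import numpy as np

# Assumed module paths (based on the ipfx package; may differ here)
import ipfx.feature_vectors as fv
import allensdk.core.json_utilities as ju
from ipfx.stimulus import StimulusOntology

# HBGDataSet, su (script utilities), MiesNwb, recs_to_sweeps, SweepSet, dsf,
# spa, and the categorize_*/preprocess_* helpers are assumed to be provided by
# the surrounding project and are not reproduced here.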
def data_for_specimen_id(specimen_id,
                         sweep_qc_option,
                         data_source,
                         ap_window_length=0.006,
                         target_sampling_rate=10000,
                         nfiles=None):
    logging.debug("specimen_id: {}".format(specimen_id))

    lsq_fail = False
    ssq_fail = False
    ramp_fail = False
    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    ontology = StimulusOntology(
        ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE))
    if data_source == "local":

        nwb_path = nfiles[specimen_id]
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning(
                "Problem getting NWB file for specimen {:d}".format(
                    specimen_id))
            return nwb_path

        data_set = HBGDataSet(nwb_file=nwb_path, ontology=ontology)

    else:
        logging.error("invalid data source specified ({})".format(data_source))

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, lsq_start, lsq_end,
         lsq_spx) = preprocess_long_square_sweeps(data_set, lsq_sweep_numbers)
    except Exception as detail:
        lsq_fail = True
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features = preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        ssq_fail = True
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        # Failure is tolerated here; ssq_fail is checked before the
        # short-square-dependent features are computed below

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features = preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        ramp_fail = True
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        # Failure is tolerated here; ramp_fail is checked before the
        # ramp-dependent features are computed below

    # Calculate desired feature vectors
    result = {}
    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict, lsq_start, lsq_end)
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        if not ssq_fail:
            # Calculate waveforms from each type of sweep
            spiking_ssq_sweep_list = [
                ssq_sweeps.sweeps[swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            spiking_ssq_info_list = [
                ssq_features["spikes_set"][swp_ind]
                for swp_ind in ssq_features["common_amp_sweeps"].index
            ]
            ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
                spiking_ssq_sweep_list,
                spiking_ssq_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # Placeholders; replaced with the long square AP waveform below
            ssq_ap_v, ssq_ap_dv = np.nan, np.nan

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        if not ramp_fail:

            spiking_ramp_sweep_list = [
                ramp_sweeps.sweeps[swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            spiking_ramp_info_list = [
                ramp_features["spikes_set"][swp_ind]
                for swp_ind in ramp_features["spiking_sweeps"].index
            ]
            ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
                spiking_ramp_sweep_list,
                spiking_ramp_info_list,
                target_sampling_rate=target_sampling_rate,
                window_length=ap_window_length,
                skip_clipped=True)
        else:
            # No usable ramp sweeps; fall back to the long square AP waveform
            ramp_ap_v = np.copy(lsq_ap_v)
            ramp_ap_dv = np.copy(lsq_ap_dv)

        if ssq_fail:
            # No usable short square sweeps; fall back to the long square
            # AP waveform computed above
            ssq_ap_dv = np.copy(lsq_ap_dv)
            ssq_ap_v = np.copy(lsq_ap_v)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
            if feature == 'width':
                result["spiking_width"] = result["spiking_width"] / 2
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result
Example #7
def main(nwb_file, output_dir, project, **kwargs):
    nwb = MiesNwb(nwb_file)

    # SPECIFICS FOR EXAMPLE NWB =========

    # Only analyze one channel at a time
    channel = 0

    # We can work out code to automatically extract these based on stimulus names later.
    if_sweep_inds = [39, 45]
    targetv_sweep_inds = [15, 21]

    # END SPECIFICS =====================

    # Assemble all Recordings and convert to Sweeps
    supra_sweep_ids = list(range(*if_sweep_inds))
    sub_sweep_ids = list(range(*targetv_sweep_inds))

    supra_recs = [nwb.contents[i][channel] for i in supra_sweep_ids]
    sub_recs = [nwb.contents[i][channel] for i in sub_sweep_ids]

    # Build sweep sets
    lsq_supra_sweep_list, lsq_supra_dur = recs_to_sweeps(supra_recs)
    lsq_sub_sweep_list, lsq_sub_dur = recs_to_sweeps(sub_recs)
    lsq_supra_sweeps = SweepSet(lsq_supra_sweep_list)
    lsq_sub_sweeps = SweepSet(lsq_sub_sweep_list)

    lsq_supra_start = 0
    lsq_supra_end = lsq_supra_dur
    lsq_sub_start = 0
    lsq_sub_end = lsq_sub_dur

    # Pre-process sweeps
    lsq_supra_spx, lsq_supra_spfx = dsf.extractors_for_sweeps(
        lsq_supra_sweeps, start=lsq_supra_start, end=lsq_supra_end)
    lsq_supra_an = spa.LongSquareAnalysis(lsq_supra_spx,
                                          lsq_supra_spfx,
                                          subthresh_min_amp=-100.,
                                          require_subthreshold=False)
    lsq_supra_features = lsq_supra_an.analyze(lsq_supra_sweeps)

    lsq_sub_spx, lsq_sub_spfx = dsf.extractors_for_sweeps(lsq_sub_sweeps,
                                                          start=lsq_sub_start,
                                                          end=lsq_sub_end)
    lsq_sub_an = spa.LongSquareAnalysis(lsq_sub_spx,
                                        lsq_sub_spfx,
                                        subthresh_min_amp=-100.,
                                        require_suprathreshold=False)
    lsq_sub_features = lsq_sub_an.analyze(lsq_sub_sweeps)

    # Calculate feature vectors
    result = {}
    (subthresh_hyperpol_dict, hyperpol_deflect_dict
     ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
         lsq_sub_features, lsq_sub_sweeps)
    target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
    result["step_subthresh"] = fv.step_subthreshold(
        subthresh_hyperpol_dict,
        target_amps_for_step_subthresh,
        lsq_sub_start,
        lsq_sub_end,
        amp_tolerance=5)
    result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                 hyperpol_deflect_dict,
                                                 lsq_sub_start, lsq_sub_end)

    (subthresh_depol_dict,
     depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
         lsq_supra_features, lsq_supra_sweeps)
    result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
        subthresh_depol_dict, depol_deflect_dict, lsq_supra_start,
        lsq_supra_end)
    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        lsq_supra_sweeps, lsq_supra_features, lsq_supra_end - lsq_supra_start)
    result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                       lsq_supra_end)

    # Calculate AP waveform from long squares
    rheo_ind = lsq_supra_features["rheobase_sweep"].name
    sweep = lsq_supra_sweeps.sweeps[rheo_ind]
    # ap_window_length is not defined elsewhere in this script; a value of
    # 0.005 s is assumed here
    ap_window_length = 0.005
    lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
        [sweep], [lsq_supra_features["spikes_set"][rheo_ind]],
        window_length=ap_window_length)

    result["first_ap_v"] = lsq_ap_v
    result["first_ap_dv"] = lsq_ap_dv

    target_amplitudes = np.arange(0, 120, 20)
    supra_info_list = fv.identify_suprathreshold_sweep_sequence(
        lsq_supra_features, target_amplitudes, shift=10)
    result["psth"] = fv.psth_vector(supra_info_list, lsq_supra_start,
                                    lsq_supra_end)
    result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_supra_start,
                                              lsq_supra_end)
    spike_feature_list = [
        "upstroke_downstroke_ratio",
        "peak_v",
        "fast_trough_v",
        "threshold_v",
        "width",
    ]
    for feature in spike_feature_list:
        result["spiking_" + feature] = fv.spike_feature_vector(
            feature, supra_info_list, lsq_supra_start, lsq_supra_end)

    # Save the results
    specimen_ids = [0]
    results = [result]

    filtered_set = [(i, r) for i, r in zip(specimen_ids, results)
                    if "error" not in r]
    error_set = [{
        "id": i,
        "error": d
    } for i, d in zip(specimen_ids, results) if "error" in d]
    if len(filtered_set) == 0:
        logging.info("No specimens had results")
        return

    with open(os.path.join(output_dir, "fv_errors_{:s}.json".format(project)),
              "w") as f:
        json.dump(error_set, f, indent=4)

    used_ids, results = zip(*filtered_set)
    logging.info("Finished with {:d} processed specimens".format(
        len(used_ids)))

    k_sizes = {}
    for k in results[0].keys():
        if k not in k_sizes and results[0][k] is not None:
            k_sizes[k] = len(results[0][k])
        data = np.array([
            r[k] if k in r else np.nan * np.zeros(k_sizes[k]) for r in results
        ])
        if len(data.shape) == 1:  # it'll be 1D if there's just one specimen
            data = np.reshape(data, (1, -1))
        if data.shape[0] < len(used_ids):
            logging.warning("Missing data!")
            missing = np.array([k not in r for r in results])
            print(k, np.array(used_ids)[missing])
        np.save(
            os.path.join(output_dir, "fv_{:s}_{:s}.npy".format(k, project)),
            data)

    np.save(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)),
            used_ids)
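For reference, a hypothetical invocation of main(); the file path, output directory, and project name below are placeholders, not values from the original script:

if __name__ == "__main__":
    # Placeholder arguments for illustration only
    main(nwb_file="example_cell.nwb", output_dir=".", project="demo")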
Example #8
def data_for_specimen_id(
    specimen_id,
    sweep_qc_option,
    data_source,
    ontology,
    ap_window_length=0.005,
    target_sampling_rate=50000,
    file_list=None,
):
    """
    Extract feature vector from given cell identified by the specimen_id
    Parameters
    ----------
    specimen_id : int
        cell identified
    sweep_qc_option : str
        see CollectFeatureVectorParameters input schema for details
    data_source: str
        see CollectFeatureVectorParameters input schema for details
    ontology : stimulus.StimulusOntology
        mapping of stimuli names to stimulus codes
    ap_window_length : float
        see CollectFeatureVectorParameters input schema for details
    target_sampling_rate : float
        sampling rate
    file_list : list of str
        nwbfile names
    Returns
    -------
    dict :
        features for a given cell specimen_id

    """
    logging.debug("specimen_id: {}".format(specimen_id))

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    data_set = su.dataset_for_specimen_id(specimen_id, data_source, ontology,
                                          file_list)
    if isinstance(data_set, dict) and "error" in data_set:
        logging.warning(
            "Problem getting AibsDataSet for specimen {:d} from LIMS".format(
                specimen_id))
        return data_set

    # Identify and preprocess long square sweeps
    try:
        lsq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.long_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        (lsq_sweeps, lsq_features, _, lsq_start,
         lsq_end) = su.preprocess_long_square_sweeps(data_set,
                                                     lsq_sweep_numbers)

    except Exception as detail:
        logging.warning(
            "Exception when preprocessing long square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess short square sweeps
    try:
        ssq_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.short_square_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ssq_sweeps, ssq_features, _ = su.preprocess_short_square_sweeps(
            data_set, ssq_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing short square sweeps from specimen {:d}"
            .format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Identify and preprocess ramp sweeps
    try:
        ramp_sweep_numbers = su.categorize_iclamp_sweeps(
            data_set,
            ontology.ramp_names,
            sweep_qc_option=sweep_qc_option,
            specimen_id=specimen_id)
        ramp_sweeps, ramp_features, _ = su.preprocess_ramp_sweeps(
            data_set, ramp_sweep_numbers)
    except Exception as detail:
        logging.warning(
            "Exception when preprocessing ramp sweeps from specimen {:d}".
            format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "sweep_table",
                "details": traceback.format_exc(limit=None)
            }
        }

    # Calculate desired feature vectors
    result = {}

    if data_source == "filesystem":
        result["id"] = [specimen_id]

    try:
        (subthresh_hyperpol_dict, hyperpol_deflect_dict
         ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
             lsq_features, lsq_sweeps)
        target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
        result["step_subthresh"] = fv.step_subthreshold(
            subthresh_hyperpol_dict,
            target_amps_for_step_subthresh,
            lsq_start,
            lsq_end,
            amp_tolerance=5)
        result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                     hyperpol_deflect_dict,
                                                     lsq_start, lsq_end)
        (subthresh_depol_dict,
         depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
             lsq_features, lsq_sweeps)
        result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
            subthresh_depol_dict, depol_deflect_dict,
            np.round(lsq_start, decimals=3), np.round(lsq_end, decimals=3))
        isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
            lsq_sweeps, lsq_features, lsq_end - lsq_start)
        result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                           lsq_end)

        # Calculate waveforms from each type of sweep
        spiking_ssq_sweep_list = [
            ssq_sweeps.sweeps[swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        spiking_ssq_info_list = [
            ssq_features["spikes_set"][swp_ind]
            for swp_ind in ssq_features["common_amp_sweeps"].index
        ]
        ssq_ap_v, ssq_ap_dv = fv.first_ap_vectors(
            spiking_ssq_sweep_list,
            spiking_ssq_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        rheo_ind = lsq_features["rheobase_sweep"].name
        sweep = lsq_sweeps.sweeps[rheo_ind]
        lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
            [sweep], [lsq_features["spikes_set"][rheo_ind]],
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length)

        spiking_ramp_sweep_list = [
            ramp_sweeps.sweeps[swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        spiking_ramp_info_list = [
            ramp_features["spikes_set"][swp_ind]
            for swp_ind in ramp_features["spiking_sweeps"].index
        ]
        ramp_ap_v, ramp_ap_dv = fv.first_ap_vectors(
            spiking_ramp_sweep_list,
            spiking_ramp_info_list,
            target_sampling_rate=target_sampling_rate,
            window_length=ap_window_length,
            skip_clipped=True)

        # Combine so that differences can be assessed by analyses like sPCA
        result["first_ap_v"] = np.hstack([ssq_ap_v, lsq_ap_v, ramp_ap_v])
        result["first_ap_dv"] = np.hstack([ssq_ap_dv, lsq_ap_dv, ramp_ap_dv])

        target_amplitudes = np.arange(0, 120, 20)
        supra_info_list = fv.identify_suprathreshold_spike_info(
            lsq_features, target_amplitudes, shift=10)
        result["psth"] = fv.psth_vector(supra_info_list, lsq_start, lsq_end)
        result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_start,
                                                  lsq_end)

        spike_feature_list = [
            "upstroke_downstroke_ratio",
            "peak_v",
            "fast_trough_v",
            "threshold_v",
            "width",
        ]
        for feature in spike_feature_list:
            result["spiking_" + feature] = fv.spike_feature_vector(
                feature, supra_info_list, lsq_start, lsq_end)
    except Exception as detail:
        logging.warning(
            "Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {
            "error": {
                "type": "processing",
                "details": traceback.format_exc(limit=None)
            }
        }

    return result