Example #1
import os

import ipfx.data_set_features as dsf
import ipfx.stim_features as stf
import ipfx.stimulus_protocol_analysis as spa
from ipfx.aibs_data_set import AibsDataSet

# `ontology` (a StimulusOntology instance) and `download_file` are assumed to
# be defined elsewhere in the test module.


def feature_vector_input():

    TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')

    nwb_file_name = "Pvalb-IRES-Cre;Ai14-415796.02.01.01.nwb"
    nwb_file_full_path = os.path.join(TEST_DATA_PATH, nwb_file_name)

    if not os.path.exists(nwb_file_full_path):
        download_file(nwb_file_name, nwb_file_full_path)

    data_set = AibsDataSet(nwb_file=nwb_file_full_path, ontology=ontology)

    lsq_sweep_numbers = data_set.filtered_sweep_table(
        clamp_mode=data_set.CURRENT_CLAMP,
        stimuli=ontology.long_square_names).sweep_number.sort_values().values

    lsq_sweeps = data_set.sweep_set(lsq_sweep_numbers)
    lsq_start, lsq_dur, _, _, _ = stf.get_stim_characteristics(
        lsq_sweeps.sweeps[0].i, lsq_sweeps.sweeps[0].t)

    lsq_end = lsq_start + lsq_dur
    lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(lsq_sweeps,
                                                  start=lsq_start,
                                                  end=lsq_end,
                                                  **dsf.detection_parameters(
                                                      data_set.LONG_SQUARE))
    lsq_an = spa.LongSquareAnalysis(lsq_spx, lsq_spfx, subthresh_min_amp=-100.)

    lsq_features = lsq_an.analyze(lsq_sweeps)

    return lsq_sweeps, lsq_features, lsq_start, lsq_end
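For orientation, a hedged usage sketch of this fixture; the `rheobase_i` key is borrowed from the long-square feature list printed in Example #3 below and is an assumption here:

# Hypothetical usage; assumes the test NWB file and the module-level
# `ontology` are available.
lsq_sweeps, lsq_features, lsq_start, lsq_end = feature_vector_input()
print("stimulus window: {:.2f}-{:.2f} s".format(lsq_start, lsq_end))
print("rheobase current: {}".format(lsq_features["rheobase_i"]))  # key as in Example #3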
Example #2
import os

import matplotlib.pyplot as plt
import seaborn as sns
from allensdk.api.queries.cell_types_api import CellTypesApi

from ipfx.aibs_data_set import AibsDataSet
from ipfx.ephys_extractor import SpikeExtractor, SpikeTrainFeatureExtractor
from ipfx.stimulus_protocol_analysis import ShortSquareAnalysis
import ipfx.ephys_features as ft

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

# build a data set and find the short squares
data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
shsq_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.short_square_names)
shsq_sweep_set = data_set.sweep_set(shsq_table.sweep_number)

# Estimate the dv cutoff and threshold fraction (we know the stimulus starts at 0.27 s)
dv_cutoff, thresh_frac = ft.estimate_adjusted_detection_parameters(
    shsq_sweep_set.v, shsq_sweep_set.t, 0.27, 0.271)
# Build the extractors
start = 0.27
spx = SpikeExtractor(start=start, dv_cutoff=dv_cutoff, thresh_frac=thresh_frac)
sptrx = SpikeTrainFeatureExtractor(start=start, end=None)

# Run the analysis
shsq_analysis = ShortSquareAnalysis(spx, sptrx)
results = shsq_analysis.analyze(shsq_sweep_set)

# Plot the sweeps at the lowest amplitude that evoked the most spikes.
# ShortSquareAnalysis is expected to report these under the
# "common_amp_sweeps" key; treat that key name as an assumption.
sns.set_style("white")
for swp in results["common_amp_sweeps"].sweeps:
    plt.plot(swp.t, swp.v, linewidth=0.5)
plt.show()
Example #3
import os

import matplotlib.pyplot as plt
from allensdk.api.queries.cell_types_api import CellTypesApi

import ipfx.ephys_extractor as efex
import ipfx.stimulus_protocol_analysis as spa
from ipfx.aibs_data_set import AibsDataSet

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

# build a data set and find the long squares
data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
lsq_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.long_square_names)
lsq_set = data_set.sweep_set(lsq_table.sweep_number)

# build the extractors
spx = efex.SpikeExtractor(start=0.27, end=1.27)
spfx = efex.SpikeTrainFeatureExtractor(start=0.27, end=1.27)

# run the analysis and print out a few of the features
lsqa = spa.LongSquareAnalysis(spx, spfx, subthresh_min_amp=-100.0)
data = lsqa.analyze(lsq_set)

for field in [
        'tau', 'v_baseline', 'input_resistance', 'vm_for_sag', 'fi_fit_slope',
        'sag', 'rheobase_i'
]:
    print("%s: %s" % (field, str(data[field])))
Example #4
import os

import matplotlib.pyplot as plt
import seaborn as sns
from allensdk.api.queries.cell_types_api import CellTypesApi

from ipfx.aibs_data_set import AibsDataSet
from ipfx.ephys_extractor import SpikeExtractor, SpikeTrainFeatureExtractor
from ipfx.stimulus_protocol_analysis import RampAnalysis

# download a specific experiment NWB file via AllenSDK
ct = CellTypesApi()

specimen_id = 595570553
nwb_file = "%d.nwb" % specimen_id
if not os.path.exists(nwb_file):
    ct.save_ephys_data(specimen_id, nwb_file)
sweep_info = ct.get_ephys_sweeps(specimen_id)

# Build the data set and find the ramp sweeps
data_set = AibsDataSet(sweep_info=sweep_info, nwb_file=nwb_file)
ramp_table = data_set.filtered_sweep_table(
    stimuli=data_set.ontology.ramp_names)
ramp_sweep_set = data_set.sweep_set(ramp_table.sweep_number)

# Build the extractors (we know stimulus starts at 0.27 s)
start = 0.27
spx = SpikeExtractor(start=start, end=None)
sptrx = SpikeTrainFeatureExtractor(start=start, end=None)

# Run the analysis
ramp_analysis = RampAnalysis(spx, sptrx)
results = ramp_analysis.analyze(ramp_sweep_set)

# Plot the sweeps and the latency to the first spike of each
sns.set_style("white")
for swp in ramp_sweep_set.sweeps:
    plt.plot(swp.t, swp.v, linewidth=0.5)
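The snippet ends before plotting the latencies; a minimal completion, assuming the ramp results carry a per-sweep `spiking_sweeps` table with `latency`, `threshold_v`, and `threshold_t` columns (all assumed names):

spiking = results["spiking_sweeps"]  # assumed key
for lat, tv, tt in zip(spiking["latency"], spiking["threshold_v"], spiking["threshold_t"]):
    plt.plot(tt, tv, "bo")  # mark the first spike; its time minus `start` is the latency
    print("latency: {:.4f} s".format(lat))
plt.show()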
Example #5
import logging
import traceback

import allensdk.core.json_utilities as ju

from ipfx.aibs_data_set import AibsDataSet
from ipfx.stimulus import StimulusOntology

# `chirp` (chirp feature extraction), `lims_nwb_information`, and
# `sdk_nwb_information` are assumed to be defined elsewhere in the
# pipeline module.


def data_for_specimen_id(specimen_id, data_source, chirp_stimulus_codes):
    logging.debug("specimen_id: {}".format(specimen_id))

    # Manually edit the ontology so chirp sweeps can be identified by stimulus code
    ontology_data = ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE)
    mask = []
    for od in ontology_data:
        mask_val = True
        for tagset in od:
            for c in chirp_stimulus_codes:
                if c in tagset and "code" in tagset:
                    mask_val = False
                    break
        mask.append(mask_val)
    ontology_data = [od for od, m in zip(ontology_data, mask) if m]
    ontology_data.append([
        ["code"] + chirp_stimulus_codes,
        [
          "name",
          "Chirp",
        ],
        [
          "core",
          "Core 2"
        ]
    ])

    ontology = StimulusOntology(ontology_data)

    # Find or retrieve NWB file and ancillary info and construct an AibsDataSet object
    if data_source == "lims":
        nwb_path, h5_path = lims_nwb_information(specimen_id)
        if type(nwb_path) is dict and "error" in nwb_path:
            logging.warning("Problem getting NWB file for specimen {:d} from LIMS".format(specimen_id))
            return nwb_path

        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, h5_file=h5_path, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} from LIMS".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    elif data_source == "sdk":
        nwb_path, sweep_info = sdk_nwb_information(specimen_id)
        try:
            data_set = AibsDataSet(
                nwb_file=nwb_path, sweep_info=sweep_info, ontology=ontology)
        except Exception as detail:
            logging.warning("Exception when loading specimen {:d} via Allen SDK".format(specimen_id))
            logging.warning(detail)
            return {"error": {"type": "dataset", "details": traceback.format_exc(limit=None)}}
    else:
        logging.error("invalid data source specified ({})".format(data_source))
        return {"error": {"type": "dataset", "details": "invalid data source ({})".format(data_source)}}

    # Identify chirp sweeps

    try:
        iclamp_st = data_set.filtered_sweep_table(
            clamp_mode=data_set.CURRENT_CLAMP, stimuli=["Chirp"])
        chirp_sweep_numbers = iclamp_st["sweep_number"].sort_values().values
    except Exception as detail:
        logging.warning("Exception when identifying sweeps from specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {"error": {"type": "sweep_table", "details": traceback.format_exc(limit=1)}}

    if len(chirp_sweep_numbers) == 0:
        logging.info("No chirp sweeps for {:d}".format(specimen_id))
        return {"error": {"type": "processing", "details:": "no available chirp sweeps"}}

    try:
        result = chirp.extract_chirp_feature_vector(data_set, chirp_sweep_numbers)
    except Exception as detail:
        logging.warning("Exception when processing specimen {:d}".format(specimen_id))
        logging.warning(detail)
        return {"error": {"type": "processing", "details": traceback.format_exc(limit=1)}}

    return result
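A minimal call sketch; the specimen ID is reused from the examples above, while the chirp stimulus code is a placeholder, not a real LIMS code:

result = data_for_specimen_id(
    specimen_id=595570553,
    data_source="sdk",
    chirp_stimulus_codes=["C2CHIRP"],  # hypothetical code; substitute the real ones
)
if isinstance(result, dict) and "error" in result:
    print("failed:", result["error"]["type"])
else:
    print("chirp feature vector extracted")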