Example #1
def test_chirp_downsample():
    # Stuff for sweep construction
    sampling_rate = 2000
    t = np.arange(0, 20 * sampling_rate) * (1 / sampling_rate)
    clamp_mode = "CurrentClamp"
    epochs = {
        "sweep": (0, len(t) - 1),
        "test": None,
        "recording": None,
        "experiment": None,
        "stim": None
    }

    base_chirp = scipy.signal.chirp(t, 0.5, 20, 40, method="linear")
    i = base_chirp

    # linearly decreasing profile
    profile = np.linspace(1., 0.5, num=len(t))
    v = base_chirp * profile
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    sweep_set = SweepSet([test_sweep])

    amp, phase, freq = chirp.chirp_amp_phase(sweep_set,
                                             start=0,
                                             end=19.9,
                                             down_rate=sampling_rate / 2)

    # Confirm goes from 1 to 0.5
    tol = 0.1
    assert np.abs(amp[0] - 1) < tol
    assert np.abs(amp[-1] - 0.5) < tol
Example #2
def test_identify_isi_shape_one_spike():
    min_spike = 5

    test_input_amplitudes = [10, 20, 30, 40]
    test_avg_rate = [0, 1, 1, 1]
    test_features = {
        "sweeps": pd.DataFrame({
            "stim_amp": test_input_amplitudes,
            "avg_rate": test_avg_rate,
        }),
        "spikes_set": [None] * len(test_avg_rate),
    }

    # Random test sweeps
    np.random.seed(42)
    n_points = 100
    t = np.arange(n_points)
    i = np.zeros_like(t)
    epochs = {"sweep": (0, n_points - 1), "test": None, "recording": None, "experiment": None, "stim": None}
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    sweep_list = []
    for a in test_input_amplitudes:
        v = np.random.randn(n_points)
        test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
        sweep_list.append(test_sweep)
    test_sweep_set = SweepSet(sweep_list)

    selected_sweep, _ = fv.identify_sweep_for_isi_shape(
        test_sweep_set, test_features, duration=1, min_spike=min_spike)

    assert np.array_equal(selected_sweep.v, sweep_list[1].v)
Example #3
def validate_sweeps(data_set, sweep_numbers, extra_dur=0.2):
    check_sweeps = data_set.sweep_set(sweep_numbers)
    check_sweeps.select_epoch("recording")
    valid_sweep_stim = []
    start = None
    dur = None
    for swp in check_sweeps.sweeps:
        if len(swp.t) == 0:
            valid_sweep_stim.append(False)
            continue

        swp_start, swp_dur, _, _, _ = stf.get_stim_characteristics(
            swp.i, swp.t)
        if swp_start is None:
            valid_sweep_stim.append(False)
        else:
            start = swp_start
            dur = swp_dur
            valid_sweep_stim.append(True)
    if start is None:
        # Could not find any sweeps to define stimulus interval
        return [], None, None

    end = start + dur

    # Check that all sweeps are long enough and not ended early
    good_sweeps = [
        s for s, v in zip(check_sweeps.sweeps, valid_sweep_stim)
        if s.t[-1] >= end + extra_dur and v is True and not np.all(
            s.v[tsu.find_time_index(s.t, end) -
                100:tsu.find_time_index(s.t, end)] == 0)
    ]
    return SweepSet(sweeps=good_sweeps), start, end
Example #4
def sweep_set_for_model(t, v, i):
    """Generate a SweepSet object based on a single model sweep

    Parameters
    ----------
    t: array
        Time data (sec)
    v: array
        Voltage data (mV)
    i: array
        Current stimulus data (nA)

    Returns
    -------
    SweepSet
        Contains one Sweep object
    """
    sampling_rate = 1 / (t[1] - t[0])
    sweep = Sweep(t=t,
                  v=v,
                  i=i,
                  sampling_rate=sampling_rate,
                  sweep_number=None,
                  clamp_mode="CurrentClamp",
                  epochs=None,
                  )
    return SweepSet([sweep])
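A minimal usage sketch for the example above, not part of the original code: the sampling interval, step timing, and the toy voltage response are made-up values chosen only to match the docstring's units (t in seconds, v in mV, i in nA).

import numpy as np

dt = 5e-5                               # assumed 20 kHz sampling interval (sec)
t = np.arange(0, 1.0, dt)
i = np.zeros_like(t)
i[(t >= 0.1) & (t < 0.6)] = 0.05        # hypothetical 0.05 nA current step
v = -70.0 + 5.0 * (i > 0)               # toy voltage trace (mV), not a real model output

model_sweep_set = sweep_set_for_model(t, v, i)
assert len(model_sweep_set.sweeps) == 1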
Example #5
def test_divide_chirps_by_stimulus():
    # Stuff for sweep construction
    sampling_rate = 2000
    t = np.arange(0, 20 * sampling_rate) * (1 / sampling_rate)
    clamp_mode = "CurrentClamp"
    epochs = {
        "sweep": (0, len(t) - 1),
        "test": None,
        "recording": None,
        "experiment": None,
        "stim": None
    }

    i1 = scipy.signal.chirp(t, 0.5, 20, 40, method="linear")
    i2 = scipy.signal.chirp(t, 0.5, 20, 40, method="logarithmic")
    # linearly decreasing profile
    profile = np.linspace(1., 0.5, num=len(t))

    sweep_set = SweepSet([
        Sweep(t, i * profile, i, clamp_mode, sampling_rate, epochs=epochs)
        for i in (i1, i2, i2)
    ])

    divided_list = chirp.divide_chirps_by_stimulus(sweep_set)
    lengths = [len(d.sweeps) for d in divided_list]
    assert np.min(lengths) == 1
    assert np.max(lengths) == 2
    bigger_sweep_set = divided_list[np.argmax(lengths)]
    smaller_sweep_set = divided_list[np.argmin(lengths)]
    assert np.all(bigger_sweep_set.sweeps[0].i == bigger_sweep_set.sweeps[1].i)
    assert np.any(
        bigger_sweep_set.sweeps[0].i != smaller_sweep_set.sweeps[0].i)
Example #6
def sweeps_from_nwb(nwb_data, sweep_number_list):
    """ Generate a SweepSet object from an NWB reader and list of sweep numbers

    Sweeps should be in current-clamp mode.

    Parameters
    ----------
    nwb_data: NwbReader
    sweep_number_list: list
        List of sweep numbers

    Returns
    -------
    sweeps: SweepSet
    stim_start: float
        Start time of stimulus (seconds)
    stim_end: float
        End time of stimulus (seconds)
    """

    sweep_list = []
    start = None
    dur = None
    for sweep_number in sweep_number_list:
        sweep_data = nwb_data.get_sweep_data(sweep_number)
        sampling_rate = sweep_data["sampling_rate"]
        dt = 1.0 / sampling_rate
        t = np.arange(0, len(sweep_data["stimulus"])) * dt
        v = sweep_data["response"] * 1e3  # data from NWB now comes in Volts
        i = sweep_data["stimulus"] * 1e12  # data from NWB now comes in Amps
        sweep = Sweep(
            t=t,
            v=v,
            i=i,
            sampling_rate=sampling_rate,
            sweep_number=sweep_number,
            clamp_mode="CurrentClamp",
            epochs=None,
        )
        sweep_list.append(sweep)
        start, dur, _, _, _ = stf.get_stim_characteristics(i, t)
    if start is None or dur is None:
        return SweepSet(sweep_list), None, None
    else:
        return SweepSet(sweep_list), start, start + dur
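A possible call pattern for the example above; the sweep numbers are hypothetical and nwb_data is assumed to be an NwbReader instance as described in the docstring:

sweeps, stim_start, stim_end = sweeps_from_nwb(nwb_data, [10, 11, 12])
if stim_start is None:
    print("could not determine the stimulus interval")
else:
    print("%d sweeps, stimulus from %.3f to %.3f s"
          % (len(sweeps.sweeps), stim_start, stim_end))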
Example #7
def test_identify_sub_hyperpol_levels():
    test_input_amplitudes = [-2000, -100, -90, -50, -10, 10]
    test_features = {
        "subthreshold_sweeps": pd.DataFrame(
            {
                "stim_amp": test_input_amplitudes,
                "peak_deflect": list(
                    zip(
                        np.zeros(len(test_input_amplitudes)),
                        np.zeros(len(test_input_amplitudes)),
                    )
                ),
                "v_baseline": np.ones(len(test_input_amplitudes)),
            }
        )
    }

    # Random test sweeps
    np.random.seed(42)
    v = np.random.randn(100)
    t = np.arange(len(v))
    i = np.zeros_like(t)
    epochs = {
        "sweep": (0, len(v) - 1),
        "test": None,
        "recording": None,
        "experiment": None,
        "stim": None,
    }
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    sweep_list = []
    for a in test_input_amplitudes:
        test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
        sweep_list.append(test_sweep)
    test_sweep_set = SweepSet(sweep_list)

    amp_sweep_dict, deflect_dict = fv.identify_subthreshold_hyperpol_with_amplitudes(
        test_features, test_sweep_set
    )

    for k in amp_sweep_dict:
        assert k in deflect_dict
        assert len(deflect_dict[k]) == 2

    less_than_one_nanoamp = [a for a in test_input_amplitudes if a < -1000]
    for a in less_than_one_nanoamp:
        assert a not in amp_sweep_dict

    depolarizing = [a for a in test_input_amplitudes if a >= 0]
    for a in depolarizing:
        assert a not in amp_sweep_dict

    should_belong = [a for a in test_input_amplitudes if a < 0 and a >= -1000]
    for a in should_belong:
        assert a in amp_sweep_dict
Example #8
def test_chirp_output():
    # Stuff for sweep construction
    sampling_rate = 2000
    t = np.arange(0, 20 * sampling_rate) * (1 / sampling_rate)
    clamp_mode = "CurrentClamp"
    epochs = {
        "sweep": (0, len(t) - 1),
        "test": None,
        "recording": None,
        "experiment": None,
        "stim": None
    }

    base_chirp = scipy.signal.chirp(t, 0.5, 20, 40, method="linear")
    i = base_chirp

    # linearly decreasing profile
    profile = np.linspace(1., 0.5, num=len(t))
    v = base_chirp * profile
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    sweep_set = SweepSet([test_sweep])

    amp, phase, freq = chirp.chirp_amp_phase(sweep_set, start=0, end=19.9)

    # Confirm goes from 1 to 0.5
    tol = 0.1
    assert np.abs(amp[0] - 1) < tol
    assert np.abs(amp[-1] - 0.5) < tol

    # "resonant" profile
    profile = (-(t - 10)**2 + 100) / 100 + 1
    v = base_chirp * profile
    test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
    sweep_set = SweepSet([test_sweep])

    amp, phase, freq = chirp.chirp_amp_phase(sweep_set, start=0, end=19.9)

    # Confirm it peaks around 2 near 20 Hz
    amp_tol = 0.1
    freq_tol = 1

    assert np.abs(np.max(amp) - 2) < amp_tol
    assert np.abs(freq[np.argmax(amp)] - 20) < freq_tol
Example #9
def test_identify_sub_depol_levels_without_subthreshold_sweeps():
    test_input_amplitudes = [-50, -10, 10, 20]
    test_avg_rate = [0, 0, 0, 5]
    test_features = {"sweeps": pd.DataFrame({
        "stim_amp": test_input_amplitudes,
        "peak_deflect": list(zip(np.zeros(len(test_input_amplitudes)),
            np.zeros(len(test_input_amplitudes)))),
        "v_baseline": np.ones(len(test_input_amplitudes)),
        "avg_rate": test_avg_rate,
    })}

    # Random test sweeps
    np.random.seed(42)
    v = np.random.randn(100)
    t = np.arange(len(v))
    i = np.zeros_like(t)
    epochs = {"sweep": (0, len(v) - 1), "test": None, "recording": None, "experiment": None, "stim": None}
    sampling_rate = 1
    clamp_mode = "CurrentClamp"
    sweep_list = []
    for a in test_input_amplitudes:
        test_sweep = Sweep(t, v, i, clamp_mode, sampling_rate, epochs=epochs)
        sweep_list.append(test_sweep)
    test_sweep_set = SweepSet(sweep_list)

    amp_sweep_dict, deflect_dict = fv.identify_subthreshold_depol_with_amplitudes(
        test_features, test_sweep_set)

    for k in amp_sweep_dict:
        assert k in deflect_dict
        assert len(deflect_dict[k]) == 2

    depolarizing_spiking = [a for a, r in zip(test_input_amplitudes, test_avg_rate)
         if a > 0 and r > 0]
    for a in depolarizing_spiking:
        assert a not in amp_sweep_dict

    depolarizing_non_spiking = [a for a, r in zip(test_input_amplitudes, test_avg_rate)
         if a > 0 and r == 0]
    for a in depolarizing_non_spiking:
        assert a in amp_sweep_dict

    hyperpolarizing = [a for a in test_input_amplitudes if a <= 0]
    for a in hyperpolarizing:
        assert a not in amp_sweep_dict
Example #10
def get_chirp_features(recordings, cell_id=''):
    errors = []
    if len(recordings) == 0:
        errors.append('No chirp sweeps for cell %s' % cell_id)
        return {}, errors

    sweep_list = []
    for rec in recordings:
        sweep = MPSweep(rec)
        if sweep is not None:
            sweep_list.append(sweep)

    if len(sweep_list) == 0:
        errors.append('No chirp sweeps passed qc for cell %s' % cell_id)
        return {}, errors

    sweep_set = SweepSet(sweep_list)
    try:
        all_chirp_features = extract_chirp_fft(sweep_set,
                                               min_freq=1,
                                               max_freq=15)
        results = {
            'chirp_peak_freq': all_chirp_features['peak_freq'],
            'chirp_3db_freq': all_chirp_features['3db_freq'],
            'chirp_peak_ratio': all_chirp_features['peak_ratio'],
            'chirp_peak_impedance':
            all_chirp_features['peak_impedance'] * 1e9,  #unscale from mV/pA,
            'chirp_sync_freq': all_chirp_features['sync_freq'],
            'chirp_inductive_phase':
            all_chirp_features['total_inductive_phase'],
        }
    except FeatureError as exc:
        logger.warning(
            f'Error processing chirps for cell {cell_id}: {str(exc)}')
        errors.append('Error processing chirps for cell %s: %s' %
                      (cell_id, str(exc)))
        results = {}

    return results, errors
Example #11
    def sweep_set(
            self,
            sweep_numbers: Union[Sequence[int], int, None] = None) -> SweepSet:
        """Construct a SweepSet object, which offers convenient access to an 
        ordered collection of sweeps.

        Parameters
        ----------
        sweep_numbers : Identifiers for the sweeps which will make up this set. 
            If None, use all available sweeps.

        Returns
        -------
        A SweepSet constructed from the requested sweeps
        """

        if sweep_numbers is None:
            _sweep_numbers: Sequence = self._data.sweep_numbers
        elif not hasattr(sweep_numbers, "__len__"):  # not testing for order
            _sweep_numbers = [sweep_numbers]
        else:
            _sweep_numbers = sweep_numbers  # type: ignore

        return SweepSet([self.sweep(num) for num in _sweep_numbers])
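A short illustration of the argument forms this method accepts; data_set stands in for whatever object exposes the method, and the sweep numbers are hypothetical:

all_sweeps = data_set.sweep_set()            # None -> every available sweep
one_sweep = data_set.sweep_set(12)           # a bare int is wrapped in a list
some_sweeps = data_set.sweep_set([12, 13])   # a sequence is used as given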
Example #12
    def sweep_set(self, sweep_numbers):
        try:
            return SweepSet([self.sweep(sn) for sn in sweep_numbers])
        except TypeError:  # not iterable
            return SweepSet([self.sweep(sweep_numbers)])
Example #13
def main(nwb_file, output_dir, project, **kwargs):
    nwb = MiesNwb(nwb_file)

    # SPECIFICS FOR EXAMPLE NWB =========

    # Only analyze one channel at a time
    channel = 0

    # We can work out code to automatically extract these based on stimulus names later.
    if_sweep_inds = [39, 45]
    targetv_sweep_inds = [15, 21]

    # END SPECIFICS =====================

    # Assemble all Recordings and convert to Sweeps
    supra_sweep_ids = list(range(*if_sweep_inds))
    sub_sweep_ids = list(range(*targetv_sweep_inds))

    supra_recs = [nwb.contents[i][channel] for i in supra_sweep_ids]
    sub_recs = [nwb.contents[i][channel] for i in sub_sweep_ids]

    # Build sweep sets
    lsq_supra_sweep_list, lsq_supra_dur = recs_to_sweeps(supra_recs)
    lsq_sub_sweep_list, lsq_sub_dur = recs_to_sweeps(sub_recs)
    lsq_supra_sweeps = SweepSet(lsq_supra_sweep_list)
    lsq_sub_sweeps = SweepSet(lsq_sub_sweep_list)

    lsq_supra_start = 0
    lsq_supra_end = lsq_supra_dur
    lsq_sub_start = 0
    lsq_sub_end = lsq_sub_dur

    # Pre-process sweeps
    lsq_supra_spx, lsq_supra_spfx = dsf.extractors_for_sweeps(
        lsq_supra_sweeps, start=lsq_supra_start, end=lsq_supra_end)
    lsq_supra_an = spa.LongSquareAnalysis(lsq_supra_spx,
                                          lsq_supra_spfx,
                                          subthresh_min_amp=-100.,
                                          require_subthreshold=False)
    lsq_supra_features = lsq_supra_an.analyze(lsq_supra_sweeps)

    lsq_sub_spx, lsq_sub_spfx = dsf.extractors_for_sweeps(lsq_sub_sweeps,
                                                          start=lsq_sub_start,
                                                          end=lsq_sub_end)
    lsq_sub_an = spa.LongSquareAnalysis(lsq_sub_spx,
                                        lsq_sub_spfx,
                                        subthresh_min_amp=-100.,
                                        require_suprathreshold=False)
    lsq_sub_features = lsq_sub_an.analyze(lsq_sub_sweeps)

    # Calculate feature vectors
    result = {}
    (subthresh_hyperpol_dict, hyperpol_deflect_dict
     ) = fv.identify_subthreshold_hyperpol_with_amplitudes(
         lsq_sub_features, lsq_sub_sweeps)
    target_amps_for_step_subthresh = [-90, -70, -50, -30, -10]
    result["step_subthresh"] = fv.step_subthreshold(
        subthresh_hyperpol_dict,
        target_amps_for_step_subthresh,
        lsq_sub_start,
        lsq_sub_end,
        amp_tolerance=5)
    result["subthresh_norm"] = fv.subthresh_norm(subthresh_hyperpol_dict,
                                                 hyperpol_deflect_dict,
                                                 lsq_sub_start, lsq_sub_end)

    (subthresh_depol_dict,
     depol_deflect_dict) = fv.identify_subthreshold_depol_with_amplitudes(
         lsq_supra_features, lsq_supra_sweeps)
    result["subthresh_depol_norm"] = fv.subthresh_depol_norm(
        subthresh_depol_dict, depol_deflect_dict, lsq_supra_start,
        lsq_supra_end)
    isi_sweep, isi_sweep_spike_info = fv.identify_sweep_for_isi_shape(
        lsq_supra_sweeps, lsq_supra_features, lsq_supra_end - lsq_supra_start)
    result["isi_shape"] = fv.isi_shape(isi_sweep, isi_sweep_spike_info,
                                       lsq_supra_end)

    # Calculate AP waveform from long squares
    rheo_ind = lsq_supra_features["rheobase_sweep"].name
    sweep = lsq_supra_sweeps.sweeps[rheo_ind]
    lsq_ap_v, lsq_ap_dv = fv.first_ap_vectors(
        [sweep], [lsq_supra_features["spikes_set"][rheo_ind]],
        window_length=ap_window_length)

    result["first_ap_v"] = lsq_ap_v
    result["first_ap_dv"] = lsq_ap_dv

    target_amplitudes = np.arange(0, 120, 20)
    supra_info_list = fv.identify_suprathreshold_sweep_sequence(
        lsq_supra_features, target_amplitudes, shift=10)
    result["psth"] = fv.psth_vector(supra_info_list, lsq_supra_start,
                                    lsq_supra_end)
    result["inst_freq"] = fv.inst_freq_vector(supra_info_list, lsq_supra_start,
                                              lsq_supra_end)
    spike_feature_list = [
        "upstroke_downstroke_ratio",
        "peak_v",
        "fast_trough_v",
        "threshold_v",
        "width",
    ]
    for feature in spike_feature_list:
        result["spiking_" + feature] = fv.spike_feature_vector(
            feature, supra_info_list, lsq_supra_start, lsq_supra_end)

    # Save the results
    specimen_ids = [0]
    results = [result]

    filtered_set = [(i, r) for i, r in zip(specimen_ids, results)
                    if not "error" in r.keys()]
    error_set = [{
        "id": i,
        "error": d
    } for i, d in zip(specimen_ids, results) if "error" in d.keys()]
    if len(filtered_set) == 0:
        logging.info("No specimens had results")
        return

    with open(os.path.join(output_dir, "fv_errors_{:s}.json".format(project)),
              "w") as f:
        json.dump(error_set, f, indent=4)

    used_ids, results = zip(*filtered_set)
    logging.info("Finished with {:d} processed specimens".format(
        len(used_ids)))

    k_sizes = {}
    for k in results[0].keys():
        if k not in k_sizes and results[0][k] is not None:
            k_sizes[k] = len(results[0][k])
        data = np.array([
            r[k] if k in r else np.nan * np.zeros(k_sizes[k]) for r in results
        ])
        if len(data.shape) == 1:  # it'll be 1D if there's just one specimen
            data = np.reshape(data, (1, -1))
        if data.shape[0] < len(used_ids):
            logging.warning("Missing data!")
            missing = np.array([k not in r for r in results])
            print(k, np.array(used_ids)[missing])
        np.save(
            os.path.join(output_dir, "fv_{:s}_{:s}.npy".format(k, project)),
            data)

    np.save(os.path.join(output_dir, "fv_ids_{:s}.npy".format(project)),
            used_ids)
Example #14
def cell_id_to_sweep_set(abf_file_name, meta_info_df):

    curr_file = abf_file_name

    meta_dict = meta_info_df

    #curr_file = '15o08020.abf'

    meta_row = meta_dict.loc[meta_dict['cell_id'] == curr_file]

    file_path = meta_row['full_path'].values[0]
    stim_file_path = meta_row['stim_path'].values[0]

    resp_abf = pyabf.ABF(file_path)
    stim_abf = pyabf.ABF(
        stim_file_path
    )  # for some files we're using stim traces from a different file

    num_sweeps = int(meta_row['num_sweeps'].values[0])

    stim_channel_num = int(meta_row['stim_chan'].values[0])
    response_chan_num = int(meta_row['resp_chan'].values[0])
    stim_gain = meta_row['stim_gain'].values[0]
    response_gain = meta_row['resp_gain'].values[0]

    start_time = meta_row['stim_start_time'].values[0]
    end_time = meta_row['stim_end_time'].values[0]
    resp_sampling_rate = meta_row['resp_sampling_rate'].values[0]
    stim_sampling_rate = meta_row['stim_sampling_rate'].values[0]
    resp_offset = meta_row['resp_offset'].values[0]
    stim_name = meta_row['stim_name'].values[0]

    stim_dict = get_stim_info(stim_abf, stim_channel_num, stim_gain, stim_name)
    stim_amps = stim_dict['stim_amp_vec']

    # curr_epoch = (int(start_time*10000), int(end_time*10000))
    # print(curr_epoch)

    clamp_mode = "CurrentClamp"

    sweep_list = list()

    for i in range(0, num_sweeps):
        sweep_num = i
        resp_abf.setSweep(sweep_num, channel=response_chan_num)

        time_vec = resp_abf.sweepX
        response_vec = resp_abf.sweepY * response_gain + resp_offset

        stim_abf.setSweep(sweep_num, channel=stim_channel_num)
        if stim_name == 'sweepY':
            stim_vec = stim_abf.sweepY * stim_gain
        else:
            stim_vec = stim_abf.sweepC * stim_gain

        # sometimes, when we get stim from a different file, they have diff samp rates 0_o
        if stim_sampling_rate != resp_sampling_rate:
            new_stim_vec = np.zeros(len(time_vec))
            inds = np.where((time_vec > start_time) & (time_vec < end_time))
            new_stim_vec[inds] = stim_amps[i]
            stim_vec = new_stim_vec
            #stim_vec = signal.resample(stim_vec, len(time_vec))

        sweep = Sweep(
            t=time_vec,
            v=response_vec,
            i=stim_vec,
            sampling_rate=resp_sampling_rate,
            sweep_number=i,
            clamp_mode=clamp_mode,
            #epochs = curr_epoch
        )
        sweep_list.append(sweep)
    sweep_set = SweepSet(sweep_list)
    return (sweep_set, start_time, end_time)
Example #15
def select_core_1_or_core_2_sweeps(core_1_lsq,
                                   core_1_start,
                                   core_1_end,
                                   core_2_lsq,
                                   core_2_start,
                                   core_2_end,
                                   fi_shift_threshold=30.0):
    """Identify the sweep or sweeps to use as targets for optimization

    Prefers Core 2 sweeps because they are longer and usually have repeats.
    Selects a Core 1 sweep if the Core 2 sweeps do not exist or if the
    f-I curve has shifted by at least 30 pA (since Core 1 occurs earlier in the
    overall experimental protocol)

    Parameters
    ----------
    core_1_lsq: SweepSet
        "Core 1" long-square sweeps (1 second long stimulus, no repeats expected)
    core_1_start: float
        Start time of stimulus interval for Core 1 sweeps
    core_1_end: float
        End time of stimulus interval for Core 1 sweeps
    core_2_lsq: SweepSet
        "Core 2" long-square sweeps (2 seconds long stimulus, repeats expected)
    core_2_start: float
        Start time of stimulus interval for Core 2 sweeps
    core_2_end: float
        End time of stimulus interval for Core 2 sweeps
    fi_shift_threshold: float (optional, default 30.0)
        Maximum allowed f-I curve shift to still select Core 2 sweeps

    Returns
    -------
    sweeps_to_fit: SweepSet

    start: float
        time of stimulus start (in seconds)
    end: float
        time of stimulus end (in seconds)
    """

    use_core_1 = False
    if len(core_2_lsq.sweeps) == 0:
        logging.info("No Core 2 sweeps available")
        use_core_1 = True
    else:
        fi_shift = check_fi_shift.estimate_fi_shift(core_1_lsq, core_1_start,
                                                    core_1_end, core_2_lsq,
                                                    core_2_start, core_2_end)
        if abs(fi_shift) > fi_shift_threshold:
            logging.info("f-I curve shifted by {} (exceeding +/- {}); "
                         "using Core 1".format(fi_shift, fi_shift_threshold))
            use_core_1 = True

    if not use_core_1:
        # Try to find Core 2 sweeps that work
        core2_analysis = StepAnalysis(core_2_start, core_2_end)
        core2_analysis.analyze(core_2_lsq)
        core2_sweep_features = core2_analysis.sweep_features()
        stim_amps = np.rint(core2_sweep_features["stim_amp"].values)

        n_good = {}
        sweeps_by_amp = {}
        for amp, swp, spike_data in zip(stim_amps, core_2_lsq.sweeps,
                                        core2_analysis.spikes_data()):
            if spike_data.shape[0] == 0:
                continue

            spike_times = spike_data["threshold_t"].values
            if is_sweep_good_quality(spike_times, core_2_start, core_2_end):
                if amp in n_good:
                    n_good[amp] += 1
                    sweeps_by_amp[amp].append(swp)
                else:
                    n_good[amp] = 1
                    sweeps_by_amp[amp] = [swp]

        if len(n_good) == 0 or max(n_good.values()) <= 1:
            logging.info("Not enough good Core 2 traces; using Core 1")
            use_core_1 = True

    if use_core_1:
        sweeps_to_fit = select_core1_trace(core_1_lsq, core_1_start,
                                           core_1_end)
        start = core_1_start
        end = core_1_end
    else:
        best_amp = max(n_good, key=(lambda key: n_good[key]))
        sweeps_to_fit = sweeps_by_amp[best_amp]
        start = core_2_start
        end = core_2_end

    return SweepSet(sweeps_to_fit), start, end
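A hedged call sketch for the helper above; core_1_lsq and core_2_lsq stand for SweepSets assembled elsewhere, and the stimulus start/end times are illustrative:

sweeps_to_fit, fit_start, fit_end = select_core_1_or_core_2_sweeps(
    core_1_lsq, 1.02, 2.02,
    core_2_lsq, 1.02, 3.02,
    fi_shift_threshold=30.0)
logging.info("fitting %d sweeps on [%0.2f, %0.2f] s",
             len(sweeps_to_fit.sweeps), fit_start, fit_end)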
Example #16
def get_long_square_features(recordings, cell_id=''):
    errors = []
    if len(recordings) == 0:
        errors.append('No long pulse sweeps for cell %s' % cell_id)
        return {}, errors

    min_pulse_dur = np.inf
    sweep_list = []
    for rec in recordings:
        pulse_times = get_pulse_times(rec)
        if pulse_times is None:
            continue

        # pulses may have different durations as well, so we just use the smallest duration
        start, end = pulse_times
        min_pulse_dur = min(min_pulse_dur, end - start)

        sweep = MPSweep(rec, -start)
        if sweep is not None:
            sweep_list.append(sweep)

    if len(sweep_list) == 0:
        errors.append('No long square sweeps passed qc for cell %s' % cell_id)
        return {}, errors

    sweep_set = SweepSet(sweep_list)
    spx, spfx = extractors_for_sweeps(sweep_set, start=0, end=min_pulse_dur)
    lsa = LongSquareAnalysis(spx,
                             spfx,
                             subthresh_min_amp=-200,
                             require_subthreshold=False,
                             require_suprathreshold=False)

    try:
        analysis = lsa.analyze(sweep_set)
    except FeatureError as exc:
        err = f'Error running long square analysis for cell {cell_id}: {str(exc)}'
        logger.warning(err)
        errors.append(err)
        return {}, errors

    analysis_dict = lsa.as_dict(analysis)
    output = get_complete_long_square_features(analysis_dict)

    results = {
        'rheobase':
        output.get('rheobase_i', np.nan) * 1e-12,  #unscale from pA,
        'fi_slope':
        output.get('fi_fit_slope', np.nan) * 1e-12,  #unscale from pA,
        'input_resistance':
        output.get('input_resistance', np.nan) * 1e6,  #unscale from MOhm,
        'input_resistance_ss':
        output.get('input_resistance_ss', np.nan) * 1e6,  #unscale from MOhm,
        'tau':
        output.get('tau', np.nan),
        'sag':
        output.get('sag', np.nan),
        'sag_peak_t':
        output.get('sag_peak_t', np.nan),
        'sag_depol':
        output.get('sag_depol', np.nan),
        'sag_peak_t_depol':
        output.get('sag_peak_t_depol', np.nan),
        'ap_upstroke_downstroke_ratio':
        output.get('upstroke_downstroke_ratio_hero', np.nan),
        'ap_upstroke':
        output.get('upstroke_hero', np.nan) * 1e-3,  #unscale from mV
        'ap_downstroke':
        output.get('downstroke_hero', np.nan) * 1e-3,  #unscale from mV
        'ap_width':
        output.get('width_hero', np.nan),
        'ap_threshold_v':
        output.get('threshold_v_hero', np.nan) * 1e-3,  #unscale from mV
        'ap_peak_deltav':
        output.get('peak_deltav_hero', np.nan) * 1e-3,  #unscale from mV
        'ap_fast_trough_deltav':
        output.get('fast_trough_deltav_hero', np.nan) * 1e-3,  #unscale from mV
        'firing_rate_rheo':
        output.get('avg_rate_rheo', np.nan),
        'latency_rheo':
        output.get('latency_rheo', np.nan),
        'firing_rate_40pa':
        output.get('avg_rate_hero', np.nan),
        'latency_40pa':
        output.get('latency_hero', np.nan),
        'adaptation_index':
        output.get('adapt_mean', np.nan),
        'isi_cv':
        output.get('isi_cv_mean', np.nan),
        'isi_adapt_ratio':
        output.get('isi_adapt_ratio', np.nan),
        'upstroke_adapt_ratio':
        output.get('upstroke_adapt_ratio', np.nan),
        'downstroke_adapt_ratio':
        output.get('downstroke_adapt_ratio', np.nan),
        'width_adapt_ratio':
        output.get('width_adapt_ratio', np.nan),
        'threshold_v_adapt_ratio':
        output.get('threshold_v_adapt_ratio', np.nan),
    }
    return results, errors