def pause_metrics(self):
    """Compute the number of pauses and the fraction of time spent paused.

    Runs a single pause-detection pass (via ``_process_pauses``) with the
    default cost weight and summarizes the result.

    Returns
    -------
    n_pauses : int
        Number of pauses detected.
    pause_frac : float
        Fraction of the interval (between ``self.start`` and ``self.end``)
        spent in a pause. Zero when no pauses are detected.
    """
    thresholds = self._spikes_df["threshold_index"].values.astype(int)
    isis = ft.get_isis(self.t, thresholds)

    pause_list = self._process_pauses(cost_weight=1.0)
    if len(pause_list) == 0:
        # No pauses detected at all
        return 0, 0.

    n_pauses = len(pause_list)
    # Total time inside pause ISIs, as a fraction of the analysis window
    pause_frac = isis[pause_list].sum()
    pause_frac /= self.end - self.start

    return n_pauses, pause_frac
def _process_spike_related_features(self):
    """Compute sweep-level features derived from spike times and store them.

    Writes ISI statistics, latency, and average firing rate into
    ``self._sweep_features``; returns nothing.
    """
    t = self.t

    if len(self._spikes_df) == 0:
        # No spikes: only the average rate is defined, and it is zero
        self._sweep_features["avg_rate"] = 0
        return

    thresholds = self._spikes_df["threshold_index"].values.astype(int)
    isis = ft.get_isis(t, thresholds)
    with warnings.catch_warnings():
        # ignore mean-of-empty-slice warnings here
        warnings.filterwarnings("ignore", category=RuntimeWarning, module="numpy")

        sweep_level_features = {
            "adapt": ft.adaptation_index(isis),
            "latency": ft.latency(t, thresholds, self.start),
            "isi_cv": (isis.std() / isis.mean()) if len(isis) >= 1 else np.nan,
            "mean_isi": isis.mean(),
            "median_isi": np.median(isis),
            "first_isi": isis[0] if len(isis) >= 1 else np.nan,
            "avg_rate": ft.average_rate(t, thresholds, self.start, self.end),
        }

        # dict.iteritems() is Python 2 only and raises AttributeError on
        # Python 3; items() works on both.
        for k, v in sweep_level_features.items():
            self._sweep_features[k] = v
def _process_pauses(self, cost_weight=1.0):
    """Return indices of pause ISIs detected in this sweep.

    Pauses are unusually long ISIs that show a "detour reset" among
    delay resets.

    Parameters
    ----------
    cost_weight : float
        Weight passed through to ``ft.detect_pauses``.
    """
    spike_df = self._spikes_df
    threshold_indexes = spike_df["threshold_index"].values.astype(int)
    interspike_intervals = ft.get_isis(self.t, threshold_indexes)
    # The last spike has no following ISI, so its type is dropped
    isi_classes = spike_df["isi_type"][:-1].values
    return ft.detect_pauses(interspike_intervals, isi_classes, cost_weight)
def _process_spike_related_features(self):
    """Compute sweep-level spike-train features and store them.

    Writes ISI statistics, firing rate, and spike-amplitude adaptation
    measures into ``self._sweep_features``; returns nothing.
    """
    t = self.t

    if len(self._spikes_df) == 0:
        # No spikes: only the average rate is defined, and it is zero
        self._sweep_features["avg_rate"] = 0
        return

    # Spike amplitude = peak voltage minus threshold voltage.
    # (The empty-df case returned above, so peak_heights is always set
    # before it is used below.)
    peak_heights = None
    if not self._spikes_df.empty:
        peak_heights = (self._spikes_df['peak_v'].values
                        - self._spikes_df['threshold_v'].values)

    thresholds = self._spikes_df["threshold_index"].values.astype(int)
    isis = ft.get_isis(t, thresholds)
    with warnings.catch_warnings():
        # ignore mean-of-empty-slice warnings here
        warnings.filterwarnings("ignore", category=RuntimeWarning, module="numpy")

        sweep_level_features = {
            "adapt": ft.adaptation_index(isis),
            "latency": ft.latency(t, thresholds, self.start),
            "isi_cv": (isis.std() / isis.mean()) if len(isis) >= 1 else np.nan,
            "mean_isi": isis.mean() if len(isis) > 0 else np.nan,
            "median_isi": np.median(isis),
            "first_isi": isis[0] if len(isis) >= 1 else np.nan,
            # Adaptation ratios need at least 2 ISIs (i.e. 3 spikes) /
            # 2 spike amplitudes; otherwise NaN.
            "isi_adapt": (isis[1] / isis[0]) if len(isis) >= 2 else np.nan,
            "AP_amp_adapt": (peak_heights[1] / peak_heights[0]) if peak_heights.size >= 2 else np.nan,
            "AP_amp_adapt_average": ft.ap_amp_adaptation(peak_heights) if peak_heights.size >= 2 else np.nan,
            "AP_fano_factor": ((peak_heights.std() ** 2) / peak_heights.mean()) if peak_heights.size >= 2 else np.nan,
            "AP_cv": (peak_heights.std() / peak_heights.mean()) if peak_heights.size >= 2 else np.nan,
            "isi_adapt_average": ft.isi_adaptation(isis) if len(isis) >= 2 else np.nan,
            # The Fano factor and CV could in principle be 0 for a single
            # ISI, but we choose NaN instead -- they are not informative there.
            "fano_factor": ((isis.std() ** 2) / isis.mean()) if len(isis) > 1 else np.nan,
            "cv": (isis.std() / isis.mean()) if len(isis) > 1 else np.nan,
            "avg_rate": ft.average_rate(t, thresholds, self.start, self.end)
        }

        # .items() is Python 2/3 compatible, so six is not needed here
        for k, v in sweep_level_features.items():
            self._sweep_features[k] = v
def _process_bursts(self, tol=0.5, pause_cost=1.0):
    """Detect bursts in this sweep's spike train.

    Parameters
    ----------
    tol : float
        Tolerance parameter forwarded to ``ft.detect_bursts``.
    pause_cost : float
        Pause-detection cost weight forwarded to ``ft.detect_bursts``.

    Returns
    -------
    numpy.ndarray
        Array of bursts as produced by ``ft.detect_bursts``.
    """
    spike_df = self._spikes_df
    spike_indexes = spike_df["threshold_index"].values.astype(int)
    interspike_intervals = ft.get_isis(self.t, spike_indexes)
    # The last spike has no following ISI, so its type is dropped
    isi_classes = spike_df["isi_type"][:-1].values

    bursts = ft.detect_bursts(
        interspike_intervals,
        isi_classes,
        spike_df["fast_trough_v"].values,
        spike_df["fast_trough_t"].values,
        spike_df["slow_trough_v"].values,
        spike_df["slow_trough_t"].values,
        spike_df["threshold_v"].values,
        tol,
        pause_cost,
    )
    return np.array(bursts)