def _get_baseline_voltage(self):
    """Return the baseline membrane voltage for this sweep.

    If ``self.start`` is defined, averages ``v`` over the interval of
    length ``self.baseline_interval`` immediately preceding ``self.start``.
    Otherwise, searches for a flat stretch of the trace (|dV/dt| below
    ``self.baseline_detect_thresh``) at least ``baseline_interval`` long
    and averages over its tail; returns ``np.nan`` if no such stretch
    exists.
    """
    v = self.v
    t = self.t
    filter_frequency = 1.  # cutoff for the dV/dt filter, in kHz

    # Look at baseline interval before start if start is defined
    if self.start is not None:
        return ft.average_voltage(v, t,
                                  self.start - self.baseline_interval,
                                  self.start)

    # Otherwise try to find an interval where things are pretty flat
    dv = ft.calculate_dvdt(v, t, filter_frequency)
    # BUG FIX: the threshold must be applied to |dv|, not to the boolean
    # result of (dv >= thresh); the original np.abs(dv >= ...) silently
    # ignored negative-going deflections.
    non_flat_points = np.flatnonzero(
        np.abs(dv) >= self.baseline_detect_thresh)
    flat_intervals = t[non_flat_points[1:]] - t[non_flat_points[:-1]]
    long_flat_intervals = np.flatnonzero(
        flat_intervals >= self.baseline_interval)
    if long_flat_intervals.size > 0:
        # +1: measure the interval ending at the *second* endpoint of the
        # flat gap, so the averaged window lies inside the flat region.
        interval_index = long_flat_intervals[0] + 1
        baseline_end_time = t[non_flat_points[interval_index]]
        return ft.average_voltage(
            v, t, baseline_end_time - self.baseline_interval,
            baseline_end_time)
    else:
        # BUG FIX: logging.info() interprets extra positional arguments as
        # %-format args; the stray RuntimeWarning class broke log-record
        # formatting (it belongs to warnings.warn, not logging).
        logging.info(
            "Could not find sufficiently flat interval for "
            "automatic baseline voltage")
        return np.nan
# Example #2
# 0
    def _get_baseline_voltage(self):
        """Return the baseline membrane voltage for this sweep.

        If ``self.start`` is defined, averages ``v`` over the interval of
        length ``self.baseline_interval`` immediately preceding it.
        Otherwise, searches for a flat stretch (|dV/dt| below
        ``self.baseline_detect_thresh``) at least ``baseline_interval``
        long and averages over its tail; returns ``np.nan`` if none exists.
        """
        v = self.v
        t = self.t
        filter_frequency = 1.  # cutoff for the dV/dt filter, in kHz

        # Look at baseline interval before start if start is defined
        if self.start is not None:
            return ft.average_voltage(v, t, self.start - self.baseline_interval, self.start)

        # Otherwise try to find an interval where things are pretty flat
        dv = ft.calculate_dvdt(v, t, filter_frequency)
        # BUG FIX: threshold must be applied to |dv|, not to the boolean
        # result of (dv >= thresh); np.abs(dv >= ...) ignored negative
        # deflections entirely.
        non_flat_points = np.flatnonzero(np.abs(dv) >= self.baseline_detect_thresh)
        flat_intervals = t[non_flat_points[1:]] - t[non_flat_points[:-1]]
        long_flat_intervals = np.flatnonzero(flat_intervals >= self.baseline_interval)
        if long_flat_intervals.size > 0:
            # +1 so the averaged window ends inside the flat gap.
            interval_index = long_flat_intervals[0] + 1
            baseline_end_time = t[non_flat_points[interval_index]]
            return ft.average_voltage(v, t, baseline_end_time - self.baseline_interval,
                                      baseline_end_time)
        else:
            # BUG FIX: the stray RuntimeWarning argument was treated as a
            # %-format arg by logging and broke log-record formatting.
            logging.info("Could not find sufficiently flat interval for automatic baseline voltage")
            return np.nan
# Example #3
# 0
    def _process_individual_spikes(self):
        """Detect spikes in the sweep and compute per-spike features.

        Populates ``self._spikes_df`` with one row per detected spike:
        index/time/voltage (and current, when ``self.i`` is available)
        for threshold, peak and trough points; dV/dt values for upstroke
        and downstroke; trough-detail points (fast_trough, adp,
        slow_trough); spike width; and the upstroke/downstroke ratio.
        Stores an empty DataFrame when no spikes survive filtering.
        """
        v = self.v
        t = self.t
        dvdt = ft.calculate_dvdt(v, t, self.filter)

        # Basic features of spikes
        putative_spikes = ft.detect_putative_spikes(v, t, self.start, self.end,
                                                    self.filter, self.dv_cutoff)
        peaks = ft.find_peak_indexes(v, t, putative_spikes, self.end)
        putative_spikes, peaks = ft.filter_putative_spikes(v, t, putative_spikes, peaks,
                                                           self.min_height, self.min_peak)

        if not putative_spikes.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        upstrokes = ft.find_upstroke_indexes(v, t, putative_spikes, peaks, self.filter, dvdt)
        thresholds = ft.refine_threshold_indexes(v, t, upstrokes, self.thresh_frac,
                                                 self.filter, dvdt)
        thresholds, peaks, upstrokes = ft.check_thresholds_and_peaks(v, t, thresholds, peaks,
                                                                     upstrokes, self.max_interval)

        if not thresholds.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        # Spike list and thresholds have been refined - now find other features
        upstrokes = ft.find_upstroke_indexes(v, t, thresholds, peaks, self.filter, dvdt)
        troughs = ft.find_trough_indexes(v, t, thresholds, peaks, self.end)
        downstrokes = ft.find_downstroke_indexes(v, t, peaks, troughs, self.filter, dvdt)
        trough_details = ft.analyze_trough_details(v, t, thresholds, peaks, self.end,
                                                   self.filter, dvdt=dvdt)
        widths = ft.find_widths(v, t, thresholds, peaks, trough_details[1])

        # Points where we care about t, v, and i if available
        vit_data_indexes = {
            "threshold": thresholds,
            "peak": peaks,
            "trough": troughs,
        }

        # Points where we care about t and dv/dt
        dvdt_data_indexes = {
            "upstroke": upstrokes,
            "downstroke": downstrokes
        }

        # Trough details
        isi_types = trough_details[0]
        trough_detail_indexes = dict(zip(["fast_trough", "adp", "slow_trough"], trough_details[1:]))

        # Redundant, but ensures that DataFrame has right number of rows
        # Any better way to do it?
        spikes_df = DataFrame(data=thresholds, columns=["threshold_index"])

        # BUG FIX: dict.iteritems() does not exist on Python 3 and
        # DataFrame.ix was removed in pandas 1.0.  .items() and .loc are
        # drop-in equivalents here: on the default RangeIndex, .loc slicing
        # is label-based and end-inclusive, exactly like .ix was.
        for k, vals in vit_data_indexes.items():
            spikes_df[k + "_index"] = np.nan
            spikes_df[k + "_t"] = np.nan
            spikes_df[k + "_v"] = np.nan

            if len(vals) > 0:
                spikes_df.loc[:len(vals) - 1, k + "_index"] = vals
                spikes_df.loc[:len(vals) - 1, k + "_t"] = t[vals]
                spikes_df.loc[:len(vals) - 1, k + "_v"] = v[vals]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if len(vals) > 0:
                    spikes_df.loc[:len(vals) - 1, k + "_i"] = self.i[vals]

        for k, vals in dvdt_data_indexes.items():
            spikes_df[k + "_index"] = np.nan
            spikes_df[k] = np.nan
            if len(vals) > 0:
                spikes_df.loc[:len(vals) - 1, k + "_index"] = vals
                spikes_df.loc[:len(vals) - 1, k + "_t"] = t[vals]
                spikes_df.loc[:len(vals) - 1, k + "_v"] = v[vals]
                spikes_df.loc[:len(vals) - 1, k] = dvdt[vals]

        spikes_df["isi_type"] = isi_types

        for k, vals in trough_detail_indexes.items():
            # NaN entries mark spikes that lack this trough feature.
            valid = ~np.isnan(vals)
            spikes_df[k + "_index"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_index"] = vals[valid]

            spikes_df[k + "_t"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_t"] = t[vals[valid].astype(int)]

            spikes_df[k + "_v"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_v"] = v[vals[valid].astype(int)]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if np.any(valid):
                    spikes_df.loc[valid, k + "_i"] = self.i[vals[valid].astype(int)]

        spikes_df["width"] = np.nan
        spikes_df.loc[:len(widths) - 1, "width"] = widths

        # Downstroke is negative, so negate it to get a positive ratio.
        spikes_df["upstroke_downstroke_ratio"] = spikes_df["upstroke"] / -spikes_df["downstroke"]

        self._spikes_df = spikes_df
    def _process_individual_spikes(self):
        """Detect spikes and compute per-spike features, tracking clipping.

        Populates ``self._spikes_df`` with one row per detected spike:
        a ``clipped`` flag, index/time/voltage (and current, when
        ``self.i`` is available) for threshold, peak, trough and
        trough-detail points, dV/dt values for upstroke and downstroke,
        spike width, and the upstroke/downstroke ratio.  Column names of
        features that can be distorted by clipping are appended to
        ``self._affected_by_clipping``.  Stores an empty DataFrame when no
        spikes survive the successive validity checks.
        """
        v = self.v
        t = self.t
        dvdt = ft.calculate_dvdt(v, t, self.filter)

        # Basic features of spikes
        putative_spikes = ft.detect_putative_spikes(v, t, self.start, self.end,
                                                    self.filter, self.dv_cutoff)
        peaks = ft.find_peak_indexes(v, t, putative_spikes, self.end)
        putative_spikes, peaks = ft.filter_putative_spikes(v, t, putative_spikes, peaks,
                                                           self.min_height, self.min_peak,
                                                           dvdt=dvdt, filter=self.filter)

        if not putative_spikes.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        upstrokes = ft.find_upstroke_indexes(v, t, putative_spikes, peaks, self.filter, dvdt)
        thresholds = ft.refine_threshold_indexes(v, t, upstrokes, filter=self.filter, dvdt=dvdt)
        thresholds, peaks, upstrokes, clipped = ft.check_thresholds_and_peaks(
            v, t, thresholds, peaks, upstrokes,
            end=self.end, max_interval=self.max_interval, filter=self.filter)
        if not thresholds.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        # You can comment the following lines if you don't want to check this
        thresholds, peaks, clipped = ft.check_threshold_w_peak(v, t, thresholds, peaks, clipped)
        if not thresholds.size:
            self._spikes_df = DataFrame()
            return

        # Spike list and thresholds have been refined - now find other features
        upstrokes = ft.find_upstroke_indexes(v, t, thresholds, peaks, self.filter, dvdt)
        troughs = ft.find_trough_indexes(v, t, thresholds, peaks, clipped, self.end)

        # You can comment the following lines if you don't want to check this
        thresholds, upstrokes, peaks, troughs, clipped = ft.check_trough_w_peak(
            thresholds, upstrokes, peaks, troughs, clipped, filter=10., dvdt=None)
        # Maybe you have nothing anymore so save time by the following:
        if not thresholds.size:
            self._spikes_df = DataFrame()
            return

        downstrokes = ft.find_downstroke_indexes(v, t, peaks, troughs, clipped, self.filter, dvdt)
        trough_details, clipped = ft.analyze_trough_details(v, t, thresholds, peaks, clipped, self.end,
                                                            self.filter, dvdt=dvdt)
        widths = ft.find_widths_wrt_threshold(v, t, thresholds, peaks, trough_details[1], clipped)

        # Feature families whose values can be distorted by clipping.
        base_clipped_list = []

        # Points where we care about t, v, and i if available
        vit_data_indexes = {
            "threshold": thresholds,
            "peak": peaks,
            "trough": troughs,
        }
        base_clipped_list += ["trough"]

        # Points where we care about t and dv/dt
        dvdt_data_indexes = {
            "upstroke": upstrokes,
            "downstroke": downstrokes
        }
        base_clipped_list += ["downstroke"]

        # Trough details
        isi_types = trough_details[0]
        trough_detail_indexes = dict(zip(["fast_trough", "adp", "slow_trough"], trough_details[1:]))
        base_clipped_list += ["fast_trough", "adp", "slow_trough"]

        # Redundant, but ensures that DataFrame has right number of rows
        # Any better way to do it?
        spikes_df = DataFrame(data=thresholds, columns=["threshold_index"])
        spikes_df["clipped"] = clipped

        for k, all_vals in six.iteritems(vit_data_indexes):
            # NaN entries mark spikes for which this point was not found.
            valid_ind = ~np.isnan(all_vals)
            vals = all_vals[valid_ind].astype(int)
            spikes_df[k + "_index"] = np.nan
            spikes_df[k + "_t"] = np.nan
            spikes_df[k + "_v"] = np.nan

            if len(vals) > 0:
                spikes_df.loc[valid_ind, k + "_index"] = vals
                spikes_df.loc[valid_ind, k + "_t"] = t[vals]
                spikes_df.loc[valid_ind, k + "_v"] = v[vals]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if len(vals) > 0:
                    spikes_df.loc[valid_ind, k + "_i"] = self.i[vals]

            if k in base_clipped_list:
                self._affected_by_clipping += [
                    k + "_index",
                    k + "_t",
                    k + "_v",
                    k + "_i",
                ]

        for k, all_vals in six.iteritems(dvdt_data_indexes):
            valid_ind = ~np.isnan(all_vals)
            vals = all_vals[valid_ind].astype(int)
            spikes_df[k + "_index"] = np.nan
            spikes_df[k] = np.nan
            if len(vals) > 0:
                spikes_df.loc[valid_ind, k + "_index"] = vals
                spikes_df.loc[valid_ind, k + "_t"] = t[vals]
                spikes_df.loc[valid_ind, k + "_v"] = v[vals]
                spikes_df.loc[valid_ind, k] = dvdt[vals]

            # BUG FIX: this bookkeeping was nested inside the len(vals) > 0
            # check (unlike the vit and trough loops), so a clipped feature
            # with no valid points was never flagged; mark unconditionally.
            if k in base_clipped_list:
                self._affected_by_clipping += [
                    k + "_index",
                    k + "_t",
                    k + "_v",
                    k,
                ]

        spikes_df["isi_type"] = isi_types
        self._affected_by_clipping += ["isi_type"]

        for k, all_vals in six.iteritems(trough_detail_indexes):
            valid_ind = ~np.isnan(all_vals)
            vals = all_vals[valid_ind].astype(int)
            spikes_df[k + "_index"] = np.nan
            spikes_df[k + "_t"] = np.nan
            spikes_df[k + "_v"] = np.nan
            if len(vals) > 0:
                spikes_df.loc[valid_ind, k + "_index"] = vals
                spikes_df.loc[valid_ind, k + "_t"] = t[vals]
                spikes_df.loc[valid_ind, k + "_v"] = v[vals]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if len(vals) > 0:
                    spikes_df.loc[valid_ind, k + "_i"] = self.i[vals]

            if k in base_clipped_list:
                self._affected_by_clipping += [
                    k + "_index",
                    k + "_t",
                    k + "_v",
                    k + "_i",
                ]

        spikes_df["width"] = widths
        self._affected_by_clipping += ["width"]

        # Downstroke is negative, so negate it to get a positive ratio.
        spikes_df["upstroke_downstroke_ratio"] = spikes_df["upstroke"] / -spikes_df["downstroke"]
        self._affected_by_clipping += ["upstroke_downstroke_ratio"]

        self._spikes_df = spikes_df
    def _process_individual_spikes(self):
        """Detect spikes in the sweep and compute per-spike features.

        Populates ``self._spikes_df`` with one row per detected spike:
        index/time/voltage (and current, when ``self.i`` is available)
        for threshold, peak and trough points; dV/dt values for upstroke
        and downstroke; trough-detail points (fast_trough, adp,
        slow_trough); spike width; and the upstroke/downstroke ratio.
        Stores an empty DataFrame when no spikes survive filtering.
        """
        v = self.v
        t = self.t
        dvdt = ft.calculate_dvdt(v, t, self.filter)

        # Basic features of spikes
        putative_spikes = ft.detect_putative_spikes(v, t, self.start, self.end,
                                                    self.filter,
                                                    self.dv_cutoff)
        peaks = ft.find_peak_indexes(v, t, putative_spikes, self.end)
        putative_spikes, peaks = ft.filter_putative_spikes(
            v, t, putative_spikes, peaks, self.min_height, self.min_peak)

        if not putative_spikes.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        upstrokes = ft.find_upstroke_indexes(v, t, putative_spikes, peaks,
                                             self.filter, dvdt)
        thresholds = ft.refine_threshold_indexes(v, t, upstrokes,
                                                 self.thresh_frac, self.filter,
                                                 dvdt)
        thresholds, peaks, upstrokes = ft.check_thresholds_and_peaks(
            v, t, thresholds, peaks, upstrokes, self.max_interval)

        if not thresholds.size:
            # Save time if no spikes detected
            self._spikes_df = DataFrame()
            return

        # Spike list and thresholds have been refined - now find other features
        upstrokes = ft.find_upstroke_indexes(v, t, thresholds, peaks,
                                             self.filter, dvdt)
        troughs = ft.find_trough_indexes(v, t, thresholds, peaks, self.end)
        downstrokes = ft.find_downstroke_indexes(v, t, peaks, troughs,
                                                 self.filter, dvdt)
        trough_details = ft.analyze_trough_details(v,
                                                   t,
                                                   thresholds,
                                                   peaks,
                                                   self.end,
                                                   self.filter,
                                                   dvdt=dvdt)
        widths = ft.find_widths(v, t, thresholds, peaks, trough_details[1])

        # Points where we care about t, v, and i if available
        vit_data_indexes = {
            "threshold": thresholds,
            "peak": peaks,
            "trough": troughs,
        }

        # Points where we care about t and dv/dt
        dvdt_data_indexes = {"upstroke": upstrokes, "downstroke": downstrokes}

        # Trough details
        isi_types = trough_details[0]
        trough_detail_indexes = dict(
            zip(["fast_trough", "adp", "slow_trough"], trough_details[1:]))

        # Redundant, but ensures that DataFrame has right number of rows
        # Any better way to do it?
        spikes_df = DataFrame(data=thresholds, columns=["threshold_index"])

        # BUG FIX: dict.iteritems() does not exist on Python 3 and
        # DataFrame.ix was removed in pandas 1.0.  .items() and .loc are
        # drop-in equivalents here: on the default RangeIndex, .loc slicing
        # is label-based and end-inclusive, exactly like .ix was.
        for k, vals in vit_data_indexes.items():
            spikes_df[k + "_index"] = np.nan
            spikes_df[k + "_t"] = np.nan
            spikes_df[k + "_v"] = np.nan

            if len(vals) > 0:
                spikes_df.loc[:len(vals) - 1, k + "_index"] = vals
                spikes_df.loc[:len(vals) - 1, k + "_t"] = t[vals]
                spikes_df.loc[:len(vals) - 1, k + "_v"] = v[vals]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if len(vals) > 0:
                    spikes_df.loc[:len(vals) - 1, k + "_i"] = self.i[vals]

        for k, vals in dvdt_data_indexes.items():
            spikes_df[k + "_index"] = np.nan
            spikes_df[k] = np.nan
            if len(vals) > 0:
                spikes_df.loc[:len(vals) - 1, k + "_index"] = vals
                spikes_df.loc[:len(vals) - 1, k + "_t"] = t[vals]
                spikes_df.loc[:len(vals) - 1, k + "_v"] = v[vals]
                spikes_df.loc[:len(vals) - 1, k] = dvdt[vals]

        spikes_df["isi_type"] = isi_types

        for k, vals in trough_detail_indexes.items():
            # NaN entries mark spikes that lack this trough feature.
            valid = ~np.isnan(vals)
            spikes_df[k + "_index"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_index"] = vals[valid]

            spikes_df[k + "_t"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_t"] = t[vals[valid].astype(int)]

            spikes_df[k + "_v"] = np.nan
            if np.any(valid):
                spikes_df.loc[valid, k + "_v"] = v[vals[valid].astype(int)]

            if self.i is not None:
                spikes_df[k + "_i"] = np.nan
                if np.any(valid):
                    spikes_df.loc[valid, k + "_i"] = self.i[
                        vals[valid].astype(int)]

        spikes_df["width"] = np.nan
        spikes_df.loc[:len(widths) - 1, "width"] = widths

        # Downstroke is negative, so negate it to get a positive ratio.
        spikes_df["upstroke_downstroke_ratio"] = spikes_df[
            "upstroke"] / -spikes_df["downstroke"]

        self._spikes_df = spikes_df