Code Example #1
def christov_single(fields, record, save_path, sig, save=True):
    detectors = Detectors(fields['fs'])
    r_peaks = detectors.christov_detector(sig[:, 0])
    if save:
        save_prediction(r_peaks, record, save_path)
    else:
        return r_peaks
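A minimal calling sketch for the helper above, assuming `sig` is a 2-D array with the ECG in column 0 and `fields['fs']` holds the sampling rate in Hz; the record name and `save_path` are placeholders, not values from the original project.

import numpy as np

fs = 250
t = np.arange(0, 10, 1.0 / fs)
sig = np.sin(2 * np.pi * t).reshape(-1, 1)   # placeholder waveform, not a real ECG
fields = {'fs': fs}

# With save=False the function returns the R-peak indices instead of writing them.
r_peaks = christov_single(fields, record='rec001', save_path=None, sig=sig, save=False)
print(r_peaks)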
Code Example #2
def heart_rate_variability(sample, lead, rpeak_method='hamilton'):
    curdir = r'DATA\TrainData_FeatureExtraction'
    [all_data, header_data, BAD_LABELS] = data_read.data_files_load(curdir)

    data = all_data[sample][lead]

    """INITIALIZE DETECTOR CLASS WITH THE SAMPLING RATE:"""
    detectors = Detectors(500)

    """FIND RPEAK USING ONE OF THE METHODS BELOW--------------------"""

    if rpeak_method == 'hamilton':
        #Hamilton.
        r_peaks = detectors.hamilton_detector(data)

    elif rpeak_method == 'christov':
        #Christov
        r_peaks = detectors.christov_detector(data)

    elif rpeak_method == 'engelse':
        #Engelse and Zeelenberg
        r_peaks = detectors.engzee_detector(data)

    elif rpeak_method == 'pan':
        #Pan and Tompkins
        r_peaks = detectors.pan_tompkins_detector(data)

    elif rpeak_method == 'stationary_wavelet':
        #Stationary Wavelet Transform
        r_peaks = detectors.swt_detector(data)

    elif rpeak_method == 'two_moving_average':
        #Two Moving Average
        r_peaks = detectors.two_average_detector(data)

    #elif rpeak_method == 'matched_filter':
        #Matched Filter
        #go to the py-ecg-detectors documentation to find the template file
        #r_peaks = detectors.matched_filter_detector(data, template_file)

    else:
        raise ValueError("Unknown rpeak_method: %s" % rpeak_method)

    """COMPUTE NNI SERIES-------------------------------------------"""
    nn = nn_intervals(r_peaks) #nni seems to be off by a factor of 3
    print("\n\n", nn, "\n\n")

    """PLOT ECG/TACHOGRAM-------------------------------------------"""
    #plot_ecg(data, sampling_rate = 500)
    #tachogram(nn, sampling_rate = 500)

    """COMPUTE HRV--------------------------------------------------"""
    results = hrv(nn, None, None, 500)

    """COMPUTE HR PARAMETERS--(SOMETHING IS WRONG HERE BPM TOO HIGH)"""
    hr = heart_rate(nn)

    """COMPUTE FREQUENCY ANALYSIS-----------------------------------"""
    freq_results = results['fft_bands']

    return results, hr, freq_results
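A hypothetical call of the function above. It assumes the `DATA\TrainData_FeatureExtraction` folder and the `data_read` helper module used inside the function are available, so it is a sketch rather than something runnable in isolation; the sample and lead indices are placeholders.

# Run the HRV pipeline on the first lead of the first sample with the
# Pan-Tompkins detector.
results, hr, freq_results = heart_rate_variability(sample=0, lead=0, rpeak_method='pan')
print(hr)
print(freq_results)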
Code Example #3
File: detection.py  Project: jingyijingli/systole
def ecg_peaks(x, sfreq=1000, new_sfreq=1000, method='pan-tompkins',
              find_local=True, win_size=100):
    """A simple wrapper for many popular R peaks detectors algorithms.

    This function calls methods from the py-ecg-detectors [#]_ module.

    Parameters
    ----------
    x : list or 1d array-like
        The ECG signal.
    sfreq : int
        The input sampling frequency. Default is set to 1000 Hz.
    new_sfreq : int
        The sampling frequency of the resampled signal. Default is set to
        1000 Hz.
    method : str
        The method used. Can be one of the following: 'hamilton', 'christov',
        'engelse-zeelenberg', 'pan-tompkins', 'wavelet-transform',
        'moving-average'.
    find_local : bool
        If *True*, will use the peak indexes to search for local maxima within
        the given window size (win_size).
    win_size : int
        Size of the time window used by :py:func:`systole.utils.to_neighbour()`

    Returns
    -------
    resampled_signal : 1d array-like
        Signal resampled to the `new_sfreq` frequency.
    peaks : 1d array-like
        Boolean numpy array of the same length as `resampled_signal`, *True*
        at the detected R peak positions.

    Notes
    -----
    This function will call the py-ecg-detectors package to perform R wave
    detection.

    .. warning :: This function will resample the signal to `new_sfreq`
       (1000 Hz by default).

    Examples
    --------
    >>> from systole import import_dataset
    >>> from systole.detection import ecg_peaks
    >>> signal_df = import_dataset()[:20*2000]
    >>> signal, peaks = ecg_peaks(signal_df.ecg.to_numpy(), method='hamilton',
    ...                           sfreq=2000, find_local=True)
    >>> print(f'{sum(peaks)} peaks detected.')
    24 peaks detected.

    References
    ----------
    .. [#] Howell, L., Porr, B. Popular ECG R peak detectors written in
       python. DOI: 10.5281/zenodo.3353396
    """

    if isinstance(x, list):
        x = np.asarray(x)

    # Interpolate
    f = interp1d(np.arange(0, len(x)/sfreq, 1/sfreq),
                 x,
                 fill_value="extrapolate")
    time = np.arange(0, len(x)/sfreq, 1/new_sfreq)
    x = f(time)

    # Copy resampled signal for output
    resampled_signal = np.copy(x)

    detectors = Detectors(new_sfreq)

    if method == 'hamilton':
        peaks_idx = detectors.hamilton_detector(resampled_signal)
    elif method == 'christov':
        peaks_idx = detectors.christov_detector(resampled_signal)
    elif method == 'engelse-zeelenberg':
        peaks_idx = detectors.engzee_detector(resampled_signal)
    elif method == 'pan-tompkins':
        peaks_idx = detectors.pan_tompkins_detector(resampled_signal)
    elif method == 'wavelet-transform':
        peaks_idx = detectors.swt_detector(resampled_signal)
    elif method == 'moving-average':
        peaks_idx = detectors.two_average_detector(resampled_signal)
    else:
        raise ValueError(
            'Invalid method provided, should be: hamilton, '
            'christov, engelse-zeelenberg, pan-tompkins, wavelet-transform, '
            'moving-average')
    peaks = np.zeros(len(resampled_signal), dtype=bool)
    peaks[peaks_idx] = True

    if find_local is True:
        peaks = to_neighbour(resampled_signal, peaks, size=win_size)

    return resampled_signal, peaks
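Since ecg_peaks returns a boolean vector sampled at `new_sfreq`, a common follow-up is to convert it back to sample indices and RR intervals. A short sketch, assuming `signal` and `peaks` come from the docstring example above (1000 Hz resampling, so one sample equals one millisecond):

import numpy as np

peaks_idx = np.where(peaks)[0]     # sample indices in the resampled signal
rr_ms = np.diff(peaks_idx)         # RR intervals in ms (signal is at 1000 Hz)
print(f'{len(peaks_idx)} peaks, mean RR = {rr_ms.mean():.1f} ms')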
Code Example #4
def preprocessing_AFDB(record, start=1, stop=None, sep=",", fs=250):
    dataset_dir = "dataset/AFDB record_%s/" % record
    csv_filenames = []
    for filename in os.listdir(dataset_dir):
        if filename.endswith(".csv"):
            csv_filenames.append(filename)
    print("[INFO] detected CSV files:", csv_filenames)

    print("[INFO] Read annotation file...")
    with open(dataset_dir + 'annotation.txt', "r") as file:
        annotations = file.readlines()

    label_idx = []
    for item in annotations[start:stop]:
        item_split = item.split()
        label_idx.append([
            item_split[0].replace("[", "").replace("]", ""),
            item_split[-1].replace("(", "")
        ])

    print("[INFO] Read CSV...")

    # - Read & formatting ECG data
    def read_csv_to_df(filename, folder, sep=sep):
        df = pd.read_csv(folder + filename, sep=sep)
        df = df.iloc[:, 0:2]
        print("[INFO] finish read file - %s" % filename)

        #df = df.drop(0)
        df.columns = ['Time', 'ECG']

        #df['ECG'] = df['ECG'].str.replace(';', '')
        df['ECG'] = pd.to_numeric(df['ECG'])

        # peak reduction: clip extreme ECG values to the range [-2, 2]
        df.loc[df['ECG'] > 2, 'ECG'] = 2
        df.loc[df['ECG'] < -2, 'ECG'] = -2
        print("[INFO] finish data cleansing - %s" % filename)

        df["Time"] = df['Time'].str.replace("[", "")
        df["Time"] = df['Time'].str.replace("]", "")
        df["Time"] = df['Time'].str.replace("'", "")

        df["Time"] = pd.to_datetime(df["Time"], errors='coerce')
        print("[INFO] finish time cleansing -  %s" % filename)

        df.set_index("Time", inplace=True)
        return df

    # - concatenate dataframes
    list_df_ecg = []
    for name in csv_filenames:
        df = read_csv_to_df(name, dataset_dir)
        list_df_ecg.append(df)

    df_ecg = pd.concat(list_df_ecg)

    # - Split Normal (N) and AFIB data
    N_range = []
    AFIB_range = []

    for i in range(len(label_idx) - 1):
        tm_str = label_idx[i][0]
        next_tm_str = label_idx[i + 1][0]
        tm = pd.to_datetime(tm_str)
        next_tm = pd.to_datetime(next_tm_str)

        if label_idx[i][1] == 'N':
            N_range.append([tm, next_tm])
        else:
            AFIB_range.append([tm, next_tm])

    if not os.path.exists("dataset_split_per_class"):
        os.mkdir("dataset_split_per_class")

    N = []
    for ix, nr in enumerate(N_range):
        result = df_ecg.between_time(nr[0].time(), nr[1].time())
        result.to_csv("dataset_split_per_class/%s_%s_%s_%s.csv" %
                      ('N', record, 'ECG1', ix))
        N.append(result)

    AFIB = []
    for ix, ar in enumerate(AFIB_range):
        result = df_ecg.between_time(ar[0].time(), ar[1].time())
        result.to_csv("dataset_split_per_class/%s_%s_%s_%s.csv" %
                      ('AF', record, 'ECG1', ix))
        AFIB.append(result)

    print("[INFO] Split per-16s & apply Baseline Wander Removal")
    # - split each N & AFIB dataframe to 16s sequence and apply Baseline Removal
    from scipy import sparse
    from scipy.sparse.linalg import spsolve
    from datetime import timedelta

    def baseline_als(y, lam=10000, p=0.05, n_iter=10):
        # Asymmetric least squares (ALS) baseline estimation (Eilers & Boelens)
        L = len(y)
        D = sparse.diags([1, -2, 1], [0, -1, -2], shape=(L, L - 2))
        w = np.ones(L)
        for i in range(n_iter):
            W = sparse.spdiags(w, 0, L, L)
            Z = W + lam * D.dot(D.transpose())
            z = spsolve(Z, w * y)
            w = p * (y > z) + (1 - p) * (y < z)
        return z

    def perdelta(start, end, delta):
        curr = start
        while curr < end:
            yield curr
            curr += delta

    time_interval_N = []
    for N_item in N:
        if len(N_item) > 0:
            intr = [
                time_result for time_result in perdelta(
                    N_item.index[0], N_item.index[-1], timedelta(seconds=16))
            ]
            time_interval_N.append(intr)

    time_interval_AFIB = []
    for AFIB_item in AFIB:
        if len(AFIB_item) > 0:
            intr = [
                time_result for time_result in perdelta(
                    AFIB_item.index[0], AFIB_item.index[-1],
                    timedelta(seconds=16))
            ]
            time_interval_AFIB.append(intr)

    ECG_ALS = []
    ECG_ALS_label = []

    for time_interval in time_interval_N:
        for time_intv in list(zip(time_interval, time_interval[1:])):
            X = df_ecg.between_time(time_intv[0].time(), time_intv[1].time())
            X_val = X.values[:, 0]
            if len(X_val) > 0:
                ALS = X_val - baseline_als(X_val)
                ECG_ALS.append(np.array(ALS))
                ECG_ALS_label.append('N')

    for time_interval in time_interval_AFIB:
        for time_intv in list(zip(time_interval, time_interval[1:])):
            X = df_ecg.between_time(time_intv[0].time(), time_intv[1].time())
            X_val = X.values[:, 0]
            if len(X_val) > 0:
                ALS = X_val - baseline_als(X_val)
                ECG_ALS.append(np.array(ALS))
                ECG_ALS_label.append('AF')

    print("[INFO] Signal Normalization...")
    # - Signal normalization from -1 to 1
    from sklearn.preprocessing import MaxAbsScaler

    scaler = MaxAbsScaler()
    ECG_ALS_Norm = []

    for als in ECG_ALS:
        als = np.expand_dims(als, 1)
        scaler = scaler.fit(als)

        als_norm = scaler.transform(als)
        ECG_ALS_Norm.append(als_norm)

    print("[INFO] R-R peak detection & split ...")
    # - QRS Detection
    from ecgdetectors import Detectors

    detectors = Detectors(fs)

    # - Split each 16s to 1.2 x R-R sequence
    # - Padding the sequence with zero for length 300 point

    ECG_split = []
    ECG_split_label = []
    for i in range(len(ECG_ALS_Norm)):
        data = np.array(ECG_ALS_Norm[i])
        if len(data) > 0:
            r_peaks = []
            try:
                # christov_detector expects a 1-D signal, so pass column 0
                r_peaks = detectors.christov_detector(data[:, 0])
            except Exception:
                print("cannot find R peaks in ALS Norm, idx %d" % i)
            RRs = np.diff(r_peaks)
            RRs_med = np.median(RRs)
            if not np.isnan(RRs_med) and RRs_med > 0 and len(r_peaks) > 0:
                for rp in r_peaks[:-1]:
                    split = data[:, 0][rp:rp + int(RRs_med * 1.2)]
                    pad = np.zeros(300)
                    n = len(split) if len(split) <= 300 else 300
                    pad[0:n] = split[0:n]
                    ECG_split.append(pad)
                    ECG_split_label.append(ECG_ALS_label[i])

    print("[INFO] Save preprocessed data to CSV file for record %s..." %
          record)
    data = []
    for i in range(len(ECG_split)):
        x = list(ECG_split[i])
        x.append(ECG_split_label[i])
        data.append(x)

    ECG = pd.DataFrame(data)
    ECG.to_csv("dataset/AFDB_%s_sequence_300_pt.csv" % record,
               index=False,
               header=False)

    print("-------------------------- *** --------------------------\n\n")
Code Example #5
def run_algo(algorithm: str, sig: numpy.ndarray,
             freq_sampling: int) -> List[int]:
    """
    run a qrs detector on a signal

    :param algorithm: name of the qrs detector to use
    :type algorithm: str
    :param sig: values of the sampled signal to study
    :type sig: ndarray
    :param freq_sampling: value of sampling frequency of the signal
    :type freq_sampling: int
    :return: locations of the QRS detections
    :rtype: list(int)
    """
    detectors = Detectors(freq_sampling)
    if algorithm == 'Pan-Tompkins-ecg-detector':
        qrs_detections = detectors.pan_tompkins_detector(sig)
    elif algorithm == 'Hamilton-ecg-detector':
        qrs_detections = detectors.hamilton_detector(sig)
    elif algorithm == 'Christov-ecg-detector':
        qrs_detections = detectors.christov_detector(sig)
    elif algorithm == 'Engelse-Zeelenberg-ecg-detector':
        qrs_detections = detectors.engzee_detector(sig)
    elif algorithm == 'SWT-ecg-detector':
        qrs_detections = detectors.swt_detector(sig)
    elif algorithm == 'Matched-filter-ecg-detector' and freq_sampling == 360:
        qrs_detections = detectors.matched_filter_detector(
            sig, 'templates/template_360hz.csv')
    elif algorithm == 'Matched-filter-ecg-detector' and freq_sampling == 250:
        qrs_detections = detectors.matched_filter_detector(
            sig, 'templates/template_250hz.csv')
    elif algorithm == 'Two-average-ecg-detector':
        qrs_detections = detectors.two_average_detector(sig)
    elif algorithm == 'Hamilton-biosppy':
        qrs_detections = bsp_ecg.ecg(signal=sig,
                                     sampling_rate=freq_sampling,
                                     show=False)[2]
    elif algorithm == 'Christov-biosppy':
        order = int(0.3 * freq_sampling)
        filtered, _, _ = bsp_tools.filter_signal(signal=sig,
                                                 ftype='FIR',
                                                 band='bandpass',
                                                 order=order,
                                                 frequency=[3, 45],
                                                 sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.christov_segmenter(signal=filtered,
                                             sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.correct_rpeaks(signal=filtered,
                                         rpeaks=rpeaks,
                                         sampling_rate=freq_sampling,
                                         tol=0.05)
        _, qrs_detections = bsp_ecg.extract_heartbeats(
            signal=filtered,
            rpeaks=rpeaks,
            sampling_rate=freq_sampling,
            before=0.2,
            after=0.4)
    elif algorithm == 'Engelse-Zeelenberg-biosppy':
        order = int(0.3 * freq_sampling)
        filtered, _, _ = bsp_tools.filter_signal(signal=sig,
                                                 ftype='FIR',
                                                 band='bandpass',
                                                 order=order,
                                                 frequency=[3, 45],
                                                 sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.engzee_segmenter(signal=filtered,
                                           sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.correct_rpeaks(signal=filtered,
                                         rpeaks=rpeaks,
                                         sampling_rate=freq_sampling,
                                         tol=0.05)
        _, qrs_detections = bsp_ecg.extract_heartbeats(
            signal=filtered,
            rpeaks=rpeaks,
            sampling_rate=freq_sampling,
            before=0.2,
            after=0.4)
    elif algorithm == 'Gamboa-biosppy':
        order = int(0.3 * freq_sampling)
        filtered, _, _ = bsp_tools.filter_signal(signal=sig,
                                                 ftype='FIR',
                                                 band='bandpass',
                                                 order=order,
                                                 frequency=[3, 45],
                                                 sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.gamboa_segmenter(signal=filtered,
                                           sampling_rate=freq_sampling)
        rpeaks, = bsp_ecg.correct_rpeaks(signal=filtered,
                                         rpeaks=rpeaks,
                                         sampling_rate=freq_sampling,
                                         tol=0.05)
        _, qrs_detections = bsp_ecg.extract_heartbeats(
            signal=filtered,
            rpeaks=rpeaks,
            sampling_rate=freq_sampling,
            before=0.2,
            after=0.4)
    elif algorithm == 'mne-ecg':
        qrs_detections = mne_ecg.qrs_detector(freq_sampling, sig)
    elif algorithm == 'heartpy':
        rol_mean = rolling_mean(sig, windowsize=0.75, sample_rate=100.0)
        qrs_detections = hp_pkdetection.detect_peaks(
            sig, rol_mean, ma_perc=20, sample_rate=100.0)['peaklist']
    elif algorithm == 'gqrs-wfdb':
        qrs_detections = processing.qrs.gqrs_detect(sig=sig, fs=freq_sampling)
    elif algorithm == 'xqrs-wfdb':
        qrs_detections = processing.xqrs_detect(sig=sig, fs=freq_sampling)
    else:
        raise ValueError(
            f'Sorry... unknown algorithm. Please check the list {algorithms_list}'
        )
    cast_qrs_detections = [int(element) for element in qrs_detections]
    return cast_qrs_detections
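An example call, assuming a WFDB record is loaded with the wfdb package; the record path below is a placeholder.

import wfdb

record = wfdb.rdrecord('mit-bih-arrhythmia-database-1.0.0/100')  # placeholder path
sig = record.p_signal[:, 0]
locs = run_algo('Pan-Tompkins-ecg-detector', sig, int(record.fs))
print('%d QRS complexes detected' % len(locs))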
Code Example #6
File: rpeak_detection.py  Project: bahp/vital-sqi
    def ecg_detector(self, s, detector_type="pan_tompkins"):
        """
        Expose

        ECG peak detector from the github
        https://github.com/berndporr/py-ecg-detectors

        Parameters
        ----------
        s :
            Input signal

        fs:
            The signal frequency. Default is '256 Hz'

        detector_type : str
            'hamilton': Open Source ECG Analysis Software Documentation,
            E.P.Limited, 2002.

            'christov': Real time electrocardiogram QRS detection using
            combined adaptive threshold.

            'engzee': A single scan algorithm for QRS detection and
            feature extraction.

            'swt': Real-time QRS detector using Stationary Wavelet Transform
            for Automated ECG Analysis.
            Uses the Pan and Tompkins thresholding.

            'mva': Frequency Bands Effects on QRS Detection (two moving
            average detector).

            'mtemp': Matched template detector (delegates to
            ``self.matched_filter_detector``).

            'pan_tompkins': A Real-Time QRS Detection Algorithm.

            Default = 'pan_tompkins'.


        Returns
        -------
        numpy.ndarray
            1-D array of the detected peak indices.

        """
        if self.wave_type == 'ppg':
            warnings.warn("An ECG detector is being used on a PPG waveform. "
                          "The output may be incorrect.")
        detector = Detectors(self.fs)
        if detector_type == 'hamilton':
            res = detector.hamilton_detector(s)
        elif detector_type == 'christov':
            res = detector.christov_detector(s)
        elif detector_type == 'engzee':
            res = detector.engzee_detector(s)
        elif detector_type == 'swt':
            res = detector.swt_detector(s)
        elif detector_type == 'mva':
            res = detector.two_average_detector(s)
        elif detector_type == 'mtemp':
            res = self.matched_filter_detector(s)
        else:
            res = detector.pan_tompkins_detector(s)
        return np.array(res)
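A hedged usage sketch for the method above. The enclosing class is not shown in this excerpt; the `PeakDetector` name, its import path, and the constructor arguments below are assumptions based on the vital-sqi repository layout.

import numpy as np
from vital_sqi.common.rpeak_detection import PeakDetector  # assumed import path

fs = 256
ecg_segment = np.random.randn(10 * fs)              # placeholder waveform, not a real ECG

detector = PeakDetector(wave_type='ecg', fs=fs)     # assumed constructor signature
r_peaks = detector.ecg_detector(ecg_segment, detector_type='hamilton')
print(r_peaks)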