Esempio n. 1
0
def pan_tompkins_single(fields, record, save_path, sig, save=True):
    """Run the Pan-Tompkins R-peak detector on channel 0 of *sig*.

    When *save* is true the detected peaks are written out via
    save_prediction(); otherwise the peak list is returned.
    """
    peaks = Detectors(fields['fs']).pan_tompkins_detector(sig[:, 0])
    if not save:
        return peaks
    save_prediction(peaks, record, save_path)
Esempio n. 2
0
def stationary_wavelet_transform_single(fields, record, save_path, sig, save=True):
    """Run the stationary-wavelet-transform R-peak detector on channel 0.

    When *save* is true the detected peaks are written out via
    save_prediction(); otherwise the peak list is returned.
    """
    peaks = Detectors(fields['fs']).swt_detector(sig[:, 0])
    if not save:
        return peaks
    save_prediction(peaks, record, save_path)
Esempio n. 3
0
def test_compute_qrs_frames_correlation(ecg_signal=ecg_signal,
                                        qrs_frames_none=qrs_frames_none,
                                        fs=fs):
    """Check compute_qrs_frames_correlation on populated and empty inputs.

    The nominal case must return a float; an empty frame list on either
    side must yield a correlation of 0.
    """
    detector_bank = Detectors(fs)
    frames_swt = detector_bank.swt_detector(ecg_signal)
    frames_hamilton = bsp_ecg.hamilton_segmenter(
        signal=np.array(ecg_signal), sampling_rate=fs)[0]

    # Nominal case: both detectors produced frames.
    score = compute_qrs_frames_correlation(
        qrs_frames_1=frames_swt,
        qrs_frames_2=frames_hamilton,
        sampling_frequency=fs)
    assert isinstance(score, float)

    # Empty first frame list -> correlation defined as 0.
    score_none_first = compute_qrs_frames_correlation(
        qrs_frames_1=qrs_frames_none,
        qrs_frames_2=frames_hamilton,
        sampling_frequency=fs)
    assert score_none_first == 0

    # Empty second frame list -> correlation defined as 0.
    score_none_second = compute_qrs_frames_correlation(
        qrs_frames_1=frames_swt,
        qrs_frames_2=qrs_frames_none,
        sampling_frequency=fs)
    assert score_none_second == 0
Esempio n. 4
0
 def test_addRepetitionDelay(self):
     """Smoke test: read record 0 and run the Hamilton detector at 200 Hz.

     The repetition-delay checks themselves are currently disabled; this
     only verifies peak detection runs without raising.
     """
     ecg, _fecg = self.dataUtils.readData(0)
     _r_peaks = Detectors(200).hamilton_detector(ecg)
Esempio n. 5
0
def two_moving_average_single(fields, record, save_path, sig, save=True):
    """Run the two-moving-average R-peak detector on channel 0 of *sig*.

    When *save* is true the detected peaks are written out via
    save_prediction(); otherwise the peak list is returned.
    """
    peaks = Detectors(fields['fs']).two_average_detector(sig[:, 0])
    if not save:
        return peaks
    save_prediction(peaks, record, save_path)
Esempio n. 6
0
def qsqi(ecg_signal: list, sampling_frequency: int) -> float:
    """Matching degree of R-peak detection between two algorithms.

    Runs two independent R-wave detectors over the same signal and
    scores how well their detected QRS frames agree:

    * Hamilton (biosppy segmenter)
    * SWT (Stationary Wavelet Transform)

    Parameters
    ----------
    ecg_signal : list
        Input ECG signal.
    sampling_frequency : int
        Sampling frequency of the ECG signal in Hz.

    Returns
    -------
    q_sqi_score : float
        Correlation between the two detectors' QRS frames.
    """
    frames_swt = Detectors(sampling_frequency).swt_detector(ecg_signal)
    frames_hamilton = bsp_ecg.hamilton_segmenter(
        signal=np.array(ecg_signal), sampling_rate=sampling_frequency)[0]
    return compute_qrs_frames_correlation(frames_hamilton,
                                          frames_swt,
                                          sampling_frequency)
def stopmeasuring():
    """Stop the recording subprocess, compute HRV features and render a result.

    Kills the recording process (by the global ``sp`` PID), reads the
    captured 'data.csv', detects R peaks with Pan-Tompkins, derives
    RR-interval statistics (NN/RR, RMSSD, SDNN, AVNN, pNN50), cleans the
    signal columns and classifies the feature frame as stressed or not.

    Fix: the trailing ``print(class_)`` after the returns was unreachable
    dead code and has been removed.

    Returns
    -------
    Rendered 'stressed.html' or 'notstressed.html' template.
    """
    global sp
    print('killing process with id: ', sp)
    # Windows-only: kill the recording process tree by PID.
    os.system("taskkill /f /t /pid " + str(sp))
    print("subprocess stopped")

    df = pd.read_csv('data.csv', index_col=None, header=0)
    ecg_signal = df['ECG'] * 1000
    sr = 60  # NOTE(review): assumed recorder sampling rate (Hz) — confirm
    time = np.linspace(0, len(ecg_signal) / sr, len(ecg_signal))

    detectors = Detectors(sr)
    r_peaks = detectors.pan_tompkins_detector(ecg_signal)
    peaks = np.array(time)[r_peaks]

    # RR intervals; pad with the median so the length matches the peak count.
    RRinterval = np.diff(peaks)
    median = np.median(RRinterval)
    RRinterval = np.append(RRinterval, median)

    df = df.iloc[r_peaks]
    df["RRinterval"] = RRinterval
    # Replace implausible intervals (<0.5 s or >1.5 s) with the median,
    # then median-filter to suppress remaining spikes.
    df["RRinterval"] = np.where(df["RRinterval"] < 0.5, median,
                                df["RRinterval"])
    df["RRinterval"] = np.where(df["RRinterval"] > 1.5, median,
                                df["RRinterval"])
    df["RRinterval"] = signal.medfilt(df["RRinterval"], 5)

    # Time-domain HRV features over the cleaned RR series.
    rri = df['RRinterval']
    diff_rri = np.diff(rri)
    NNRR = round(len(np.diff(rri)) / len(rri), 6)
    RMSSD = np.sqrt(np.mean(diff_rri**2))
    SDNN = np.nanstd(rri, ddof=1)
    AVNN = np.nanmean(rri)
    nn50 = np.sum(np.abs(diff_rri) > 0.05)  # successive differences > 50 ms
    pNN50 = nn50 / len(rri)
    df['NNRR'] = NNRR
    df['RMSSD'] = RMSSD
    df['SDNN'] = SDNN
    df['AVNN'] = AVNN
    df['pNN50'] = pNN50
    print(df.describe())

    # Clean the raw channels: drop infinities, fill gaps with the column
    # mean, then median-filter each signal column.
    df = df.replace([np.inf, -np.inf], np.nan)
    for col in ('HR', 'RESP', 'EMG', 'ECG'):
        df[col].fillna((df[col].mean()), inplace=True)
        df[col] = signal.medfilt(df[col], 5)
    df = df.fillna(df.mean())

    df.to_csv('test_data2.csv', index=False)
    df = df.drop(['time'], axis=1)

    # NOTE(review): predict() is called on the ExtraTreesClassifier name
    # directly — confirm a *fitted* model instance is bound to this name
    # at runtime, otherwise this raises.
    predictions = ExtraTreesClassifier.predict(df)
    print('predictions: ', predictions)

    # Majority vote over per-beat predictions (1 == stressed).
    c = (predictions == 0).sum()
    c1 = (predictions == 1).sum()
    if c < c1:
        class_ = 'stressed'
        return render_template('stressed.html', pred=class_)
    else:
        class_ = 'not stressed'
        return render_template('notstressed.html', pred=class_)
def detect_r_peaks(SampleRate, data):
    """Detect R peaks in an ECG trace.

    Uses the Engelse-Zeelenberg detector from the ecgdetectors package
    and returns the peak positions as a numpy array of sample indices.
    """
    detector_bank = Detectors(SampleRate)
    peak_positions = detector_bank.engzee_detector(data)
    return np.array(peak_positions)
Esempio n. 9
0
def detect_qrs_swt(ecg_data, fs):
    """Detect QRS frames via the stationary wavelet transform detector.

    Best-effort: any failure is reported on stdout and an empty list is
    returned instead of propagating the exception.
    """
    try:
        return Detectors(fs).swt_detector(ecg_data)
    except Exception:
        # raise ValueError("swt")
        print("Exception in detect_qrs_swt")
    return []
Esempio n. 10
0
    def __init__(self, path):
        """Load one 360 Hz record and prepare its beat/arrhythmia annotations.

        Reads the record and its annotations, maps beat symbols to class
        ids, converts arrhythmia annotations and runs R-peak detection.

        Parameters
        ----------
        path : str
            Path to the record; the 3-character record id is taken from
            the end of the filename stem.
        """
        self.path = path
        # Record id: last 3 characters of the filename without extension.
        self.file = path.split(".")[0][-3:]

        # Beat symbol -> class id. NOTE(review): grouping resembles AAMI
        # classes (1 normal, 2 supraventricular, 3 ventricular, 4 fusion,
        # 5 paced/unknown) — confirm against the project's labeling scheme.
        self.good = {
            'N': 1,
            'L': 1,
            'R': 1,
            'B': 1,
            'A': 2,
            'a': 2,
            'J': 2,
            'S': 2,
            'V': 3,
            'r': 3,
            'F': 4,
            'e': 2,
            'j': 2,
            'n': 2,
            'E': 3,
            '/': 5,
            'f': 5,
            'Q': 5,
            '?': 5
        }
        # Rhythm (arrhythmia) annotation strings recognised in the record.
        self.ann_arr = [
            "(AB", "(AFIB", "(AFL", "(B", "(BII", "(IVR", "(N", "(NOD", "(P",
            "(PREX", "(SBR", "(SVTA", "(T", "(VFL", "(VT"
        ]
        # Signal-quality / non-beat annotation markers.
        self.quality = ["TS", "PSE", "MISSB", "U", "qq", 0]
        self.detectors = Detectors(360)  # records are sampled at 360 Hz

        self.record1 = None
        self.record2 = None
        self.record_detail = None
        self.n_ann = None  # number of annotations
        self.inds = None  # indexes of annotations within the 650 000-sample array
        self.arrhyt = None  # arrhythmia names (0 if none), aligned with self.inds
        self.beat = None  # beat names, aligned with self.inds

        # Read data for the sample.
        self.read_record()
        self.read_annot()

        self.good_int = [
            i for i in range(len(self.ann_arr))
        ]  # arrhythmia indexes corresponding to the values in self.ann_arr

        self.beats_full = None  # 0 for no annotation, 1-5 beat class otherwise
        self.arrhyt_full = None  # -1 for no annotation, 0-14 arrhythmia id otherwise

        self.clean_ann_beat()
        self.convert_arr_ann()

        self.r_peaks = None
        self.get_r_peaks()
Esempio n. 11
0
def engelze_zeelenberg_single(fields, record, save_path, sig, save=True):
    """Run the Engelse-Zeelenberg detector on channel 0 of *sig*.

    Falls back to a single dummy peak at index 0 when the detector
    raises, so downstream saving always has a non-empty peak list.
    When *save* is true the peaks are written via save_prediction();
    otherwise they are returned.
    """
    detectors = Detectors(fields['fs'])
    try:
        peaks = detectors.engzee_detector(sig[:, 0])
    except Exception:
        peaks = [0]
    if not save:
        return peaks
    save_prediction(peaks, record, save_path)
Esempio n. 12
0
    def pan_tompkins_detector(signal):
        """Run Pan-Tompkins R-peak detection on the first 8 leads of *signal*.

        Assumes a 500 Hz sampling rate and returns one list of peak
        indices per lead.
        """
        from ecgdetectors import Detectors
        detectors = Detectors(500)
        return [detectors.pan_tompkins_detector(signal[lead][:])
                for lead in range(8)]
Esempio n. 13
0
def heart_rate_variability(sample, lead, rpeak_method = 'string'):
    """Compute HRV metrics for one lead of one training-set record.

    Parameters
    ----------
    sample : int
        Index of the record in the loaded data set.
    lead : int
        Lead index within the record.
    rpeak_method : str
        R-peak detector: 'hamilton' (also the 'string' default),
        'christov', 'engelse', 'pan', 'stationary_wavelet' or
        'two_moving_average'.

    Returns
    -------
    results, hr, freq_results
        Full HRV results, heart-rate parameters, and the FFT frequency
        bands taken from the results.

    Raises
    ------
    ValueError
        If *rpeak_method* is not a supported method name.  (Previously an
        unknown name fell through every branch and crashed later with an
        UnboundLocalError on r_peaks.)
    """
    curdir = 'DATA\TrainData_FeatureExtraction'
    [all_data, header_data, BAD_LABELS] = data_read.data_files_load(curdir)

    data = all_data[sample][lead]

    # Initialize the detector class with the sampling rate (500 Hz).
    detectors = Detectors(500)

    # Find R peaks using the requested method.
    if rpeak_method == 'hamilton' or rpeak_method == 'string':
        # Hamilton
        r_peaks = detectors.hamilton_detector(data)
    elif rpeak_method == 'christov':
        # Christov
        r_peaks = detectors.christov_detector(data)
    elif rpeak_method == 'engelse':
        # Engelse and Zeelenberg
        r_peaks = detectors.engzee_detector(data)
    elif rpeak_method == 'pan':
        # Pan and Tompkins
        r_peaks = detectors.pan_tompkins_detector(data)
    elif rpeak_method == 'stationary_wavelet':
        # Stationary Wavelet Transform
        r_peaks = detectors.swt_detector(data)
    elif rpeak_method == 'two_moving_average':
        # Two Moving Average
        r_peaks = detectors.two_average_detector(data)
    # 'matched_filter' would need a template file; see pyhrv documentation:
    # r_peaks = detectors.matched_filter_detector(data, template_file)
    else:
        # Fail fast with a clear message instead of an UnboundLocalError.
        raise ValueError("unknown rpeak_method: %r" % (rpeak_method,))

    # Compute NNI series.
    nn = nn_intervals(r_peaks)  # NOTE(review): nni seems off by a factor of 3 — verify units
    print("\n\n", nn, "\n\n")

    # Optional plots:
    # plot_ecg(data, sampling_rate=500)
    # tachogram(nn, sampling_rate=500)

    # Compute HRV.
    results = hrv(nn, None, None, 500)

    # Compute HR parameters.
    # NOTE(review): BPM looked too high here — verify against the raw trace.
    hr = heart_rate(nn)

    # Frequency analysis from the FFT bands.
    freq_results = results['fft_bands']

    return results, hr, freq_results
Esempio n. 14
0
def predict_labels(ecg_leads,fs,ecg_names,use_pretrained=False):
    '''
    Parameters
    ----------
    ecg_leads : list of numpy-Arrays
        ECG signals.
    fs : float
        Sampling frequency of the signals.
    ecg_names : list of str
        Unique identifier for each ECG signal.
    use_pretrained : bool
        Load the pretrained model file instead of the locally trained one.

    Returns
    -------
    predictions : list of tuples
        (ecg_name, diagnosis) pairs - format must stay unchanged.
    '''
    # Load the simple one-parameter model: an SDNN threshold.
    model_name = "model_pretrained.npy" if use_pretrained else "model.npy"
    with open(model_name, 'rb') as f:
        th_opt = np.load(f)

    detectors = Detectors(fs)  # QRS detector initialisation

    predictions = []
    for idx, ecg_lead in enumerate(ecg_leads):
        # Detect QRS complexes and compute SDNN (ms) over the RR series.
        r_peaks = detectors.hamilton_detector(ecg_lead)
        sdnn = np.std(np.diff(r_peaks) / fs * 1000)
        # Below the threshold -> normal rhythm, above -> atrial fibrillation.
        label = 'N' if sdnn < th_opt else 'A'
        predictions.append((ecg_names[idx], label))
        # Progress output every 100 processed files.
        if ((idx + 1) % 100) == 0:
            print(str(idx+1) + "\t Dateien wurden verarbeitet.")

    return predictions
Esempio n. 15
0
def extract_features(data, sampling_rate):
    """Build a feature matrix from 30-second windows of EDA + ECG data.

    Each window is processed independently (EDA features from column 0,
    ECG features from column 1); windows whose processing raises are
    skipped.  Returns the stacked feature rows and the column names
    (EDA features, ECG features, then the stress-level label).
    """
    detector = Detectors(sampling_rate)
    hrv_class = HRV(sampling_rate)

    windows = split_in_seconds(data, sampling_rate, 30)
    rows = None
    names = None
    for idx, window in enumerate(windows):
        print(f'Procesando split {idx} con longitud {len(window)}')
        try:
            ecg, names_ecg = ecg_processing(window[:, 1], detector, hrv_class)
            eda, names_eda = eda_processing(window[:, 0])
            features = np.hstack((eda, ecg))
            if rows is None:
                # First successful window fixes the row width and names.
                rows = np.empty((0, len(features)))
                names = names_eda + names_ecg + ["stress_lvl"]
            rows = np.vstack((rows, features))
        except Exception:
            print("Error en un dato. Continuando...")
            continue
    return rows, names
Esempio n. 16
0
 def test_ecg_detectors_package(self):
     """Run Pan-Tompkins on MIT-DB records 100-299 and write .atr annotations.

     Records that fail to load or process are reported and skipped.
     """
     for rec_id in range(100, 300):
         try:
             sig, fields = wfdb.rdsamp(self.mitdb + str(rec_id), channels=[0])
             r_peaks = Detectors(fields['fs']).pan_tompkins_detector(sig[:, 0])
             # Save peaks as a WFDB annotation file, all labeled 'N'.
             wfdb.wrann(str(rec_id),
                        'atr',
                        sample=np.array(r_peaks),
                        write_dir="data/ann",
                        symbol=(['N'] * len(r_peaks)))
             print(rec_id)
         except Exception as e:
             print(rec_id, e)
Esempio n. 17
0
def find_peaks(ecg_df):
    """Locate R peaks in an ECG dataframe and return their timestamps.

    Expects columns 'ecg' and 'timestamp'; assumes a 500 Hz sampling
    rate and uses the Engelse-Zeelenberg detector.
    """
    sampling_rate = 500  # sampling freq. in Hz
    detector_bank = Detectors(sampling_rate)

    # Other detectors from the package could be substituted here, e.g.
    # two_average_detector, swt_detector, christov_detector,
    # hamilton_detector or pan_tompkins_detector.
    peak_indices = detector_bank.engzee_detector(ecg_df['ecg'].values)

    return ecg_df['timestamp'][peak_indices].values
Esempio n. 18
0
def ECG_segment(dataset, channel_num=0):
    """Segment each ECG record around its detected R peaks.

    For every record, preprocesses the selected channel, detects R peaks
    with Pan-Tompkins, and extracts a 500-sample window centered on each
    peak (zero-padded near the record edges).

    Fix: a peak lying exactly 250 samples from the end previously matched
    no branch (the in-bounds test used a strict '<'), so an all-zero
    segment was appended; the boundary test is now inclusive.

    Parameters
    ----------
    dataset : iterable
        Records where rec[0] is a (samples, channels) array.
    channel_num : int
        Channel to segment (default 0).

    Returns
    -------
    segments_per_ECG : list of list of np.ndarray
        500-sample segments per record.
    all_preprocessed_datas : list of np.ndarray
        Preprocessed signal per record.
    all_r_peaks : list of list of int
        Detected R-peak indices per record.

    Notes
    -----
    Relies on a module-level ``fs`` sampling-rate variable.
    """
    segments_per_ECG = []
    all_preprocessed_datas = []
    all_r_peaks = []
    for rec in dataset:
        ecg_channel = rec[0][:, channel_num]
        pre_processed = overall_preprocessing(ecg_channel)
        pre_processed = pre_processed[:-9]  # drop the filter tail

        # Peak detection (fs comes from module scope).
        detectors = Detectors(fs)
        r_peaks = detectors.pan_tompkins_detector(pre_processed)
        all_r_peaks.append(r_peaks)

        # Extract a 500-sample window centered on each peak.
        n = len(pre_processed)
        all_segments = []
        for peak in r_peaks:
            segment = np.zeros(500, dtype=float)
            if peak - 250 >= 0 and peak + 250 <= n:
                # Fully inside the record (inclusive upper bound — see fix).
                segment = pre_processed[peak - 250:peak + 250]
            elif peak - 250 < 0:
                # Peak near the start: left-pad with zeros.
                n_zeros = 250 - peak
                segment[n_zeros:500] = pre_processed[0:peak + 250]
            else:
                # Peak near the end: right-pad with zeros.
                n_zeros = peak + 250 - n
                segment[0:500 - n_zeros] = pre_processed[peak - 250:n]
            all_segments.append(segment)

        all_preprocessed_datas.append(pre_processed)
        segments_per_ECG.append(all_segments)

    return segments_per_ECG, all_preprocessed_datas, all_r_peaks
Esempio n. 19
0
def run_MIT_tests():
    """Evaluate detectors against the MIT-BIH arrhythmia database.

    Runs the matched-filter detector on its own (results written to CSV),
    then the full detector suite (the tester saves each detector's
    results itself; this takes some time).
    """
    mit_test = MITDB_test()
    mit_detectors = Detectors(360)  # MIT-BIH is sampled at 360 Hz

    # Single-detector test.
    matched_filter_results = mit_test.single_classifier_test(
        mit_detectors.matched_filter_detector, tolerance=0)
    np.savetxt('matched_filter_mit.csv', matched_filter_results,
               fmt='%i', delimiter=',')

    # All detectors on MITDB; will save results to CSV, will take some time.
    mit_test.classifer_test_all()
Esempio n. 20
0
def det_from_name(detector_name, fs):
    """Return the bound detector method matching *detector_name*.

    The Detectors instance is built for sampling rate *fs* with an
    Engelse-Zeelenberg fake delay of 10 samples.

    Raises
    ------
    RuntimeError
        For an unrecognised detector name.
    """
    detectors = Detectors(fs)
    detectors.engzee_fake_delay = 10

    dispatch = {
        'two_average': detectors.two_average_detector,
        'matched_filter': detectors.matched_filter_detector,
        'swt': detectors.swt_detector,
        'engzee': detectors.engzee_detector,
        'christov': detectors.christov_detector,
        'hamilton': detectors.hamilton_detector,
        'pan_tompkins': detectors.pan_tompkins_detector,
    }
    try:
        return dispatch[detector_name]
    except KeyError:
        raise RuntimeError('invalid detector name!') from None
Esempio n. 21
0
def num_beats(voltage, time):
    """Counts number of heartbeats in ECG strip data

    Each peak of an ECG indicates a heart beat (one QRS complex).  The
    sampling frequency is inferred from the spacing of the first two
    time samples and peaks are located with the Pan-Tompkins detector;
    the count can then be used to determine heart rate.

    Args:
        voltage (list): voltage data of the ECG strip
        time (list): time data of the ECG strip

    Returns:
        int: number of peaks/beats in the ECG strip
        list: indices of ECG peaks identified
    """
    logging.info("Calculating number of beats in ECG trace")
    sample_rate = 1 / (time[1] - time[0])
    peak_indices = Detectors(sample_rate).pan_tompkins_detector(voltage)
    return len(peak_indices), peak_indices
total_subjects = 25
subject = []

if len(sys.argv) < 2:
    print("Specify 'e' for Einthoven or 'v' for chest strap ECG.")
    exit(1)

for i in range(total_subjects):
    #for i in range(2):
    print(i)
    sitting_class = Ecg(data_path, i, 'sitting')
    sitting_class.filter_data()
    maths_class = Ecg(data_path, i, 'maths')
    maths_class.filter_data()

    detectors = Detectors(sitting_class.fs)

    if sitting_class.anno_cs_exists and maths_class.anno_cs_exists and (i !=
                                                                        11):
        subject.append(i)

        hrv_class = HRV(sitting_class.fs)

        if "e" in sys.argv[1]:
            ecg_channel_sitting = sitting_class.einthoven_II
            ecg_channel_maths = maths_class.einthoven_II
        elif "v" in sys.argv[1]:
            ecg_channel_sitting = sitting_class.cs_V2_V1
            ecg_channel_maths = maths_class.cs_V2_V1
        else:
            print(
Esempio n. 23
0
        interval_differences_for_jitter.append(difference)

    missed_beats = unused_anno #for clarity
    extra_beats = extra_det_posn #for clarity
    
    return interval_differences_for_jitter, missed_beats, extra_beats

#%%
"""
***************************
    START OF MAIN CODE   
***************************
"""

fs = 250 # sampling rate (Hz) of the GUDB recordings
detectors = Detectors(fs)# Initialise detectors for the 250 Hz sample rate (GUDB)

current_dir = pathlib.Path(__file__).resolve()

#%% Initialise parameters for analysis

save_global_results = True # when True, saves global jitter/missed/extra values as CSV and prints them

trim=True # set to False if trimming of annotated beats is not wanted
a = 10 # number of annotated beats to trim from start
b = -5 # number of annotated beats to trim from end
# * Trim values chosen by observation of the detector settling intervals required *

# Initialise matplotlib tick-label sizes for the plots (*if used*)
plt.rc('xtick',labelsize=10)
plt.rc('ytick',labelsize=10)
Esempio n. 24
0
                nan_count += 1

            data.append([user_id, action, ecg1, ecg2, radari, radarq, bp])
dataframe = pd.DataFrame(
    data, columns=['user', 'action', 'ecg1', 'ecg2', 'radari', 'radarq', 'bp'])
#dataframe.to_pickle('/scratch/lnw8px/ECG_radar/data.pkl')
print('how many values with nan are present now ? - ' + str(nan_count))

# Detect the R peaks from the ECG signal to derive heart rate,
# using the 'ecgdetectors' package from Glasgow University.
from ecgdetectors import Detectors

path = '/scratch/lnw8px/ECG_radar/clinical/GDN0001/GDN0001_1_Resting.mat'
x = loadmat(path)
fs_ecg = x['fs_ecg'][0, 0]  # ECG sampling frequency stored in the .mat file
detectors = Detectors(fs_ecg)
ecg2 = x['tfm_ecg2'].squeeze()
# Select a part of the signal for easy visualization.
sig = ecg2[8000:16000]
# Detect R peaks with the Engelse-Zeelenberg detector;
# see https://github.com/berndporr/py-ecg-detectors for details.
r_peaks = detectors.engzee_detector(sig)

plt.figure()
plt.plot(sig)
plt.plot(r_peaks, sig[r_peaks], 'ro')
plt.title('Detected R-peaks')

# Re-run over the full recording: the differences between successive
# R peaks give the time between two heart beats.
sig = ecg2
r_peaks = detectors.engzee_detector(sig)
Esempio n. 25
0
    def prep_data(self):
        """Function that:
        -Initializes ecgdetector class instance
        -Runs stationary wavelet transform peak detection
            -Implements 0.1-10Hz bandpass filter
            -DB3 wavelet transformation
            -Pan-Tompkins peak detection thresholding
        -Calculates RR intervals
        -Removes first peak if it is within median RR interval / 2 from start of window
        -Calculates average HR in the window
        -Determines if there are enough beats in the window to indicate a possible valid period
        """

        # Initializes Detectors class instance with sample rate
        detectors = Detectors(self.fs)

        # Runs peak detection on raw data ----------------------------------------------------------------------------
        # Uses ecgdetectors package -> stationary wavelet transformation + Pan-Tompkins peak detection algorithm
        self.r_peaks = list(detectors.swt_detector(unfiltered_ecg=self.filt_data))

        # List of peak indexes relative to start of data file (i = 0)
        self.output_r_peaks = [i + self.start_index for i in self.r_peaks]

        # Checks to see if there are enough potential peaks to correspond to correct HR range ------------------------
        # Requires number of beats in window that corresponds to ~40 bpm to continue
        # Prevents the math in the self.hr calculation from returning "valid" numbers with too few beats
        # i.e. 3 beats in 3 seconds (HR = 60bpm) but nothing detected for rest of epoch
        if len(self.r_peaks) >= np.floor(40/60*self.epoch_len):
            self.enough_beats = True

            n_beats = len(self.r_peaks)  # number of beats in window
            delta_t = (self.r_peaks[-1] - self.r_peaks[0]) / self.fs  # time between first and last beat, seconds
            self.hr = 60 * (n_beats-1) / delta_t  # average HR, bpm

        # Stops function if not enough peaks found to be a potential valid period
        # Threshold corresponds to number of beats in the window for a HR of 40 bpm
        if len(self.r_peaks) < np.floor(40/60*self.epoch_len):
            self.enough_beats = False
            self.valid_period = False
            return

        # Calculates RR intervals in seconds -------------------------------------------------------------------------
        for peak1, peak2 in zip(self.r_peaks[:], self.r_peaks[1:]):
            rr_interval = (peak2 - peak1) / self.fs
            self.delta_rr.append(rr_interval)

        # Approach 1: median RR characteristics ----------------------------------------------------------------------
        # Calculates median RR-interval in seconds
        median_rr = np.median(self.delta_rr)

        # Converts median_rr to samples
        self.median_rr = int(median_rr * self.fs)

        # Removes any peak too close to start/end of data section: affects windowing later on ------------------------
        # Peak removed if within median_rr/2 samples of start of window
        # Peak removed if within median_rr/2 samples of end of window
        # NOTE(review): this pops from self.r_peaks while enumerating it, which
        # shifts the remaining elements so the peak right after a removal is
        # skipped, and the recorded indexes refer to the already-shrunk list —
        # confirm whether at most one removal at each end is expected here.
        for i, peak in enumerate(self.r_peaks):
            if peak < (self.median_rr/2 + 1) or (self.epoch_len*self.fs - peak) < (self.median_rr/2 + 1):
                self.removed_peak.append(self.r_peaks.pop(i))
                self.removal_indexes.append(i)

        # Removes RR intervals corresponding to the removed peaks
        # NOTE(review): iterates over the post-removal peak count, not the
        # original delta_rr length — verify this matches the intent.
        if len(self.removal_indexes) != 0:
            self.delta_rr = [self.delta_rr[i] for i in range(len(self.r_peaks)) if i not in self.removal_indexes]

        # Calculates range of ECG voltage ----------------------------------------------------------------------------
        self.volt_range = max(self.raw_data) - min(self.raw_data)
Esempio n. 26
0
def run_GUDB_tests(leads):
    """Evaluate all detectors against the GUDB database.

    Parameters
    ----------
    leads
        Lead configuration passed through to the tester's
        ``classifer_test_all`` as ``config``.
    """
    gu_test = GUDB_test()
    # Note: the tester drives the detectors itself; a previously constructed
    # (and never used) Detectors(250) instance was removed.
    gu_test.classifer_test_all(tolerance=0, config=leads)
Esempio n. 27
0
import numpy as np
import matplotlib.pyplot as plt
import pathlib
from ecgdetectors import Detectors

current_dir = pathlib.Path(__file__).resolve()

# Load the bundled example ECG (tab-separated) and take the first channel.
example_dir = current_dir.parent / 'example_data' / 'ECG.tsv'
unfiltered_ecg_dat = np.loadtxt(example_dir)
unfiltered_ecg = unfiltered_ecg_dat[:, 0]
fs = 250  # sampling rate of the example data (Hz)

detectors = Detectors(fs)

# Pick one detector; the alternatives are kept commented for easy switching.
#r_peaks = detectors.two_average_detector(unfiltered_ecg)
#r_peaks = detectors.matched_filter_detector(unfiltered_ecg)
r_peaks = detectors.swt_detector(unfiltered_ecg)
#r_peaks = detectors.engzee_detector(unfiltered_ecg)
#r_peaks = detectors.christov_detector(unfiltered_ecg)
#r_peaks = detectors.hamilton_detector(unfiltered_ecg)
#r_peaks = detectors.pan_tompkins_detector(unfiltered_ecg)

# Plot the signal with the detected peaks marked.
plt.figure()
plt.plot(unfiltered_ecg)
plt.plot(r_peaks, unfiltered_ecg[r_peaks], 'ro')
plt.title('Detected R-peaks')

plt.show()
Esempio n. 28
0
import numpy as np
from tester_MITDB import MITDB_test
from tester_GUDB import GUDB_test
from ecgdetectors import Detectors

# MIT-BIH database testing (records sampled at 360 Hz).
mit_test = MITDB_test(r'C:\Users\Luis\Documents\MITDB')
mit_detectors = Detectors(360)

# Test a single detector and save its per-record results to CSV.
matched_filter_mit = mit_test.single_classifier_test(
    mit_detectors.matched_filter_detector, tolerance=0, print_results=True)
np.savetxt('matched_filter_mit.csv',
           matched_filter_mit,
           fmt='%i',
           delimiter=',')

# Test all detectors on MITDB; will save results to csv, will take some time.
mit_results = mit_test.classifer_test_all()

# McNemar's statistical test comparing two detectors' per-record outcomes.
Z_value_mit = mit_test.mcnemars_test(mit_detectors.swt_detector,
                                     mit_detectors.two_average_detector,
                                     print_results=True)

# GUDB database testing (records sampled at 250 Hz).
gu_test = GUDB_test()
gu_detectors = Detectors(250)

swt_gudb = gu_test.single_classifier_test(gu_detectors.swt_detector,
                                          tolerance=0,
Esempio n. 29
0
import pandas as pd
import neurokit2 as nk
from sklearn.datasets import make_hastie_10_2
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
from wettbewerb import load_references
from imblearn.over_sampling import SMOTE

import warnings
warnings.filterwarnings("ignore")

ecg_leads, ecg_labels, fs, ecg_names = load_references(
)  # Load ECG files, their diagnoses, sampling frequency (300 Hz) and names

features = np.array([])  # NOTE(review): np and Detectors are used here but not imported in this snippet — confirm they are imported elsewhere in the file
detectors = Detectors(fs)  # QRS detector initialisation
labels = np.array([])

for idx, ecg_lead in enumerate(ecg_leads):
    if (ecg_labels[idx] == "N") or (ecg_labels[idx] == "A"):
        peaks, info = nk.ecg_peaks(ecg_lead, sampling_rate=fs)
        peaks = peaks.astype('float64')
        hrv = nk.hrv_time(peaks, sampling_rate=fs)
        hrv = hrv.astype('float64')
        features = np.append(features, [
            hrv['HRV_CVNN'], hrv['HRV_CVSD'], hrv['HRV_HTI'], hrv['HRV_IQRNN'],
            hrv['HRV_MCVNN'], hrv['HRV_MadNN'], hrv['HRV_MeanNN'],
            hrv['HRV_MedianNN'], hrv['HRV_RMSSD'], hrv['HRV_SDNN'],
            hrv['HRV_SDSD'], hrv['HRV_TINN'], hrv['HRV_pNN20'],
            hrv['HRV_pNN50']
        ])
Esempio n. 30
0
import wfdb
from ecgdetectors import Detectors
import numpy as np

# Input records live under DATA_PATH; detector output goes to SAVE_PATH.
DATA_PATH = "/data/"
SAVE_PATH = "/pred/"

# The RECORDS index file lists one record name per line.
with open(DATA_PATH + "RECORDS", 'r') as f:
    records = f.readlines()
    records = list(map(lambda r: r.strip("\n"), records))

for record in records:
    # Read channel 0 and detect R peaks with Pan-Tompkins at the record's rate.
    sig, fields = wfdb.rdsamp(DATA_PATH + record, channels=[0])
    detectors = Detectors(fields['fs'])
    r_peaks = detectors.pan_tompkins_detector(sig[:, 0])
    if len(r_peaks) > 0:
        # Save the peaks as a WFDB annotation file ('atr', all labeled 'N').
        samples = np.array(r_peaks)
        wfdb.wrann(record,
                   'atr',
                   sample=samples,
                   write_dir=SAVE_PATH,
                   symbol=(['N'] * len(r_peaks)))