Exemplo n.º 1
0
def test_ecg_peaks():
    """Check R-peak detection shape and beat count on a simulated ECG."""
    fs = 1000
    simulated = nk.ecg_simulate(duration=120,
                                sampling_rate=fs,
                                noise=0.15,
                                random_state=42)
    cleaned = nk.ecg_clean(simulated, sampling_rate=fs, method="neurokit")

    # Run detection both without and with artifact correction; the expected
    # beat count (~139 over 120 s) is the same in either mode.
    for correct in (False, True):
        signals, info = nk.ecg_peaks(cleaned,
                                     correct_artifacts=correct,
                                     method="neurokit")
        assert signals.shape == (120000, 1)
        assert np.allclose(
            signals["ECG_R_Peaks"].values.sum(dtype=np.int64), 139, atol=1)
Exemplo n.º 2
0
def extract_rpeak_features(row, signal):
    """
    Extract the R peak features.

    :param row: a `BaseDataset` row to calculate the features from
    :param signal: the raw ECG signal
    :return: `row` with the added features
    """
    cleaned = nk.ecg_clean(signal, sampling_rate=row.Fs)
    peaks, info = nk.ecg_peaks(cleaned, sampling_rate=row.Fs)

    # Indices where the binary R-peak column is set, converted to seconds.
    peak_indices = np.where(peaks['ECG_R_Peaks'].to_numpy() == 1)[0]
    r_peaks_sec = peak_indices.astype(np.float32) / row.Fs

    n_peaks = len(r_peaks_sec)
    if n_peaks > 2:
        # HRV indices need more than two beats to be computable.
        hrv = nk.hrv(peaks, sampling_rate=row.Fs, show=False).iloc[0]
        row = row.append(hrv)
    row['N_QRS'] = n_peaks

    # Summary statistics of RR intervals and of the raw signal itself.
    row = row.append(get_statistics(np.diff(r_peaks_sec), 'RR'))
    row = row.append(get_statistics(signal, 'signal'))

    return row, info
def get_HRVs_values(data, header_data):
    """
    Compute time-domain HRV features plus mean P-peak amplitude for a record.

    :param data: 2-D array of lead signals; lead index 1 is used
    :param header_data: list of WFDB-style header lines (.hea content)
    :return: one-row DataFrame of HRV time features with added
             'mean_P_Peaks', 'age' and 'label' columns
    """
    tmp_hea = header_data[0].split(' ')
    num_leads = int(tmp_hea[1])
    sample_Fs = int(tmp_hea[2])
    gain_lead = np.zeros(num_leads)

    # Per-lead gain is the numerator of e.g. "1000/mV" in each signal line.
    for ii in range(num_leads):
        tmp_hea = header_data[ii + 1].split(' ')
        gain_lead[ii] = int(tmp_hea[2].split('/')[0])

    # for testing, we included the mean age of 57 if the age is a NaN
    # This value will change as more data is being released
    # Defaults guard against headers missing the #Age/#Sex/#Dx lines, which
    # previously left these names unbound (NameError at the bottom).
    age = 57
    sex = 0
    label = None
    for iline in header_data:
        if iline.startswith('#Age'):
            tmp_age = iline.split(': ')[1].strip()
            age = int(tmp_age if tmp_age != 'NaN' else 57)
        elif iline.startswith('#Sex'):
            sex = 1 if iline.split(': ')[1].strip() == 'Female' else 0
        elif iline.startswith('#Dx'):
            label = iline.split(': ')[1].split(',')[0]

    signal = data[1]
    gain = gain_lead[1]

    ecg_signal = nk.ecg_clean(signal * gain,
                              sampling_rate=sample_Fs,
                              method="biosppy")
    _, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=sample_Fs)
    hrv_time = nk.hrv_time(rpeaks, sampling_rate=sample_Fs)
    # hrv_non = nk.hrv_nonlinear(rpeaks, sampling_rate=sample_Fs)

    # BUG FIX: p_peaks was only bound inside the try block, so a ValueError
    # from ecg_delineate led to a NameError below.  Default to an empty list
    # so mean_P_Peaks degrades to NaN instead of crashing.
    p_peaks = []
    try:
        _, waves_peak = nk.ecg_delineate(ecg_signal,
                                         rpeaks,
                                         sampling_rate=sample_Fs)
        p_peaks = waves_peak['ECG_P_Peaks']
    except ValueError:
        print('Exception raised!')

    # Drop NaN entries (undetected P waves), then read the raw amplitudes.
    p_peaks = np.asarray(p_peaks, dtype=float)
    p_peaks = p_peaks[~np.isnan(p_peaks)]
    p_peaks = [int(a) for a in p_peaks]
    mean_P_Peaks = np.mean([signal[w] for w in p_peaks])

    hrv_time['mean_P_Peaks'] = mean_P_Peaks
    hrv_time['age'] = age
    hrv_time['label'] = label
    # df = pd.concat([hrv_time, hrv_non], axis=1)

    return hrv_time
Exemplo n.º 4
0
def apply_pan_tompkins_old(x,
                           n_beats=110,
                           fs=460,
                           left_data=0.25,
                           right_data=0.5):
    """Slice the signal into windows around each detected R peak.

    Returns a dict mapping R-peak sample index to the window spanning
    `left_data` seconds before to `right_data` seconds after the peak.
    When `n_beats` is set and fits, only that many central beats are kept;
    otherwise every beat except the first and last is returned.
    """
    _, rpeaks = nk.ecg_peaks(x, sampling_rate=fs)

    # Window extents in samples.
    left = int(left_data * fs)
    right = int(right_data * fs)
    beats = {p: x[p - left:p + right] for p in rpeaks['ECG_R_Peaks']}

    if n_beats and n_beats <= len(beats):
        # Keep n_beats beats centred on the middle of the recording.
        mid = int(len(beats) / 2)
        half = int(n_beats / 2)
        keep = list(beats.keys())[mid - half:mid + half]
    else:
        # Drop the first and last (possibly truncated) beats.
        keep = list(beats.keys())[1:-1]

    return {index: window for index, window in beats.items() if index in keep}
Exemplo n.º 5
0
def test_ecg_rate():
    """Validate heart-rate computation from detected R peaks."""
    fs = 1000
    simulated = nk.ecg_simulate(duration=120,
                                sampling_rate=fs,
                                noise=0.15,
                                random_state=42)
    cleaned = nk.ecg_clean(simulated, sampling_rate=fs, method="neurokit")
    signals, info = nk.ecg_peaks(cleaned, method="neurokit")

    # Without desired_length: one rate value per detected peak.
    rate = nk.ecg_rate(rpeaks=info, sampling_rate=fs)
    assert rate.shape == (info["ECG_R_Peaks"].size, )
    assert np.allclose(rate.mean(), 70, atol=2)

    # With desired_length: rate is interpolated onto that many samples.
    wanted_length = 1200
    rate = nk.ecg_rate(rpeaks=info,
                       sampling_rate=fs,
                       desired_length=wanted_length)
    assert rate.shape == (wanted_length, )
    assert np.allclose(rate.mean(), 70, atol=2)
Exemplo n.º 6
0
def test_ecg_delineate():
    """Check delineation output sizes for the derivative and CWT methods."""
    fs = 1000

    # test with simulated signals
    ecg = nk.ecg_simulate(duration=20, sampling_rate=fs, random_state=42)
    _, rpeaks = nk.ecg_peaks(ecg, sampling_rate=fs)
    n_rpeaks = len(rpeaks['ECG_R_Peaks'])

    # Method 1: derivative — one entry per detected R peak for every wave.
    _, waves_derivative = nk.ecg_delineate(ecg, rpeaks, sampling_rate=fs)
    for key in ('ECG_P_Peaks', 'ECG_Q_Peaks', 'ECG_S_Peaks', 'ECG_T_Peaks',
                'ECG_P_Onsets', 'ECG_T_Offsets'):
        assert len(waves_derivative[key]) == n_rpeaks

    # Method 2: CWT — counts may deviate by one from the expected values.
    _, waves_cwt = nk.ecg_delineate(ecg,
                                    rpeaks,
                                    sampling_rate=fs,
                                    method='cwt')
    expectations = (('ECG_P_Peaks', 22), ('ECG_T_Peaks', 22),
                    ('ECG_R_Onsets', 23), ('ECG_R_Offsets', 23),
                    ('ECG_P_Onsets', 22), ('ECG_P_Offsets', 22),
                    ('ECG_T_Onsets', 22), ('ECG_T_Offsets', 22))
    for key, expected in expectations:
        assert np.allclose(len(waves_cwt[key]), expected, atol=1)
    def qrs_detection_pantompkins_vs_neurokit(self, dataset):
        """Plot R-peak detections of Pan-Tompkins vs. Neurokit on one record.

        Cleans and peak-detects the first 20 seconds of record #50 with each
        method, then saves one annotated figure per method.

        :param dataset: object exposing ``.data`` (a DataFrame with Record/Fs
            columns) and ``.read_record()`` — assumed project dataset wrapper;
            TODO confirm against its definition
        """
        row = dataset.data.iloc[50]
        # First 20 seconds of the record (plus one sample).
        signal = dataset.read_record(row.Record)[:row.Fs * 20 + 1]

        method_names = {'pantompkins': 'Pan–Tompkins', 'neurokit': 'Neurokit'}

        for method in ['pantompkins', 'neurokit']:
            ecg_cleaned = nk.ecg_clean(signal,
                                       sampling_rate=row.Fs,
                                       method=method)
            peaks, info = nk.ecg_peaks(ecg_cleaned,
                                       sampling_rate=row.Fs,
                                       method=method)

            # Sample indices where the binary R-peak indicator is set.
            r_peaks = np.where(peaks['ECG_R_Peaks'].to_numpy() == 1)[0]

            # Raw signal trace with detected peaks overlaid as markers.
            fig = go.Figure()
            fig.add_trace(
                go.Scatter(x=np.arange(len(signal)) / row.Fs, y=signal))
            fig.add_trace(
                go.Scatter(mode='markers',
                           x=r_peaks / row.Fs,
                           y=signal[r_peaks]))
            fig.update_traces(marker=dict(size=8))
            self.set_ecg_layout(
                fig,
                title=f'{row.Record} - R peaks ({method_names[method]} method)',
                showlegend=False,
                xaxis=dict(range=[0, 20]),
                yaxis=dict(range=[-5000, 5000]))
            self.save_image(fig, f'qrs_{method}.png', width=900, height=300)
Exemplo n.º 8
0
def _detect_r_peaks(ecg_data, samplefreq):
    """Detect R peaks and return their sample indices as an int array."""
    _, rpeaks = nk.ecg_peaks(ecg_data, sampling_rate=samplefreq)
    r_peaks = rpeaks['ECG_R_Peaks']
    # Drop NaN entries before converting to integer indices.
    return np.delete(r_peaks, np.where(np.isnan(r_peaks))[0]).astype(int)


def find_R_peaks(ecg_data, samplefreq):
    """Find R-peak sample indices, retrying on a cleaned signal on failure.

    :param ecg_data: raw single-lead ECG signal
    :param samplefreq: sampling rate of the signal in Hz
    :return: int array of R-peak indices; falls back to [0, 1, 2, 3] if both
             raw and cleaned detection fail
    """
    # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate.
    try:
        r_peaks = _detect_r_peaks(ecg_data, samplefreq)
    except Exception:
        print("cleaning data")
        cleaned_ecg = nk.ecg_clean(ecg_data,
                                   sampling_rate=samplefreq,
                                   method="neurokit")
        try:
            r_peaks = _detect_r_peaks(cleaned_ecg, samplefreq)
        except Exception:
            print("could not analyse cleaned ECG")
            # Temporary fallback (original comment in Norwegian:
            # "Midlertidig løsning").
            r_peaks = np.array([0, 1, 2, 3])
    return r_peaks
Exemplo n.º 9
0
def compute_qrs_ratio(x, fs=460, length_of_beat=0.75):
    """Mean QRS width expressed as a fraction of a nominal beat length."""
    # Find Q and S peaks
    _, rpeaks = nk.ecg_peaks(x, sampling_rate=fs)
    _, waves_peak = nk.ecg_delineate(x,
                                     rpeaks,
                                     sampling_rate=fs,
                                     method="peak")

    # QRS width in samples for each beat: S index minus Q index.
    widths = [s - q for s, q in zip(waves_peak['ECG_S_Peaks'],
                                    waves_peak['ECG_Q_Peaks'])]

    # Normalise by the nominal beat duration, skipping undetected (NaN) beats.
    beat_samples = int(length_of_beat * fs)
    ratios = [w / beat_samples for w in widths if not np.isnan(w)]
    return sum(ratios) / len(ratios)
def generate_features(ecg, header):
    """Compute P-wave feature statistics for each lead of a 12-lead ECG.

    :param ecg: iterable of lead signals (12-lead ECG)
    :param header: header lines; lead names are parsed from the '.mat' lines
    :return: dict mapping lead name -> feature dict, or None when any lead is
        all-zero after cleaning, has no R peaks, or delineation fails
    """
    #input: 12-lead ecg and its header
    fs = 500  # NOTE(review): hard-coded sampling rate — confirm it matches the data
    features = {}

    # Lead names come from the record lines, after the second " 0 " token.
    lead_names = []
    for iline in header:
        if '.mat' in iline:
            name = iline.split(' 0 ')[2].strip()
            lead_names.append(name)

    for ecg_signal, lead in zip(ecg, lead_names):

        ecg_cleaned = nk.ecg_clean(ecg_signal, sampling_rate=fs)

        # A flat (all-zero) lead invalidates the whole record.
        if np.all((ecg_cleaned == 0)):
            return None
        else:
            _, rpeaks = nk.ecg_peaks(ecg_cleaned, sampling_rate=fs)

            # No detectable beats also invalidates the record.
            if rpeaks['ECG_R_Peaks'].size == 0:
                return None
            else:
                try:
                    # DWT-based delineation to locate P waves and onsets.
                    signal_dwt, waves_dwt = nk.ecg_delineate(
                        ecg_cleaned,
                        rpeaks['ECG_R_Peaks'],
                        sampling_rate=fs,
                        method="dwt")
                    biphase, areas, t_till_peaks, ampls, dur, idxs, pq_intervals = p_peak_features(
                        ecg_cleaned, waves_dwt)
                    # Aggregate each per-beat series into summary statistics.
                    features_for_single_lead = {
                        'PQ_int': calculate_features(pq_intervals),
                        'P_dur': calculate_features(dur),
                        'Area/Dur_P': calculate_features(idxs),
                        'Area_under_P': calculate_features(areas),
                        'P_amp': calculate_features(ampls),
                        'Time_till_P': calculate_features(t_till_peaks),
                        'Biphase_P': calculate_features(biphase)
                    }
                except IndexError:
                    return None

        features[lead] = features_for_single_lead

    return features
Exemplo n.º 11
0
def my_processing(ecg_signal):
    """Clean an ECG, detect R peaks, and attempt full wave delineation.

    :param ecg_signal: raw single-lead ECG sampled at 300 Hz
    :return: tuple (cleaned signal, delineation signals, delineation waves,
             R-peak info dict); the delineation outputs are NaN when
             delineation fails
    """
    # Try processing
    ecg_cleaned = nk.ecg_clean(ecg_signal, sampling_rate=300, method="biosppy")
    instant_peaks, rpeaks = nk.ecg_peaks(ecg_cleaned,
                                         sampling_rate=300,
                                         method='hamilton2002')
    info = rpeaks
    try:
        # Additional info of the ecg signal
        delineate_signal, delineate_waves = nk.ecg_delineate(
            ecg_cleaned=ecg_cleaned,
            rpeaks=rpeaks,
            sampling_rate=300,
            method='cwt')
    # Narrowed from bare `except:`; `np.NaN` was removed in NumPy 2.0, so use
    # the canonical `np.nan` spelling.
    except Exception:
        delineate_signal = np.nan
        delineate_waves = np.nan
    return ecg_cleaned, delineate_signal, delineate_waves, info
Exemplo n.º 12
0
def Get_R_Peaks(ecg_signal):
    """Locate R peaks in an ECG sampled at 3000 Hz and save two plots."""
    # Extract R-peak locations from ecg signal file
    print(
        "========          Retrieving R Peaks of QRS complex         ========")
    _, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=3000)

    #Print to console
    for key, value in rpeaks.items():
        print(key, ' : ', value)
    print()

    # Full-signal overview of the detected peaks **for Jupyter Notebook
    fig_all = nk.events_plot(rpeaks['ECG_R_Peaks'], ecg_signal)
    fig_all.savefig("rpeaks_signal", dpi=300)

    # Zoomed view of the first 5 R-peaks **for Jupyter Notebook
    fig_head = nk.events_plot(rpeaks['ECG_R_Peaks'][:5], ecg_signal[:20000])
    fig_head.savefig("rpeaks_head", dpi=300)

    return rpeaks
Exemplo n.º 13
0
def find_hr(signal, t):
    """Compute heart rate (bpm) from R peaks and refresh the live ECG plot."""
    _, peak_info = nk.ecg_peaks(signal, sampling_rate=500)
    r_peaks = peak_info['ECG_R_Peaks']

    # Average rate across the span from the first to the last detected peak.
    beat_count = len(r_peaks)
    span_seconds = t[r_peaks[-1]] - t[r_peaks[0]]
    hr = (beat_count - 1) * 60 / span_seconds

    # Update ECG plot
    plt.clf()
    plt.plot(t, signal)
    plt.plot(t[r_peaks], signal[r_peaks], "x")
    plt.title("Electrocardiogram (ECG), Heart Rate: %.1f bpm" % hr)
    plt.xlabel("Time (seconds)")
    plt.ylabel("Amplitude (arbitrary)")
    plt.pause(0.0001)
    plt.show()

    return hr
Exemplo n.º 14
0
from wettbewerb import load_references
from imblearn.over_sampling import SMOTE

import warnings
warnings.filterwarnings("ignore")

ecg_leads, ecg_labels, fs, ecg_names = load_references(
)  # Import ECG files, their diagnoses, sampling frequency (Hz) and names                                                # sampling frequency 300 Hz

features = np.array([])
detectors = Detectors(fs)  # Initialise the QRS detector
labels = np.array([])

# Build 14 time-domain HRV features per signal, keeping only the
# normal ('N') and atrial-fibrillation ('A') classes.
for idx, ecg_lead in enumerate(ecg_leads):
    if (ecg_labels[idx] == "N") or (ecg_labels[idx] == "A"):
        peaks, info = nk.ecg_peaks(ecg_lead, sampling_rate=fs)
        peaks = peaks.astype('float64')
        hrv = nk.hrv_time(peaks, sampling_rate=fs)
        hrv = hrv.astype('float64')
        features = np.append(features, [
            hrv['HRV_CVNN'], hrv['HRV_CVSD'], hrv['HRV_HTI'], hrv['HRV_IQRNN'],
            hrv['HRV_MCVNN'], hrv['HRV_MadNN'], hrv['HRV_MeanNN'],
            hrv['HRV_MedianNN'], hrv['HRV_RMSSD'], hrv['HRV_SDNN'],
            hrv['HRV_SDSD'], hrv['HRV_TINN'], hrv['HRV_pNN20'],
            hrv['HRV_pNN50']
        ])
        features = features.astype('float64')
        labels = np.append(labels, ecg_labels[idx])

# Reshape the flat feature vector into one row of 14 features per signal.
features = features.reshape(int(len(features) / 14), 14)
x = np.isnan(features)  # mask of NaN entries (e.g. signals too short for HRV)
Exemplo n.º 15
0
 def _find_R_peaks(ecg_data, samplefreq):
     """Return integer sample indices of R peaks in lead 1 of ecg_data.

     :param ecg_data: indexable collection of leads; only ecg_data[1] is used
     :param samplefreq: sampling rate in Hz
     :return: int array of R-peak sample indices with NaN entries removed
     """
     _, rpeaks = nk.ecg_peaks(ecg_data[1], sampling_rate=samplefreq)
     r_peaks = rpeaks['ECG_R_Peaks']
     # Drop NaN entries before converting to integer indices.
     r_peaks = np.delete(r_peaks,
                         np.where(np.isnan(r_peaks))[0]).astype(int)
     return r_peaks
Exemplo n.º 16
0
import pandas as pd
import csv

# Each CSV row is one ECG recording (no header row).
data = pd.read_csv('/content/drive/My Drive/Anxiety Spider Dataset/ECG/ECG_Combined.csv',header=None)

data.head  # NOTE(review): missing call parentheses — this is a no-op expression

len(data)

# Inspect one example recording.
ecg=data.iloc[256]
nk.signal_plot(ecg)

signals, info = nk.ecg_process(ecg,sampling_rate=100)
signals

peaks, info = nk.ecg_peaks(ecg, sampling_rate=100)
nk.hrv(peaks, sampling_rate=100, show=True)

ecg_features=nk.hrv(peaks, sampling_rate=100)

# X=ecg_features[["HRV_RMSSD","HRV_MeanNN","HRV_SDNN", "HRV_SDSD", "HRV_CVNN", "HRV_CVSD", "HRV_MedianNN", "HRV_MadNN", "HRV_MCVNN", "HRV_IQRNN", "HRV_pNN50", "HRV_pNN20"]]
# X

# Compute the full HRV feature set for every recording in the dataset.
data_features=[]
for i in range(0,len(data)):
  ecg=data.iloc[i]
  peaks, info = nk.ecg_peaks(ecg, sampling_rate=100)
  ecg_features=nk.hrv(peaks, sampling_rate=100)
  X=ecg_features[["HRV_RMSSD","HRV_MeanNN","HRV_SDNN", "HRV_SDSD", "HRV_CVNN", "HRV_CVSD", "HRV_MedianNN", "HRV_MadNN", "HRV_MCVNN", "HRV_IQRNN", "HRV_pNN50", "HRV_pNN20", "HRV_TINN",	"HRV_HTI",	"HRV_ULF",	"HRV_VLF",	"HRV_LF",	"HRV_HF",	"HRV_VHF",	"HRV_LFHF", "HRV_LFn",	"HRV_HFn",	"HRV_LnHF",	"HRV_SD1",	"HRV_SD2",	"HRV_SD1SD2",	"HRV_S",	"HRV_CSI",	"HRV_CVI", "HRV_CSI_Modified", "HRV_PIP",	"HRV_IALS",	"HRV_PSS",	"HRV_PAS",	"HRV_GI",	"HRV_SI",	"HRV_AI",	"HRV_PI",	"HRV_C1d",	"HRV_C1a",	"HRV_SD1d", "HRV_SD1a",	"HRV_C2d",	"HRV_C2a",	"HRV_SD2d",	"HRV_SD2a",	"HRV_Cd",	"HRV_Ca",	"HRV_SDNNd",	"HRV_SDNNa",	"HRV_ApEn",	"HRV_SampEn"]]
  data_features.append(X)
Exemplo n.º 17
0
def kalidas2017(ecg, sampling_rate):
    """Detect R peaks with the Kalidas & Tamil (2017) method.

    Returns the array of R-peak sample indices.
    """
    _, info = nk.ecg_peaks(ecg,
                           sampling_rate=sampling_rate,
                           method="kalidas2017")
    return info["ECG_R_Peaks"]
Exemplo n.º 18
0
def engzeemod2012(ecg, sampling_rate):
    """Detect R peaks with the modified Engelse-Zeelenberg (2012) method.

    Returns the array of R-peak sample indices.
    """
    _, info = nk.ecg_peaks(ecg,
                           sampling_rate=sampling_rate,
                           method="engzeemod2012")
    return info["ECG_R_Peaks"]
Exemplo n.º 19
0
# Plot the original, distorted and cleaned signals together
# (these names are defined earlier in the file, outside this chunk).
plot = nk.signal_plot([original, distorted, cleaned])

# Save plot
fig = plt.gcf()
fig.set_size_inches(10, 6)
# NOTE(review): `h_pad` is not a documented savefig argument (it belongs to
# tight_layout) — confirm this call succeeds on the matplotlib version in use.
fig.savefig("README_signalprocessing.png", dpi=300, h_pad=3)

# =============================================================================
# Heart Rate Variability
# =============================================================================

# Download data
data = nk.data("bio_resting_8min_100hz")

# Find peaks
peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)

# Compute HRV indices
hrv = nk.hrv(peaks, sampling_rate=100, show=True)
hrv

# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 6 * 1.5, forward=True)
fig.savefig("README_hrv.png", dpi=300, h_pad=3)

# =============================================================================
# ECG Delineation
# =============================================================================

# Download data
Exemplo n.º 20
0
def predict_labels(ecg_leads, fs, ecg_names, use_pretrained=False):
    '''
    Classify ECG signals with a pre-trained XGBoost model on HRV features.

    Parameters
    ----------
    ecg_leads : list of numpy-Arrays
        ECG signals.
    fs : float
        Sampling frequency of the signals.
    ecg_names : list of str
        Unique identifier for each ECG signal.
    use_pretrained : bool, optional
        Present for interface compatibility; not used by the active
        (XGBoost) pipeline.

    Returns
    -------
    predictions : list of tuples
        (ecg_name, predicted label) per signal.
    '''
    # NOTE(review): several hundred lines of commented-out experiments
    # (threshold model, CNN-on-images pipeline, Gradient Boosting, AdaBoost,
    # CNN-on-HRV-features) were removed; recover them from version control
    # if needed.  Only the XGBoost path below was active.
    import warnings
    warnings.filterwarnings("ignore")

    test_features = np.array([])

    # Extract 14 time-domain HRV features per ECG signal.
    for idx, ecg_lead in enumerate(ecg_leads):
        peaks, info = nk.ecg_peaks(ecg_lead, sampling_rate=fs)
        peaks = peaks.astype('float64')
        hrv = nk.hrv_time(peaks, sampling_rate=fs)
        hrv = hrv.astype('float64')
        test_features = np.append(test_features, [
            hrv['HRV_CVNN'], hrv['HRV_CVSD'], hrv['HRV_HTI'], hrv['HRV_IQRNN'],
            hrv['HRV_MCVNN'], hrv['HRV_MadNN'], hrv['HRV_MeanNN'],
            hrv['HRV_MedianNN'], hrv['HRV_RMSSD'], hrv['HRV_SDNN'],
            hrv['HRV_SDSD'], hrv['HRV_TINN'], hrv['HRV_pNN20'],
            hrv['HRV_pNN50']
        ])
        test_features = test_features.astype('float64')

    # One row of 14 features per signal; replace NaN values with 0.
    test_features = test_features.reshape(int(len(test_features) / 14), 14)
    x = np.isnan(test_features)
    test_features[x] = 0

    X_test = test_features

    # Predict with the pre-trained XGBoost model.
    loaded_model = joblib.load('XGBoostClassifier')
    pred_labels = loaded_model.predict(X_test)

    # a list of tuples
    predictions = list()
    for idx in range(len(X_test)):
        predictions.append((ecg_names[idx], pred_labels[idx]))

    return predictions  # list of tuples in the format (ecg_name, label) - must remain unchanged!
Exemplo n.º 21
0
ecg_wave = np.load('test.npy')  # 10-second ECG (sampling rate=500)
#ecg_wave.shape -> (5000,)
def bandpass(lowcut, highcut, order=5, fs=500):
    """Design a Butterworth band-pass filter.

    :param lowcut: lower cutoff frequency in Hz
    :param highcut: upper cutoff frequency in Hz
    :param order: filter order (default 5)
    :param fs: sampling rate in Hz (default 500, matching the loaded ECG;
               previously hard-coded — parameterized for reuse)
    :return: (b, a) coefficients for use with e.g. scipy.signal.filtfilt
    """
    nyq = 0.5 * fs  # Nyquist frequency
    low = lowcut / nyq
    high = highcut / nyq
    b, a = butter(order, [low, high], btype='band')
    return b, a


b, a = bandpass(0.5, 45)
# Zero-phase band-pass filtering; the first/last 250 samples are trimmed off.
filtered_ecg = signal.filtfilt(b, a, ecg_wave[250:4750])

_, rpeaks = nk.ecg_peaks(filtered_ecg, sampling_rate=500)

# Delineate P/QRS/T waves around the detected R-peaks (dwt method).
_, waves_cwt = nk.ecg_delineate(filtered_ecg,
                                rpeaks,
                                sampling_rate=500,
                                method="dwt",
                                show=False,
                                show_type='peaks')

# Select the ECG segment of interest (the first R-R interval)

interest_ecg = filtered_ecg[rpeaks['ECG_R_Peaks'][0]:rpeaks['ECG_R_Peaks'][1]]

# Set up the template ECG

template_ecg = []
Exemplo n.º 22
0
import numpy as np
import pandas as pd

import neurokit2 as nk

sampling_rate = 1000

for heartrate in [80]:
    # Simulate a noise-free 60 s ECG at the given heart rate
    ecg = nk.ecg_simulate(duration=60,
                          sampling_rate=sampling_rate,
                          heartrate=heartrate,
                          noise=0)

    # Segment: locate R-peak sample indices in the simulated signal
    _, rpeaks = nk.ecg_peaks(ecg, sampling_rate=sampling_rate)
#    _, waves = nk.ecg_delineator(ecg, rpeaks=rpeaks["ECG_R_Peaks"])
Exemplo n.º 23
0
def apply_pan_tompkins(x,
                       n_beats=10,
                       beat_length=512,
                       fs=460,
                       standardize=False):
    """Segment an ECG signal into fixed-length beats anchored at R-peaks.

    :param x: raw ECG signal (1-D array-like)
    :param n_beats: number of beats (centred on the middle beat) to return;
                    if falsy, or larger than the number of detected beats,
                    all beats except the first and last are returned
    :param beat_length: samples per returned beat; shorter beats are
                        zero-padded, longer ones truncated to this length
    :param fs: sampling rate of ``x`` in Hz
    :param standardize: min-max scale ``x`` to [0, 1] before segmenting
    :return: dict mapping R-peak sample index -> beat array of ``beat_length``
    """
    # R-peaks are detected on the raw signal; min-max scaling is monotonic,
    # so peak locations are unaffected by the later standardization.
    _, rpeaks = nk.ecg_peaks(x, sampling_rate=fs)

    # Standardize if necessary
    if standardize:
        scaler = MinMaxScaler()
        x = np.reshape(x, [-1, 1])
        x = scaler.fit_transform(x)
        x = np.reshape(x, [-1])

    # One fixed-length beat per detected R-peak, keyed by peak index.
    cleaned_signal_dict = {}
    peaks = rpeaks['ECG_R_Peaks']

    for i, current_peak in enumerate(peaks):
        # The beat after the last peak extends to the last sample of the signal.
        if i == len(peaks) - 1:
            next_peak = len(x) - 1
        else:
            next_peak = peaks[i + 1]

        if (next_peak - current_peak) < beat_length:
            # Zero-pad short beats up to beat_length samples.
            additional_zeros = beat_length - (next_peak - current_peak)
            total_beat = np.concatenate(
                (x[current_peak:next_peak], np.zeros(additional_zeros)))
        else:
            # Truncate long beats to beat_length samples.
            total_beat = x[current_peak:current_peak + beat_length]
        cleaned_signal_dict[current_peak] = total_beat

    # If n_beats is defined, return exactly n_beats beats centred on the
    # middle beat.
    if n_beats and n_beats <= len(cleaned_signal_dict):
        mid = int(len(cleaned_signal_dict) / 2)
        half_n_beats = int(n_beats / 2)
        start = mid - half_n_beats
        # BUG FIX: the original slice [mid-half : mid+half] returned only
        # n_beats - 1 beats when n_beats is odd; this slice always returns
        # the requested count (identical for even n_beats).
        valid_keys = list(cleaned_signal_dict.keys())[start:start + n_beats]
        return {key: cleaned_signal_dict[key] for key in valid_keys}

    # Otherwise return all beats except the (possibly partial) first and last.
    valid_keys = list(cleaned_signal_dict.keys())[1:-1]
    return {key: cleaned_signal_dict[key] for key in valid_keys}
    def extract_features(
        self,
        clean_method: str = "neurokit",
        r_method: str = "neurokit",
        wave_method: str = "dwt",
        min_peaks: int = 200,
        size: int = 200000,
    ):
        """
        Function to extract the ecg features using the neurokit2 package. That
        is the P, Q, R, S and T peaks and the P, QRS and T waves onsets and
        offsets. The result is saved internally (self.r_peaks and
        self.waves_peaks).

        :param clean_method: <str> The processing pipeline to apply. Can be one of
                             ‘neurokit’ (default), ‘biosppy’, ‘pantompkins1985’,
                             ‘hamilton2002’, ‘elgendi2010’, ‘engzeemod2012’.
        :param r_method: <str> The algorithm to be used for R-peak detection. Can be one
                         of ‘neurokit’ (default), ‘pantompkins1985’, ‘hamilton2002’,
                         ‘christov2004’, ‘gamboa2008’, ‘elgendi2010’, ‘engzeemod2012’
                         or ‘kalidas2017’.
        :param wave_method: <str> Can be one of ‘dwt’ (default) for discrete
                            wavelet transform or ‘cwt’ for continuous wavelet transform.
        :param min_peaks: <int> Minimum R peaks to be detected to proceed with
                          further calculations.
        :param size: <int> ECG sample size to analyze per loop.
        """
        # Nothing to extract when no ECG lead is configured.
        if not self.lead:
            return

        # self.sampling_rate appears to be a list of
        # (sampling_rate, start_index) pairs, one per recording segment
        # — TODO confirm against the class definition.
        for i, _ in enumerate(self.sampling_rate):
            sampling_rate = self.sampling_rate[i][0]
            init = self.sampling_rate[i][1]
            # Segment size: to the end of the recording for the last segment,
            # otherwise up to the next segment's start index.
            if i == len(self.sampling_rate) - 1:
                ecg_signal_size = (
                    ECG_TMAPS[f"{self.lead}_value"].tensor_from_file(
                        ECG_TMAPS[f"{self.lead}_value"],
                        self,
                        visit=self.visit,
                    )[0][init:].shape[0])
            else:
                ecg_signal_size = self.sampling_rate[i + 1][1] - init
            # Process the segment in windows of at most `size` samples.
            if size < ecg_signal_size:
                end = init + size
            else:
                end = init + ecg_signal_size
            while init < ecg_signal_size + self.sampling_rate[i][1]:
                ecg_signal = ECG_TMAPS[f"{self.lead}_value"].tensor_from_file(
                    ECG_TMAPS[f"{self.lead}_value"],
                    self,
                    visit=self.visit,
                )[0][init:end]
                ecg_signal = nk.ecg_clean(ecg_signal, sampling_rate,
                                          clean_method)
                try:
                    _, r_peaks = nk.ecg_peaks(ecg_signal, sampling_rate,
                                              r_method)
                except IndexError:
                    # Peak detection failed on this window: skip it and
                    # advance the window, clamped to the segment end.
                    init = end
                    end = init + size
                    if end > ecg_signal_size + self.sampling_rate[i][1]:
                        end = ecg_signal_size + self.sampling_rate[i][1]
                    continue

                # Too few R-peaks in this window: skip it as unreliable.
                if len(r_peaks["ECG_R_Peaks"]) < min_peaks:
                    init = end
                    end = init + size
                    if end > ecg_signal_size + self.sampling_rate[i][1]:
                        end = ecg_signal_size + self.sampling_rate[i][1]
                    continue
                # Delineate twice (default method, then `wave_method`) and
                # merge the resulting peak dictionaries.
                _, waves_peaks = nk.ecg_delineate(ecg_signal, r_peaks,
                                                  sampling_rate)
                _, waves_peaks_2 = nk.ecg_delineate(
                    ecg_signal,
                    r_peaks,
                    sampling_rate,
                    wave_method,
                )
                waves_peaks.update(waves_peaks_2)
                # Accumulate per-window results into the instance state.
                for peak_type in r_peaks:
                    if peak_type not in self.r_peaks:
                        self.r_peaks[peak_type] = r_peaks[peak_type]
                    else:
                        self.r_peaks[peak_type] = np.append(
                            self.r_peaks[peak_type],
                            r_peaks[peak_type],
                        )
                for peak_type in waves_peaks:
                    if peak_type not in self.waves_peaks:
                        self.waves_peaks[peak_type] = waves_peaks[peak_type]
                    else:
                        self.waves_peaks[peak_type] = np.append(
                            self.waves_peaks[peak_type],
                            waves_peaks[peak_type],
                        )
                # Advance the window, clamped to the segment end.
                init = end
                end = init + size
                if end > ecg_signal_size + self.sampling_rate[i][1]:
                    end = ecg_signal_size + self.sampling_rate[i][1]

        # Normalize accumulated arrays to plain lists.
        for peak_type in self.r_peaks:
            self.r_peaks[peak_type] = list(self.r_peaks[peak_type])
        for peak_type in self.waves_peaks:
            self.waves_peaks[peak_type] = list(self.waves_peaks[peak_type])
Exemplo n.º 25
0
def rodrigues2020(ecg, sampling_rate):
    """Detect R-peaks with the 'rodrigues2020' method and return their sample indices."""
    _, peak_info = nk.ecg_peaks(
        ecg, sampling_rate=sampling_rate, method="rodrigues2020")
    return peak_info["ECG_R_Peaks"]
    def extract_features_tmaps(
        self,
        signal_tm: TensorMap,
        clean_method: str = "neurokit",
        r_method: str = "neurokit",
        wave_method: str = "dwt",
        min_peaks: int = 200,
    ):
        """
        Function to extract the ecg features using the neurokit2 package. That
        is the P, Q, R, S and T peaks and the P, QRS and T waves onsets and
        offsets. The result is saved internally (self.r_peaks and
        self.waves_peaks).

        :param signal_tm: <TensorMap> tensor map providing the raw ECG signal
        :param clean_method: <str> The processing pipeline to apply. Can be one of
                             ‘neurokit’ (default), ‘biosppy’, ‘pantompkins1985’,
                             ‘hamilton2002’, ‘elgendi2010’, ‘engzeemod2012’.
        :param r_method: <str> The algorithm to be used for R-peak detection. Can be one
                         of ‘neurokit’ (default), ‘pantompkins1985’, ‘hamilton2002’,
                         ‘christov2004’, ‘gamboa2008’, ‘elgendi2010’, ‘engzeemod2012’
                         or ‘kalidas2017’.
        :param wave_method: <str> Can be one of ‘dwt’ (default) for discrete
                            wavelet transform or ‘cwt’ for continuous wavelet transform.
        :param min_peaks: <int> Minimum R peaks to be detected to proceed with
                          further calculations.
        """
        # self.sampling_rate appears to be a list of
        # (sampling_rate, start_index) pairs, one per recording segment
        # — TODO confirm against the class definition.
        for i, _ in enumerate(self.sampling_rate):
            sampling_rate = self.sampling_rate[i][0]
            init = self.sampling_rate[i][1]
            # Last segment runs to the end of the signal (-1 slice end).
            if i == len(self.sampling_rate) - 1:
                end = -1
            else:
                end = self.sampling_rate[i + 1][1]
            ecg_signal = signal_tm.tensor_from_file(signal_tm,
                                                    self)[0][init:end]
            ecg_signal = nk.ecg_clean(ecg_signal, sampling_rate, clean_method)

            try:
                _, r_peaks = nk.ecg_peaks(ecg_signal, sampling_rate, r_method)
            except IndexError:
                # Peak detection failed on this segment: skip it.
                continue
            # Too few R-peaks: segment considered unreliable, skip it.
            if len(r_peaks["ECG_R_Peaks"]) < min_peaks:
                continue
            # Delineate twice (default method, then `wave_method`) and merge
            # the resulting peak dictionaries.
            _, waves_peaks = nk.ecg_delineate(ecg_signal, r_peaks,
                                              sampling_rate)
            _, waves_peaks_2 = nk.ecg_delineate(
                ecg_signal,
                r_peaks,
                sampling_rate,
                wave_method,
            )
            waves_peaks.update(waves_peaks_2)
            # Accumulate per-segment results into the instance state.
            for peak_type in r_peaks:
                if peak_type not in self.r_peaks:
                    self.r_peaks[peak_type] = r_peaks[peak_type]
                else:
                    self.r_peaks[peak_type] = np.append(
                        self.r_peaks[peak_type],
                        r_peaks[peak_type],
                    )
            for peak_type in waves_peaks:
                if peak_type not in self.waves_peaks:
                    self.waves_peaks[peak_type] = waves_peaks[peak_type]
                else:
                    self.waves_peaks[peak_type] = np.append(
                        self.waves_peaks[peak_type],
                        waves_peaks[peak_type],
                    )

        # Normalize accumulated arrays to plain lists.
        for peak_type in self.r_peaks:
            self.r_peaks[peak_type] = list(self.r_peaks[peak_type])
        for peak_type in self.waves_peaks:
            self.waves_peaks[peak_type] = list(self.waves_peaks[peak_type])
Exemplo n.º 27
0
def _stat5(values):
    """[mean, median, 5th percentile, 95th percentile, sd] of *values*."""
    return [np.mean(values),
            np.median(values),
            np.percentile(values, q=5),
            np.percentile(values, q=95),
            np.std(values)]


def _nans(n, zero_count=False):
    """List of *n* NaNs; with zero_count=True the last entry (a peak count) is 0."""
    out = np.full(n, np.nan)
    if zero_count:
        out[-1] = 0
    return out.tolist()


def _amplitude_feats(ecg_signal, peak_indices):
    """Amplitude stats plus peak count for the peaks at *peak_indices* (NaNs if none)."""
    if len(peak_indices) > 0:
        amplitudes = ecg_signal[peak_indices]
        return _stat5(amplitudes) + [len(amplitudes)]
    return _nans(6, zero_count=True)


def _interval_feats(peak_indices):
    """Stats of successive peak-to-peak intervals in seconds (NaNs if < 2 peaks)."""
    if len(peak_indices) > 1:
        diffs = [(peak_indices[i + 1] - peak_indices[i]) / SAMPLING_RATE
                 for i in range(len(peak_indices) - 1)]
        return _stat5(diffs)
    return _nans(5)


def create_df(dataframe: pd.DataFrame) -> pd.DataFrame:
    """Extract a fixed-width ECG feature matrix from a dataframe of
    NaN-padded raw ECG signals (one signal per row).

    :param dataframe: raw signals, one per row, padded with trailing NaNs
    :return: DataFrame with one row of features per input signal

    Fixes relative to the original implementation:
    - RT_diff_median was computed from PR_peaks_diff (copy-paste bug).
    - On IndexError in the frequency-domain block, [None, None, None] was
      appended, which cannot be stored in the float feature matrix; NaN is
      used instead.
    - A few calls hard-coded sampling_rate=300 while the rest used
      SAMPLING_RATE; SAMPLING_RATE is now used consistently (no behaviour
      change when SAMPLING_RATE == 300).
    - np.NaN / np.infty (removed in NumPy 2.0) replaced by np.nan / np.inf.
    """
    # get lengths of signals for each sample (trailing NaN padding is skipped)
    lengths = []
    width = dataframe.shape[1]

    for row in dataframe.index.tolist():
        temp_width = width
        for item in dataframe.loc[row][::-1]:
            if not pd.isna(item) and isinstance(item, float):
                temp_width -= 1
                break

            temp_width -= 1

        lengths.append(temp_width)

    """
    README
    
    For the following features we measured: [mean, median, 5 % percentile, 95 % percentile, standard deviation]
    R-peak location were retrieved by nk.ecg_peaks
    Q-peak and S-location were retrieved by nk.ecg_delineate
    
    ?_ampl_*        ?-Peak amplitude
    ?_nr_peaks      number of ?-Peaks
    ?_diff_*        Interval between ?-Peaks
    QRS_diff_*      QRS duration
    len_*           length of signal
    Qual_*          quality of signal measured with nk.ecg_quality
    sign_*          signal
    
    Also the output from nk.hrv_time which contains different measurements for the heart rate variation (HRV*) was added
    
    Additionally one 'typical' heartbeat was created (all length 180):
    
    MN_*            mean signal
    MD_*            median signal
    P5_*            5 % percentile signal
    P95_*           95 % percentile signal
    SD_*            standard deviation of signal
    """

    names = ['R_ampl_mean', 'R_ampl_median', 'R_ampl_perc5', 'R_ampl_perc95', 'R_ampl_sd', 'R_nr_peaks',
             'len_mean', 'len_median', 'len_perc5', 'len_perc95', 'len_sd',
             'sign_mean', 'sign_median', 'sign_perc5', 'sign_perc95', 'sign_sd',
             'Qual_mean', 'Qual_median', 'Qual_perc5', 'Qual_perc95', 'Qual_sd',
             'Q_ampl_mean', 'Q_ampl_median', 'Q_ampl_perc5', 'Q_ampl_perc95', 'Q_ampl_sd', 'Q_nr_peaks',
             'Q_diff_mean', 'Q_diff_median', 'Q_diff_perc5', 'Q_diff_perc95', 'Q_diff_sd',
             'S_ampl_mean', 'S_ampl_median', 'S_ampl_perc5', 'S_ampl_perc95', 'S_ampl_sd', 'S_nr_peaks',
             'S_diff_mean', 'S_diff_median', 'S_diff_perc5', 'S_diff_perc95', 'S_diff_sd',
             'P_ampl_mean', 'P_ampl_median', 'P_ampl_perc5', 'P_ampl_perc95', 'P_ampl_sd', 'P_nr_peaks',
             'T_ampl_mean', 'T_ampl_median', 'T_ampl_perc5', 'T_ampl_perc95', 'T_ampl_sd', 'T_nr_peaks',
             'QRS_diff_mean', 'QRS_diff_median', 'QRS_diff_perc5', 'QRS_diff_perc95', 'QRS_diff_sd',
             'PR_diff_mean', 'PR_diff_median', 'PR_diff_perc5', 'PR_diff_perc95', 'PR_diff_sd',
             'RT_diff_mean', 'RT_diff_median', 'RT_diff_perc5', 'RT_diff_perc95', 'RT_diff_sd',
             'HRV_RMSSD', 'HRV_MeanNN', 'HRV_SDNN', 'HRV_SDSD', 'HRV_CVNN', 'HRV_CVSD', 'HRV_MedianNN',
             'HRV_MadNN', 'HRV_MCVNN', 'HRV_IQRNN', 'HRV_pNN50', 'HRV_pNN20', 'HRV_TINN', 'HRV_HTI',
             'HRV_ULF','HRV_VLF','HRV_LF','HRV_HF','HRV_VHF','HRV_LFHF','HRV_LFn','HRV_HFn', 	'HRV_LnHF',
             'HRV_SD1','HRV_SD2', 'HRV_SD1SD2','HRV_S','HRV_CSI','HRV_CVI','HRV_CSI_Modified', 'HRV_PIP',
             'HRV_IALS','HRV_PSS','HRV_PAS','HRV_GI','HRV_SI','HRV_AI','HRV_PI','HRV_C1d','HRV_C1a','HRV_SD1d',
             'HRV_SD1a','HRV_C2d','HRV_C2a','HRV_SD2d','HRV_SD2a','HRV_Cd','HRV_Ca','HRV_SDNNd','HRV_SDNNa','HRV_ApEn',
             'HRV_SampEn','J_LF','J_HF','J_L/H']

    template_len = 180

    mean_names = ['MN_' + str(index) for index in range(template_len)]
    median_names = ['MD_' + str(index) for index in range(template_len)]
    perc5_names = ['P5_' + str(index) for index in range(template_len)]
    perc95_names = ['P95_' + str(index) for index in range(template_len)]
    sd_names = ['SD_' + str(index) for index in range(template_len)]

    wavelet = 'db3'

    # Length of one DWT coefficient vector for a template of template_len samples.
    wl_len = int(np.floor((template_len + pywt.Wavelet(wavelet).dec_len - 1) / 2))

    wl_mean_names = ['WLMN_' + str(index) for index in range(2*wl_len)]
    wl_median_names = ['WLMD_' + str(index) for index in range(2*wl_len)]
    wl_perc5_names = ['WLP5_' + str(index) for index in range(2*wl_len)]
    wl_perc95_names = ['WLP95_' + str(index) for index in range(2*wl_len)]
    wl_sd_names = ['WLSD_' + str(index) for index in range(2*wl_len)]

    typical_signal_names = mean_names + median_names + perc5_names + perc95_names + sd_names + wl_mean_names + \
                           wl_median_names + wl_perc5_names + wl_perc95_names + wl_sd_names

    names += typical_signal_names

    data = np.empty([dataframe.shape[0], len(names)])

    iteration = 0
    for row_index, row in dataframe.iterrows():
        print(row_index)

        # Retrieve and clean the (unpadded part of the) ECG signal
        ecg_signal = row[:lengths[iteration] + 1]
        ecg_signal = nk.ecg_clean(ecg_signal, sampling_rate=SAMPLING_RATE)

        # Find R-peaks
        peaks, info = nk.ecg_peaks(ecg_signal, sampling_rate=SAMPLING_RATE)

        # R amplitude
        R_amplitudes = ecg_signal[info['ECG_R_Peaks']]

        # Check if the signal is flipped (inverted lead); needs enough peaks
        if len(R_amplitudes) > 4:

            _, waves_peak = nk.ecg_delineate(ecg_signal, info,
                                             sampling_rate=SAMPLING_RATE, show=False)

            # Q amplitude; missing (NaN) Q-peaks count as -inf so they never
            # out-vote the corresponding R-peak below
            Q_amplitudes = [ecg_signal[peak_index] if str(peak_index) != 'nan' else -np.inf
                            for peak_index in waves_peak['ECG_Q_Peaks']]

            # Majority vote: if |Q| exceeds |R| for most beats, the lead is
            # inverted -> flip the signal and redo peak detection
            if np.sum([1 if np.abs(rpeak) > np.abs(Q_amplitudes[index]) else -1
                       for index, rpeak in enumerate(R_amplitudes)]) < 0:
                print("flip", row_index)

                ecg_signal = -ecg_signal

                peaks, info = nk.ecg_peaks(ecg_signal, sampling_rate=SAMPLING_RATE)

                # R amplitude
                R_amplitudes = ecg_signal[info['ECG_R_Peaks']]

                if len(R_amplitudes) > 4:
                    _, waves_peak = nk.ecg_delineate(ecg_signal, info,
                                                     sampling_rate=SAMPLING_RATE, show=False)

        # R amplitude stats + count (all-NaN when no R-peaks were found)
        if len(R_amplitudes) > 0:
            data_temp = _stat5(R_amplitudes) + [len(R_amplitudes)]
        else:
            data_temp = _nans(6)

        # length of signal in seconds (scalar: percentiles are the value, sd is 0)
        data_temp += _stat5(lengths[iteration] / SAMPLING_RATE)

        # raw signal stats
        data_temp += _stat5(ecg_signal)

        # Check if we have enough peaks to retrieve more information
        if len(R_amplitudes) > 4:

            # signal quality as measured by nk.ecg_quality
            quality = nk.ecg_quality(ecg_signal, sampling_rate=SAMPLING_RATE)
            data_temp += _stat5(quality)

            # Q-peaks: amplitude stats + count and Q-Q interval stats
            Q_peaks = [peak for peak in waves_peak['ECG_Q_Peaks'] if str(peak) != 'nan']
            data_temp += _amplitude_feats(ecg_signal, Q_peaks)
            data_temp += _interval_feats(Q_peaks)

            # S-peaks: amplitude stats + count and S-S interval stats
            S_peaks = [peak for peak in waves_peak['ECG_S_Peaks'] if str(peak) != 'nan']
            data_temp += _amplitude_feats(ecg_signal, S_peaks)
            data_temp += _interval_feats(S_peaks)

            # P- and T-peaks: amplitude stats + count only
            P_peaks = [peak for peak in waves_peak['ECG_P_Peaks'] if str(peak) != 'nan']
            data_temp += _amplitude_feats(ecg_signal, P_peaks)

            T_peaks = [peak for peak in waves_peak['ECG_T_Peaks'] if str(peak) != 'nan']
            data_temp += _amplitude_feats(ecg_signal, T_peaks)

            # QRS duration: S minus Q, per beat where both peaks are present
            QRS_peaks_diff = []
            for index in range(len(waves_peak['ECG_Q_Peaks'])):
                q_peak = waves_peak['ECG_Q_Peaks'][index]
                s_peak = waves_peak['ECG_S_Peaks'][index]
                if not (np.isnan(q_peak) or np.isnan(s_peak)):
                    QRS_peaks_diff.append((s_peak - q_peak) / SAMPLING_RATE)
            data_temp += _stat5(QRS_peaks_diff) if QRS_peaks_diff else _nans(5)

            # PR interval: R minus P, per beat with a P-peak.
            # NOTE(review): assumes the delineated P/T lists align 1:1 with
            # info['ECG_R_Peaks'] — confirm with nk.ecg_delineate's contract.
            PR_peaks_diff = []
            for index in range(len(waves_peak['ECG_P_Peaks'])):
                p_peak = waves_peak['ECG_P_Peaks'][index]
                if not np.isnan(p_peak):
                    PR_peaks_diff.append(
                        (info['ECG_R_Peaks'][index] - p_peak) / SAMPLING_RATE)
            data_temp += _stat5(PR_peaks_diff) if PR_peaks_diff else _nans(5)

            # RT interval: T minus R, per beat with a T-peak.
            # BUG FIX: the original reported np.median(PR_peaks_diff) as the
            # RT median.
            RT_peaks_diff = []
            for index in range(len(waves_peak['ECG_T_Peaks'])):
                t_peak = waves_peak['ECG_T_Peaks'][index]
                if not np.isnan(t_peak):
                    RT_peaks_diff.append(
                        (t_peak - info['ECG_R_Peaks'][index]) / SAMPLING_RATE)
            data_temp += _stat5(RT_peaks_diff) if RT_peaks_diff else _nans(5)

            # Heart-rate-variability indices
            # https://neurokit2.readthedocs.io/en/latest/functions.html?highlight=hrv%20time#neurokit2.hrv.hrv_time
            hrv_time = nk.hrv(peaks, sampling_rate=SAMPLING_RATE, show=False)
            data_temp += hrv_time.values.tolist()[0]

            # Frequency-domain features from the cubic-spline-interpolated RR series
            # http://www.paulvangent.com/2016/03/21/analyzing-a-discrete-heart-rate-signal-using-python-part-2/
            rpeaks = info['ECG_R_Peaks']
            r_interval = [rpeaks[index + 1] - rpeaks[index] for index in range(len(rpeaks) - 1)]
            RR_x_new = np.linspace(rpeaks[0], rpeaks[-2], rpeaks[-2])
            f = interp1d(rpeaks[:-1], r_interval, kind='cubic')

            n = lengths[iteration] + 1  # Length of the signal
            frq = np.fft.fftfreq(n, d=(1 / SAMPLING_RATE))  # frequency bins
            frq = frq[range(int(n / 2))]  # single-sided spectrum

            Y = np.fft.fft(f(RR_x_new)) / n  # Calculate FFT

            try:
                Y = Y[range(int(n / 2))]
                lf = np.trapz(abs(Y[(frq >= 0.04) & (frq <= 0.15)]))

                hf = np.trapz(abs(Y[(frq >= 0.16) & (frq <= 0.5)]))  # 0.16-0.5 Hz (HF)

                data_temp += [lf, hf, lf / hf]
            except IndexError as err:
                print(err)
                # BUG FIX: was [None, None, None], which cannot be stored in
                # the float feature matrix
                data_temp += [np.nan, np.nan, np.nan]

        # if we don't have enough R peaks return vector of nan's
        else:
            data_temp += _nans(len(names) - 16 - len(typical_signal_names))

        # Create a 'typical' heartbeat from biosppy's per-beat templates
        out = ecg.ecg(signal=ecg_signal, sampling_rate=SAMPLING_RATE, show=False)

        templates = out['templates'].astype(np.float64)
        mean = np.mean(templates, axis=0)
        median = np.median(templates, axis=0)
        perc5 = np.percentile(templates, axis=0, q=5)
        perc95 = np.percentile(templates, axis=0, q=95)
        std = np.std(templates, axis=0)

        data_temp += np.concatenate((mean, median, perc5, perc95, std)).tolist()

        # db3 wavelet decomposition (cA, cD) of each aggregate template,
        # concatenated in the same order as the aggregates above
        coeff_pairs = [pywt.dwt(aggregate, wavelet, 'periodic')
                       for aggregate in (mean, median, perc5, perc95, std)]
        data_temp += np.concatenate(
            [coeff for pair in coeff_pairs for coeff in pair]).tolist()

        data[iteration] = data_temp

        iteration += 1

    features = pd.DataFrame(data, columns=names)

    return features
Exemplo n.º 28
0
# Plot the original, distorted and cleaned signals together
plot = nk.signal_plot([original, distorted, cleaned])

# Save plot
fig = plt.gcf()
fig.set_size_inches(10, 6)
fig.savefig("README_signalprocessing.png", dpi=300, h_pad=3)

# =============================================================================
# Heart Rate Variability
# =============================================================================

# Download data
data = nk.data("bio_resting_8min_100hz")

# Find peaks
peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)

# Compute HRV indices (show=True also renders the summary plot)
hrv = nk.hrv(peaks, sampling_rate=100, show=True)
hrv

# Save plot
fig = plt.gcf()
fig.set_size_inches(10 * 1.5, 6 * 1.5, forward=True)
fig.savefig("README_hrv.png", dpi=300, h_pad=3)

# =============================================================================
# Complexity
# =============================================================================

# Generate signal
Exemplo n.º 29
0
def compute_features(data, condition, sampling_rate=700, window_size=60, window_shift=0.25):
    """Extract windowed chest-sensor features (ACC / ECG / EDA / EMG / Resp / Temp).

    Each signal is cleaned once over the whole ``condition`` segment, then a
    sliding window of ``window_size`` seconds (shifted by ``window_shift``
    seconds) is run over the samples and one feature row is produced per
    window.

    :param data: mapping of modality name ('ECG', 'EDA', 'EMG', 'Resp', 'ACC',
        'Temp') to a sample array; each array is indexed by ``condition``
    :param condition: boolean mask / index array selecting one condition's
        samples (must support ``.shape[0]`` — presumably a numpy array)
    :param sampling_rate: sampling frequency of the signals in Hz
    :param window_size: sliding-window length in seconds
    :param window_shift: window shift (hop) in seconds
    :return: tuple ``(chest_df, chest_df_5)``; ``chest_df`` holds one feature
        row per window, ``chest_df_5`` is an (currently empty) placeholder for
        the 5-second-window features
    """
    init = time.time()

    # ------------------------------------------------------------------
    # Signal cleaning — done once for the full segment, windowed below.
    # ------------------------------------------------------------------
    ## ECG
    ecg_cleaned = nk.ecg_clean(data["ECG"][condition].flatten(), sampling_rate=sampling_rate)
    ## EDA: 5 Hz lowpass filter, then z-standardize
    eda_highcut = 5
    eda_filtered = nk.signal_filter(data['EDA'][condition].flatten(), sampling_rate=sampling_rate, highcut=eda_highcut)
    eda_cleaned = nk.standardize(eda_filtered)
    # TODO: not sure about the approach. cvxeda takes longer periods
    # phasic_tonic = nk.eda_phasic(cleaned, sampling_rate=700, method='cvxeda')
    eda_phasic_tonic = nk.eda_phasic(eda_cleaned, sampling_rate=sampling_rate)
    # Time axis in seconds, used for the SCL-vs-time correlation feature.
    eda_phasic_tonic['t'] = [(1 / sampling_rate) * i for i in range(eda_phasic_tonic.shape[0])]
    eda_scr_peaks, scr_info = nk.eda_peaks(eda_phasic_tonic['EDA_Phasic'], sampling_rate=sampling_rate)
    ## EMG
    ## For the 5 sec window features: remove DC bias with a 50 Hz highpass.
    ## More on DC Bias https://www.c-motion.com/v3dwiki/index.php/EMG:_Removing_DC_Bias
    emg_lowcut = 50
    emg_filtered_dc = nk.signal_filter(data['EMG'][condition].flatten(), sampling_rate=sampling_rate, lowcut=emg_lowcut)
    ## For the 60 sec window features: 50 Hz lowpass filter.
    emg_highcut = 50
    emg_filtered = nk.signal_filter(data['EMG'][condition].flatten(), sampling_rate=sampling_rate, highcut=emg_highcut)
    ## Resp
    ## Method 'biosppy' is important: it applies the 0.1 - 0.35 Hz bandpass filter.
    resp_processed, _ = nk.rsp_process(data['Resp'][condition].flatten(), sampling_rate=sampling_rate, method='biosppy')

    print('Elapsed Preprocess', str(timedelta(seconds=time.time() - init)))
    init = time.time()

    chest_df_5 = pd.DataFrame()  # For 5 sec window size (not populated yet)
    rows = []  # one feature dict per window; turned into a DataFrame at the end

    window = int(sampling_rate * window_size)
    for i in range(0, data['ACC'][condition].shape[0] - window, int(sampling_rate * window_shift)):

        # ACC
        w_acc_data = data['ACC'][condition][i: window + i]
        acc_x_mean, acc_y_mean, acc_z_mean = np.mean(w_acc_data, axis=0)  # Feature
        acc_x_std, acc_y_std, acc_z_std = np.std(w_acc_data, axis=0)  # Feature
        acc_x_peak, acc_y_peak, acc_z_peak = np.amax(w_acc_data, axis=0)  # Feature
        acc_x_absint, acc_y_absint, acc_z_absint = np.abs(np.trapz(w_acc_data, axis=0))  # Feature
        xyz = np.sum(w_acc_data, axis=0)
        xyz_mean = np.mean(xyz)  # Feature
        xyz_std = np.std(xyz)  # Feature
        xyz_absint = np.abs(np.trapz(xyz))  # Feature

        ## ECG: detect R-peaks per window, derive HR/HRV indices with pyhrv.
        w_ecg_cleaned = ecg_cleaned[i: window + i]
        _, ecg_info = nk.ecg_peaks(w_ecg_cleaned, sampling_rate=sampling_rate)
        w_ecg_rpeaks = ecg_info['ECG_R_Peaks']
        ecg_nni = pyhrv.tools.nn_intervals(w_ecg_rpeaks)
        # HR
        rs_hr = pyhrv.time_domain.hr_parameters(ecg_nni)
        hr_mean = rs_hr['hr_mean']  # Feature
        hr_std = rs_hr['hr_std']  # Feature
        # HRV-time
        rs_hrv = pyhrv.time_domain.nni_parameters(ecg_nni)
        hrv_mean = rs_hrv['nni_mean']  # Feature
        hrv_std = pyhrv.time_domain.sdnn(ecg_nni)['sdnn']  # Feature
        rs_nn50 = pyhrv.time_domain.nn50(ecg_nni)
        hrv_NN50 = rs_nn50['nn50']  # Feature
        hrv_pNN50 = rs_nn50['pnn50']  # Feature
        hrv_time = nk.hrv_time(w_ecg_rpeaks, sampling_rate=sampling_rate, show=False)
        hrv_TINN = hrv_time.loc[0, 'HRV_TINN']  # Feature
        hrv_rms = pyhrv.time_domain.rmssd(ecg_nni)['rmssd']  # Feature
        # HRV-freq (Welch PSD over custom bands; mode='dev' returns a tuple
        # whose first element is the results dict)
        hrv_freq = pyhrv.frequency_domain.welch_psd(ecg_nni, fbands={'ulf': (0.01, 0.04), 'vlf': (0.04, 0.15), 'lf': (0.15, 0.4), 'hf': (0.4, 1)}, mode='dev')
        hrv_freq = hrv_freq[0]
        hrv_ULF = hrv_freq['fft_abs'][0]  # Feature
        hrv_LF = hrv_freq['fft_abs'][1]  # Feature
        hrv_HF = hrv_freq['fft_abs'][2]  # Feature
        hrv_VHF = hrv_freq['fft_abs'][3]  # Feature
        hrv_lf_hf_ratio = hrv_freq['fft_ratio']  # Feature
        hrv_f_sum = hrv_freq['fft_total']  # Feature
        hrv_rel_ULF = hrv_freq['fft_rel'][0]  # Feature
        hrv_rel_LF = hrv_freq['fft_rel'][1]  # Feature
        hrv_rel_HF = hrv_freq['fft_rel'][2]  # Feature
        hrv_rel_VHF = hrv_freq['fft_rel'][3]  # Feature
        hrv_LFn = hrv_freq['fft_norm'][0]  # Feature
        hrv_HFn = hrv_freq['fft_norm'][1]  # Feature

        # EDA
        w_eda_data = eda_cleaned[i: window + i]
        w_eda_phasic_tonic = eda_phasic_tonic[i: window + i]

        eda_mean = np.mean(w_eda_data)  # Feature
        eda_std = np.std(w_eda_data)  # Feature
        eda_min = np.amin(w_eda_data)  # Feature
        eda_max = np.amax(w_eda_data)  # Feature
        # dynamic range: https://en.wikipedia.org/wiki/Dynamic_range
        eda_slope = get_slope(w_eda_data)  # Feature
        eda_drange = eda_max / eda_min  # Feature
        eda_scl_mean = np.mean(w_eda_phasic_tonic['EDA_Tonic'])  # Feature
        eda_scl_std = np.std(w_eda_phasic_tonic['EDA_Tonic'])  # Feature
        eda_scr_mean = np.mean(w_eda_phasic_tonic['EDA_Phasic'])  # Feature
        eda_scr_std = np.std(w_eda_phasic_tonic['EDA_Phasic'])  # Feature
        eda_corr_scl_t = nk.cor(w_eda_phasic_tonic['EDA_Tonic'], w_eda_phasic_tonic['t'], show=False)  # Feature

        eda_scr_no = eda_scr_peaks['SCR_Peaks'][i: window + i].sum()  # Feature
        # Sum amplitudes in SCR signal (NaN entries are non-peak samples).
        ampl = scr_info['SCR_Amplitude'][i: window + i]
        eda_ampl_sum = np.sum(ampl[~np.isnan(ampl)])  # Feature
        # TODO:
        # eda_t_sum =

        # Approximate SCR area as sum of triangles (1/2 * height * width).
        scr_peaks, scr_properties = scisig.find_peaks(w_eda_phasic_tonic['EDA_Phasic'], height=0)
        width_scr = scisig.peak_widths(w_eda_phasic_tonic['EDA_Phasic'], scr_peaks, rel_height=0)
        ht_scr = scr_properties['peak_heights']
        eda_scr_area = 0.5 * np.matmul(ht_scr, width_scr[1])  # Feature

        # EMG
        ## 5sec
        w_emg_data = emg_filtered_dc[i: window + i]
        emg_mean = np.mean(w_emg_data)  # Feature
        emg_std = np.std(w_emg_data)  # Feature
        emg_min = np.amin(w_emg_data)
        emg_max = np.amax(w_emg_data)
        emg_drange = emg_max / emg_min  # Feature
        emg_absint = np.abs(np.trapz(w_emg_data))  # Feature
        emg_median = np.median(w_emg_data)  # Feature
        emg_perc_10 = np.percentile(w_emg_data, 10)  # Feature
        emg_perc_90 = np.percentile(w_emg_data, 90)  # Feature
        emg_peak_freq, emg_mean_freq, emg_median_freq = get_freq_features(w_emg_data)  # Features
        # TODO: PSD -> energy in seven bands
        # emg_psd =

        ## 60 sec
        peaks, properties = scisig.find_peaks(emg_filtered[i: window + i], height=0)
        emg_peak_no = peaks.shape[0]
        emg_peak_amp_mean = np.mean(properties['peak_heights'])  # Feature
        emg_peak_amp_std = np.std(properties['peak_heights'])  # Feature
        emg_peak_amp_sum = np.sum(properties['peak_heights'])  # Feature
        emg_peak_amp_max = np.abs(np.amax(properties['peak_heights']))
        # https://www.researchgate.net/post/How_Period_Normalization_and_Amplitude_normalization_are_performed_in_ECG_Signal
        emg_peak_amp_norm_sum = np.sum(properties['peak_heights'] / emg_peak_amp_max)  # Feature

        # Resp
        w_resp_data = resp_processed[i: window + i]
        ## Inhalation / Exhalation duration analysis: walk the RSP_Phase
        ## column (1 = inhalation, 0 = exhalation) and collect run lengths
        ## per phase; the first (possibly truncated) run is discarded.
        idx = np.nan
        count = 0
        duration = dict()
        first = True
        for j in w_resp_data[~w_resp_data['RSP_Phase'].isnull()]['RSP_Phase'].to_numpy():
            if j != idx:
                if first:
                    idx = int(j)
                    duration[1] = []
                    duration[0] = []
                    first = False
                    continue
                duration[idx].append(count)
                idx = int(j)
                count = 0
            count += 1
        resp_inhal_mean = np.mean(duration[1])  # Feature
        resp_inhal_std = np.std(duration[1])  # Feature
        resp_exhal_mean = np.mean(duration[0])  # Feature
        resp_exhal_std = np.std(duration[0])  # Feature
        resp_inhal_duration = w_resp_data['RSP_Phase'][w_resp_data['RSP_Phase'] == 1].count()
        resp_exhal_duration = w_resp_data['RSP_Phase'][w_resp_data['RSP_Phase'] == 0].count()
        resp_ie_ratio = resp_inhal_duration / resp_exhal_duration  # Feature
        resp_duration = resp_inhal_duration + resp_exhal_duration  # Feature
        resp_stretch = w_resp_data['RSP_Amplitude'].max() - w_resp_data['RSP_Amplitude'].min()  # Feature
        resp_breath_rate = len(duration[1])  # Feature
        ## Volume: area under the curve of the inspiration phase on a
        ## respiratory cycle, approximated as sum of triangles.
        resp_peaks, resp_properties = scisig.find_peaks(w_resp_data['RSP_Clean'], height=0)
        resp_width = scisig.peak_widths(w_resp_data['RSP_Clean'], resp_peaks, rel_height=0)
        resp_ht = resp_properties['peak_heights']
        resp_volume = 0.5 * np.matmul(resp_ht, resp_width[1])  # Feature

        # Temp
        w_temp_data = data['Temp'][condition][i: window + i].flatten()
        temp_mean = np.mean(w_temp_data)  # Feature
        temp_std = np.std(w_temp_data)  # Feature
        temp_min = np.amin(w_temp_data)  # Feature
        temp_max = np.amax(w_temp_data)  # Feature
        temp_drange = temp_max / temp_min  # Feature
        temp_slope = get_slope(w_temp_data.ravel())  # Feature

        # BUGFIX: the duplicate 'EDA_mean' key was removed and 'EDA_SCL_std'
        # now stores eda_scl_std (it previously repeated eda_scl_mean).
        rows.append({
            'ACC_x_mean': acc_x_mean, 'ACC_y_mean': acc_y_mean, 'ACC_z_mean': acc_z_mean, 'ACC_xzy_mean': xyz_mean,
            'ACC_x_std': acc_x_std, 'ACC_y_std': acc_y_std, 'ACC_z_std': acc_z_std, 'ACC_xyz_std': xyz_std,
            'ACC_x_absint': acc_x_absint, 'ACC_y_absint': acc_y_absint, 'ACC_z_absint': acc_z_absint, 'ACC_xyz_absint': xyz_absint,
            'ACC_x_peak': acc_x_peak, 'ACC_y_peak': acc_y_peak, 'ACC_z_peak': acc_z_peak,
            'ECG_hr_mean': hr_mean, 'ECG_hr_std': hr_std, 'ECG_hrv_NN50': hrv_NN50, 'ECG_hrv_pNN50': hrv_pNN50, 'ECG_hrv_TINN': hrv_TINN, 'ECG_hrv_RMS': hrv_rms,
            'ECG_hrv_ULF': hrv_ULF, 'ECG_hrv_LF': hrv_LF, 'ECG_hrv_HF': hrv_HF, 'ECG_hrv_VHF': hrv_VHF, 'ECG_hrv_LFHF_ratio': hrv_lf_hf_ratio, 'ECG_hrv_f_sum': hrv_f_sum,
            'ECG_hrv_rel_ULF': hrv_rel_ULF, 'ECG_hrv_rel_LF': hrv_rel_LF, 'ECG_hrv_rel_HF': hrv_rel_HF, 'ECG_hrv_rel_VHF': hrv_rel_VHF, 'ECG_hrv_LFn': hrv_LFn, 'ECG_hrv_HFn': hrv_HFn,
            'EDA_mean': eda_mean, 'EDA_std': eda_std, 'EDA_min': eda_min, 'EDA_max': eda_max, 'EDA_slope': eda_slope,
            'EDA_drange': eda_drange, 'EDA_SCL_mean': eda_scl_mean, 'EDA_SCL_std': eda_scl_std, 'EDA_SCR_mean': eda_scr_mean, 'EDA_SCR_std': eda_scr_std,
            'EDA_corr_SCL_t': eda_corr_scl_t, 'EDA_SCR_no': eda_scr_no, 'EDA_ampl_sum': eda_ampl_sum, 'EDA_scr_area': eda_scr_area,
            'EMG_mean': emg_mean, 'EMG_std': emg_std, 'EMG_drange': emg_drange, 'EMG_absint': emg_absint, 'EMG_median': emg_median, 'EMG_perc_10': emg_perc_10,
            'EMG_perc_90': emg_perc_90, 'EMG_peak_freq': emg_peak_freq, 'EMG_mean_freq': emg_mean_freq, 'EMG_median_freq': emg_median_freq,
            'EMG_peak_no': emg_peak_no, 'EMG_peak_amp_mean': emg_peak_amp_mean, 'EMG_peak_amp_std': emg_peak_amp_std, 'EMG_peak_amp_sum': emg_peak_amp_sum,
            'EMG_peak_amp_norm_sum': emg_peak_amp_norm_sum,
            'RESP_inhal_mean': resp_inhal_mean, 'RESP_inhal_std': resp_inhal_std, 'RESP_exhal_mean': resp_exhal_mean, 'RESP_exhal_std': resp_exhal_std,
            'RESP_ie_ratio': resp_ie_ratio, 'RESP_duration': resp_duration, 'RESP_stretch': resp_stretch, 'RESP_breath_rate': resp_breath_rate, 'RESP_volume': resp_volume,
            'TEMP_mean': temp_mean, 'TEMP_std': temp_std, 'TEMP_min': temp_min, 'TEMP_max': temp_max, 'TEMP_drange': temp_drange, 'TEMP_slope': temp_slope
        })

    # Build the frame once: per-row DataFrame.append was quadratic and was
    # removed in pandas 2.0.
    chest_df = pd.DataFrame(rows)

    print('Elapsed Process', condition.shape[0], str(timedelta(seconds=time.time() - init)))
    return chest_df, chest_df_5
Exemplo n.º 30
0
def elgendi2010(ecg, sampling_rate):
    """Return R-peak sample indices detected with the Elgendi (2010) method."""
    _, info = nk.ecg_peaks(ecg, sampling_rate=sampling_rate, method="elgendi2010")
    return info["ECG_R_Peaks"]