def extractTimeDomain(self, x):
    """Compute a time-domain HRV feature vector for the signal ``x``.

    NN intervals are derived via ``self.extractRR``; features come from
    pyhrv's time-domain module.

    Parameters
    ----------
    x : array-like
        Raw signal passed through to ``self.extractRR``.

    Returns
    -------
    np.ndarray
        8 features: [nni_mean, nni_counter, sdnn, nni_diff_mean, rmssd,
        sdsd, hr_mean, hr_std]; an empty array when extraction fails.
    """
    try:
        nni = self.extractRR(x)
        nni_params = td.nni_parameters(nni=nni)
        nni_sd = td.sdnn(nni=nni)
        nni_diff = td.nni_differences_parameters(nni=nni)
        nni_rmssd = td.rmssd(nni=nni)
        nni_sdsd = td.sdsd(nni=nni)
        hr_params = td.hr_parameters(nni=nni)
        return np.array([
            nni_params["nni_mean"],
            nni_params["nni_counter"],
            nni_sd["sdnn"],
            nni_diff["nni_diff_mean"],
            nni_rmssd["rmssd"],
            nni_sdsd["sdsd"],
            hr_params["hr_mean"],
            hr_params["hr_std"],
        ])
    except Exception:
        # Best-effort: any extraction failure yields an empty feature
        # vector.  Narrowed from a bare `except:` which also swallowed
        # SystemExit / KeyboardInterrupt.
        return np.array([])
def compute_features(nni):
    """Compute time-, frequency- and nonlinear-domain HRV features.

    Parameters
    ----------
    nni : array-like
        NN-interval series.

    Returns
    -------
    dict
        Mapping of feature name to scalar value.
    """
    # Hoist expensive/repeated calls: welch_psd (FFT-based PSD) was
    # recomputed 7 times and nn20/nn50 twice each in the original.
    psd = fd.welch_psd(nni, show=False)
    nn20 = td.nn20(nni)
    nn50 = td.nn50(nni)

    features = {}
    features['mean_hr'] = tools.heart_rate(nni).mean()
    features['sdnn'] = td.sdnn(nni)[0]
    features['rmssd'] = td.rmssd(nni)[0]
    features['sdsd'] = td.sdsd(nni)[0]
    features['nn20'] = nn20[0]
    features['pnn20'] = nn20[1]
    features['nn50'] = nn50[0]
    features['pnn50'] = nn50[1]
    features['hf_lf_ratio'] = psd['fft_ratio']
    # fft_peak / fft_log are indexed [vlf, lf, hf]
    features['very_lf'] = psd['fft_peak'][0]
    features['lf'] = psd['fft_peak'][1]
    features['hf'] = psd['fft_peak'][2]
    features['log_very_lf'] = psd['fft_log'][0]
    features['log_lf'] = psd['fft_log'][1]
    features['log_hf'] = psd['fft_log'][2]
    features['sampen'] = nl.sample_entropy(nni)[0]
    return features
def cal_hrv(data_list):
    """Compute HRV metrics from an NN-interval series.

    Parameters
    ----------
    data_list : array-like
        NN intervals; assumed numeric.

    Returns
    -------
    tuple
        (sdsd, rmssd, sd1, sd2, sd_ratio, a1, a2, a_ratio, sampen),
        each rounded; all np.nan when the input contains NaN or has
        at most one sample.
    """
    # BUG FIX: the original guard `np.nan in data_list` only matches the
    # exact np.nan singleton (list membership checks identity first, and
    # nan != nan), so NaNs produced by arithmetic slipped through.
    # Length is checked first so an empty series short-circuits.
    if len(data_list) <= 1 or np.any(np.isnan(data_list)):
        nan = np.nan
        return nan, nan, nan, nan, nan, nan, nan, nan, nan

    # Time-domain difference statistics
    sdsd = round(td.sdsd(nni=data_list)['sdsd'], 5)
    rmssd = round(td.rmssd(nni=data_list)['rmssd'], 5)

    # Poincare descriptors (short/long axis of the ellipse)
    nl_results = nl.poincare(nni=data_list)
    sd1 = round(nl_results['sd1'], 5)
    sd2 = round(nl_results['sd2'], 5)
    sd_ratio = round(sd2 / sd1, 5)

    # Detrended fluctuation analysis, short/long-term scaling exponents.
    # Narrowed from a bare `except:`; pyhrv's dfa may omit keys or return
    # non-numeric values on short series — TODO confirm exact failure mode.
    dfa_results = nl.dfa(data_list)
    try:
        a1 = round(dfa_results['dfa_alpha1'], 5)
        a2 = round(dfa_results['dfa_alpha2'], 5)
        a_ratio = round(a2 / a1, 5)
    except (KeyError, TypeError, ZeroDivisionError):
        a1 = np.nan
        a2 = np.nan
        a_ratio = np.nan

    # Sample entropy with tolerance set to the SD of the series
    t = np.std(data_list)
    sampen = round(
        nl.sample_entropy(nni=data_list, tolerance=t)['sampen'], 6)

    return sdsd, rmssd, sd1, sd2, sd_ratio, a1, a2, a_ratio, sampen
def extractTimeDomain(self, x):
    """Compute an extended time-domain HRV feature vector for ``x``.

    NN intervals are derived via ``self.extractRR``; features come from
    pyhrv's time-domain module.

    Parameters
    ----------
    x : array-like
        Raw signal passed through to ``self.extractRR``.

    Returns
    -------
    np.ndarray
        15 features: [nni_mean, nni_counter, sdnn, nni_diff_mean, rmssd,
        sdsd, hr_mean, hr_std, hr_range, nn20, pnn20, nn30, pnn30, nn50,
        pnn50]; an empty array when extraction fails.
    """
    try:
        nni = self.extractRR(x)
        nni_params = td.nni_parameters(nni=nni)
        nni_sd = td.sdnn(nni=nni)
        nni_diff = td.nni_differences_parameters(nni=nni)
        nni_rmssd = td.rmssd(nni=nni)
        nni_sdsd = td.sdsd(nni=nni)
        hr_params = td.hr_parameters(nni=nni)
        nn20 = td.nn20(nni=nni)
        nn30 = td.nnXX(nni=nni, threshold=30)
        nn50 = td.nn50(nni=nni)
        return np.array([
            nni_params["nni_mean"],
            nni_params["nni_counter"],
            nni_sd["sdnn"],
            nni_diff["nni_diff_mean"],
            nni_rmssd["rmssd"],
            nni_sdsd["sdsd"],
            hr_params["hr_mean"],
            hr_params["hr_std"],
            hr_params["hr_max"] - hr_params["hr_min"],  # HR range
            nn20["nn20"],
            nn20["pnn20"],
            nn30["nn30"],
            nn30["pnn30"],
            nn50["nn50"],
            nn50["pnn50"],
        ])
    except Exception:
        # Narrowed from a bare `except:`; the stale commented-out
        # 8-feature return variant was removed.
        return np.array([])
def process(X):
    """Extract a per-recording ECG feature matrix.

    For each signal in ``X``: runs biosppy-style ``ecg`` processing,
    applies heuristics on the median beat template to detect an inverted
    lead (re-running on the negated signal if so), then collects beat
    morphology (Q/R/S amplitudes, QRS duration) plus time- and
    frequency-domain HRV features.

    NOTE(review): reads module-level globals ``y`` (class labels), ``plt``
    and helpers ``ecg``, ``calc_median``, ``find_minimum``,
    ``build_features``, ``time_domain``, ``frequency_domain`` — assumed
    defined elsewhere in this file; verify before reuse.

    Returns
    -------
    np.ndarray
        One feature row per input signal.
    """
    features = []
    plot = False  # set True to visually inspect the first 40 templates
    print(y[0:40])
    # Per-class median-template collections (collected but not returned;
    # presumably kept for offline inspection — TODO confirm)
    tpls0 = []
    tpls1 = []
    tpls2 = []
    tpls3 = []
    shp = None  # unused
    for i in range(len(X)):
        _sample = X[i]
        print(f"sample {i}: Class {y[i]}")
        # Drop NaN padding before processing
        sample = _sample[~np.isnan(_sample)]
        res = ecg(signal=sample, sampling_rate=300, show=False)
        # FT
        # N = len(sample) / 2
        # T = 1.0 / 300.0
        # xf = np.linspace(0.0, 1.0 / (2 * T), int(N // 2))
        # yf = fft(res["filtered"])
        # plt.plot(xf, 2.0 / N * np.abs(yf[0 : int(N // 2)]))
        # plt.show()
        median = calc_median(res["templates"])
        # Inversion heuristic on the median beat template: reprocess the
        # negated signal when (a) the global max is not in the 55:65
        # window (presumably where the R peak should sit — TODO confirm
        # template length/alignment), or (b) the negative deflection
        # dominates the positive one, or (c) a combined minimum-position/
        # relative-depth test fires.  Thresholds are empirical.
        # if (np.argmin(median) < 60) and not 0.7*np.max(median) > abs(np.min(median)):
        if (
            not np.max(median[55:65]) == np.max(median)
            or (np.max(median) < -0.8 * np.min(median))
            or (
                not 0.75 * np.max(median) > -np.min(median)
                and (
                    np.argmin(median) < 60
                    and (
                        np.min(median[:60]) < 1.5 * min(median[60:])
                        or np.min(median[60:]) < 1.5 * min(median[:60])
                    )
                )
            )
        ):
            # and ((np.min(median) < 1.2 * np.min(
            # median[[i for i in range(len(median)) if i != np.argmin(median)]])) or np.max(median[45:48]) > -np.min(median[65:75])):
            # if np.min(median[45:55]) < np.min(median[0:45]) and np.min(median[65:80]) < np.min(median[80:]) and np.max(median[55:65]) == np.max(median):
            # if np.max(median) < abs(np.min(median)) and np.min(median[50:55]) < np.min(median[60:65]):
            # if abs(np.mean(median)) > abs(np.median(median)):
            res = ecg(-sample, sampling_rate=300, show=False)
            median = calc_median(res["templates"])
            # neg = True
        # res["templates"][j] = (res["templates"][j]-mean)/std
        # Scale by std only; the mean is NOT removed
        median = (median) / median.std()
        if i < 40 and plot:
            # plt.plot(res["templates"][j])
            plt.title(y[i])
            plt.plot(median)
            plt.show()
        # if not neg:
        # Bucket the normalised median template by class label
        if y[i] == 0:
            tpls0.append(median)
        if y[i] == 1:
            tpls1.append(median)
        if y[i] == 2:
            tpls2.append(median)
        if y[i] == 3:
            tpls3.append(median)
        # beat characterization
        heart_rate = res["heart_rate"]
        filtered = res["filtered"]
        rpeaks = res["rpeaks"]
        # peaks in seconds, required by pyhrv
        rpeaks_s = res["ts"][rpeaks]
        # Q and S located as local minima left/right of each R peak
        qpeaks = np.array([find_minimum(filtered, r) for r in rpeaks])
        speaks = np.array(
            [find_minimum(filtered, r, direction="right") for r in rpeaks]
        )
        r_amplitude = filtered[rpeaks]
        q_amplitude = filtered[qpeaks]
        s_amplitude = filtered[speaks]
        # QRS width in samples (distance between the flanking minima)
        qrs_duration = speaks - qpeaks
        # hrv_res = hrv(
        #     rpeaks=rpeaks_s,
        #     plot_tachogram=False,
        #     kwargs_ar={"order": 8},
        #     show=False,
        # )
        # HRV features from pyhrv, all computed from R-peak timestamps
        nni = time_domain.nni_parameters(rpeaks=rpeaks_s)
        nni_diff = time_domain.nni_differences_parameters(rpeaks=rpeaks_s)
        sdnn = time_domain.sdnn(rpeaks=rpeaks_s)
        sdsd = time_domain.sdsd(rpeaks=rpeaks_s)
        tri_index = time_domain.triangular_index(rpeaks=rpeaks_s, plot=False)
        welch_psd = frequency_domain.welch_psd(rpeaks=rpeaks_s, mode="dev")[0]
        # print(templates.shape, median.shape)
        features.append(
            build_features(q_amplitude, r_amplitude, s_amplitude, qrs_duration)
            + [
                nni["nni_mean"],
                nni["nni_min"],
                nni["nni_max"],
                nni_diff["nni_diff_mean"],
                nni_diff["nni_diff_min"],
                nni_diff["nni_diff_max"],
                sdnn["sdnn"],
                sdsd["sdsd"],
                tri_index["tri_index"],
                welch_psd["fft_ratio"],
            ]
            + list(welch_psd["fft_peak"] + welch_psd["fft_abs"] + welch_psd["fft_norm"])
        )
        # print(templates.shape, median.shape)
    features = np.array(features)
    print(f"computed features {features.shape}")
    return features
def _computeSignal(self, signal):
    """Compute time-domain, statistical and frequency HRV metrics for one signal.

    Parameters
    ----------
    signal : array-like
        Raw sphygmogram samples.

    Returns
    -------
    dict
        Feature name -> value, with NaNs replaced by 0.  An empty dict
        when the signal yields no peaks or no NN intervals.
    """
    obj = {}
    # Best min_dist & thres found empirically for sphygmogram signals
    peaks = peak.indexes(signal, min_dist=56, thres=0.16)
    # Ignore abnormal signals with no detectable peaks
    if len(peaks) == 0:
        return obj
    nn = tools.nn_intervals(peaks)
    # Ignore abnormal signals with no NN intervals.  BUG FIX: this guard
    # previously did a bare `return` (None), inconsistent with the
    # empty-dict contract of the guard above.
    if len(nn) == 0:
        return obj

    # Standard time-domain / geometrical parameters (pyhrv returns dicts)
    obj = dict(td.nni_parameters(nn, peaks), **obj)
    obj = dict(td.nni_differences_parameters(nn, peaks), **obj)
    obj = dict(td.sdnn(nn, peaks), **obj)
    obj = dict(td.sdnn_index(nn, peaks), **obj)
    obj = dict(td.sdann(nn, peaks), **obj)
    obj = dict(td.rmssd(nn, peaks), **obj)
    obj = dict(td.sdsd(nn, peaks), **obj)
    obj = dict(td.nn50(nn, peaks), **obj)
    obj = dict(td.nn20(nn, peaks), **obj)
    obj = dict(td.geometrical_parameters(nn, peaks, plot=False), **obj)
    # presumably a plot object, not a scalar feature — dropped
    del obj['nni_histogram']

    # Additional statistical indices
    obj['cv'] = self._cv(obj['sdnn'], obj['nni_mean'])
    peaks_diff = tools.nni_diff(peaks)
    obj['MxDMn'] = max(peaks_diff) - min(peaks_diff)  # variation range
    obj['MxRMn'] = max(peaks_diff) / min(peaks_diff)
    # NOTE(review): scipy < 1.9 `mode` API; `[0][0]` breaks with newer
    # scipy where keepdims defaults changed — confirm pinned version.
    obj['Mo'] = stats.mode(peaks_diff)[0][0]
    counter = Counter(peaks_diff)
    # Frequency of the mode.  Simplified from an index dance over
    # counter.keys()/values(); Counter lookup gives the same count.
    obj['AMo'] = counter[obj['Mo']]
    # Stress index; NOTE(review): divides by MxDMn, which is 0 when all
    # peak differences are equal — confirm upstream guarantees otherwise.
    obj['SI'] = obj['AMo'] / (2 * obj['Mo'] * obj['MxDMn'])

    # Autocorrelation function
    # Frequency stats (Welch PSD band powers)
    welch = frequency_domain(signal).stats['welch']['params']
    bands = list(welch['fft_bands'].keys())
    obj['TP'] = welch['fft_total']
    obj['HF'] = welch['fft_rel'][bands.index('hf')]
    obj['LF'] = welch['fft_rel'][bands.index('lf')]
    obj['VLF'] = welch['fft_rel'][bands.index('vlf')]
    obj['ULF'] = welch['fft_rel'][bands.index('ulf')]
    obj['HFav'] = welch['fft_abs'][bands.index('hf')]
    obj['LFav'] = welch['fft_abs'][bands.index('lf')]
    obj['VLFav'] = welch['fft_abs'][bands.index('vlf')]
    obj['ULFav'] = welch['fft_abs'][bands.index('ulf')]
    obj['(LF/HF)av'] = obj['LFav'] / obj['HFav']
    obj['IC'] = obj['LF'] / obj['VLF']

    # Replace NaNs with 0 for downstream consumers
    for k in obj:
        if math.isnan(obj[k]):
            obj[k] = 0
    return obj