def run(inp, opt, cfg):
    """Remove baseline wander from the ECG channel.

    Returns a one-track list holding the cleaned waveform at the input
    sampling rate.
    """
    ecg = inp['ecg']
    sample_rate = ecg['srate']
    filled = arr.interp_undefined(ecg['vals'])          # fill gaps before filtering
    cleaned = arr.remove_wander_spline(filled, sample_rate)
    return [{"srate": sample_rate, "vals": cleaned}]
def run(inp, opt, cfg):
    """Detect R peaks on the ECG channel and emit one {'dt','val'} event per peak."""
    sample_rate = inp['ecg']['srate']
    waveform = arr.interp_undefined(inp['ecg']['vals'])
    r_peaks = arr.detect_qrs(waveform, sample_rate)  # detect r-peak
    # one unit-valued event at each R-peak time (seconds)
    return [[{'dt': peak / sample_rate, 'val': 1} for peak in r_peaks]]
def run(inp, opt, cfg):
    """EEG frequency-domain analysis.

    Computes total power, SEF95, median frequency, and relative band powers
    (delta/theta/alpha/beta/gamma) from overlapped 2-second windowed epochs.
    Returns one single-event track per quantity, stamped at cfg['interval'].
    """
    data = arr.interp_undefined(inp['eeg']['vals'])
    data -= smooth(np.array(data))  # detrend with the (project-defined) smoother
    srate = int(inp['eeg']['srate'])
    nfft = srate * 2  # srate * epoch size
    fres = srate / nfft  # frequency resolution (hz)
    # frequency domain analysis
    EPOCH_SIZE = int(srate * 2)
    STRIDE_SIZE = int(srate * 0.5)
    ps = []
    for epoch_start in range(0, len(data) - EPOCH_SIZE + 1, STRIDE_SIZE):  # epochs overlap every 0.5 s
        epoch_w = data[epoch_start:epoch_start + EPOCH_SIZE]  # 2-second epoch
        epoch_w = (epoch_w - np.mean(epoch_w)) * np.blackman(EPOCH_SIZE)  # detrend and windowing
        dft = np.fft.fft(epoch_w)[:srate]  # real-valued input, so only half the spectrum is needed
        dft[0] = 0  # drop the DC component
        ps.append(2 * np.abs(dft) ** 2)  # absolute power, doubled because the half-spectrum was discarded
    ps = np.mean(np.array(ps), axis=0)  # average periodogram over epochs (Welch-like)
    pssum = np.cumsum(ps)  # cummulative sum
    pssum = pssum[1:]
    # total power up to 30 Hz; fromhz/tohz convert Hz <-> bin index (defined elsewhere)
    totpow = pssum[fromhz(30, fres)]
    sef = tohz(np.argmax(pssum > 0.95 * totpow), fres)  # spectral edge frequency (95%)
    mf = tohz(np.argmax(pssum > 0.5 * totpow), fres)  # median frequency (50%)
    # relative band powers (% of total), using cumulative-sum differences
    delta = pssum[fromhz(4, fres) - 1] / pssum[-1] * 100
    theta = (pssum[fromhz(8, fres) - 1] - pssum[fromhz(4, fres)]) / pssum[-1] * 100
    alpha = (pssum[fromhz(12, fres) - 1] - pssum[fromhz(8, fres)]) / pssum[-1] * 100
    beta = (pssum[fromhz(30, fres) - 1] - pssum[fromhz(12, fres)]) / pssum[-1] * 100
    gamma = (pssum[-1] - pssum[fromhz(30, fres)]) / pssum[-1] * 100
    # pttmax_list.append()
    # pttdmax_list.append({'dt': dmax_dt, 'val': (dmax_dt - rpeak_dt) * 1000})
    # pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})
    #
    return [
        [{'dt': cfg['interval'], 'val': 10 * np.log10(totpow)}],  # total power in dB
        [{'dt': cfg['interval'], 'val': sef}],
        [{'dt': cfg['interval'], 'val': mf}],
        [{'dt': cfg['interval'], 'val': delta}],
        [{'dt': cfg['interval'], 'val': theta}],
        [{'dt': cfg['interval'], 'val': alpha}],
        [{'dt': cfg['interval'], 'val': beta}],
        [{'dt': cfg['interval'], 'val': gamma}]
    ]
def run(inp, opt, cfg):
    """Surgical-pleth-style index from the pleth waveform.

    Detects beats, then for each beat computes the heart-beat interval (HBI, ms)
    and pleth amplitude (PPGA), converts both to adaptive percentiles, and
    combines them into an SPI-like score (100 - weighted percentiles).

    Reference (QRS detection background):
    http://ocw.utm.my/file.php/38/SEB4223/07_ECG_Analysis_1_-_QRS_Detection.ppt%20%5BCompatibility%20Mode%5D.pdf
    """
    # hist_ppga / hist_hbi are module-level adaptive histograms; updated in place below
    global hist_ppga, hist_hbi
    data = arr.interp_undefined(inp['pleth']['vals'])
    srate = inp['pleth']['srate']
    minlist, maxlist = arr.detect_peaks(data, srate)
    # extract beats
    beat_res = [{'dt': idx / srate, 'val': 1} for idx in maxlist]
    ppga_res = []
    hbi_res = []
    ppga_perc_res = []
    hbi_perc_res = []
    spi_res = []
    for i in range(len(maxlist) - 1):
        dt = maxlist[i + 1] / srate
        hbi = (maxlist[i + 1] - maxlist[i]) / srate * 1000  # peak-to-peak interval in ms
        ppga = data[maxlist[i + 1]] - data[minlist[i]]  # beat amplitude
        # blend the adaptive-histogram percentile with a fixed normal prior
        #hbi_perc = hist_hbi.percentile(hbi) * 0.7 + st.norm.cdf(hbi, 754.7, 210.8) * 30
        hbi_perc = hist_hbi.percentile(hbi) * 0.7 + st.norm.cdf(hbi, 700, 100) * 30
        #ppga_perc = hist_ppga.percentile(ppga) * 0.7 + st.norm.cdf(ppga, 2.428, 1.896) * 30
        ppga_perc = hist_ppga.percentile(ppga) * 0.7 + st.norm.cdf(ppga, 1, 0.2) * 30
        # hbi_perc = hist_hbi.percentile(hbi) * 0.7 + hist_hbi_grp.percentile(hbi) * 0.3
        # ppga_perc = hist_ppga.percentile(ppga) * 0.7 + hist_ppga_grp.percentile(ppga) * 0.3
        spi = 100 - (0.7 * ppga_perc + 0.3 * hbi_perc)  # higher percentiles -> lower index
        ppga_res.append({'dt': dt, 'val': ppga})
        hbi_res.append({'dt': dt, 'val': hbi})
        ppga_perc_res.append({'dt': dt, 'val': ppga_perc})
        hbi_perc_res.append({'dt': dt, 'val': hbi_perc})
        spi_res.append({'dt': dt, 'val': spi})
        # learn AFTER scoring this beat, so the percentile excludes the current sample
        hist_hbi.learn(hbi)
        hist_ppga.learn(ppga)
    return [beat_res, ppga_res, hbi_res, ppga_perc_res, hbi_perc_res, spi_res]
def run(inp, opt, cfg):
    """Two-stage ECG baseline removal.

    Stage 1: per 10-second segment, subtract the median then a 4th-order
    polynomial fit (the fitted curve is also returned as 'baseline').
    Stage 2: per RR interval, subtract the median of the samples between
    consecutive R peaks.
    Returns [corrected waveform, estimated baseline].
    """
    data = arr.interp_undefined(inp['ecg']['vals'])
    srate = inp['ecg']['srate']
    span = int(srate * 10)  # Cut every 10 second regardless of interval
    baseline = [0] * len(data)
    x = np.arange(0, span)
    for spos in range(0, len(data), span):  # start position of this segment
        if spos + span > len(data):
            # last partial segment: shrink the window and the x axis to match
            span = len(data) - spos
            x = x[0:span]
        med = np.median(data[spos:spos + span])  # compute overall median for the entire waveform
        for j in range(span):
            data[spos + j] -= med  # shift each sample of the entire waveform by this median value
        # fit and subtract a degree-4 polynomial trend over the (median-shifted) segment
        y = data[spos:spos + span]
        p = np.polyfit(x, y, 4)
        y = np.polyval(p, x)
        for j in range(span):
            baseline[spos + j] = y[j]
            data[spos + j] -= y[j]
    r_list = arr.detect_qrs(data, srate)  # detect r-peak
    for i in range(len(r_list) - 1):  # for each rr interval
        idx1 = r_list[i]
        idx2 = r_list[i + 1]
        if idx1 + 1 > idx2:
            continue  # degenerate interval, nothing between the peaks
        med = np.median(data[idx1 + 1:idx2])
        for j in range(idx1 + 1, idx2):
            data[j] -= med
    return [{'srate': srate, 'vals': data}, {'srate': srate, 'vals': baseline}]
def run(inp, opt, cfg):
    """Estimate respiratory mechanics from volume / flow / airway-pressure.

    For 31 volume bins, fits P = (1/C)*V + R*F + P0 by least squares and
    returns tracks of bin volume, compliance C, resistance R, and offset P0.

    NOTE(review): the docstring previously described PPV from arterial
    waveform, which does not match this code; presumably copied from a
    sibling filter.
    """
    vsrate = inp['volume']['srate']
    psrate = inp['awp']['srate']
    fsrate = inp['flow']['srate']
    if vsrate != psrate or vsrate != fsrate:
        print("sampling rates of volume, flow and awp are different")
        # NOTE(review): returns None here while every other path returns a list —
        # confirm the framework tolerates a None result
        return
    srate = vsrate
    vdata = arr.interp_undefined(inp['volume']['vals'])
    fdata = arr.interp_undefined(inp['flow']['vals'])
    pdata = arr.interp_undefined(inp['awp']['vals'])
    # if srate < 200:
    #     vdata = arr.resample_hz(vdata, srate, 200)
    #     fdata = arr.resample_hz(fdata, srate, 200)
    #     pdata = arr.resample_hz(pdata, srate, 200)
    #     srate = 200
    vdata = np.array(vdata)
    fdata = np.array(fdata) / 60  # L/min -> L/sec
    pdata = np.array(pdata)
    #fdata = np.diff(vdata) * srate / 1000  # make difference to rate
    #vdata = vdata[:-1]  # remove the last sample
    #pdata = pdata[:-1]  # remove the last sample
    vmax = max(vdata)
    vmin = min(vdata)
    # clip 10% off both ends of the volume range before binning
    v95 = vmax - (vmax - vmin) * 0.1
    v5 = vmin + (vmax - vmin) * 0.1
    vret = []
    cret = []
    rret = []
    p0ret = []
    nstep = 31
    vstep = (v95 - v5) / nstep
    for i in range(nstep):
        # collect data
        vfrom = v5 + vstep * i
        seg_idx = np.logical_and(vfrom < vdata, vdata <= vfrom + vstep)
        if sum(seg_idx) < 3:
            # need at least 3 samples to solve for 3 unknowns
            print('number of samples in data seg < 3')
            continue
        pseg = pdata[seg_idx]
        vseg = vdata[seg_idx]
        fseg = fdata[seg_idx]
        # design matrix [V, F, 1]; solution is [1/C, R, P0]
        A = np.vstack([vseg, fseg, np.ones(len(vseg))]).T
        # NOTE(review): lstsq called without rcond= — emits a FutureWarning on
        # modern numpy; confirm and pass rcond=None if the new default is acceptable
        cinv, r, p0 = np.linalg.lstsq(A, pseg)[0]
        c = 1 / cinv
        vret.append({'dt': i * 0.02, 'val': vfrom})
        cret.append({'dt': i * 0.02, 'val': c})
        rret.append({'dt': i * 0.02, 'val': r})
        p0ret.append({'dt': i * 0.02, 'val': p0})
    return [
        #{'dt':0, 'srate':srate, 'vals':list(fdata)},
        vret, cret, rret, p0ret
    ]
def run(inp, opt, cfg):
    """Spectral T-wave alternans (TWA) analysis on the ECG channel.

    Resamples to 500 Hz, band-passes and de-wanders the ECG, then for each
    sliding 128-beat segment: aligns beats around the R peak, rejects
    segments with unstable HR or too many ectopic (low-correlation) beats,
    and computes a beat-to-beat power spectrum per sample offset in the ST/T
    window.  The magnitude at 0.5 cycles/beat gives the alternans voltage
    (TWAV) and ratio (TWAR).

    Returns [filtered ECG, averaged-beat track, R-peak events, TWAV events,
    TWAR events].
    """
    data = arr.interp_undefined(inp['ecg']['vals'])
    srate = inp['ecg']['srate']
    ecg_500 = data
    if srate != 500:
        ecg_500 = arr.resample(data, math.ceil(len(data) / srate * 500))  # resample to 500 Hz
        srate = 500
    ecg_filt = arr.band_pass(ecg_500, srate, 0.01, 100)  # filtering
    ecg_filt = arr.remove_wander_spline(ecg_filt, srate)  # remove baseline wander
    r_list = arr.detect_qrs(ecg_filt, srate)  # detect r-peak
    new_r_list = []
    for ridx in r_list:  # remove qrs before and after overlap
        if cfg['overlap'] <= ridx / srate:
            new_r_list.append(ridx)
    r_list = new_r_list
    ret_rpeak = []
    for ridx in r_list:
        ret_rpeak.append({'dt': ridx / srate})
    segbeats = 128  # beats per analysis segment
    segsteps = 32  # int(segbeats/4)
    # for each segments
    twavs = []
    twars = []
    ret_twav = []
    ret_twar = []
    ret_avg_beat = {'srate': srate, 'vals': [0] * len(ecg_500)}
    iseg = 0
    for seg_start in range(0, len(r_list) - segbeats, segsteps):  # Separates in 128-beat units regardless of input length
        iseg += 1
        hrs = []
        # calculate hrs
        for i in range(segbeats - 1):
            hr = srate / (r_list[seg_start + i + 1] - r_list[seg_start + i])
            hrs.append(hr)
        if max(hrs) - min(hrs) > 20:
            # reject segments whose instantaneous HR varies by more than 20 bpm
            # print('seg ' + iseg + ' excluded HR diff > ' + diff_hr)
            continue
        # only -250 to 350 ms from R peak
        idx_r = int(0.25 * srate)  # idx_r == 125
        beat_len = int(0.6 * srate)  # beat_len == 300
        beats = []
        for i in range(segbeats):
            ridx = r_list[seg_start + i]
            beat = ecg_filt[ridx - idx_r:ridx - idx_r + beat_len]
            beats.append(beat)
        beats = np.array(beats)
        # remove each beat's baseline voltage
        # no effect because of R peak leveling is below
        # Baseline correction included estimation of the baseline in the isoelectric PQ
        # segment by averaging 16 successive samples in this time window
        pq_width = int(0.008 * srate)
        # for i in range(segbeats):
        #     idx_base = arr.min_idx(beats[i], idx_r - int(0.15 * srate), idx_r)
        #     min_std = 999999
        #     for j in range(idx_base - int(0.03 * srate), idx_base + int(0.03 * srate)):
        #         # The baseline is the point at which the standard deviation of around 15ms is minimized.
        #         this_std = np.std(beats[i][j - pq_width:j + pq_width])
        #         if this_std < min_std:
        #             idx_base = j
        #             min_std = this_std
        #     beats[i] -= np.mean(beats[i][idx_base - pq_width:idx_base + pq_width])
        # calculate average beat
        avg_beat = np.mean(beats, axis=0)  # average beat of the segbeats beats
        # find minimum values from avg_beat in both sides
        idx_start = idx_r - int(0.15 * srate)  # idx_start == 50
        idx_end = idx_r + int(0.1 * srate)  # idx_end == 175
        idx_base = arr.min_idx(avg_beat, idx_start, idx_r)  # avg_beat's baseline
        min_std = 999999
        # find minimum std value
        for j in range(idx_base - int(0.03 * srate), idx_base + int(0.03 * srate)):
            idx_from = max(0, j - pq_width)
            idx_to = min(len(avg_beat), j + pq_width)
            this_std = np.std(avg_beat[idx_from:idx_to])
            # print("{} {}".format(j, this_std))
            if this_std < min_std:
                idx_base = j
                min_std = this_std
        # print("idx_base={}", idx_base)
        min_left = np.mean(avg_beat[idx_base - pq_width:idx_base + pq_width])
        min_right = np.min(avg_beat[idx_r:idx_end])
        # threshold = 5% of max val
        th_left = min_left + 0.05 * (avg_beat[idx_r] - min_left)
        th_right = min_right + 0.05 * (avg_beat[idx_r] - min_right)
        # walk outward from the R peak until the signal drops below threshold
        idx_qrs_start = idx_r - int(0.05 * srate)
        idx_qrs_end = idx_r + int(0.05 * srate)
        for j in range(idx_r, idx_r - int(0.1 * srate), -1):  # idx_r = 125
            if avg_beat[j] < th_left:
                idx_qrs_start = j
                break
        for j in range(idx_r, idx_r + int(0.1 * srate)):
            if avg_beat[j] < th_right:
                idx_qrs_end = j
                break
        # find offset with maximum correlation
        offsets = []  # for each beat, likes [0, -1, 0, 0, 1, ...]
        qrs_coeffs = []
        offset_width = int(0.01 * srate)  # 3 = range for finding offset
        for i in range(segbeats):  # for each beat
            maxoffset = -offset_width
            maxce = -999999
            for offset in range(-offset_width, offset_width + 1):
                ce = arr.corr(
                    avg_beat[idx_qrs_start:idx_qrs_end],
                    beats[i][offset + idx_qrs_start:offset + idx_qrs_end])
                if maxce < ce:
                    maxoffset = offset
                    maxce = ce
            offsets.append(maxoffset)
            qrs_coeffs.append(maxce)
        # move beats by the offset
        new_beats = []
        for i in range(segbeats):
            ost = offsets[i]
            beat = beats[i].tolist()
            if ost < 0:
                beat = [0] * -ost + beat[:ost]
            else:
                beat = beat[ost:] + [0] * ost
            new_beats.append(beat)
        beats = np.array(new_beats)  # beats.shape == (segbeats,300)
        # calculate average beat
        avg_beat = np.mean(beats, axis=0)  # average beat of the segbeats beats
        # replace vpc as template
        nreplaced = 0
        for i in range(segbeats):
            ce = arr.corr(avg_beat, beats[i])
            if ce < 0.95:
                nreplaced += 1
                beats[i] = copy.deepcopy(avg_beat)
                offsets[i] = 0
        #print('{} beats are replaced'.format(nreplaced))
        if nreplaced > 0.1 * segbeats:
            print('excluded VPC > {}'.format(nreplaced))
            continue
        # qrs level alignment
        # idx_r == 125
        # len(avg_beat) == beat_len == 300
        for i in range(segbeats):
            beats[i] -= beats[i][idx_r]
        # plot for debugging
        # plt.plot()
        # for i in range(segbeats):
        #     col = 'blue'
        #     if i % 2:
        #         col = 'red'
        #     plt.plot(beats[i], c=col, ls='-')
        # plt.savefig('{:02d}_{}.png'.format(opt['ifile'], iseg))
        # plt.close()
        # gather segbeats beats from idx_r(125) to beat_len(300)
        # power spectrums of segbeats beats
        spect = []
        for idx_from_r in range(beat_len - idx_r):
            timed_samples = beats[:, idx_r + idx_from_r]
            # timed_samples *= np.hamming(len(timed_samples))
            # NOTE(review): 2**|FFT| looks like it was meant to be |FFT|**2
            # (power spectrum) — confirm against the original TWA paper
            segfft = 2**np.abs(np.fft.fft(timed_samples))
            spect.append(segfft)  # each segbeats beat fft result
        spect = np.array(spect)  # rows == idx_from_r, cols == frequency(0-segbeats)
        # power spectra are summed into a composite in which
        # the magnitude at 0.5 cycles/beat indicates raw alternans (in mv2)
        # cum_spect.shape == segbeats
        # cumulative spectum of beats
        st_start = int(0.1 * srate)  # idx_qrs_end - idx_r #int(0.1*srate)
        st_end = int(0.25 * srate)
        avg_spect = np.mean(spect[st_start:st_end, :], axis=0)  # between 100 (50) and 250 ms (125) from rpeak
        avg_alt = avg_spect[int(0.5 * segbeats)]  # magnitude at 0.5 cycles/beat
        # cum_spect_noise = cum_spect[int(0.4*segbeats):int(0.46*segbeats)]  # noise level: 0.44-0.49 cycles / beat
        avg_spect_noise = avg_spect[int(0.44 * segbeats):int(0.49 * segbeats)]  # noise level: 0.44-0.49 cycles / beat
        # cum_spect_noise = cum_spect[int(0.33 * segbeats):int(0.48 * segbeats)]  # noise level: 0.44-0.49 cycles / beat
        avg_noise_avg = np.mean(avg_spect_noise)
        avg_noise_std = np.std(avg_spect_noise)
        # return avg beat
        # avg_beat = np.mean(beats, axis=0)
        for j in range(len(avg_beat)):
            if len(ret_avg_beat['vals']) > r_list[seg_start + segbeats - 1] + j:
                ret_avg_beat['vals'][r_list[seg_start + segbeats - 1] + j] = avg_beat[j]
        # print('avg alt {}, noise {}'.format(cum_alt, cum_noise_avg))
        twar = 0  # default ratio when alternans does not exceed the noise floor
        if avg_alt > avg_noise_avg:
            twav = 1000 * (avg_alt - avg_noise_avg)**0.5
            twar = (avg_alt - avg_noise_avg) / avg_noise_std
            twavs.append(twav)
            ret_twav.append({
                'dt': r_list[seg_start + segbeats - 1] / srate,
                'val': twav
            })
        twars.append(twar)
        ret_twar.append({
            'dt': r_list[seg_start + segbeats - 1] / srate,
            'val': twar
        })
        # plt.figure(figsize=(30, 5))
        # plt.plot(ecg_filt.tolist(), color='black', lw=1)
        # plt.savefig('e:/{}_raw.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5)
        # plt.close()
        #
        # plt.figure(figsize=(10, 5))
        # for i in range(len(beats)):
        #     c = 'red'
        #     if i % 2 == 0:
        #         c = 'blue'
        #     plt.plot(beats[i], color=c, lw=1)
        # plt.savefig('e:/{}_ecg.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5)
        # plt.close()
        #
        # plt.figure(figsize=(10, 5))
        # plt.plot(np.arange(1, 65) / 128, avg_spect[1:65], lw=1)
        # plt.savefig('e:/{}_spect.pdf'.format(twar), bbox_inches="tight", pad_inches=0.5)
        # plt.close()
    dt_last = r_list[-1] / srate - cfg['overlap']
    return [{
        'srate': srate,
        'vals': ecg_filt.tolist()
    }, ret_avg_beat, ret_rpeak, ret_twav, ret_twar]
def run(inp, opt, cfg):
    """Calculate PPV (pulse pressure variation) from the pleth waveform.

    Detects beats, builds per-beat pulse amplitudes, estimates the
    respiratory rate from the pulse-amplitude envelope, then computes a
    smoothed PPV per (half-overlapped) breath.

    Returns [ppv events, per-beat pulse amplitudes, respiratory-rate event],
    or None when the input is too short / rhythm too irregular.
    """
    global last_ppv  # smoothing state shared across invocations
    data = arr.interp_undefined(inp['pleth']['vals'])
    srate = inp['pleth']['srate']
    data = arr.resample_hz(data, srate, 100)
    srate = 100
    if len(data) < 30 * srate:
        print('hr < 30')
        return
    # beat detection
    minlist, maxlist = arr.detect_peaks(data, srate)
    maxlist = maxlist[1:]
    # beat lengths
    beatlens = []
    beats_128 = []  # one entry per beat; None marks a rejected beat
    beats_128_valid = []
    for i in range(0, len(minlist) - 1):
        beatlen = minlist[i + 1] - minlist[i]  # in samps
        if not 30 < beatlen < 300:
            beats_128.append(None)
            continue
        pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
        if not 20 < pp < 100:
            beats_128.append(None)
            continue
        beatlens.append(beatlen)
        beat = data[minlist[i]:minlist[i + 1]]
        resampled = arr.resample(beat, 128)
        beats_128.append(resampled)
        beats_128_valid.append(resampled)
    if not beats_128_valid:
        return
    avgbeat = np.array(beats_128_valid).mean(axis=0)
    meanlen = np.mean(beatlens)
    stdlen = np.std(beatlens)
    if stdlen > meanlen * 0.2:  # irregular rhythm
        return
    # remove beats with correlation < 0.9
    pulse_vals = []
    for i in range(0, len(minlist) - 1):
        # BUG FIX: was `if not beats_128[i]:`, which is ambiguous for numpy
        # arrays (raises ValueError) — use the explicit None/empty test that
        # the ART variant of this routine already uses.
        if beats_128[i] is None or not len(beats_128[i]):
            continue
        if np.corrcoef(avgbeat, beats_128[i])[0, 1] < 0.9:
            continue
        pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
        pulse_vals.append({'dt': minlist[i] / srate, 'val': pp})
    # estimates the upper env(n) and lower env(n) envelopes
    # (kernel-weighted average of nadir/peak values; b() is the project kernel)
    xa = np.array([data[idx] for idx in minlist])
    lower_env = np.array([0.0] * len(data))
    for i in range(len(data)):
        be = np.array([b((i - idx) / (0.2 * srate)) for idx in minlist])
        s = sum(be)
        if s != 0:
            lower_env[i] = np.dot(xa, be) / s
    xb = np.array([data[idx] for idx in maxlist])
    upper_env = np.array([0.0] * len(data))
    for i in range(len(data)):
        be = np.array([b((i - idx) / (0.2 * srate)) for idx in maxlist])
        s = sum(be)
        if s != 0:
            upper_env[i] = np.dot(xb, be) / s
    pulse_env = upper_env - lower_env
    pulse_env[pulse_env < 0.0] = 0.0
    # estimates resp rate
    rr = arr.estimate_resp_rate(pulse_env, srate)
    # split by respiration
    nsamp_in_breath = int(srate * 60 / rr)
    m = int(len(data) / nsamp_in_breath)  # m segments exist
    raw_pps = []
    pps = []
    for ibreath in np.arange(0, m - 1, 0.5):  # half-breath stride (50% overlap)
        pps_breath = []
        for ppe in pulse_vals:
            if ibreath * nsamp_in_breath < ppe['dt'] * srate < (
                    ibreath + 1) * nsamp_in_breath:
                pps_breath.append(ppe['val'])
        if len(pps_breath) < 4:
            continue
        pp_min = min(pps_breath)
        pp_max = max(pps_breath)
        ppv = 2 * (pp_max - pp_min) / (pp_max + pp_min) * 100  # estimate
        if not 0 < ppv < 50:
            continue
        # raw_pps.append({'dt': (ibreath * nsamp_in_breath) / srate, 'val': pp})
        #
        # kalman filter
        if last_ppv == 0:  # first time
            last_ppv = ppv
        elif abs(last_ppv - ppv) <= 1.0:
            ppv = last_ppv
        elif abs(last_ppv - ppv) <= 25.0:  # ppv cannot be changed abruptly
            ppv = (ppv + last_ppv) * 0.5
            last_ppv = ppv
        else:
            continue  # no update
        pps.append({
            'dt': ((ibreath + 1) * nsamp_in_breath) / srate,
            'val': int(ppv)
        })
    return [pps, pulse_vals, [{'dt': cfg['interval'], 'val': rr}]]
def run(inp, opt, cfg):
    """Pulse transit time (PTT) from ECG R peaks to pleth landmarks.

    For each RR interval, finds the first pleth nadir, maximum-slope point,
    and systolic peak occurring after the R peak but before the next R peak,
    and emits the R-to-landmark delay in milliseconds.

    Returns [pttmin, pttdmax, ECG samples at R peaks, pttmax].
    """
    ecg_data = arr.interp_undefined(inp['ecg']['vals'])
    ecg_srate = inp['ecg']['srate']
    pleth_data = arr.interp_undefined(inp['pleth']['vals'])
    pleth_srate = inp['pleth']['srate']
    pleth_data = arr.band_pass(pleth_data, pleth_srate, 0.5, 15)
    ecg_rlist = arr.detect_qrs(ecg_data, ecg_srate)
    pleth_minlist, pleth_maxlist = arr.detect_peaks(pleth_data, pleth_srate)
    dpleth = np.diff(pleth_data)
    pleth_dmaxlist = []  # index of the maximum slope between peak and nadir in pleth
    for i in range(len(pleth_minlist)):  # maxlist is one less than minlist
        dmax_idx = arr.max_idx(dpleth, pleth_minlist[i], pleth_maxlist[i + 1])
        pleth_dmaxlist.append(dmax_idx)

    def _first_in_interval(indices, start_samp, end_samp):
        # First index strictly inside (start_samp, end_samp); 0 when none.
        # BUG FIX: the original tested `idx > start` before `idx > end`, making
        # the interval upper-bound branch unreachable, so landmarks from later
        # beats could be paired with this R peak.
        for idx in indices:
            if idx > end_samp:
                break
            if idx > start_samp:
                return idx
        return 0

    pttmax_list = []
    pttmin_list = []
    pttdmax_list = []
    for i in range(len(ecg_rlist) - 1):
        if len(pleth_minlist) == 0:
            continue
        if len(pleth_maxlist) == 0:
            continue
        rpeak_dt = ecg_rlist[i] / ecg_srate
        rpeak_dt_next = ecg_rlist[i + 1] / ecg_srate
        if rpeak_dt < cfg['overlap']:
            continue  # skip beats inside the overlap region
        lo = rpeak_dt * pleth_srate
        hi = rpeak_dt_next * pleth_srate
        # find first min in pleth after rpeak_dt in ecg (within this RR interval)
        found_minidx = _first_in_interval(pleth_minlist, lo, hi)
        if found_minidx == 0:
            continue
        # find first dmax in pleth after rpeak_dt in ecg
        found_dmaxidx = _first_in_interval(pleth_dmaxlist, lo, hi)
        if found_dmaxidx == 0:
            continue
        # find first max in pleth after rpeak_dt in ecg
        found_maxidx = _first_in_interval(pleth_maxlist, lo, hi)
        if found_maxidx == 0:
            continue
        max_dt = found_maxidx / pleth_srate
        if max_dt > cfg['interval']:
            continue
        min_dt = found_minidx / pleth_srate
        dmax_dt = found_dmaxidx / pleth_srate
        pttmax_list.append({'dt': max_dt, 'val': (max_dt - rpeak_dt) * 1000})
        pttdmax_list.append({
            'dt': dmax_dt,
            'val': (dmax_dt - rpeak_dt) * 1000
        })
        pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})
    return [
        pttmin_list, pttdmax_list,
        arr.get_samples(ecg_data, ecg_srate, ecg_rlist), pttmax_list
    ]
def run(inp, opt, cfg):
    """Calculate PPV from the pleth waveform via envelope analysis.

    Builds kernel-weighted upper/lower envelopes of the waveform, estimates
    the respiratory rate from their difference, then computes a smoothed PPV
    per (50%-overlapped) pair of breaths.  The smoothing state is persisted
    in cfg['pp'].

    Returns [respiratory-rate event, ppv events].
    """
    data = arr.interp_undefined(inp['pleth']['vals'])
    srate = inp['pleth']['srate']
    data = arr.resample_hz(data, srate, 100)
    srate = 100
    if len(data) < 30 * srate:
        # NOTE(review): this early-return arity (7 tracks) differs from the
        # normal 2-track return below — confirm which the framework expects
        return [{}, {}, {}, {}, {}, [], []]
    minlist, maxlist = arr.detect_peaks(data, srate)
    maxlist = maxlist[1:]
    # estimates the upper ue(n) and lower le(n) envelopes
    # (kernel-weighted average of nadir/peak values; b() is the project kernel)
    xa = np.array([data[idx] for idx in minlist])
    # BUG FIX: was np.array([0] * len(data)) — an int-dtype array that silently
    # truncated every envelope sample to an integer on assignment.
    le = np.zeros(len(data))
    for i in range(len(data)):
        be = np.array([b((i - idx) / (0.2 * srate)) for idx in minlist])
        s = sum(be)
        if s != 0:
            le[i] = np.dot(xa, be) / s
    xb = np.array([data[idx] for idx in maxlist])
    ue = np.zeros(len(data))  # BUG FIX: float dtype, as above
    for i in range(len(data)):
        be = np.array([b((i - idx) / (0.2 * srate)) for idx in maxlist])
        s = sum(be)
        if s != 0:
            ue[i] = np.dot(xb, be) / s
    re = ue - le
    re[re < 0] = 0
    # estimates resp rate
    rr = arr.estimate_resp_rate(re, srate)
    # split by respiration
    nsamp_in_breath = int(srate * 60 / rr)
    m = int(len(data) / nsamp_in_breath)  # m segments exist
    pps = []
    for i in range(m - 1):
        imax = arr.max_idx(re, i * nsamp_in_breath,
                           (i + 2) * nsamp_in_breath)  # 50% overlapping
        imin = arr.min_idx(re, i * nsamp_in_breath, (i + 2) * nsamp_in_breath)
        ppmax = re[imax]
        ppmin = re[imin]
        ppe = 2 * (ppmax - ppmin) / (ppmax + ppmin) * 100  # estimate
        if ppe > 50 or ppe < 0:
            continue
        # smooth against the previous estimate persisted in cfg
        pp = cfg['pp']
        if pp == 0:
            pp = ppe
        err = abs(ppe - pp)
        if err < 1:
            pp = ppe
        elif err < 25:
            pp = (pp + ppe) / 2
        else:
            pass  # dont update
        cfg['pp'] = pp
        pps.append({'dt': (i * nsamp_in_breath) / srate, 'val': pp})
    return [
        [{'dt': cfg['interval'], 'val': rr}],
        pps
    ]
def run(inp, opt, cfg):
    """Calculate PPV and SPV from the arterial waveform.

    Detects beats, rejects outliers by correlation against the average beat,
    estimates the respiratory rate from cubic-spline upper/lower envelopes,
    then computes per-breath PPV/SPV with abrupt-change suppression using the
    module-level state last_ppv / last_spv.

    Returns [median PPV event, median SPV event, respiratory-rate event].
    """
    global last_ppv, last_spv  # smoothing state shared across invocations
    data = arr.interp_undefined(inp['ART']['vals'])
    srate = inp['ART']['srate']
    data = arr.resample_hz(data, srate, 100)
    srate = 100
    if len(data) < 30 * srate:
        print('hr < 30')
        return
    # beat detection
    minlist, maxlist = arr.detect_peaks(data, srate)
    maxlist = maxlist[1:]
    # beat lengths
    beatlens = []
    beats_128 = []  # one entry per beat; None marks a rejected beat
    beats_128_valid = []
    for i in range(0, len(minlist) - 1):
        beatlen = minlist[i + 1] - minlist[i]  # in samps
        if not 30 < beatlen < 300:
            beats_128.append(None)
            continue
        pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
        if not 20 < pp < 100:
            beats_128.append(None)
            continue
        beatlens.append(beatlen)
        beat = data[minlist[i]:minlist[i + 1]]
        resampled = arr.resample(beat, 128)
        beats_128.append(resampled)
        beats_128_valid.append(resampled)
    if not beats_128_valid:
        return
    avgbeat = np.array(beats_128_valid).mean(axis=0)
    meanlen = np.mean(beatlens)
    stdlen = np.std(beatlens)
    if stdlen > meanlen * 0.2:  # irregular rhythm
        return
    # remove beats with correlation < 0.9
    pp_vals = []
    sp_vals = []
    for i in range(0, len(minlist) - 1):
        if beats_128[i] is None or not len(beats_128[i]):
            continue
        if np.corrcoef(avgbeat, beats_128[i])[0, 1] < 0.9:
            continue
        pp = data[maxlist[i]] - data[minlist[i]]  # pulse pressure
        sp = data[maxlist[i]]  # systolic pressure
        pp_vals.append({'dt': minlist[i] / srate, 'val': pp})
        sp_vals.append({'dt': minlist[i] / srate, 'val': sp})
    dtstart = time.time()  # timing probe (result unused)
    # estimates resp rate
    # upper env
    idx_start = max(min(minlist), min(maxlist))
    idx_end = min(max(minlist), max(maxlist))
    xa = scipy.interpolate.CubicSpline(
        maxlist, [data[idx] for idx in maxlist])(np.arange(idx_start, idx_end))
    # lower env
    xb = scipy.interpolate.CubicSpline(
        minlist, [data[idx] for idx in minlist])(np.arange(idx_start, idx_end))
    rr = arr.estimate_resp_rate(xa - xb, srate)
    dtend = time.time()
    # print('rr {}'.format(rr))
    # split by respiration
    nsamp_in_breath = int(srate * 60 / rr)
    m = int(len(data) / nsamp_in_breath)  # m segments exist
    raw_pps = []
    raw_sps = []
    ppvs = []
    spvs = []
    for ibreath in np.arange(0, m - 1, 0.5):  # half-breath stride (50% overlap)
        pps_breath = []
        sps_breath = []
        for ppe in pp_vals:
            if ibreath * nsamp_in_breath < ppe['dt'] * srate < (
                    ibreath + 1) * nsamp_in_breath:
                pps_breath.append(ppe['val'])
        for spe in sp_vals:
            if ibreath * nsamp_in_breath < spe['dt'] * srate < (
                    ibreath + 1) * nsamp_in_breath:
                sps_breath.append(spe['val'])
        if len(pps_breath) < 4:
            continue
        if len(sps_breath) < 4:
            continue
        pp_min = min(pps_breath)
        pp_max = max(pps_breath)
        sp_min = min(sps_breath)
        sp_max = max(sps_breath)
        ppv = (pp_max - pp_min) / (pp_max + pp_min) * 200
        if not 0 < ppv < 50:
            continue
        spv = (sp_max - sp_min) / (sp_max + sp_min) * 200
        if not 0 < spv < 50:
            continue
        # kalman filter
        if last_ppv == 0:  # first time
            last_ppv = ppv
        elif abs(last_ppv - ppv) <= 1.0:
            ppv = last_ppv
        elif abs(last_ppv - ppv) <= 25.0:  # ppv cannot be changed abruptly
            ppv = (ppv + last_ppv) * 0.5
            last_ppv = ppv
        else:
            continue
        if last_spv == 0:  # first time
            last_spv = spv
        elif abs(last_spv - spv) <= 1.0:
            spv = last_spv
        elif abs(last_spv - spv) <= 25.0:  # ppv cannot be changed abruptly
            spv = (spv + last_spv) * 0.5
            last_spv = spv
        else:
            continue
        ppvs.append(ppv)
        spvs.append(spv)
    median_ppv = np.median(ppvs)
    median_spv = np.median(spvs)
    return [[{
        'dt': cfg['interval'],
        'val': median_ppv
    }], [{
        'dt': cfg['interval'],
        'val': median_spv
    }], [{
        'dt': cfg['interval'],
        'val': rr
    }]]
def run(inp, opt, cfg):
    """Calculate SVV surrogates from the arterial waveform.

    Per beat, computes (a) the standard deviation of the beat samples and
    (b) the LZ ratio (sbp-dbp)/(sbp+dbp).  Per pair of breaths (50% overlap,
    breath length from the ventilator rate in inp['vent_rr']), computes the
    variation of each quantity, then smooths against the previous estimates
    persisted in cfg['svv_std'] / cfg['svv_lz'].

    Returns [beat stds, SVV(std) event, beat LZ values, SVV(LZ) event].
    """
    data = arr.interp_undefined(inp['art1']['vals'])
    srate = inp['art1']['srate']
    data = arr.resample_hz(data, srate, 100)
    srate = 100
    if len(data) < 30 * srate:
        return [[], [], [], []]
    minlist, maxlist = arr.detect_peaks(data, srate)
    maxlist = maxlist[1:]  # make the same length
    # calculate each beat's std and put it at the peak time
    stds = []
    lzs = []
    for i in range(len(minlist) - 1):
        maxidx = maxlist[i]
        beat = data[minlist[i]:minlist[i + 1]]
        if max(beat) - min(beat) < 20:
            continue  # reject beats with implausibly small pulse pressure
        s = np.std(beat)
        stds.append({'dt': maxidx / srate, 'val': s})
        sbp = np.max(beat)
        dbp = beat[0]
        lz = (sbp - dbp) / (sbp + dbp)  # 0.1~0.3
        lzs.append({'dt': maxidx / srate, 'val': lz})
    # estimates resp rate (from the ventilator-reported rate track)
    rr = np.median([o['val'] for o in inp['vent_rr']])
    if not rr > 1:
        return [[], [], [], []]
    # split by respiration
    nsamp_in_breath = int(srate * 60 / rr)
    m = int(len(data) / nsamp_in_breath)  # m segments exist
    # std
    svv_stds = []
    for i in range(m - 1):  # 50% overlapping
        this_breath_stds = []
        for j in range(len(stds)):
            if i * nsamp_in_breath <= stds[j]['dt'] * srate < (
                    i + 2) * nsamp_in_breath:
                this_breath_stds.append(stds[j]['val'])
        # NOTE(review): np.max/np.min raise on an empty list — confirm every
        # breath window is guaranteed to contain at least one accepted beat
        svmax = np.max(this_breath_stds)
        svmin = np.min(this_breath_stds)
        svv_stde = 2 * (svmax - svmin) * 100 / (svmax + svmin)  # estimate
        if svv_stde > 40 or svv_stde < 0:
            continue
        svv_stds.append(svv_stde)
    svv_stde = np.median(svv_stds)
    if svv_stde < 0:
        svv_stde = 0
    # smooth against the previous estimate persisted in cfg
    svv_std = cfg['svv_std']
    if svv_std == 0 or svv_std is None:
        svv_std = svv_stde
    err = abs(svv_stde - svv_std)
    if err < 5:
        svv_std = svv_stde
    elif err < 25:
        svv_std = (svv_std + svv_stde) / 2
    else:
        pass  # dont update
    cfg['svv_std'] = svv_std
    # lz
    svv_lzs = []
    for i in range(m - 1):  # 50% overlapping
        this_breath_lzs = []
        for j in range(len(lzs)):
            if i * nsamp_in_breath <= lzs[j]['dt'] * srate < (
                    i + 2) * nsamp_in_breath:
                this_breath_lzs.append(lzs[j]['val'])
        svmax = np.max(this_breath_lzs)
        svmin = np.min(this_breath_lzs)
        svv_lze = 2 * (svmax - svmin) * 100 / (svmax + svmin)  # estimate
        if svv_lze > 40 or svv_lze < 0:
            continue
        svv_lzs.append(svv_lze)
    svv_lze = np.median(svv_lzs)
    if svv_lze < 0:
        svv_lze = 0
    svv_lz = cfg['svv_lz']
    if svv_lz == 0 or svv_lz is None:
        svv_lz = svv_lze
    err = abs(svv_lze - svv_lz)
    if err < 5:
        svv_lz = svv_lze
    elif err < 25:
        svv_lz = (svv_lz + svv_lze) / 2
    else:
        pass  # dont update
    cfg['svv_lz'] = svv_lz
    return [
        stds,
        [{
            'dt': cfg['interval'],
            'val': svv_std
        }],
        lzs,
        [{
            'dt': cfg['interval'],
            'val': svv_lz
        }]
    ]
def run(inp, opt, cfg):
    """Wavelet-based ECG delineation: annotate P, QRS, and T waves.

    Pipeline: (1) wavelet-denoise the ECG; (2) build a QRS-augmented signal
    via CWT plus per-level detail thresholding; (3) parse zero/non-zero runs
    into QRS candidates (abnormal ones go to vpclist); (4) locate Q/R/S
    extrema per complex; (5) search each S-to-next-Q interval for T and P
    waves via low-frequency CWT, with duration / symmetry / QT / PQ checks.

    Returns a single track of {'dt','val'} annotations, where val is a label
    such as 'R', 'q', '(T', 'P)', or 'A' for abnormal beats.
    """
    data = arr.interp_undefined(inp['ecg']['vals'])
    srate = inp['ecg']['srate']
    min_hr = 40  # min bpm
    max_hr = 200  # max bpm
    min_qrs = 0.04  # min qslist duration
    max_qrs = 0.2  # max qslist duration
    min_umv = 0.2  # min UmV of R,S peaks
    min_pq = 0.07  # min PQ duration
    max_pq = 0.20  # max PQ duration
    min_qt = 0.21  # min QT duration
    max_qt = 0.48  # max QT duration
    pfreq = 9.0  # cwt Hz for pidx wave
    tfreq = 2.5  # cwt Hz for tidx wave
    min_sq = (60.0 / max_hr) - max_qrs  # from s to next q
    if min_sq * srate <= 0:
        min_sq = 0.1
        max_hr = int(60.0 / (max_qrs + min_sq))
    # denoised ecg
    depth = int(math.ceil(np.log2(srate / 0.8))) - 1
    ad = pywt.wavedec(data, 'db2', level=depth)
    ad[0].fill(0)  # low frequency approx -> 0
    ecg_denoised = pywt.waverec(ad, 'db2')
    # interpolation filter (custom wavelet; orthfilt is a project helper)
    inter1 = pywt.Wavelet('inter1', filter_bank=orthfilt([0.25, 0.5, 0.25]))
    # qrs augmented ecg
    sig = cwt(data, srate, 'gaus1', 13)  # 13 Hz gaus convolution
    depth = int(math.ceil(np.log2(srate / 23))) - 2
    ad = pywt.wavedec(sig, inter1, level=depth)
    for level in range(depth):  # remove [0-30Hz]
        wsize = int(2 * srate / (2**(level + 1)))  # 2 sec window
        denoise(ad[depth - level], wsize)  # Remove less than 30 hz from all detail
    ad[0].fill(0)  # most lowest frequency approx -> 0
    ecg_qrs = pywt.waverec(ad, inter1)
    # start parsing
    qslist = []  # qrs list [startqrs, endqrs, startqrs, endqrs, ...]
    vpclist = []  # abnormal beat
    # save greater than 0 after min_sq
    prev_zero = 0  # length of the zero-run preceding the current sample
    ipos = 0
    while ipos < len(ecg_qrs) - int(max_qrs * srate):
        if ecg_qrs[ipos] == 0:
            prev_zero += 1
        else:
            if prev_zero > min_sq * srate:
                iend = ipos + int(max_qrs * srate)  # find the position of the end of the current qrs
                while iend > ipos:
                    if ecg_qrs[iend] != 0:
                        break
                    iend -= 1
                # Check if it is the minimum length or if there is a pause
                if ipos + min_qrs * srate > iend or np.any(
                        ecg_qrs[iend + 1:iend + 1 + int(min_sq * srate)]):
                    vpclist.append(ipos)  # push vpc
                else:
                    qslist.append(ipos)
                    qslist.append(iend)
                ipos = iend
            prev_zero = 0
        ipos += 1
    # qlist = [qslist[i] for i in range(0, len(qslist), 2)]
    complist = []  # one dict per accepted complex, keys 'q','r','s' (indices)
    for n in range(int(len(qslist) / 2)):
        start_qrs = qslist[n * 2]
        end_qrs = qslist[n * 2 + 1]
        qidx = -1
        ridx = arr.max_idx(ecg_denoised, start_qrs, end_qrs)
        if ecg_denoised[ridx] < min_umv:
            ridx = -1  # R peak too small to trust
        sidx = arr.min_idx(ecg_denoised, start_qrs, end_qrs)
        if -ecg_denoised[sidx] < min_umv:
            sidx = -1  # S trough too small to trust
        # ridxpeak > 0mV sidxpeak < 0mV
        if ridx != -1 and sidx != -1:
            if sidx < ridx:
                # check for sidx
                if ecg_denoised[ridx] > -ecg_denoised[sidx]:
                    # the trough precedes the peak: treat it as Q, re-find S after R
                    qidx = sidx
                    sidx = arr.min_idx(ecg_denoised, ridx, end_qrs + 1)
                    if sidx == ridx or sidx == end_qrs or abs(
                            ecg_denoised[end_qrs] - ecg_denoised[sidx]) < 0.05:
                        sidx = -1
            else:
                # check for qidx
                qidx = arr.min_idx(ecg_denoised, start_qrs, ridx + 1)
                if qidx == ridx or qidx == start_qrs or abs(
                        ecg_denoised[start_qrs] - ecg_denoised[qidx]) < 0.05:
                    qidx = -1
        elif sidx != -1:
            # only sidx --> Find small r if only sidx detected in rsidx large tidx lead
            ridx = arr.max_idx(ecg_denoised, start_qrs, sidx + 1)
            if ridx == sidx or ridx == start_qrs or abs(
                    ecg_denoised[start_qrs] - ecg_denoised[ridx]) < 0.05:
                ridx = -1
        elif ridx != -1:
            # only ridx --> Find small q,s
            qidx = arr.min_idx(ecg_denoised, start_qrs, ridx + 1)
            if qidx == ridx or qidx == start_qrs or abs(
                    ecg_denoised[start_qrs] - ecg_denoised[qidx]) < 0.05:
                qidx = -1
            sidx = arr.min_idx(ecg_denoised, ridx, end_qrs + 1)
            if sidx == ridx or sidx == end_qrs or abs(
                    ecg_denoised[end_qrs] - ecg_denoised[sidx]) < 0.05:
                sidx = -1
        else:
            # neither a usable R nor S: classify as abnormal
            vpclist.append(start_qrs)
            continue
        o = {'q': qslist[n * 2], 's': qslist[n * 2 + 1]}  # always exists
        if qidx != -1:
            o['q'] = qidx
        if ridx != -1:
            o['r'] = ridx
        if sidx != -1:
            o['s'] = sidx
        complist.append(o)
    # for each QRS --> find tidx and pidx wave
    for n in range(len(complist) - 1):
        pree = complist[n]['q']
        nows = complist[n]['s']
        nowe = complist[n + 1]['q']
        size = nowe - nows  # s-q interval
        size = int(min(size, srate * max_qt - (nows - pree)))
        rr = (nowe - pree) / srate
        if (60.0 / rr < min_hr) or (60.0 / rr > max_hr - 20):
            continue  # implausible beat-to-beat rate; skip T/P search
        # all are in this
        block = [data[nows + i] for i in range(size)]
        ecg_qrs = cwt(block, srate, 'gaus1', tfreq)  # low-freq CWT for T wave
        tidx1 = arr.min_idx(ecg_qrs) + nows
        tidx2 = arr.max_idx(ecg_qrs) + nows
        if tidx1 > tidx2:
            tidx1, tidx2 = tidx2, tidx1
        # additional constraints on [tidx1 tidx tidx2] duration, symmetry, QT interval
        ist = False
        # the two CWT extrema must have opposite signs (a genuine wave lobe pair)
        if ecg_qrs[tidx1 - nows] < 0 < ecg_qrs[tidx2 - nows]:
            ist = True
        elif ecg_qrs[tidx1 - nows] > 0 > ecg_qrs[tidx2 - nows]:
            ist = True
        if ist:
            if (tidx2 - tidx1) >= 0.09 * srate:  # and (tidx2-tidx1)<=0.24 * srate) #check for tidx wave duration
                ist = True
                # QT interval = .4 * sqrt(RR)
                if min_qt * srate <= (tidx2 - pree) <= max_qt * srate:
                    ist = True
                else:
                    ist = False
            else:
                ist = False
        if ist:
            tidx = 0
            # zero crossing between the two CWT lobes locates the wave apex
            sign = (ecg_qrs[tidx1 - nows] >= 0)
            for i in range(tidx1 - nows, tidx2 - nows):
                if sign == (ecg_qrs[i] >= 0):
                    continue
                tidx = i + nows
                break
            # check for tidx wave symetry
            if tidx2 - tidx < tidx - tidx1:
                ratio = (tidx2 - tidx) / (tidx - tidx1)
            else:
                ratio = (tidx - tidx1) / (tidx2 - tidx)
            if ratio < 0.4:
                ist = False
        if ist:
            tmin = arr.min_idx(data, tidx1, tidx2)
            tmax = arr.max_idx(data, tidx1, tidx2)
            # find the most nearest values from 0-cross, tmin, tmax
            tidx = arr.find_nearest((tidx, tmin, tmax), (tidx2 + tidx1) / 2)
            complist[n]['(t'] = tidx1
            complist[n]['t'] = tidx
            complist[n]['t)'] = tidx2
        # search for P-WAVE
        size = nowe - nows  # s-q interval
        size = int(min(size, srate * max_pq))
        if ist:
            if tidx2 > nowe - size - int(0.04 * srate):  # isp wnd far from Twave at least on 0.04 sec
                size -= tidx2 - (nowe - size - int(0.04 * srate))
        nskip = (nowe - nows) - size
        if size <= 0.03 * srate:
            continue  # impresize QRS begin detection
        block = [data[nows + nskip + i] for i in range(size)]
        ecg_qrs = cwt(block, srate, 'gaus1', pfreq)  # higher-freq CWT for P wave
        p1 = arr.min_idx(ecg_qrs) + nows + nskip
        p2 = arr.max_idx(ecg_qrs) + nows + nskip
        if p1 > p2:
            p1, p2 = p2, p1
        # additional constraints on [p1 pidx p2] duration, symmetry, PQ interval
        isp = False
        if ecg_qrs[p1 - nows - nskip] < 0 < ecg_qrs[p2 - nows - nskip]:
            isp = True
        elif ecg_qrs[p1 - nows - nskip] > 0 > ecg_qrs[p2 - nows - nskip]:
            isp = True
        if isp:
            if 0.03 * srate <= (p2 - p1) <= 0.15 * srate:  # check for pidx wave duration 9Hz0.03 5Hz0.05
                isp = (min_pq * srate <= (nowe - p1) <= max_pq * srate)  # PQ interval = [0.07 - 0.12,0.20]
            else:
                isp = False
        if not isp:
            continue
        pidx = 0
        # zero crossing
        sign = (ecg_qrs[p1 - nows - nskip] >= 0)
        for i in range(p1 - nows - nskip, p2 - nows - nskip):
            if sign == (ecg_qrs[i] >= 0):
                continue
            pidx = i + nows + nskip
            break
        # check for pidx wave symetry
        if p2 - pidx < pidx - p1:
            ratio = (p2 - pidx) / (pidx - p1)
        else:
            ratio = (pidx - p1) / (p2 - pidx)
        if ratio < 0.4:
            isp = False  # not a p wave
        if isp:
            complist[n]['(p'] = p1
            complist[n]['p'] = pidx
            complist[n]['p)'] = p2
    # add annotation
    ret_ann = []
    for n in range(len(complist)):
        for k, v in complist[n].items():
            # upper-case labels mark large-amplitude (> 0.5) Q/R/S deflections
            if k == 'q' and abs(ecg_denoised[v]) > 0.5:
                k = 'Q'
            elif k == 'r' and abs(ecg_denoised[v]) > 0.5:
                k = 'R'
            elif k == 's' and abs(ecg_denoised[v]) > 0.5:
                k = 'S'
            elif k == '(t':
                k = '(T'
            elif k == 't':
                k = 'T'
            elif k == 't)':
                k = 'T)'
            elif k == '(p':
                k = '(P'
            elif k == 'p':
                k = 'P'
            elif k == 'p)':
                k = 'P)'
            ret_ann.append({"dt": v / srate, "val": k})
    for n in range(len(vpclist)):
        ret_ann.append({"dt": vpclist[n] / srate, "val": 'A'})
    return [ret_ann]