def monhe2(raw, srate, show=0, show2=0, show3=0, filtered=None):
    """Shannon energy / Hilbert transform ECG R-peak detector.

    Works well, but zero crossings closer than 100 ms to one another should still be discarded.
    """
    # 0 - Remove EMG, powerline and baseline shift
    if filtered is None:
        filtered = prefilt(raw, srate, show)
    # 0.5 - Choose sign of peaks (batch)
    up = definepeak(filtered, srate)
    # 1 - filter block (chebyshev 4th order 6-18 Hz)
    nyqfreq = srate / 2.
    filtband = [6 / nyqfreq, 18 / nyqfreq]
    num, den = ss.cheby2(4, 40, filtband, btype='bandpass')
    filtafter = ss.filtfilt(num, den, raw)
    # filtafter = ss.filtfilt(num, den, rawend)
    if show:
        fig = pl.figure()
        mngr = pl.get_current_fig_manager()
        mngr.window.setGeometry(950, 50, 1000, 800)
        ax = fig.add_subplot(411)
        ax.plot(raw)
        ax.set_title('raw signal')
        ax = fig.add_subplot(412)
        ax.plot(filtered)
        ax.set_title('filtered from raw')
        ax = fig.add_subplot(413)
        # ax.plot(filtafter2)
        ax.plot(filtafter, 'r')
        ax.set_title('filtered for algorithm after preprocessing')
        ax = fig.add_subplot(414)
        ax.plot(filtafter)
        ax.set_title('filtered for algorithm')
        pl.show()
        raw_input('cleaning')
    # 2 - Differentiate the signal and normalize by the maximum derivative in the signal
    diffsig = np.diff(filtafter)
    diffmax = np.max(np.abs(diffsig))
    dsignal = diffsig / diffmax
    # 3 - Get Shannon energy envelope
    diffsquare = dsignal ** 2
    logdiff = np.log(diffsquare)
    shannon = -1 * diffsquare * logdiff
    # 4 - Two-sided zero-phase filtering
    windowlen = int(0.15 * srate)  # safe length of a QRS pulse
    rectangular = ss.boxcar(windowlen)
    smoothfirst = ss.convolve(shannon, rectangular, mode='same')
    revfirst = smoothfirst[::-1]
    smoothsec = ss.convolve(revfirst, rectangular, mode='same')
    smoothfinal = smoothsec[::-1]
    # 5 - Hilbert transform applied to the smoothed Shannon energy envelope
    hilbbuff = ss.hilbert(smoothfinal)
    hilbsign = np.imag(hilbbuff)
    # 6 - Get moving average of the Hilbert transform so as to subtract it afterwards
    n = int(2.5 * srate)
    movav = moving_average(hilbsign, n)
    analyser = hilbsign - movav
    # 7 - Get zero crossings (from negative to positive) of the 'analyser' signal
    zero_crossings = np.where(np.diff(np.sign(analyser)))[0]
    zero_crossings = zero_crossings[zero_crossings > 0.05 * srate]  # discard boundary effect that might appear at start
    crossers = analyser[zero_crossings]
    beats = zero_crossings[crossers < 0]
    crossdiffs = np.diff(beats)
    dangerous = crossdiffs < 0.15 * srate  # to avoid spurious repetitions
    dangerous = np.nonzero(dangerous)[0]
    if len(dangerous):
        print 'DANGER', beats[dangerous]
        beats = np.delete(beats, dangerous)
    # 7.1 -------- EXTRA ANTI-FALSE-POSITIVES --------
    store_size = 5
    index_store = 0
    anti_fp = 0.243
    anti_massive = 4
    anti_badset = 3
    reset_count = 0
    cross_storer = np.zeros(store_size)
    crossderivs = np.diff(analyser)
    beats = sorted(list(beats))
    iterator = beats[:]
    evilbeats = []
    for b in iterator:
        cross_med = np.median(cross_storer)
        # print 'info', b, crossderivs[b], anti_fp*cross_med, cross_med, anti_massive*cross_med
        # massive slopes could be eliminated here too (decided not to because it helps in defining Agrafioti windows)
        if crossderivs[b] > anti_fp * cross_med:
            reset_count = 0
            if crossderivs[b] < anti_massive * cross_med or cross_med < 1e-10:
                # print 'store'
                cross_storer[index_store] = crossderivs[b]
                index_store += 1
                if index_store == store_size:
                    index_store = 0
        else:
            reset_count += 1
            print '\tEVIL SLOPE', b, crossderivs[b], anti_fp * cross_med, reset_count
            evilbeats.append(b)
            beats.remove(b)
            if reset_count >= anti_badset:
                print '\tRESET'
                reset_count = 0
                cross_storer = np.zeros(store_size)
    beats = np.array(beats, dtype=int)
    evilbeats = np.array(evilbeats)
    # 8 ----------------------------------------- Find the R-peak exactly -----------------------------------------
    search = int(0.15 * srate)
    adjacency = int(0.03 * srate)
    diff_nr = int(0.01 * srate)
    if diff_nr <= 1:
        diff_nr = 2
    rawbeats = []
    for b in xrange(len(beats)):
        if beats[b] - search < 0:
            rawwindow = filtered[0:beats[b] + search]
            add = 0
        elif beats[b] + search >= len(filtered):
            rawwindow = filtered[beats[b] - search:len(filtered)]
            add = beats[b] - search
        else:
            rawwindow = filtered[beats[b] - search:beats[b] + search]
            add = beats[b] - search
        # ----- get peaks -----
        w_peaks = peakd.sgndiff(Signal=rawwindow)['Peak']
        w_negpeaks = peakd.sgndiff(Signal=rawwindow, a=1)['Peak']
        zerdiffs = np.where(np.diff(rawwindow) == 0)[0]
        w_peaks = np.concatenate((w_peaks, zerdiffs))
        w_negpeaks = np.concatenate((w_negpeaks, zerdiffs))
        if up:
            pospeaks = sorted(zip(rawwindow[w_peaks], w_peaks), reverse=True)
        else:
            pospeaks = sorted(zip(rawwindow[w_negpeaks], w_negpeaks))
        # print '\n peaks', pospeaks
        try:
            twopeaks = [pospeaks[0]]
        except IndexError:
            twopeaks = []
        # ----------- getting peaks -----------
        for i in xrange(len(pospeaks) - 1):
            if abs(pospeaks[0][1] - pospeaks[i + 1][1]) > adjacency:
                twopeaks.append(pospeaks[i + 1])
                break
        poslen = len(twopeaks)
        # print twopeaks, poslen, diff_nr, twopeaks[1][1]-diff_nr+1, twopeaks[1][1]+diff_nr-1
        if poslen == 2:
            # --- get maximum slope for max peak ---
            if twopeaks[0][1] < diff_nr:
                diff_f = np.diff(rawwindow[0:twopeaks[0][1] + diff_nr])
            elif twopeaks[0][1] + diff_nr >= len(rawwindow):
                diff_f = np.diff(rawwindow[twopeaks[0][1] - diff_nr:len(rawwindow)])
            else:
                diff_f = np.diff(rawwindow[twopeaks[0][1] - diff_nr:twopeaks[0][1] + diff_nr])
            max_f = np.max(np.abs(diff_f))
            # --- get maximum slope for second peak ---
            if twopeaks[1][1] < diff_nr:
                diff_s = np.diff(rawwindow[0:twopeaks[1][1] + diff_nr - 1])
            elif twopeaks[1][1] + diff_nr >= len(rawwindow):
                diff_s = np.diff(rawwindow[twopeaks[1][1] - diff_nr + 1:len(rawwindow)])
            else:
                diff_s = np.diff(rawwindow[twopeaks[1][1] - diff_nr + 1:twopeaks[1][1] + diff_nr - 1])
            # print diff_s, np.abs(diff_s)
            max_s = np.max(np.abs(diff_s))
            if show2:
                print 'diffs, main', diff_f, max_f, '\nsec', diff_s, max_s
            if max_f > max_s:
                # print '\tbigup'
                assignup = [twopeaks[0][0], twopeaks[0][1]]
            else:
                # print '\tsmallup'
                assignup = [twopeaks[1][0], twopeaks[1][1]]
            rawbeats.append(assignup[1] + add)
        elif poslen == 1:
            rawbeats.append(twopeaks[0][1] + add)
        else:
            rawbeats.append(beats[b])
        if show2:
            fig = pl.figure()
            mngr = pl.get_current_fig_manager()
            mngr.window.setGeometry(950, 50, 1000, 800)
            ax = fig.add_subplot(111)
            ax.plot(rawwindow, 'b')
            for i in xrange(poslen):
                ax.plot(twopeaks[i][1], twopeaks[i][0], 'bo', markersize=10)
            ax.plot(rawbeats[b] - add, rawwindow[rawbeats[b] - add], 'yo', markersize=7)
            ax.grid('on')
            ax.axis('tight')
            pl.show()
            raw_input('---')
            pl.close()
    # 8 ----------------------------------------- END OF POINT 8 -----------------------------------------
    if show3:
        fig = pl.figure()
        mngr = pl.get_current_fig_manager()
        mngr.window.setGeometry(950, 50, 1000, 800)
        ax = fig.add_subplot(412)
        ax.plot(filtered)
        if len(rawbeats):
            ax.plot(rawbeats, filtered[rawbeats], 'go')
        ax.set_title('end signal')
        ax = fig.add_subplot(411)
        ax.plot(raw)
        if beats.any():
            ax.plot(beats, raw[beats], 'go')
        ax.set_title('filtered from raw')
        ax = fig.add_subplot(413)
        ax.plot(smoothfinal)
        ax.set_title('smooth shannon')
        ax = fig.add_subplot(414)
        ax.plot(analyser)
        if beats.any():
            ax.plot(beats, analyser[beats], 'go')
        if evilbeats.any():
            ax.plot(evilbeats, analyser[evilbeats], 'ro')
        ax.plot(hilbsign, 'r')
        ax.set_title('analysed signal')
        pl.show()
        raw_input('shannon')
    hrate = np.diff(rawbeats)
    hrate = 60 * srate / hrate
    pl.close('all')
    # kwrvals
    kwrvals = {'Signal': filtered, 'R': sorted(list(frozenset(rawbeats)))}
    return kwrvals, hrate
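
# Hedged usage sketch (illustrative, not part of the original module): 'raw_ecg' is
# assumed to be a 1-D numpy array with a single-lead ECG recording and 'srate' its
# sampling rate in Hz; the helper name below is hypothetical.
def _example_monhe2(raw_ecg, srate):
    kwrvals, hrate = monhe2(raw_ecg, srate)        # {'Signal', 'R'} plus heart-rate series
    rpeaks = np.array(kwrvals['R'])                # R-peak sample indexes in the prefiltered signal
    rr_sec = np.diff(rpeaks) / float(srate)        # R-R intervals in seconds
    return rpeaks, rr_sec, hrate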

def definepeak(signal, srate, filter=False, show=0):
    if filter:
        print 'bla'
        signal = prefilt(signal, srate, 0)
    bin_nr = 50
    meankill = 1 / 3.5
    upmargin = 0.92
    w_peaks = peakd.sgndiff(Signal=signal)['Peak']
    w_negpeaks = peakd.sgndiff(Signal=signal, a=1)['Peak']
    zerdiffs = np.where(np.diff(signal) == 0)[0]
    w_peaks = np.concatenate((w_peaks, zerdiffs))
    w_negpeaks = np.concatenate((w_negpeaks, zerdiffs))
    poscounts, posedges = np.histogram(signal[w_peaks], bin_nr)
    negcounts, negedges = np.histogram(signal[w_negpeaks], bin_nr)
    poscond = (poscounts > 0) + (poscounts < max(poscounts))
    negcond = (negcounts > 0) + (negcounts < max(negcounts))
    derppos = poscounts[poscond]
    derpneg = negcounts[negcond]
    poscond = np.concatenate(([False], poscond))  # shift so that selected bins map to their upper edges
    derp_edgepos = posedges[poscond]
    # pad the mask to match the bin_nr + 1 edges, keeping the lower (left) edge of each selected bin
    derp_edgeneg = negedges[np.concatenate((negcond, [False]))]
    meanpos = int(meankill * np.mean(derppos))
    meanneg = int(meankill * np.mean(derpneg))
    if meanpos < 2:
        meanpos = 2
    if meanneg < 2:
        meanneg = 2
    killpos = derppos >= meanpos
    killneg = derpneg >= meanneg
    # derppos = derppos[killpos]
    # derpneg = derpneg[killneg]
    derp_edgepos = derp_edgepos[killpos]
    derp_edgeneg = derp_edgeneg[killneg]
    if show:
        print 'meanup', meanpos
        print 'meandown', meanneg
        print derppos, '\n', derp_edgepos
        print derpneg, '\n', derp_edgeneg
    negmax = np.min(derp_edgeneg)
    posmax = np.max(derp_edgepos)
    if posmax >= upmargin * abs(negmax):
        print 'UP', posmax, upmargin * negmax, meanpos, meanneg
        up = True
    else:
        print 'DOWN', posmax, upmargin * negmax, meanpos, meanneg
        up = False
    if show:
        fig = pl.figure()
        mngr = pl.get_current_fig_manager()
        # mngr.window.showMaximized()
        mngr.window.setGeometry(950, 50, 1000, 800)
        ax = fig.add_subplot(311)
        ax.plot(signal)
        ax.plot(w_peaks, signal[w_peaks], 'go')
        ax.plot(w_negpeaks, signal[w_negpeaks], 'ro')
        ax = fig.add_subplot(312)
        ax.bar(posedges[:-1], poscounts, posedges[1] - posedges[0])
        ax = fig.add_subplot(313)
        ax.bar(negedges[:-1], negcounts, negedges[1] - negedges[0])
        raw_input('histograms')
        pl.close()
    return up
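
# Hedged usage sketch (illustrative): checks QRS polarity on an already prefiltered ECG
# segment; 'filtered_ecg' and 'srate' are assumed inputs and the helper name is hypothetical.
# True means the dominant QRS deflection is upward, False that it is predominantly negative.
def _example_definepeak(filtered_ecg, srate):
    up = definepeak(filtered_ecg, srate, filter=False, show=0)
    return up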

def hamilton(hand, Signal=None, SamplingRate=1000., Filter=True, init=(), Show=0, show2=0, show3=0, TH=None):
    """ Algorithm to detect ECG beat indexes.

    Kwargs:
        hand (bool): if True, reject peaks whose amplitude exceeds artlim times the median of the QRS buffer (artifact guard).
        Signal (array): input filtered ECG signal.
        SamplingRate (float): Sampling frequency (Hz).
        Filter (bool): if True, apply the median-FIR / EMG preprocessing to the raw signal.
        TH (float): detection threshold fraction (defaults to 0.45).

    Kwrvals:
        Signal (array): output filtered signal if Filter is defined.
        R (array): R peak indexes (or instants in seconds if sampling rate is defined).
        init (dict): dict with initial values of some variables
            npeaks (int): number of detected heart beats.
            indexqrs (int): most recent QRS complex index.
            indexnoise (int): most recent noise peak index.
            indexrr (int): most recent R-to-R interval index.
            qrspeakbuffer (array): 8 most recent QRS complexes.
            noisepeakbuffer (array): 8 most recent noise peaks.
            rrinterval (array): 8 most recent R-to-R intervals.
            DT (float): QRS complex detection threshold.
            offset (int): signal start in samples.

    Configurable fields:{"name": "models.hamilton", "config": {"SamplingRate": "1000."}, "inputs": ["Signal", "Filter", "init"], "outputs": ["Signal", "R", "init", "npeaks", "indexqrs", "indexnoise", "indexrr", "qrspeakbuffer", "noisepeakbuffer", "rrinterval", "DT", "offset"]}

    See Also:
        filt

    Notes:

    Example:

    References:
        .. [1] P.S. Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited,
               http://www.eplimited.com/osea13.pdf
    """
    # Check
    if Signal is None:
        raise TypeError("An input signal is needed.")
    # 0.1 - Choose sign of peaks (batch)
    # up = definepeak(Signal, SamplingRate)
    up = 1
    if Filter:
        # 0.15 - Remove EMG, powerline and baseline shift
        emgsamples = int(0.028 * SamplingRate)  # ~28 ms moving-average window
        movemg = np.ones(emgsamples) / emgsamples
        rawbase = prepro.medFIR(Signal, SamplingRate)['Signal']
        rawend = ss.convolve(rawbase, movemg, mode='same')
        RawSignal = np.copy(rawend)
    else:
        RawSignal = np.copy(Signal)
    # 0.2 - Get transformed signal
    UpperCutoff = 16.
    LowerCutoff = 8.
    Order = 4
    Signal = flt.zpdfr(Signal=Signal, SamplingRate=SamplingRate, UpperCutoff=UpperCutoff,
                       LowerCutoff=LowerCutoff, Order=Order)['Signal']
    Signal = abs(np.diff(Signal, 1) * SamplingRate)
    # Signal = flt.smooth(Signal=Signal, Window={'Length': 0.08*SamplingRate, 'Type': 'hamming',
    #                                            'Parameters': None})['Signal']
    Signal = moving_average(Signal, int(0.15 * SamplingRate), cut=True)
    # 0.3 - Initialize Buffers
    if not init:
        init_ecg = 8
        if len(Signal) / (1. * SamplingRate) < init_ecg:
            init_ecg = int(len(Signal) / (1. * SamplingRate))
        qrspeakbuffer = np.zeros(init_ecg)
        noisepeakbuffer = np.zeros(init_ecg)
        print init_ecg
        rrinterval = SamplingRate * np.ones(init_ecg)
        a, b = 0, int(SamplingRate)
        all_peaks = np.array(peakd.sgndiff(Signal)['Peak'])
        nulldiffs = np.where(np.diff(Signal) == 0)[0]
        all_peaks = np.concatenate((all_peaks, nulldiffs))
        all_peaks = np.array(sorted(frozenset(all_peaks)))
        for i in range(0, init_ecg):
            peaks = peakd.sgndiff(Signal=Signal[a:b])['Peak']
            nulldiffs = np.where(np.diff(Signal[a:b]) == 0)[0]
            peaks = np.concatenate((peaks, nulldiffs))
            peaks = np.array(sorted(frozenset(peaks)))
            try:
                qrspeakbuffer[i] = max(Signal[a:b][peaks])
            except Exception as e:
                print e
            a += int(SamplingRate)
            b += int(SamplingRate)
        # Set Thresholds
        # Detection_Threshold = Average_Noise_Peak + TH*(Average_QRS_Peak-Average_Noise_Peak)
        ANP = np.median(noisepeakbuffer)
        AQRSP = np.median(qrspeakbuffer)
        if TH is None:
            TH = 0.45  # 0.45 for CVP, 0.475 for ECGIDDB, 0.35 for PTB; usable range 0.3125 - 0.475
        DT = ANP + TH * (AQRSP - ANP)
        init = {}
        init['qrspeakbuffer'] = qrspeakbuffer
        init['noisepeakbuffer'] = noisepeakbuffer
        init['rrinterval'] = rrinterval
        init['indexqrs'] = 0
        init['indexnoise'] = 0
        init['indexrr'] = 0
        init['DT'] = DT
        init['npeaks'] = 0
    beats = []
    twaves = np.array([])
    # ---> Heuristic Thresholds
    lim = int(np.ceil(0.2 * SamplingRate))
    elapselim = int(np.ceil(0.36 * SamplingRate))
    slopelim = 0.7
    artlim = 2.75
    diff_nr = int(np.ceil(0.01 * SamplingRate))
    if diff_nr <= 1:
        diff_nr = 2
    # ---> Peak Detection
    for f in all_peaks:
        # 1 - Checking if f-peak is larger than any peak following or preceding it by less than 200 ms
        peak_cond = np.array((all_peaks > f - lim) * (all_peaks < f + lim) * (all_peaks != f))
        peaks_within = all_peaks[peak_cond]
        if peaks_within.any() and max(Signal[peaks_within]) > Signal[f]:
            # # ---> Update noise buffer
            # init['noisepeakbuffer'][init['indexnoise']] = Signal[f]
            # init['indexnoise'] += 1
            # # print 'NOISE'
            # if init['indexnoise'] == init_ecg:
            #     init['indexnoise'] = 0
            # # print 'TINY'
            continue
        # print 'DT', init['DT']
        if Signal[f] > init['DT']:
            # ---------------------FRANCIS---------------------
            # 2 - look for both positive and negative slopes in raw signal
            # if f < diff_nr:
            #     diff_now = np.diff(RawSignal[0:f+diff_nr])
            # elif f + diff_nr >= len(RawSignal):
            #     diff_now = np.diff(RawSignal[f-diff_nr:len(Signal)])
            # else:
            #     diff_now = np.diff(RawSignal[f-diff_nr:f+diff_nr])
            # diff_signer = diff_now[diff_now > 0]
            # # print 'diff signs:', diff_signer, '\n', diff_now
            # if len(diff_signer) == 0 or len(diff_signer) == len(diff_now):
            #     print 'BASELINE SHIFT'
            #     continue
            # RR INTERVALS
            if init['npeaks'] > 0:
                # 3 - in here we check point 3 of the Hamilton paper (checking whether T-wave or not)
                prev_rpeak = beats[init['npeaks'] - 1]
                elapsed = f - prev_rpeak
                # print 'elapsed', elapsed
                # if the previous peak was within a 360 ms interval
                if elapsed < elapselim:
                    # check current and previous slopes
                    # print '---', f, prev_rpeak, diff_nr, '---'
                    if f < diff_nr:
                        diff_now = np.diff(Signal[0:f + diff_nr])
                    elif f + diff_nr >= len(Signal):
                        diff_now = np.diff(Signal[f - diff_nr:len(Signal)])
                    else:
                        diff_now = np.diff(Signal[f - diff_nr:f + diff_nr])
                    if prev_rpeak < diff_nr:
                        diff_prev = np.diff(Signal[0:prev_rpeak + diff_nr])
                    elif prev_rpeak + diff_nr >= len(Signal):
                        diff_prev = np.diff(Signal[prev_rpeak - diff_nr:len(Signal)])
                    else:
                        diff_prev = np.diff(Signal[prev_rpeak - diff_nr:prev_rpeak + diff_nr])
                    slope_now = np.max(np.abs(diff_now))
                    slope_prev = np.max(np.abs(diff_prev))
                    # print 'diff_now', diff_now
                    # print 'diff_prev', diff_prev
                    # print '\tf -->', f, 'slopes: now -', slope_now, 'prev -', slope_prev, 'lim -', slopelim*slope_prev
                    if slope_now < slopelim * slope_prev:
                        # print 'T-WAVE'
                        twaves = np.concatenate((twaves, [f]))
                        continue
                if not hand or Signal[f] < artlim * np.median(qrspeakbuffer):
                    # print 'GOT IT GOOD', f
                    beats += [int(f)]
                else:
                    continue
                # ---> Update R-R interval
                init['rrinterval'][init['indexrr']] = beats[init['npeaks']] - beats[init['npeaks'] - 1]
                init['indexrr'] += 1
                if init['indexrr'] == init_ecg:
                    init['indexrr'] = 0
            elif not hand or Signal[f] < artlim * np.median(qrspeakbuffer):
                # print 'GOT IT GOOD', f
                beats += [int(f)]
            else:
                continue
            # ---> Update QRS buffer
            init['npeaks'] += 1
            qrspeakbuffer[init['indexqrs']] = Signal[f]
            init['indexqrs'] += 1
            if init['indexqrs'] == init_ecg:
                init['indexqrs'] = 0
        if Signal[f] <= init['DT']:
            RRM = np.median(init['rrinterval'])
            if len(beats) >= 2:
                elapsed = f - beats[init['npeaks'] - 1]
                if elapsed >= 1.5 * RRM and elapsed > elapselim:
                    prev_rpeak = beats[init['npeaks'] - 1]
                    rrpeak_cond = np.array((all_peaks > prev_rpeak + lim) * (all_peaks < f + 1) * (all_peaks != twaves))
                    peaks_rr = all_peaks[rrpeak_cond]
                    contender = peaks_rr[np.argmax(Signal[peaks_rr])]
                    if Signal[contender] > 0.5 * init['DT']:
                        # print 'GOT IT RR', contender, f
                        beats += [int(contender)]
                        # ---> Update R-R interval
                        if init['npeaks'] > 0:
                            init['rrinterval'][init['indexrr']] = beats[init['npeaks']] - beats[init['npeaks'] - 1]
                            init['indexrr'] += 1
                            if init['indexrr'] == init_ecg:
                                init['indexrr'] = 0
                        # ---> Update QRS buffer
                        init['npeaks'] += 1
                        qrspeakbuffer[init['indexqrs']] = Signal[contender]
                        init['indexqrs'] += 1
                        if init['indexqrs'] == init_ecg:
                            init['indexqrs'] = 0
                    else:
                        # ---> Update noise buffer
                        init['noisepeakbuffer'][init['indexnoise']] = Signal[f]
                        init['indexnoise'] += 1
                        # print 'NOISE'
                        if init['indexnoise'] == init_ecg:
                            init['indexnoise'] = 0
                else:
                    # ---> Update noise buffer
                    init['noisepeakbuffer'][init['indexnoise']] = Signal[f]
                    init['indexnoise'] += 1
                    # print 'NOISE'
                    if init['indexnoise'] == init_ecg:
                        init['indexnoise'] = 0
            else:
                # ---> Update noise buffer
                init['noisepeakbuffer'][init['indexnoise']] = Signal[f]
                init['indexnoise'] += 1
                # print 'NOISE'
                if init['indexnoise'] == init_ecg:
                    init['indexnoise'] = 0
        if Show:
            fig = pl.figure()
            mngr = pl.get_current_fig_manager()
            mngr.window.setGeometry(950, 50, 1000, 800)
            ax = fig.add_subplot(211)
            ax.plot(Signal, 'b', label='Signal')
            ax.grid('on')
            ax.axis('tight')
            ax.plot(all_peaks, Signal[all_peaks], 'ko', ms=10, label='peaks')
            if np.any(np.array(beats)):
                ax.plot(np.array(beats), Signal[np.array(beats)], 'g^', ms=10, label='rpeak')
            range_aid = range(len(Signal))
            ax.plot(range_aid, init['DT'] * np.ones(len(range_aid)), 'r--', label='DT')
            ax.legend(('Processed Signal', 'all peaks', 'R-peaks', 'DT'), 'best', shadow=True)
            ax = fig.add_subplot(212)
            ax.plot(RawSignal, 'b', label='Signal')
            ax.grid('on')
            ax.axis('tight')
            ax.plot(all_peaks, RawSignal[all_peaks], 'ko', ms=10, label='peaks')
            if np.any(np.array(beats)):
                ax.plot(np.array(beats), RawSignal[np.array(beats)], 'g^', ms=10, label='rpeak')
            pl.show()
            if raw_input('_') == 'q':
                sys.exit()
            pl.close()
        # --> Update Detection Threshold
        ANP = np.median(init['noisepeakbuffer'])
        AQRSP = np.median(qrspeakbuffer)
        init['DT'] = ANP + TH * (AQRSP - ANP)
    if show3:
        fig = pl.figure()
        mngr = pl.get_current_fig_manager()
        mngr.window.setGeometry(950, 50, 1000, 800)
        ax = fig.add_subplot(111)
        ax.plot(Signal, 'b', label='Signal')
        ax.grid('on')
        ax.axis('tight')
        if np.any(np.array(beats)):
            ax.plot(np.array(beats), Signal[np.array(beats)], 'g^', ms=10, label='rpeak')
    # 8 - Find the R-peak exactly
    search = int(np.ceil(0.15 * SamplingRate))
    adjacency = int(np.ceil(0.03 * SamplingRate))
    diff_nr = int(np.ceil(0.01 * SamplingRate))
    if diff_nr <= 1:
        diff_nr = 2
    rawbeats = []
    for b in xrange(len(beats)):
        if beats[b] - search < 0:
            rawwindow = RawSignal[0:beats[b] + search]
            add = 0
        elif beats[b] + search >= len(RawSignal):
            rawwindow = RawSignal[beats[b] - search:len(RawSignal)]
            add = beats[b] - search
        else:
            rawwindow = RawSignal[beats[b] - search:beats[b] + search]
            add = beats[b] - search
        # ----- get peaks -----
        if up:
            w_peaks = peakd.sgndiff(Signal=rawwindow)['Peak']
        else:
            w_peaks = peakd.sgndiff(Signal=rawwindow, a=1)['Peak']
        zerdiffs = np.where(np.diff(rawwindow) == 0)[0]
        w_peaks = np.concatenate((w_peaks, zerdiffs))
        if up:
            pospeaks = sorted(zip(rawwindow[w_peaks], w_peaks), reverse=True)
        else:
            pospeaks = sorted(zip(rawwindow[w_peaks], w_peaks))
        try:
            twopeaks = [pospeaks[0]]
        except IndexError:
            twopeaks = []
        # ----------- getting peaks -----------
        for i in xrange(len(pospeaks) - 1):
            if abs(pospeaks[0][1] - pospeaks[i + 1][1]) > adjacency:
                twopeaks.append(pospeaks[i + 1])
                break
        poslen = len(twopeaks)
        # print twopeaks, poslen, diff_nr, twopeaks[1][1]-diff_nr+1, twopeaks[1][1]+diff_nr-1
        if poslen == 2:
            # --- get maximum slope for max peak ---
            if twopeaks[0][1] < diff_nr:
                diff_f = np.diff(rawwindow[0:twopeaks[0][1] + diff_nr])
            elif twopeaks[0][1] + diff_nr >= len(rawwindow):
                diff_f = np.diff(rawwindow[twopeaks[0][1] - diff_nr:len(rawwindow)])
            else:
                diff_f = np.diff(rawwindow[twopeaks[0][1] - diff_nr:twopeaks[0][1] + diff_nr])
            max_f = np.max(np.abs(diff_f))
            # --- get maximum slope for second peak ---
            if twopeaks[1][1] < diff_nr:
                diff_s = np.diff(rawwindow[0:twopeaks[1][1] + diff_nr - 1])
            elif twopeaks[1][1] + diff_nr >= len(rawwindow):
                diff_s = np.diff(rawwindow[twopeaks[1][1] - diff_nr + 1:len(rawwindow)])
            else:
                diff_s = np.diff(rawwindow[twopeaks[1][1] - diff_nr + 1:twopeaks[1][1] + diff_nr - 1])
            # print diff_s, np.abs(diff_s)
            max_s = np.max(np.abs(diff_s))
            if show2:
                print 'diffs, main', diff_f, max_f, '\nsec', diff_s, max_s
            if max_f > max_s:
                # print '\tbigup'
                assignup = [twopeaks[0][0], twopeaks[0][1]]
            else:
                # print '\tsmallup'
                assignup = [twopeaks[1][0], twopeaks[1][1]]
            rawbeats.append(assignup[1] + add)
        elif poslen == 1:
            rawbeats.append(twopeaks[0][1] + add)
        else:
            rawbeats.append(beats[b])
        if show2:
            fig = pl.figure()
            mngr = pl.get_current_fig_manager()
            mngr.window.setGeometry(950, 50, 1000, 800)
            ax = fig.add_subplot(111)
            ax.plot(rawwindow, 'b')
            for i in xrange(poslen):
                ax.plot(twopeaks[i][1], twopeaks[i][0], 'bo', markersize=10)
            ax.plot(rawbeats[b] - add, rawwindow[rawbeats[b] - add], 'yo', markersize=7)
            ax.grid('on')
            ax.axis('tight')
            pl.show()
            raw_input('---')
            pl.close()
    # kwrvals
    kwrvals = {}
    kwrvals['Signal'] = RawSignal
    kwrvals['init'] = init
    kwrvals['R'] = sorted(list(frozenset(rawbeats)))  # /SamplingRate if SamplingRate else beats
    return kwrvals
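
# Hedged usage sketch (illustrative, not part of the original module): 'raw_ecg' is
# assumed to be a 1-D numpy array at 'srate' Hz and the helper name is hypothetical.
# 'hand' toggles the amplitude artifact guard, Filter=True enables the median-FIR / EMG
# preprocessing, and TH overrides the default detection-threshold fraction (0.45).
def _example_hamilton(raw_ecg, srate=1000.):
    res = hamilton(False, Signal=raw_ecg, SamplingRate=srate, Filter=True)
    rpeaks = res['R']       # refined R-peak indexes in the preprocessed raw signal
    state = res['init']     # detector state (buffers, DT) reusable in a subsequent call
    return rpeaks, state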

ax31 = fig1.add_subplot(313)
for rid in mdata:
    print rid,
    # load
    fd = gzip.open(data_path % rid, 'rb')
    data = scipy.array(cPickle.load(fd)['segments'])
    fd.close()
    # outliers
    idxs = map(lambda i: int(i), misc.merge_clusters(outlier_results[rid]))
    outliers = outlier_results[rid]
    if idxs != []:
        q = map(lambda i: peakd.sgndiff(-i[:200])['Peak'][-1], data[idxs])
        s = map(lambda i: 200 + peakd.sgndiff(-i[200:])['Peak'][0], data[idxs])
        fd = gzip.open('falc_temp/qs/%d-qs.dict' % rid, 'wb')
        cPickle.dump({'q': q, 's': s}, fd)
        fd.close()
        mn, mx = numpy.min(data[idxs]), numpy.max(data[idxs])
        mq, sdq = scipy.mean(q), scipy.std(q)
        ms, sds = scipy.mean(s), scipy.std(s)
        misc.plot_data(data[idxs], ax21, 'selected')
        ax21.vlines([mq - sdq], mn, mx, 'k', '--')
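
# Note (assumption): the loop above appears to treat each stored segment as a beat with the
# R-peak aligned near sample 200, taking Q as the last local minimum before that point and
# S as the first local minimum after it, then caching the Q/S statistics under falc_temp/qs/.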

def basicSCR(Signal=None, SamplingRate=1000., Filter={}):
    """ Detects and extracts Skin Conductivity Responses (SCRs) information such as:
    SCR amplitudes, onsets, peak instants, rise times, and half-recovery times.

    Kwargs:
        Signal (array): input EDA signal.
        SamplingRate (float): Sampling frequency (Hz).
        Method (string): SCR detection algorithm.
        Filter (dict): filter parameters.

    Kwrvals:
        Signal (array): output filtered signal (see note 1)
        Amplitude (array): signal pulses amplitudes (in the units of the input signal)
        Onset (array): indexes (or instants in seconds, see note 2.a) of the SCR onsets
        Peak (array): indexes (or instants in seconds, see note 2.a) of the SCR peaks
        Rise (array): SCR rise times (in seconds)
        HalfRecovery (array): SCR half-recovery times (in seconds)

    See Also:
        filt

    Notes:
        1 - If a filter is given as a parameter, then the returned keyworded values dict has a 'Signal' key.
        2 - If the sampling rate is defined, then:
            a) keys 'onset' and 'peak' are converted to instants of occurrence in seconds.

    Example:

    References:
        .. [1]
    """
    # Check
    if Signal is None:
        raise TypeError("an input signal must be provided")
    if Filter:
        Filter.update({'Signal': Signal})
        if not Filter.has_key('SamplingRate'):
            Filter.update({'SamplingRate': SamplingRate})
        Signal = eda.filt(**Filter)['Signal']
    try:
        SamplingRate = float(SamplingRate)
        # Compute 1st order derivative
        ds = Signal  # np.diff(Signal)
        # Determine maximums and minimums
        pi = peakd.sgndiff(ds)['Peak']    # max of ds
        ni = peakd.sgndiff(-ds)['Peak']   # min of ds
        # Pair vectors
        if (len(pi) != 0 and len(ni) != 0):
            (pi, ni) = sync.pair(pi, ni)
        li = min(len(pi), len(ni))
        i1 = pi[:li]
        i3 = ni[:li]
        # Indexes
        i0 = i1 - (i3 - i1) / 2.
        if (i0[0] < 0):
            i0[0] = 0
        i2 = (i1 + i3) / 2.
        # Amplitude
        a = np.array(map(lambda i: max(Signal[i1[i]:i3[i]]), np.arange(0, li)))
        # Times
        rt = (i2 - i0)
        hdt = (i3 - i0)
        # Gamboa model
        # for i in range(0, li):
        #     scr = gamboa(i1[i]*dt, i3[i]*dt, ds[i1[i]], ds[i3[i]])  # scr amplitude (uS), rise time (s), 1/2 decay time (s)
        #     a[i] = np.max(scr)
        # Determine t0-t3 if sampling frequency is provided
        # if SamplingRate is not None: dt=1/SamplingRate; i0*=dt; i1*=dt; i2*=dt; i3*=dt; rt*=dt; hdt*=dt  # deviates indexes from real position
        # kwrvals
        kwrvals = {}
        if Filter is not None:
            kwrvals['Signal'] = Signal
        kwrvals['Amplitude'] = a
        kwrvals['Onset'] = i3
        kwrvals['Peak'] = i1
    except Exception as e:
        kwrvals = {'Amplitude': [], 'Onset': [], 'Peak': []}
    return kwrvals
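
# Hedged usage sketch (illustrative): 'eda_signal' is assumed to be a 1-D numpy array of
# skin-conductance samples at 'srate' Hz and the helper name is hypothetical. Passing a
# non-empty Filter dict would route the signal through eda.filt() first; on any internal
# failure the function degrades to empty result arrays instead of raising.
def _example_basicSCR(eda_signal, srate=1000.):
    scr = basicSCR(Signal=eda_signal, SamplingRate=srate)
    return scr['Onset'], scr['Peak'], scr['Amplitude']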

def onset(Signal=None, SamplingRate=1000.):
    """ Determines the onsets of the BVP signal pulses.
    Skips very corrupted signal parts.

    Kwargs:
        Signal (array): input signal.
        SamplingRate (float): sampling frequency (Hz).

    Kwrvals:
        Onset (array): detected pulse onset indexes (or instants in seconds if SamplingRate is provided).
        Ne (array): per-window error estimates.
        Signal (array): filtered input signal.

    Configurable fields:{"name": "bvp.onset", "config": {"SamplingRate": "1000."}, "inputs": ["Signal"], "outputs": ["Onset"]}

    See Also:

    Notes:

    Example:

    References:
        .. [1]
    """
    # Check
    if Signal is None:
        raise TypeError("An input signal is needed.")
    # Init
    idx, GO, start = [], True, 0
    window_size = 5  # Analyze window_size seconds of signal
    Nerror = []
    while (GO):
        try:
            Signal_part = Signal[start:start + window_size * int(SamplingRate)]
        except IndexError:
            Signal_part = Signal[start:-1]
            GO = False
        # Break if remaining signal length is less than 1 second
        if (len(Signal_part) < 1 * SamplingRate):
            break
        # Compute SSF
        q = peakd.ssf(Signal=Signal_part)
        sq = q['Signal'] - np.mean(q['Signal'])
        ss = q['SSF'] * 25
        sq = sq[1:]
        # pidx = sq > ss
        # pidx = pidx.astype('int')
        # dpidx = np.diff(pidx)
        # dpidx[dpidx < 0] = 0
        # dpidx = np.where(dpidx != 0)[0]
        # dpidx += 1
        # the code above only fails when there are small fluctuations of the signal
        sss = (np.diff(ss)) * 100
        sss[sss < 0] = 0
        sss = sss - 2.0 * np.mean(sss)  # eliminates small variations
        pk = peakd.sgndiff(sss)['Peak']
        pk = pk[pl.find(sss[pk] > 0)]
        pk += 100
        dpidx = pk
        # Analyze signal between maximums of 2nd derivative of ss + 100 samples (dpidx indexes)
        detected = False
        for i in range(1, len(dpidx) + 1):
            try:
                st, end = dpidx[i - 1], dpidx[i]
            except IndexError:
                st, end = dpidx[-1], -1
            # Error estimation
            # try:
            #     Ne = MAR(filt(Signal_part[1:][st:end])['Signal'])['Ne']
            # except ValueError:
            #     Ne = 0
            Ne = 0
            # Skip if error is too big, i.e., signal is too corrupted
            Nerror += [abs(np.mean(Ne))]
            if (abs(np.mean(Ne)) > 1e-1):  # empirical value, REVIEW: has to depend on maximum amplitude
                continue
            s = sq[st:end]
            M = peakd.sgndiff(s)['Peak']
            m = peakd.sgndiff(-s)['Peak']
            try:
                M = M[np.argmax(s[M])]  # get max index
                m = m[np.argmin(s[m])]  # get min index
            except ValueError:
                continue
            # maximum has to be larger than minimum, with a bounded interval between them
            if (s[M] - s[m] > 0 and m - M > 150):  #: and m-M < 2000 and m-M > 100
                idx += [st + start]
                detected = True
        # Next round continues from previously detected beat + 100 samples to avoid double detections
        if (detected):
            start = idx[-1] + 100
        # if no beat was detected, move window_size seconds forward
        else:
            start += window_size * int(SamplingRate)
        # print start,
        # if(raw_input('>') == 'q'): break
    idx = np.array(idx)
    # Heart Rate upper and lower bounds
    hr = SamplingRate * (60.0 / (np.diff(idx)))
    idx = idx[np.intersect1d(pl.find(hr > 30), pl.find(hr < 200))]
    # pl.figure(707)
    # pl.figure(707).clf()
    # # pl.plot(Signal)
    # pl.plot(sq, 'b')
    # # pl.plot(ss, 'g')
    # # pl.plot(sss, 'k')
    # # pl.plot(pk, sss[pk], 'k.')
    # if(np.any(idx)): pl.vlines(idx, -0.05, 0.05, 'r')
    # pl.grid('on')
    # pl.show()
    # kwrvals
    kwrvals = {}
    kwrvals['Onset'] = idx / SamplingRate if SamplingRate else idx
    kwrvals['Ne'] = np.array(Nerror)
    kwrvals['Signal'] = filt(Signal[1:])['Signal']
    return kwrvals
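
# Hedged usage sketch (illustrative): 'bvp_signal' is assumed to be a 1-D numpy array of
# BVP samples at 'srate' Hz and the helper name is hypothetical. Onsets come back in
# seconds because SamplingRate is truthy; 'Ne' holds the per-window error estimates
# (all zeros while the MAR-based estimation above stays commented out).
def _example_bvp_onset(bvp_signal, srate=1000.):
    res = onset(Signal=bvp_signal, SamplingRate=srate)
    return res['Onset'], res['Ne']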