def eval_asyst(annotations, _):
    """Evaluates the asystole presence"""
    def check_vf(start, end):
        """Obtains the flutter waves present in a given interval"""
        return [
            a for a in annotations
            if start < a.time < end and a.code is ECGCodes.FLWAV
        ]

    #Window of interest [4:45, 5:00] (in samples) and a 3.5 s pause threshold
    lth, uth, dth = ms2sp(
        (4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000), ms2sp(3500)
    beats = np.array([
        b.time for b in annotations
        if MIT.is_qrs_annotation(b) and lth <= b.time <= uth
    ])
    if len(beats) < 2:
        return not check_vf(lth, uth)
    if uth - beats[-1] > dth:
        return not check_vf(beats[-1], uth)
    rrs = np.diff(beats)
    for i in range(len(rrs)):
        if rrs[i] > dth:
            if not check_vf(beats[i], beats[i + 1]):
                return True
    return False
def eval_vflut(anns, _):
    """Evaluates the ventricular flutter presence"""
    lth, uth, dth = ms2sp(
        (4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000), ms2sp(3500)
    #We remove separations between consecutive flutter fragments
    i = 0
    while i < len(anns):
        if anns[i].code is ECGCodes.VFOFF:
            onset = next((j for j in range(i, len(anns))
                          if anns[j].code is ECGCodes.VFON), None)
            if onset is not None and anns[i].time == anns[onset].time:
                anns.pop(onset)
                anns.pop(i)
                i -= 1
        i += 1
    vflim = (a for a in anns if a.code in (ECGCodes.VFON, ECGCodes.VFOFF))
    vfluts = []
    while True:
        try:
            beg = next(vflim)
            end = next(vflim)
            vfluts.append(Iv(beg.time, end.time))
        except StopIteration:
            break
    #If the record shows many flutter fragments, we simply check for flutter
    #waves in the last 15 seconds.
    if sum(fl.length for fl in vfluts) > ms2sp(20000):
        vfw = [
            a.time for a in anns
            if a.code is ECGCodes.FLWAV and lth <= a.time <= uth
        ]
        return len(vfw) > 5
    interv = Iv(lth, uth)
    return any([interv.intersection(vflut).length > dth for vflut in vfluts])
def eval_tach(annotations, _):
    """Evaluates the tachycardia presence"""
    lth, uth = ms2sp((4 * 60 + 30) * 1000), ms2sp(5 * 60 * 1000)
    beats = np.array([
        b.time for b in annotations
        if MIT.is_qrs_annotation(b) and lth <= b.time <= uth
    ])
    for i in range(len(beats) - 16):
        if ms2bpm(sp2ms(beats[i + 16] - beats[i]) / 16.0) > 120:
            return True
    return False
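
All three evaluators above work in sample units. As a reference, here is a minimal sketch of the conversion helpers they rely on, assuming the 250 Hz sampling frequency mentioned in Example #24 (the real ms2sp, sp2ms and ms2bpm are project utilities; these bodies are only illustrative):

#Illustrative stand-ins for the project's unit-conversion helpers,
#assuming a 250 Hz sampling frequency (the rate used in Example #24).
SAMPLING_FREQ = 250.0

def ms2sp(ms):
    """Converts milliseconds to a number of samples."""
    return ms * SAMPLING_FREQ / 1000.0

def sp2ms(sp):
    """Converts a number of samples to milliseconds."""
    return sp * 1000.0 / SAMPLING_FREQ

def ms2bpm(ms):
    """Converts a mean RR interval in ms to beats per minute."""
    return 60000.0 / ms

#A 500 ms RR interval spans 125 samples and corresponds to 120 bpm.
assert ms2sp(500) == 125.0 and ms2bpm(sp2ms(125)) == 120.0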
Example #4
def _update_measures(pattern):
    """
    Updates the cycle time measures of the pattern.
    """
    #Maximum number of observations considered for the measures (to avoid
    #excessive influence of old observations)
    nobs = 30
    beats = pattern.evidence[o.QRS][-nobs:]
    obseq = pattern.obs_seq
    #RR
    rrs = np.diff([b.time.start for b in beats])
    #The RT (QT) measure is updated by a Kalman Filter strategy.
    #Belief values
    rtmean, rtstd = pattern.hypothesis.meas.rt
    #Current RR measure (bounded)
    qrs = beats[-1]
    rr = rrs[-1]
    rr = max(min(rr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
    #Kalman filter algorithm, as explained in "Probabilistic Robotics"
    sigma_tbar = rtstd**2 + C.KF_Q**2
    twave = obseq[-1]
    if isinstance(twave, o.TWave):
        #rt and corrected rt measure in the current iteration
        rt = twave.earlyend - qrs.time.start
        rtc = ms2sp(1000.0 * sp2sc(rt) / np.cbrt(sp2sc(rr)))
        meas_err = rtc - rtmean
        #Abnormal QT intervals have a higher associated uncertainty
        qt = twave.earlyend - qrs.earlystart
        qt_lims = C.QT_FROM_RR(Iv(rr, rr))
        #Measure uncertainty, represented by the R matrix in the Kalman filter
        KF_R = meas_err if qt in qt_lims else ms2sp(120)
        k_t = sigma_tbar / (sigma_tbar + max(KF_R, C.MIN_QT_STD)**2)
    else:
        #No measure - 0 Kalman gain
        meas_err = 0
        k_t = 0
    if rtmean == 0:
        mu_t = meas_err
        sigma_t = C.QT_ERR_STD**2
    else:
        mu_t = rtmean + k_t * meas_err
        sigma_t = (1.0 - k_t) * sigma_tbar
    #PQ
    pqs = []
    for pwave in pattern.evidence[o.PWave][-nobs:]:
        i = pattern.get_step(pwave)
        qrs = obseq[i - 1]
        pqs.append(qrs.earlystart - pwave.earlystart)
    pattern.hypothesis.meas = o.CycleMeasurements((np.mean(rrs), np.std(rrs)),
                                                  (mu_t, np.sqrt(sigma_t)),
                                                  (np.mean(pqs), np.std(pqs)))
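
The RT update above is a plain scalar Kalman filter: predicted variance sigma_tbar = rtstd^2 + Q^2, gain k_t = sigma_tbar / (sigma_tbar + R^2), then corrected mean and variance. A toy run with made-up numbers (KF_Q and KF_R here are hypothetical, not the project's C.KF_Q or C.MIN_QT_STD):

#Scalar Kalman update as performed by _update_measures, with hypothetical
#numbers expressed in samples.
import numpy as np

rtmean, rtstd = 400.0, 20.0   #belief: corrected RT mean and std
KF_Q = 5.0                    #process noise (hypothetical)
KF_R = 15.0                   #measurement noise (hypothetical)
rtc = 430.0                   #new corrected RT measurement

sigma_tbar = rtstd ** 2 + KF_Q ** 2            #predicted variance (425.0)
k_t = sigma_tbar / (sigma_tbar + KF_R ** 2)    #Kalman gain, in [0, 1]
mu_t = rtmean + k_t * (rtc - rtmean)           #mean pulled toward rtc (~419.6)
sigma_t = (1.0 - k_t) * sigma_tbar             #variance always shrinks (~147.1)
print(mu_t, np.sqrt(sigma_t))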
Example #5
def _verify_atrial_activity(pattern):
    """
    Checks if the atrial activity is consistent with the definition of atrial
    fibrillation (that is, absence of constant P waves or flutter-like
    baseline activity).
    """
    beats = pattern.evidence[o.QRS][-5:]
    obseq = pattern.obs_seq
    atr_sig = {lead: [] for lead in sig_buf.get_available_leads()}
    pw_lims = []
    idx = pattern.get_step(beats[0])
    #First we get all the signal fragments between ventricular observations,
    #which are the only ones recognized by this pattern. These fragments are
    #where atrial activity may be recognized.
    for i in range(idx + 1, len(obseq)):
        if isinstance(obseq[i], o.QRS):
            beg = next(obs for obs in reversed(obseq[:i])
                       if obs is not None).lateend
            end = obseq[i].earlystart
            if end - beg > ms2sp(200):
                beg = end - ms2sp(200)
            pw_lims.append((beg, end))
    for i in range(len(beats) - 1):
        beg, end = beats[i].lateend, beats[i + 1].earlystart
        for lead in atr_sig:
            atr_sig[lead].append(
                sig_buf.get_signal_fragment(beg, end, lead=lead)[0] -
                characterize_baseline(lead, beg, end)[0])
    #Flutter check (only for atrial activity)
    aflut = set()
    for lead in atr_sig:
        sigfr = np.concatenate(atr_sig[lead])
        if len(sigfr) > 15 and _is_VF(sigfr):
            aflut.add(lead)
    #FIXME improve flutter check; now it is quite poor.
    #aflut = frozenset()
    #P waveform check (only for leads where flutters were not found.)
    pwaves = []
    for beg, end in pw_lims:
        pwsig = _get_pwave_sig(beg, end)
        if pwsig is not None:
            for lead in aflut:
                pwsig.pop(lead, None)
            if not pwsig:
                continue
            for wave in pwaves:
                verify(
                    abs(list(wave.values())[0].pr -
                        list(pwsig.values())[0].pr) > C.TMARGIN
                    or not signal_match(wave, pwsig))
            pwaves.append(pwsig)
Example #6
def eval_brad(annotations, _):
    """Evaluates the presence of bradycardia"""
    lth, uth = ms2sp((4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000)
    beats = np.array([b.time for b in annotations if MIT.is_qrs_annotation(b)])
    variability = np.std(np.diff(beats))
    #The default threshold is 40 bpm, but if the rhythm shows high variability,
    #we relax such threshold to 45 bpm.
    thres = 45 if variability > ms2sp(200) else 40
    lidx = bisect.bisect_left(beats, lth)
    uidx = bisect.bisect_right(beats, uth)
    for i in range(lidx, uidx - 4):
        bpm = int(ms2bpm(sp2ms(beats[i + 4] - beats[i]) / 4.0))
        if bpm <= thres:
            return True
    return False
Example #7
def normal_classification(qrs):
    """
    Distinguishes between a normal QRS and complete Left and Right Bundle
    Branch Blocks, according to the typical duration and morphology criteria.
    """
    qdur = qrs.lateend - qrs.earlystart
    if qdur > ms2sp(100) and 'V1' in qrs.shape:
        v1tag = qrs.shape['V1'].tag
        if v1tag[-1] == 'R':
            #Right bundle branch block (complete or incomplete)
            return C.RBBB
        elif qdur > ms2sp(120) and v1tag in ('QS', 'rS'):
            #Left bundle branch block (only complete)
            return C.LBBB
    return C.NORMAL
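
A usage sketch, under the assumption that QRS objects expose earlystart/lateend in samples and a shape dict with a morphology tag string per lead. The stub classes below are hypothetical, only to show when each criterion fires:

#Hypothetical stubs illustrating the classification criteria; not the
#project's QRS or shape classes.
class _Shape(object):
    def __init__(self, tag):
        self.tag = tag

class _QRS(object):
    def __init__(self, dur_ms, v1tag):
        self.earlystart = 0
        self.lateend = ms2sp(dur_ms)
        self.shape = {'V1': _Shape(v1tag)}

normal_classification(_QRS(90, 'qR'))    #-> C.NORMAL (not wider than 100 ms)
normal_classification(_QRS(110, 'rsR'))  #-> C.RBBB (V1 ends with an R wave)
normal_classification(_QRS(130, 'QS'))   #-> C.LBBB (>120 ms, QS morphology)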
Example #8
def _get_pwave_sig(beg, end):
    """
    Checks if before a QRS complex there is a waveform similar to a P Wave. In
    an atrial fibrillation context, there cannot be any recognizable atrial
    activity.

    Parameters
    ----------
    beg:
        Earliest point for the starting of the P Wave. This limit may be
        further constrained if the distance between the two parameters is
        excessive.
    end:
        Latest point for the ending of the P Wave. **It is assumed to be the
        starting point of the QRS complex associated to the P Wave**.

    Returns
    -------
    out:
        Dictionary with a tuple for each lead in which a P-Wave can be
        recognized. The tuple contains the distance in samples from *end* to
        the beginning of the P-Wave, and the signal fragment containing the
        P-Wave.
    """
    #If the result is cached, we use it
    result = PCACHE.get((beg, end), None)
    if result is not None:
        return result.copy()
    est = end - ms2sp(250) if end - beg > ms2sp(250) else beg
    lst = end - ms2sp(80)
    eend = est + ms2sp(40)
    ltnd = end - ms2sp(20)
    if est > lst or eend > end or eend > ltnd:
        #Inconsistency
        return None
    pwave = o.PWave()
    limits = delineate_pwave(est, lst, eend, ltnd, pwave)
    if limits is None:
        return None
    result = {}
    for lead in pwave.amplitude:
        sig = sig_buf.get_signal_fragment(est + limits.start,
                                          est + limits.end + 1,
                                          lead=lead)[0]
        result[lead] = PW_SIG(end - (est + limits.start), sig)
    #Result is cached
    PCACHE[(beg, end)] = result
    return result.copy()
Example #9
def _find_peak(rdef, siginfo, beg, interv):
    """
    Obtains an estimate of the peak location of a QRS complex, from the
    energy interval that forms the base evidence, a fragment of signal evidence,
    a reference time point, and the interval of valid points for the peak.
    """
    llim, ulim = interv.start - beg, interv.end - beg
    dist = lambda p: 1.0 + 2.0 * abs(beg + p - rdef.earlystart) / ms2sp(150)
    dist = np.vectorize(dist)
    peak = None
    #For each lead, the peak will be the point of maximum deviation from the
    #baseline, weighted by the distance function just defined. We give more
    #importance to the first leads, as they supposedly have higher quality.
    for _, sig, points, baseline, _ in siginfo:
        if len(points) < 3:
            continue
        peaks = points[sig_meas.get_peaks(sig[points])]
        peaks = peaks[np.logical_and(llim <= peaks, peaks <= ulim)]
        if len(peaks) == 0:
            continue
        peakscore = abs(sig[peaks] - baseline) / dist(peaks)
        lpeak = peaks[peakscore.argmax()]
        if peak is None:
            peak = lpeak
        elif abs(peak - lpeak) <= C.TMARGIN:
            peak = lpeak if lpeak < peak else peak
    return peak
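
The dist function above divides each candidate's deviation from baseline by 1 + 2*|dt|/150 ms, so a peak at the reference point keeps its full score while one 150 ms away keeps only a third of it. A numeric sketch (the amplitudes are made up):

#Effect of the distance penalty used in _find_peak. Toy values in ms for
#readability; the real code works in samples.
def dist_factor(delta_ms):
    return 1.0 + 2.0 * abs(delta_ms) / 150.0

amp_near, amp_far = 0.8, 1.2               #deviations from baseline
score_near = amp_near / dist_factor(0)     #0.8
score_far = amp_far / dist_factor(150)     #0.4
assert score_near > score_far  #the closer peak wins despite lower amplitude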
Example #10
def __init__(self, qrs):
    self.qrs = qrs
    self.rr = ms2sp(800)
    self.pwave = False
    self.pos = REGULAR
    self.axis = get_axis(qrs)
    self.rh = None
Example #11
def _t_tconst(pattern, twave):
    """
    Temporal constraints of the T wave.
    """
    beats = pattern.evidence[o.QRS]
    tnet = pattern.tnet
    #'qrsidx' is assumed to be bound in an enclosing scope (this function
    #appears to be generated per QRS index).
    qidx = qrsidx + len(beats) if qrsidx < 0 else qrsidx
    qrs = beats[qidx]
    if qidx < len(beats) - 1:
        tnet.set_before(twave.end, beats[qidx + 1].start)
    if qidx > 0:
        refrr = qrs.time.end - pattern.evidence[o.QRS][qidx - 1].time.start
        refrr = max(min(refrr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
        rtc, rtstd = pattern.hypothesis.meas.rt
        if rtc > 0:
            #Expected QT value from the QT corrected value
            rtmean = ms2sp(1000.0 * sp2sc(rtc) * np.cbrt(sp2sc(refrr)))
            tnet.add_constraint(qrs.time, twave.end,
                                Iv(rtmean - 2.5 * rtstd, rtmean + 2.5 * rtstd))
        try:
            tnet.add_constraint(qrs.time, twave.end,
                                Iv(0, refrr - C.TQ_INTERVAL_MIN))
        except ValueError:
            pass
    tnet.add_constraint(qrs.start, twave.end, C.N_QT_INTERVAL)
    #ST interval
    tnet.add_constraint(qrs.end, twave.start, C.ST_INTERVAL)
Example #12
def characterize_baseline(lead, beg, end):
    """
    Obtains the baseline estimation for a fragment delimited by two time
    points in a specific lead. It also obtains a quality estimator for the
    fragment.

    Parameters
    ----------
    lead:
        Selected lead to obtain the baseline estimator.
    beg:
        Starting sample of the interval.
    end:
        Ending sample of the interval.

    Returns
    -------
    out: (baseline, quality)
        Tuple with (baseline, quality) estimators. At the moment, the quality
        estimator is not yet numerically characterized, but we have strong
        evidence that the higher this value is, the higher the signal quality
        of the fragment where the baseline has been estimated.
    """
    assert beg >= 0 and end >= beg
    #We need at least 1 second of signal to estimate the baseline and the
    #quality.
    MIN_LENGTH = ms2sp(1000)
    if end - beg < MIN_LENGTH:
        center = beg + (end - beg) / 2.0
        beg = max(0, int(center - MIN_LENGTH / 2))
        end = int(center + MIN_LENGTH / 2)
    signal = sig_buf.get_signal_fragment(beg, end, lead=lead)[0]
    return (sig_meas.mode(signal), sig_meas.kurtosis(signal))
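
Here the baseline is the most frequent amplitude value (where the signal rests between beats) and kurtosis acts as the quality score: a peaky amplitude distribution indicates a stable baseline. A sketch with plain numpy/scipy stand-ins for sig_meas, assuming it follows the usual definitions:

#Illustrative stand-ins for sig_meas.mode and sig_meas.kurtosis.
import numpy as np
from scipy import stats

def mode(signal):
    """Most frequent (rounded) amplitude value of the fragment."""
    values, counts = np.unique(np.round(signal), return_counts=True)
    return values[counts.argmax()]

def kurtosis(signal):
    """Peakedness of the amplitude distribution; higher ~ cleaner signal."""
    return stats.kurtosis(signal)

rng = np.random.default_rng(0)
clean = np.concatenate([np.zeros(200), 50 * rng.random(10)])  #flat + spikes
noisy = 50 * rng.random(210)                                  #no clear baseline
print(mode(clean), kurtosis(clean) > kurtosis(noisy))         #0.0 True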
Example #13
def get_cluster_features(cluster, features):
    """
    Obtains a BeatInfo object as a summary of the features of a complete
    cluster, created by taking the mean value of all the relevant features.
    It also involves the selection of a representative from the cluster by
    minimizing the distance to the mean.
    """
    cl = [b for b in features if b in cluster]
    if not cl:
        return BeatInfo(o.QRS())
    leads = set.union(*[set(b.shape) for b in cl])
    cl = [b for b in cl if all(l in b.shape for l in leads)]
    if not cl:
        return BeatInfo(o.QRS())
    pwamps = {}
    amplitudes = {}
    qdurs = {}
    for l in leads:
        arr = np.array([features[b].pwave.get(l, 0.0) for b in cl])
        hist = np.histogram(arr, PW_BINS)
        pwamps[l] = dg2ph(hist[1][hist[0].argmax()])
        amplitudes[l] = np.array([b.shape[l].amplitude for b in cl])
        amplitudes[l] = (amplitudes[l] - np.mean(amplitudes[l])) / ph2dg(5.0)
        qdurs[l] = np.array([len(b.shape[l].sig) - 1 for b in cl])
        qdurs[l] = (qdurs[l] - np.mean(qdurs[l])) / ms2sp(120)
    axis = (np.array([features[b].axis for b in cl])
            if Leads.MLII in leads else np.zeros(len(cl)))
    axis = (axis - np.mean(axis)) / 180.0
    #We calculate the euclidean distance of every QRS to the central measures
    eucdist = np.linalg.norm(np.matrix(
        (tuple(qdurs.values()) + tuple(amplitudes.values()) + (axis, ))),
                             axis=0)
    #We select as representative the beat with minimum distance.
    info = BeatInfo(cl[np.argmin(eucdist)])
    info.pwave = np.mean(list(pwamps.values())) > 0.05
    #For the rhythm features, we use all beats
    cl = {b for b in cluster if b in features}
    info.rr = np.mean([features[b].rr for b in cl])
    info.pos = collections.Counter([features[b].pos for b in cl])
    rhpos = max(info.pos, key=lambda v: info.pos[v])
    n = float(sum(info.pos.values()))
    #Factor correction for advanced beats
    if rhpos != ADVANCED and info.pos[ADVANCED] / n > 0.2:
        nadv = ilen(b for b in cl if features[b].pos is REGULAR
                    and features[b].rr < features[b].rh.meas.rr[0])
        nadv -= info.pos[REGULAR] / 2
        if 0 < info.pos[ADVANCED] + nadv > info.pos[REGULAR] - nadv:
            rhpos = ADVANCED
    #Factor correction for delayed beats
    elif rhpos != DELAYED and info.pos[DELAYED] / n > 0.2:
        ndel = ilen(b for b in cl if features[b].pos is REGULAR
                    and features[b].rr < features[b].rh.meas.rr[0])
        ndel -= info.pos[REGULAR] / 2
        if 0 < info.pos[DELAYED] + ndel > info.pos[REGULAR] - ndel:
            rhpos = DELAYED
    info.rh = rhpos
    return info
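
Stripped of the ECG-specific features, the representative selection above reduces to picking the element nearest the centroid of the normalized feature vectors. A reduced sketch (the feature matrix is hypothetical):

#Selecting a cluster representative as the beat whose feature vector is
#closest to the cluster mean, as get_cluster_features does.
import numpy as np

feats = np.array([[0.1, -0.2],   #one row of normalized features per beat
                  [0.0, 0.1],
                  [1.5, 2.0]])
centered = feats - feats.mean(axis=0)
representative = np.argmin(np.linalg.norm(centered, axis=1))
print(representative)  #-> 1, the beat nearest the centroid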
Example #14
def _update_measures(pattern):
    """
    Updates the cycle time measures of the pattern.
    """
    #Maximum number of observations considered for the measures (to avoid
    #excessive influence of old observations)
    nobs = 30
    beats = pattern.evidence[o.QRS][-nobs:]
    #RR
    rrs = np.diff([b.time.start for b in beats])
    obseq = pattern.obs_seq
    #The RT (QT) measure is updated by a Kalman Filter strategy.
    #Belief values
    rtmean, rtstd = pattern.hypothesis.meas.rt
    if (len(obseq) > 1 and isinstance(obseq[-2], o.TWave)
            and obseq[-2] is not pattern.finding):
        twave = obseq[-2]
        #Current RR measure (bounded)
        qrs = next(
            (q for q in reversed(beats) if q.lateend <= twave.earlystart),
            None)
        rr = qrs.time.start - beats[beats.index(qrs) - 1].time.start
        rr = max(min(rr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
        #Kalman filter algorithm, as explained in "Probabilistic Robotics"
        sigma_tbar = rtstd**2 + C.KF_Q**2
        #rt and corrected rt measure in the current iteration
        rt = twave.earlyend - qrs.time.start
        rtc = ms2sp(1000.0 * sp2sc(rt) / np.cbrt(sp2sc(rr)))
        meas_err = rtc - rtmean
        #Abnormal QT intervals have a higher associated uncertainty
        qt = twave.earlyend - qrs.earlystart
        qt_lims = C.QT_FROM_RR(Iv(rr, rr))
        #Measure uncertainty, represented by the R matrix in the Kalman filter
        KF_R = meas_err if qt in qt_lims else ms2sp(120)
        k_t = sigma_tbar / (sigma_tbar + max(KF_R, C.MIN_QT_STD)**2)
        if rtmean == 0:
            rtmean = meas_err
            rtstd = C.QT_ERR_STD
        else:
            rtmean = rtmean + k_t * meas_err
            rtstd = np.sqrt((1.0 - k_t) * sigma_tbar)
    pattern.hypothesis.meas = o.CycleMeasurements((np.mean(rrs), np.std(rrs)),
                                                  (rtmean, rtstd), (0.0, 0.0))
Example #15
def _merge_annots(annlst, interp, reftime):
    """
    Merges an annotations list and an interpretation by selecting on the
    overlap interval the sequence with highest coverage.
    """
    beg = next((ob.earlystart + reftime
                for ob in interp.get_observations(o.Cardiac_Rhythm)),
               np.inf) - ms2sp(150)
    #Ventricular flutter episodes change the reference point.
    vflut = next((a for a in reversed(annlst)
                  if a.code is ECGCodes.VFOFF and a.time >= beg), None)
    if vflut is not None:
        beg = vflut.time + 1
    bidx = next((i for i in range(len(annlst)) if annlst[i].time >= beg),
                len(annlst))
    end = next((a.time for a in reversed(annlst)
                if a.code is ECGCodes.RHYTHM and a.aux == ')'),
               annlst[-1].time)
    #First we calculate the possible 'join points' of the two sequences.
    jpts = (
        set(a.time for a in annlst[bidx:]
            if a.time <= end and a.code is ECGCodes.RHYTHM)
        & set(reftime + r.earlystart for r in interp.get_observations(
            o.Cardiac_Rhythm,
            filt=lambda rh: beg - reftime <= rh.earlystart <= end - reftime)))
    #If there are no join points, we give priority to the interpretation.
    if not jpts:
        jpt = beg
    else:
        #We select the join point with highest coverage.
        score = {}
        for jpt in jpts:
            score[jpt] = (len([
                a for a in annlst[bidx:] if a.time <= jpt and
                (a.code in (ECGCodes.TWAVE, ECGCodes.PWAVE)
                 or MITAnnotation.is_qrs_annotation(a))
            ]) + len(
                list(
                    interp.get_observations((o.QRS, o.PWave, o.TWave),
                                            jpt - reftime, end - reftime))))
        jpt = max(jpts, key=lambda pt: score[pt])
    #We remove the discarded annotations (those after the selected join point),
    #ensuring the WFON/WFOFF pairs are consistent.
    offsets = 0
    while annlst and annlst[-1].time >= jpt:
        if annlst[-1].code is ECGCodes.WFOFF:
            offsets += 1
        elif annlst[-1].code is ECGCodes.WFON:
            offsets -= 1
        annlst.pop()
    while offsets > 0:
        ann = annlst.pop()
        if ann.code is ECGCodes.WFON:
            offsets -= 1
    return jpt - reftime
Example #16
def eval_vtach(anns, rec):
    """Evaluates the presence of ventricular tachycardia"""
    lth, uth = ms2sp((4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000)
    #First we perform clustering on all beats
    qrsdur = {}
    clusters = []
    for ann in anns:
        if MIT.is_qrs_annotation(ann):
            delin = json.loads(ann.aux)
            qrs = {}
            for lead in delin:
                sidx = rec.leads.index(lead)
                qon = ann.time + delin[lead][0]
                qoff = ann.time + delin[lead][-1]
                qrs[lead] = SIG(sig=rec.signal[sidx][qon:qoff + 1])
            qrsdur[ann] = max(len(s.sig) for s in qrs.values())
            clustered = False
            for cluster in clusters:
                if _same_cluster(cluster[0], qrs):
                    cluster[1].add(ann)
                    clustered = True
                    break
            if not clustered:
                clusters.append((qrs, set([ann]), qrsdur[ann]))
    if not clusters:
        return False
    #We take as normal beats the cluster with highest number of annotations.
    nclust = max(clusters, key=lambda cl: len(cl[1]))
    beats = [
        ann for ann in anns
        if MIT.is_qrs_annotation(ann) and lth <= ann.time <= uth
    ]
    if len(beats) < 5:
        return False
    for i in range(len(beats) - 4):
        tach = ms2bpm(sp2ms(beats[i + 4].time - beats[i].time) / 4.0) >= 100
        bset = set(beats[i:i + 5])
        ventr = (np.min([qrsdur[b] for b in bset]) > ms2sp(110)
                 or any([bset.issubset(cl[1]) for cl in clusters]))
        if (tach and ventr and all([b not in nclust[1] for b in bset])):
            return True
    return False
Example #17
def get_combined_energy(start, end, max_level, group=ms2sp(80)):
    """
    This function obtains the energy intervals between two time points,
    combined in a multilead fashion and grouped by a distance criterion.

    Parameters
    ----------
    start:
        Start time point to get the observations with respect to the signal
        buffer.
    end:
        Finish time point to get the observations wrt the signal buffer.
    max_level:
        Maximum level to search for energy intervals. See the description of
        the level in the *get_energy_intervals* function.
    group:
        Distance used to group close observations.

    Returns
    -------
    out:
        Sorted list of *EnergyInterval* observations.
    """
    #Dictionaries to store the energy intervals for each lead
    dicts = {}
    for lead in sig_buf.get_available_leads():
        dicts[lead] = {}
        for i in range(max_level + 1):
            dicts[lead][i] = []
    #Energy intervals detection and combination
    #Process the fragment in TWINDOW-sized chunks relative to 'start'
    idx = 0
    while idx < end - start:
        wfs = {}
        for lead in dicts:
            wfs[lead] = get_deflection_observations(start + idx,
                                                    start + idx + TWINDOW,
                                                    lead=lead,
                                                    max_level=max_level,
                                                    group=group)
            for i in range(max_level + 1):
                if dicts[lead][i] and wfs[lead][i]:
                    if (wfs[lead][i][0].earlystart - dicts[lead][i][-1].lateend
                            <= group):
                        dicts[lead][i][-1].end.cpy(wfs[lead][i][0].start)
                        wfs[lead][i].pop(0)
                dicts[lead][i].extend(wfs[lead][i])
        idx += TWINDOW
    #Remove overlapping intervals
    combine_energy_intervals(list(dicts.values()))
    #Now we flatten the dictionaries, putting all the intervals in a sequence
    #sorted by the earlystart value.
    return SortedList(w for w in it.chain.from_iterable(
        it.chain.from_iterable(dic.values() for dic in dicts.values())))
Example #18
def combine_energy_intervals(dicts, margin=ms2sp(20)):
    """
    Combines the overlapping observations in several dicts in the result format
    of the get_deflection_observations() function.

    Parameters
    ----------
    dicts:
        List of dictionaries. The combination is always performed to the
        first dictionary.
    margin:
        Group margin. Intervals separated by less than this margin are
        considered overlapping, and the overlapped ones are removed from the
        remaining dictionaries.
    """
    chain = it.chain.from_iterable
    dict1 = dicts[0]
    for wint in chain(dict1.values()):
        for i in range(1, len(dicts)):
            conflictive = []
            for lst in dicts[i].values():
                if not lst:
                    continue
                idx = bisect.bisect_left(lst, wint)
                #We rewind to the first interval that may overlap
                while (idx > 0 and lst[idx - 1].lateend + margin >=
                       wint.earlystart - margin):
                    idx -= 1
                #Now we search for overlapping intervals
                while (idx < len(lst) and
                       lst[idx].earlystart - margin <= wint.lateend + margin):
                    w = lst[idx]
                    if Iv(w.earlystart - margin, w.lateend + margin).overlap(
                            Iv(wint.earlystart - margin,
                               wint.lateend + margin)):
                        conflictive.append(w)
                    idx += 1
            if conflictive:
                alleads = set.union(*(set(w.level.keys())
                                      for w in conflictive)) - set(
                                          wint.level.keys())
                for lead in alleads:
                    wint.level[lead] = min(
                        w.level.get(lead, np.inf) for w in conflictive)
                for wconf in conflictive:
                    dicts[i][next(iter(wconf.level.values()))].remove(wconf)
Example #19
def best_quality_lead(rec):
    """
    Obtains the index of the lead with best quality in a given record.
    """
    wlen = int(ms2sp(1000))
    n = len(rec.leads)
    siglen = len(rec.signal[0])
    quality = np.zeros(n)
    for i in range(n):
        quality[i] = np.median(
            np.array([
                kurtosis(rec.signal[i][j:j + wlen])
                for j in range(0, siglen, wlen)
            ]))
    print(rec, quality)
    return quality.argmax()
Example #20
def _t_qrs_tconst(pattern, twave):
    """
    Temporal constraints of the T waves wrt the corresponding QRS complex.
    """
    BASIC_TCONST(pattern, twave)
    obseq = pattern.obs_seq
    idx = pattern.get_step(twave)
    try:
        tnet = pattern.last_tnet
        #We find the qrs observation precedent to this T wave.
        qrs = next(obseq[i] for i in range(idx - 1, -1, -1)
                   if isinstance(obseq[i], o.QRS))
        #If we have more than one QRS, it is possible to constrain even more
        #the location of the T-Wave, based on rhythm information.
        qidx = pattern.evidence[o.QRS].index(qrs)
        if qidx > 0:
            refrr = qrs.time.end - pattern.evidence[o.QRS][qidx - 1].time.start
            refrr = max(min(refrr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
            rtc, rtstd = pattern.hypothesis.meas.rt
            if rtc > 0:
                #Expected QT value from the QT corrected value
                rtmean = ms2sp(1000.0 * sp2sc(rtc) * np.cbrt(sp2sc(refrr)))
                tnet.add_constraint(
                    qrs.time, twave.end,
                    Iv(rtmean - 2.5 * rtstd, rtmean + 2.5 * rtstd))
            tnet.add_constraint(qrs.time, twave.end,
                                C.QT_FROM_RR(Iv(refrr, refrr)))
        else:
            #QT duration
            tnet.add_constraint(qrs.start, twave.end, C.N_QT_INTERVAL)
        if idx > 0 and isinstance(obseq[idx - 1], o.PWave):
            pwave = obseq[idx - 1]
            tnet.add_constraint(
                pwave.end, twave.start,
                Iv(C.ST_INTERVAL.start, C.PQ_INTERVAL.end + C.QRS_DUR.end))
        #ST interval
        tnet.add_constraint(qrs.end, twave.start, C.ST_INTERVAL)
    except StopIteration:
        pass
Example #21
def find_normal_cluster(clusters):
    """
    This function tries to obtain the most probable **normal** cluster from a
    list of non-classified clusters.

    Parameters
    ----------
    - clusters: List of 2-tuples of QRS clusters, with (id, ClusterInfo) for
                each cluster.

    Returns
    -------
    out: 2-tuple with ((id, ClusterInfo), tag) assigned to the selected
         cluster. The tag must be in (AFTAG, C.NORMAL)
    """
    pwl = sorted(clusters,
                 key=lambda cl: (cl[1].info.pwave, len(cl[1].beats)),
                 reverse=True)
    _, pwcl = pwl[0]
    if (len(pwcl.beats) > 30 and pwcl.info.pwave and
        (pwcl.info.qrs.lateend - pwcl.info.qrs.earlystart) < ms2sp(120)):
        return (pwl[0], normal_classification(pwcl.info.qrs))
    else:
        for cl in clusters:
            _, nxt = cl
            if max(nxt.info.pos,
                   key=lambda v: nxt.info.pos[v]) in (REGULAR, AFIB):
                afrel = nxt.info.pos[AFIB] / float(nxt.info.pos[REGULAR])
                tag = (AFTAG
                       if afrel > 0.5 else normal_classification(nxt.info.qrs))
                return (cl, tag)
    #At this point, we select as normal cluster the cluster with
    #highest number of REGULAR or AFIB beats.
    ncl = max(
        clusters,
        key=lambda cl: max(cl[1].info.pos[AFIB], cl[1].info.pos[REGULAR]))
    tag = (AFTAG if ncl[1].info.pos[AFIB] > ncl[1].info.pos[REGULAR] else
           normal_classification(ncl[1].info.qrs))
    return ncl, tag
Example #22
def _clean_artifacts_redundancy(annots):
    """
    Removes those artifact annotations that are close to a QRS annotation, as
    well as redundant rhythm annotations.
    """
    DISTANCE = ms2sp(150)
    banns = [
        a for a in annots
        if MITAnnotation.is_qrs_annotation(a) or a.code == ECGCodes.ARFCT
    ]
    i = 0
    while i < len(banns):
        if (banns[i].code == ECGCodes.ARFCT and
            ((i > 0 and banns[i].time - banns[i - 1].time < DISTANCE) or
             (i < len(banns) - 1
              and banns[i + 1].time - banns[i].time < DISTANCE))):
            #We cannot use 'remove' due to a bug in SortedList.
            j = annots.bisect_left(banns[i])
            while annots[j] is not banns[i]:
                j += 1
            annots.pop(j)
            banns.pop(i)
        else:
            i += 1
    #Redundant rhythms
    i = 1
    while i < len(annots):
        if annots[i].code is ECGCodes.RHYTHM:
            prev = next(
                (a for a in reversed(annots[:i]) if a.code is ECGCodes.RHYTHM),
                None)
            if prev is not None and prev.aux == annots[i].aux:
                annots.pop(i)
            else:
                i += 1
        else:
            i += 1
    return annots
Example #23
def is_afib_rhythm_lian(rrs):
    """
    Checks if an RR series matches the AF classification criteria explained in
    the Lian 2011 paper.
    """
    if len(rrs) < 3:
        return True
    elif len(rrs) > 128:
        i = len(rrs)-128
        isafib = True
        while isafib and i > 0:
            isafib = isafib and is_afib_rhythm_lian(rrs[i:i+128])
            i = max(0, i-128)
        return isafib
    drr = np.diff(rrs)
    xbins = np.arange(int(np.min(rrs) - ms2sp(50)),
                      int(np.max(rrs) + ms2sp(50)), int(ms2sp(25)))
    ybins = np.arange(int(np.min(drr) - ms2sp(50)),
                      int(np.max(drr) + ms2sp(50)), int(ms2sp(25)))
    hist2d, _, _ = np.histogram2d(rrs[1:], drr, [xbins, ybins])
    thres = min(len(drr), round(_NEC(len(drr))))
    return np.count_nonzero(hist2d) >= thres
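
The Lian criterion maps each (RR, delta-RR) pair onto a 25 ms grid and counts the occupied cells: atrial fibrillation scatters the points over many cells, while a regular rhythm collapses them into a few. A toy illustration with synthetic series (the _NEC threshold logic is omitted, and the bin ranges are fixed for simplicity):

#Counting occupied (RR, delta-RR) histogram cells, the core of the Lian
#2011 test. All values are in milliseconds.
import numpy as np

def occupied_cells(rrs_ms, cell=25):
    drr = np.diff(rrs_ms)
    hist, _, _ = np.histogram2d(rrs_ms[1:], drr,
                                [np.arange(300, 2000, cell),
                                 np.arange(-1000, 1000, cell)])
    return np.count_nonzero(hist)

regular = np.full(64, 800.0)                          #constant 800 ms RRs
afib = 800.0 + np.random.default_rng(1).normal(0, 120, 64)
print(occupied_cells(regular), occupied_cells(afib))  #1 vs. several dozen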
Example #24
RECORDS = [l.strip() for l in open(DATABASE_DIR + 'RECORDS')]

#Records to be interpreted can be selected from command line
SLC_STR = '0:{0}'.format(len(RECORDS)) if len(sys.argv) < 2 else sys.argv[1]
#We get a slice from the input string: each ':'-separated field is parsed
#as an int, and empty fields map to None, as in standard slice notation.
SLC = slice(*[{True: lambda n: None, False: int}[x == ''](x)
              for x in (SLC_STR.split(':') + ['', '', ''])[:3]])

for rec in RECORDS[SLC]:
    fname = OUTPUT_DIR + str(rec) + '.i' + ANNOTATOR
    if os.path.isfile(fname):
        print('Output file "{0}" already exists. Skipping record {1}'.format(
                                                                   fname, rec))
        continue
    #Time check
    T0 = time.time()
    TFACTOR = 5.0
    FR_OVERLAP = int(ms2sp(3000))
    MIN_DELAY = 1750
    MAX_DELAY = 20.0
    print('Processing record {0} at 250.0 Hz '.format(rec))
    ANNOTS = process_record(DATABASE_DIR + str(rec), ANNOTATOR, TFACTOR, FR_LEN,
                                     FR_OVERLAP, MIN_DELAY, MAX_DELAY, KFACTOR)
    MITAnnotation.save_annotations(ANNOTS, fname)
    print('Record '+ str(rec) +' processed in '+ str(time.time() - T0) +'s')

print('The full database was successfully processed. Total branches: {0}'.format(
                                                       Interpretation.counter))
print('Reasoning statistics:')
pprint.pprint(reasoning.STATS.most_common())
Example #25
def get_deflection_observations(start,
                                end,
                                lead,
                                max_level=0,
                                group=ms2sp(20)):
    """
    Obtains the deflection observations present in a signal fragment
    specified by its limits. The returned intervals are separated by levels,
    and grouped by a closeness parameter.

    Parameters
    ----------
    start:
        Start index of the signal fragment.
    end:
        End index of the fragment.
    lead:
        Lead from which the energy signal is obtained.
    max_level:
        Energy level we want to reach in the search.
    group:
        Distance parameter. Observations with differences less than this
        value are grouped in a single observation.

    Returns
    -------
    out: dict
        Dict with one list of observations by level. The temporal variables
        of the intervals are set according to the start index.

    See Also
    --------
    wavelet_filter.get_energy_intervals
    """
    energ = sig_buf.get_energy_fragment(start, end, lead=lead)[0]
    obs = {}
    for i in range(max_level + 1):
        obs[i] = []
        for interv in get_energy_intervals(energ, level=i, group=group):
            defl = o.Deflection()
            defl.start.set(interv.start, interv.start)
            defl.end.set(interv.end, interv.end)
            defl.level[lead] = i
            obs[i].append(defl)
        #We update the time of the intervals
        changeTime(obs[i], start)
    #Now we need to remove redundant observations of upper levels
    for i in range(max_level, 0, -1):
        j = 0
        while j < len(obs[i]):
            obj = obs[i][j]
            found = False
            for upper in obs[i - 1]:
                #First posterior observation
                if upper.earlystart >= obj.earlystart:
                    #If it is contained, then remove the lower level obs.
                    if (upper.earlystart >= obj.earlystart
                            and upper.lateend <= obj.lateend):
                        found = True
                        obs[i].pop(j)
                    break
            if not found:
                j += 1
    return obs
Example #26
def get_qualitative_features(nclust, clust):
    """
    Obtains a *Feat* object with the computed values of the features used
    for the classification based on comparison between two clusters.

    Parameters
    ----------
    nclust:
        Cluster structure already identified as normal.
    clust:
        It can be another cluster, or a single *BeatInfo* object.
    """
    if isinstance(clust, BeatInfo):
        info = clust
        rhpos = info.pos
        pwave = 1 if sum(info.pwave.values()) > 0.1 else 0
    else:
        info = clust.info
        rhpos = info.rh
        pwave = int(info.pwave)
    cleads = set(info.qrs.shape).intersection(nclust.qrs.shape)
    if cleads:
        mxl = max(cleads, key=lambda l: info.qrs.shape[l].amplitude)
        ampdf = (float(info.qrs.shape[mxl].amplitude) /
                 nclust.qrs.shape[mxl].amplitude)
    else:
        ampdf = 1.0
    similarity = get_similarity(nclust.qrs.shape, info.qrs.shape)
    ndur = nclust.qrs.lateend - nclust.qrs.earlystart
    dur = info.qrs.lateend - info.qrs.earlystart
    durdf = dur - ndur
    ax = 0.0 if info.axis is None else info.axis
    axdf = (abs(nclust.axis - info.axis) if None not in (nclust.axis,
                                                         info.axis) else 0.0)
    rr = ms2bpm(sp2ms(info.rr))
    rrdf = rr - ms2bpm(sp2ms(nclust.rr))
    #QRS width: -1=narrow, 0=normal, 1=abnormal, 2=wide
    if dur < ms2sp(80):
        dur = -1
    elif dur < ms2sp(100):
        dur = 0
    elif dur < ms2sp(120):
        dur = 1
    else:
        dur = 2
    #QRS width difference: -1=narrower, 0:equal, 1=wider, 2=much wider
    if durdf <= ms2sp(-20):
        durdf = -1
    elif durdf < ms2sp(20):
        durdf = 0
    elif durdf < ms2sp(40):
        durdf = 1
    else:
        durdf = 2
    #Axis: -1 = Negative, 0=Balanced, 1=Positive
    if ax < -45:
        ax = -1
    elif ax < 45:
        ax = 0
    else:
        ax = 1
    #Axis difference: 0=equal, 1=different, 2=very different, 3=opposite
    if axdf < 45:
        axdf = 0
    elif axdf < 90:
        axdf = 1
    elif axdf < 135:
        axdf = 2
    else:
        axdf = 3
    #Rhythm: -1=Bradycardia, 0=Normal, 1=Tachycardia, 2=Extreme tachycardia
    if rr < 60:
        rr = -1
    elif rr < 100:
        rr = 0
    elif rr < 150:
        rr = 1
    else:
        rr = 2
    #Rhythm difference: -1=slower, 0=equal, 1=faster
    if rrdf <= -20:
        rrdf = -1
    elif rrdf < 20:
        rrdf = 0
    else:
        rrdf = 1
    #Similarity: 0=very different, 1=different, 2=similar,
    #            3=very similar, 4=identical
    if similarity < 0.25:
        similarity = 0
    elif similarity < 0.5:
        similarity = 1
    elif similarity < 0.75:
        similarity = 2
    elif similarity < 0.9:
        similarity = 3
    else:
        similarity = 4
    #Amplitude difference: -1=lower, 0=equal, 1=higher
    if ampdf < 0.75:
        ampdf = -1
    elif ampdf <= 1.25:
        ampdf = 0
    else:
        ampdf = 1
    return Feat(rr, rrdf, dur, durdf, ax, axdf, pwave, rhpos, similarity,
                ampdf)
Example #27
    break
elif isinstance(nrhythm, (o.Atrial_Fibrillation, o.RhythmBlock)):
    #Join consecutive AFIB episodes
    afib.end.cpy(nrhythm.end)
    rpeaks = np.array([qrs.time.start
                       for qrs in interp.get_observations(
                           o.QRS, filt=lambda q, rh=nrhythm:
                           rh.earlystart < q.time.start <= rh.lateend)])
    rpks = np.concatenate((rpks, rpeaks))
    if nrhythm in afibs:
        afibs.remove(nrhythm)
    interp.observations.remove(nrhythm)
elif not isinstance(nrhythm, o.Ventricular_Flutter):
    #Asystoles of more than 3 seconds always have to be reported.
    if (isinstance(nrhythm, o.Asystole) and
            nrhythm.lateend - nrhythm.earlystart > ms2sp(3000)):
        break
    #We check the waveform shape and the RR of the new whole episode
    #candidate.
    rbeats = list(interp.get_observations(
        o.QRS, filt=lambda q, rh=nrhythm:
        rh.earlystart < q.time.start <= rh.lateend))
    if all([signal_match(refshape, q.shape) for q in rbeats]):
        rpeaks = np.array([q.time.start for q in rbeats])
        tmprpks = np.concatenate((rpks, rpeaks))
        rrs = (np.diff(rpeaks) if len(rpeaks) >= C.AFIB_MIN_NQRS
               else np.diff(tmprpks))
        if is_afib_rhythm_lian(rrs):
            #The rhythm is assumed to be part of the afib.
            afib.end.cpy(nrhythm.end)
            interp.observations.remove(nrhythm)
Example #28
def _rhythmstart_gconst(pattern, _):
    """General constraints of the rhythm start pattern."""
    #We assume a starting mean rhythm of 75 bpm, but the range allows from 65
    #to 85 bpm
    pattern.hypothesis.meas = o.CycleMeasurements((ms2sp(800), ms2sp(200)),
                                                  (0, 0), (0, 0))
Example #29
                    action='store_true',
                    help=('Does not remove dead-end interpretations, '
                          'keeping them in the interpretation tree'))
parser.add_argument('--no-merge',
                    action='store_true',
                    help=('Avoids the use of a branch-merging strategy for'
                          ' interpretation exploration.'))

args = parser.parse_args()
if args.l % IN._STEP != 0:
    raise ValueError('Fragment length must be multiple of ' + str(IN._STEP))
#Searching settings
TFACTOR = 5000.0
KFACTOR = 12
MIN_DELAY = 1750
MAX_DELAY = int(ms2sp(20000) * TFACTOR)
searching.reasoning.SAVE_TREE = args.full_tree or args.video
searching.reasoning.MERGE_STRATEGY = not args.no_merge

#Input system configuration
IN.reset()
IN.set_record(args.r, args.a)
IN.set_offset(args.f)
IN.set_duration(args.l)
IN.set_tfactor(TFACTOR)
IN.start()
print('Preloading buffer...')
time.sleep(sp2ms(MIN_DELAY) / (1000.0 * TFACTOR))
#Load the initial evidence
IN.get_more_evidence()
Example #30
RECORDS = [
    100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 111, 112, 113, 114, 115,
    116, 117, 118, 119, 121, 122, 123, 124, 200, 201, 202, 203, 205, 207, 208,
    209, 210, 212, 213, 214, 215, 217, 219, 220, 221, 222, 223, 228, 230, 231,
    232, 233, 234
]
#RECORDS = [101]
RECORDS = [l.strip() for l in open(DB_DIR + 'RECORDS')]

series = {}
for record in RECORDS:
    rec = DB_DIR + str(record)
    mitr = load_MIT_record(rec)
    set_sampling_freq(mitr.frequency)
    slen = len(mitr.signal[0])
    wlen = int(ms2sp(30 * 60 * 1000))
    leads = get_leads(rec)
    tmpann = 'tmp'
    annotators = []
    for lead in leads:
        #gqrs's '-o' option sets the output annotator name
        command = ['gqrs', '-r', rec, '-o', tmpann, '-s', lead]
        subprocess.check_call(command)
        annpath = rec + '.' + tmpann
        annotators.append(read_annotations(annpath)[1:])
        os.remove(annpath)
    series[record] = np.array([[a.num for a in ann] for ann in annotators])
    for i in range(len(leads)):
        series[record][i] = series[record][i] / np.mean(series[record][i])
    #bestann = annotators[best_quality_lead(mitr)]
    annots = []
    i = 0