def _update_measures(pattern):
    """
    Updates the cycle time measures of the pattern.
    """
    # Maximum number of observations considered for the measures (to avoid
    # excessive influence of old observations)
    nobs = 30
    beats = pattern.evidence[o.QRS][-nobs:]
    obseq = pattern.obs_seq
    # RR
    rrs = np.diff([b.time.start for b in beats])
    # The RT (QT) measure is updated by a Kalman Filter strategy.
    # Belief values
    rtmean, rtstd = pattern.hypothesis.meas.rt
    # Current RR measure (bounded)
    qrs = beats[-1]
    rr = rrs[-1]
    rr = max(min(rr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
    # Kalman filter algorithm, as explained in "Probabilistic Robotics"
    sigma_tbar = rtstd**2 + C.KF_Q**2
    twave = obseq[-1]
    if isinstance(twave, o.TWave):
        # rt and corrected rt measure in the current iteration
        rt = twave.earlyend - qrs.time.start
        rtc = ms2sp(1000.0 * sp2sc(rt) / np.cbrt(sp2sc(rr)))
        meas_err = rtc - rtmean
        # Abnormal QT intervals have a higher associated uncertainty
        qt = twave.earlyend - qrs.earlystart
        qt_lims = C.QT_FROM_RR(Iv(rr, rr))
        # Measure uncertainty, represented by the R matrix in the Kalman filter
        KF_R = meas_err if qt in qt_lims else ms2sp(120)
        k_t = sigma_tbar / (sigma_tbar + max(KF_R, C.MIN_QT_STD)**2)
    else:
        # No measure - 0 Kalman gain
        meas_err = 0
        k_t = 0
    if rtmean == 0:
        mu_t = meas_err
        sigma_t = C.QT_ERR_STD**2
    else:
        mu_t = rtmean + k_t * meas_err
        sigma_t = (1.0 - k_t) * sigma_tbar
    # PQ
    pqs = []
    for pwave in pattern.evidence[o.PWave][-nobs:]:
        i = pattern.get_step(pwave)
        qrs = obseq[i - 1]
        pqs.append(qrs.earlystart - pwave.earlystart)
    pattern.hypothesis.meas = o.CycleMeasurements((np.mean(rrs), np.std(rrs)),
                                                  (mu_t, np.sqrt(sigma_t)),
                                                  (np.mean(pqs), np.std(pqs)))
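# A quick, self-contained check of the scalar Kalman step used above. This is
# a minimal sketch, not the library code: KF_Q and MIN_QT_STD are hypothetical
# stand-ins for C.KF_Q and C.MIN_QT_STD, and values are in seconds rather than
# samples.
import numpy as np

KF_Q = 0.02        # assumed process noise (s)
MIN_QT_STD = 0.01  # assumed lower bound for the measurement std (s)

def kalman_rt_update(rtmean, rtstd, rtc, meas_std):
    """One scalar Kalman step: predict with process noise, then correct
    with the corrected-RT measurement rtc of uncertainty meas_std."""
    sigma_tbar = rtstd ** 2 + KF_Q ** 2    # predicted variance
    meas_err = rtc - rtmean                # innovation
    k_t = sigma_tbar / (sigma_tbar + max(meas_std, MIN_QT_STD) ** 2)
    return rtmean + k_t * meas_err, np.sqrt((1.0 - k_t) * sigma_tbar)

# Belief 0.40 +/- 0.05 s and a 0.36 s measurement with 0.03 s uncertainty
# pull the mean most of the way toward the measurement (~0.37 s).
print(kalman_rt_update(0.40, 0.05, 0.36, 0.03))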
def _verify_atrial_activity(pattern):
    """
    Checks if the atrial activity is consistent with the definition of atrial
    fibrillation (that is, the absence of constant P waves or of flutter-like
    baseline activity).
    """
    beats = pattern.evidence[o.QRS][-5:]
    obseq = pattern.obs_seq
    atr_sig = {lead: [] for lead in sig_buf.get_available_leads()}
    pw_lims = []
    idx = pattern.get_step(beats[0])
    # First we get all the signal fragments between ventricular observations,
    # which are the only ones recognized by this pattern. These fragments are
    # where atrial activity may be recognized.
    for i in range(idx + 1, len(obseq)):
        if isinstance(obseq[i], o.QRS):
            beg = next(obs for obs in reversed(obseq[:i])
                       if obs is not None).lateend
            end = obseq[i].earlystart
            if end - beg > ms2sp(200):
                beg = end - ms2sp(200)
            pw_lims.append((beg, end))
    for i in range(len(beats) - 1):
        beg, end = beats[i].lateend, beats[i + 1].earlystart
        for lead in atr_sig:
            atr_sig[lead].append(
                sig_buf.get_signal_fragment(beg, end, lead=lead)[0] -
                characterize_baseline(lead, beg, end)[0])
    # Flutter check (only for atrial activity)
    aflut = set()
    for lead in atr_sig:
        sigfr = np.concatenate(atr_sig[lead])
        if len(sigfr) > 15 and _is_VF(sigfr):
            aflut.add(lead)
    # FIXME: improve the flutter check; it is currently quite poor.
    # aflut = frozenset()
    # P waveform check (only for leads where no flutter was found).
    pwaves = []
    for beg, end in pw_lims:
        pwsig = _get_pwave_sig(beg, end)
        if pwsig is not None:
            for lead in aflut:
                pwsig.pop(lead, None)
            if not pwsig:
                continue
            for wave in pwaves:
                verify(
                    abs(next(iter(wave.values())).pr -
                        next(iter(pwsig.values())).pr) > C.TMARGIN
                    or not signal_match(wave, pwsig))
            pwaves.append(pwsig)
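# The verify() call above encodes a pairwise rule: under atrial fibrillation,
# no two P-wave candidates may agree both in PR distance and in waveform
# shape. A minimal sketch of that rule, with a hypothetical similar()
# comparator standing in for signal_match():
def consistent_with_afib(pwaves, similar, tmargin):
    """pwaves: sequence of objects with a .pr distance and a .sig fragment."""
    for i, a in enumerate(pwaves):
        for b in pwaves[i + 1:]:
            if abs(a.pr - b.pr) <= tmargin and similar(a.sig, b.sig):
                return False  # regular atrial activity found
    return True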
def _get_pwave_sig(beg, end):
    """
    Checks if before a QRS complex there is a waveform similar to a P Wave. In
    an atrial fibrillation context, there cannot be any recognizable atrial
    activity.

    Parameters
    ----------
    beg:
        Earliest point for the starting of the P Wave. This limit may be
        further constrained if the distance between the two parameters is
        excessive.
    end:
        Latest point for the ending of the P Wave. **It is assumed to be the
        starting point of the QRS complex associated to the P Wave**.

    Returns
    -------
    out:
        Dictionary with a tuple for each lead in which a P-Wave can be
        recognized. The tuple contains the distance in samples from *end* to
        the beginning of the P-Wave, and the signal fragment containing the
        P-Wave.
    """
    # If the result is cached, we use it
    result = PCACHE.get((beg, end), None)
    if result is not None:
        return result.copy()
    est = end - ms2sp(250) if end - beg > ms2sp(250) else beg
    lst = end - ms2sp(80)
    eend = est + ms2sp(40)
    ltnd = end - ms2sp(20)
    if est > lst or eend > end or eend > ltnd:
        # Inconsistency
        return None
    pwave = o.PWave()
    limits = delineate_pwave(est, lst, eend, ltnd, pwave)
    if limits is None:
        return None
    result = {}
    for lead in pwave.amplitude:
        sig = sig_buf.get_signal_fragment(est + limits.start,
                                          est + limits.end + 1,
                                          lead=lead)[0]
        result[lead] = PW_SIG(end - (est + limits.start), sig)
    # Result is cached
    PCACHE[(beg, end)] = result
    return result.copy()
def _find_peak(rdef, siginfo, beg, interv):
    """
    Obtains an estimation of the peak situation of a QRS complex, from the
    energy interval that forms the base evidence, a fragment of signal evidence,
    a reference time point, and the interval of valid points for the peak.
    """
    llim, ulim = interv.start - beg, interv.end - beg
    dist = lambda p: 1.0 + 2.0 * abs(beg + p - rdef.earlystart) / ms2sp(150)
    dist = np.vectorize(dist)
    peak = None
    # For each lead, the peak will be the maximum deviation point wrt the
    # baseline, and applying the distance function just defined. We give more
    # importance to the first leads, as they supposedly have more quality.
    for _, sig, points, baseline, _ in siginfo:
        if len(points) < 3:
            continue
        peaks = points[sig_meas.get_peaks(sig[points])]
        peaks = peaks[np.logical_and(llim <= peaks, peaks <= ulim)]
        if len(peaks) == 0:
            continue
        peakscore = abs(sig[peaks] - baseline) / dist(peaks)
        lpeak = peaks[peakscore.argmax()]
        if peak is None:
            peak = lpeak
        elif abs(peak - lpeak) <= C.TMARGIN:
            peak = lpeak if lpeak < peak else peak
    return peak
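# Worked example of the scoring rule in _find_peak: the deviation from the
# baseline is discounted by 1 + 2*|distance to the expected position|/150 ms.
# A sketch with one sample per millisecond to keep the numbers readable:
import numpy as np

peaks = np.array([20, 120])   # candidate peak positions (ms)
amp = np.array([1.0, 0.8])    # |signal - baseline| at those candidates
expected = 120.0              # analogue of rdef.earlystart
score = amp / (1.0 + 2.0 * np.abs(peaks - expected) / 150.0)
print(peaks[score.argmax()])  # 120: closer peak wins despite lower amplitude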
def _t_tconst(pattern, twave, qrsidx):
    """
    Temporal constraints of the T wave with respect to the QRS complex at
    position *qrsidx* in the evidence (negative values index from the end).
    """
    BASIC_TCONST(pattern, twave)
    beats = pattern.evidence[o.QRS]
    tnet = pattern.last_tnet
    qidx = qrsidx + len(beats) if qrsidx < 0 else qrsidx
    qrs = beats[qidx]
    if qidx < len(beats) - 1:
        tnet.set_before(twave.end, beats[qidx + 1].start)
    if qidx > 0:
        refrr = qrs.time.end - beats[qidx - 1].time.start
        refrr = max(min(refrr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
        rtc, rtstd = pattern.hypothesis.meas.rt
        if rtc > 0:
            # Expected RT value from the corrected RT measure
            rtmean = ms2sp(1000.0 * sp2sc(rtc) * np.cbrt(sp2sc(refrr)))
            tnet.add_constraint(
                qrs.time, twave.end,
                Iv(rtmean - 2.5 * rtstd, rtmean + 2.5 * rtstd))
        try:
            tnet.add_constraint(qrs.time, twave.end,
                                Iv(0, refrr - C.TQ_INTERVAL_MIN))
        except ValueError:
            pass
    tnet.add_constraint(qrs.start, twave.end, C.N_QT_INTERVAL)
    # ST interval
    tnet.add_constraint(qrs.end, twave.start, C.ST_INTERVAL)
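# Worked example of the cube-root (Fridericia-style) correction used above:
# the expected RT is the corrected belief scaled back by the cube root of the
# reference RR, with both intervals expressed in seconds here.
import numpy as np

rtc = 0.40                             # corrected RT belief (s)
refrr = 0.60                           # bounded reference RR (s), i.e. 100 bpm
print(round(rtc * np.cbrt(refrr), 3))  # ~0.337 s: faster rhythm, shorter RT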
def characterize_baseline(lead, beg, end):
    """
    Obtains the baseline estimation for a fragment delimited by two time
    points in a specific lead. It also obtains a quality estimator for the
    fragment.

    Parameters
    ----------
    lead:
        Selected lead to obtain the baseline estimator.
    beg:
        Starting sample of the interval.
    end:
        Ending sample of the interval.

    Returns
    -------
    out: (baseline, quality)
        Tuple with (baseline, quality) estimators. At the moment, the quality
        estimator is not yet numerically characterized, but we have strong
        evidence that the higher this value is, the higher the signal quality
        of the fragment where the baseline has been estimated.
    """
    assert beg >= 0 and end >= beg
    # We need at least 1 second of signal to estimate the baseline and the
    # quality.
    MIN_LENGTH = ms2sp(1000)
    if end - beg < MIN_LENGTH:
        center = beg + (end - beg) / 2.0
        beg = max(0, int(center - MIN_LENGTH / 2))
        end = int(center + MIN_LENGTH / 2)
    signal = sig_buf.get_signal_fragment(beg, end, lead=lead)[0]
    return (sig_meas.mode(signal), sig_meas.kurtosis(signal))
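# A minimal stand-in for the sig_meas helpers used above (assuming mode()
# returns the most repeated sample value and kurtosis() the sample kurtosis),
# illustrating why kurtosis works as a quality proxy: a flat baseline with a
# single sharp deflection is strongly leptokurtic.
import numpy as np
from scipy import stats

def mode(signal, bins=64):
    hist, edges = np.histogram(signal, bins=bins)
    i = hist.argmax()
    return 0.5 * (edges[i] + edges[i + 1])   # center of the densest bin

signal = np.zeros(1000)
signal[500:520] = 1.0                        # one sharp deflection
print(mode(signal), stats.kurtosis(signal))  # baseline ~0, high kurtosis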
def _update_measures(pattern):
    """
    Updates the cycle time measures of the pattern.
    """
    # Maximum number of observations considered for the measures (to avoid
    # excessive influence of old observations)
    nobs = 30
    beats = pattern.evidence[o.QRS][-nobs:]
    # RR
    rrs = np.diff([b.time.start for b in beats])
    obseq = pattern.obs_seq
    # The RT (QT) measure is updated by a Kalman Filter strategy.
    # Belief values
    rtmean, rtstd = pattern.hypothesis.meas.rt
    if len(obseq) > 1 and isinstance(
            obseq[-2], o.TWave) and obseq[-2] is not pattern.finding:
        twave = obseq[-2]
        # Current RR measure (bounded)
        qrs = next(
            (q for q in reversed(beats) if q.lateend <= twave.earlystart),
            None)
        rr = qrs.time.start - beats[beats.index(qrs) - 1].time.start
        rr = max(min(rr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
        # Kalman filter algorithm, as explained in "Probabilistic Robotics"
        sigma_tbar = rtstd**2 + C.KF_Q**2
        # rt and corrected rt measure in the current iteration
        rt = twave.earlyend - qrs.time.start
        rtc = ms2sp(1000.0 * sp2sc(rt) / np.cbrt(sp2sc(rr)))
        meas_err = rtc - rtmean
        # Abnormal QT intervals have a higher associated uncertainty
        qt = twave.earlyend - qrs.earlystart
        qt_lims = C.QT_FROM_RR(Iv(rr, rr))
        # Measure uncertainty, represented by the R matrix in the Kalman filter
        KF_R = meas_err if qt in qt_lims else ms2sp(120)
        k_t = sigma_tbar / (sigma_tbar + max(KF_R, C.MIN_QT_STD)**2)
        if rtmean == 0:
            rtmean = meas_err
            rtstd = C.QT_ERR_STD
        else:
            rtmean = rtmean + k_t * meas_err
            rtstd = np.sqrt((1.0 - k_t) * sigma_tbar)
    pattern.hypothesis.meas = o.CycleMeasurements((np.mean(rrs), np.std(rrs)),
                                                  (rtmean, rtstd), (0.0, 0.0))
def get_combined_energy(start, end, max_level, group=ms2sp(80)):
    """
    Obtains the energy intervals between two time points, combined in a
    multilead fashion and grouped by a distance criterion.

    Parameters
    ----------
    start:
        Start time point to get the observations with respect to the signal
        buffer.
    end:
        Finish time point to get the observations with respect to the signal
        buffer.
    max_level:
        Maximum level to search for energy intervals. See the description of
        the level in the *get_energy_intervals* function.
    group:
        Distance used to group close observations.

    Returns
    -------
    out:
        Sorted list of *EnergyInterval* observations.
    """
    # Dictionaries to store the energy intervals for each lead
    dicts = {}
    for lead in sig_buf.get_available_leads():
        dicts[lead] = {}
        for i in range(max_level + 1):
            dicts[lead][i] = []
    # Energy intervals detection and combination
    idx = 0
    while idx < end - start:
        wfs = {}
        for lead in dicts:
            wfs[lead] = get_deflection_observations(start + idx,
                                                    start + idx + TWINDOW,
                                                    lead=lead,
                                                    max_level=max_level,
                                                    group=group)
            for i in range(max_level + 1):
                if dicts[lead][i] and wfs[lead][i]:
                    if wfs[lead][i][0].earlystart - dicts[lead][i][
                            -1].lateend <= group:
                        dicts[lead][i][-1].end.value = wfs[lead][i][
                            0].start.value
                        wfs[lead][i].pop(0)
                dicts[lead][i].extend(wfs[lead][i])
        idx += TWINDOW
    # Remove overlapping intervals
    combine_energy_intervals(dicts.values())
    # Now we flatten the dictionaries, putting all the intervals in a sequence
    # sorted by the earlystart value.
    return SortedList(w for w in it.chain.from_iterable(
        it.chain.from_iterable(dic.values() for dic in dicts.values())))
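# Minimal sketch of the grouping rule applied across chunk borders above:
# two consecutive intervals closer than `group` samples are fused into one.
def merge_close(intervals, group):
    merged = []
    for beg, end in sorted(intervals):
        if merged and beg - merged[-1][1] <= group:
            merged[-1] = (merged[-1][0], end)  # extend the previous interval
        else:
            merged.append((beg, end))
    return merged

print(merge_close([(0, 10), (15, 30), (200, 220)], group=20))
# [(0, 30), (200, 220)]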
def combine_energy_intervals(dicts, margin=ms2sp(20)):
    """
    Combines the overlapping observations in several dicts in the result format
    of the get_deflection_observations() function.

    Parameters
    ----------
    dicts:
        List of dictionaries. The combination is always performed to the
        first dictionary.
    margin:
        Group margin. Intervals whose margin-extended limits overlap are
        combined, and the overlapped observations are removed from the
        remaining dictionaries.
    """
    chain = it.chain.from_iterable
    dict1 = dicts[0]
    for wint in chain(dict1.values()):
        for i in range(1, len(dicts)):
            conflictive = []
            for lst in dicts[i].values():
                if not lst:
                    continue
                idx = bisect.bisect_left(lst, wint)
                # Move back to the first interval that may overlap
                while idx > 0 and lst[
                        idx - 1].lateend + margin >= wint.earlystart - margin:
                    idx -= 1
                # Now we search for overlapping intervals
                while idx < len(lst) and lst[
                        idx].earlystart - margin <= wint.lateend + margin:
                    w = lst[idx]
                    if Iv(w.earlystart - margin, w.lateend + margin).overlap(
                            Iv(wint.earlystart - margin,
                               wint.lateend + margin)):
                        conflictive.append(w)
                    idx += 1
            if conflictive:
                alleads = set.union(*(set(w.level)
                                      for w in conflictive)) - set(wint.level)
                for lead in alleads:
                    wint.level[lead] = min(
                        w.level.get(lead, np.inf) for w in conflictive)
                for wconf in conflictive:
                    dicts[i][next(iter(wconf.level.values()))].remove(wconf)
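# The overlap test above extends both intervals by `margin` before comparing
# them. A minimal sketch with plain (start, end) tuples instead of the Iv
# interval type:
def overlap_with_margin(a, b, margin):
    return (a[0] - margin <= b[1] + margin
            and b[0] - margin <= a[1] + margin)

print(overlap_with_margin((0, 100), (130, 200), 20))  # True: gap 30 <= 2*20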
def _t_qrs_tconst(pattern, twave):
    """
    Temporal constraints of the T wave with respect to the corresponding QRS
    complex.
    """
    BASIC_TCONST(pattern, twave)
    obseq = pattern.obs_seq
    idx = pattern.get_step(twave)
    try:
        tnet = pattern.last_tnet
        # We find the qrs observation precedent to this T wave.
        qrs = next(obseq[i] for i in range(idx - 1, -1, -1)
                   if isinstance(obseq[i], o.QRS))
        # If we have more than one QRS, it is possible to constrain even more
        # the location of the T-Wave, based on rhythm information.
        qidx = pattern.evidence[o.QRS].index(qrs)
        if qidx > 0:
            refrr = qrs.time.end - pattern.evidence[o.QRS][qidx - 1].time.start
            refrr = max(min(refrr, C.QTC_RR_LIMITS.end), C.QTC_RR_LIMITS.start)
            rtc, rtstd = pattern.hypothesis.meas.rt
            if rtc > 0:
                # Expected QT value from the QT corrected value
                rtmean = ms2sp(1000.0 * sp2sc(rtc) * np.cbrt(sp2sc(refrr)))
                tnet.add_constraint(
                    qrs.time, twave.end,
                    Iv(rtmean - 2.5 * rtstd, rtmean + 2.5 * rtstd))
            tnet.add_constraint(qrs.time, twave.end,
                                C.QT_FROM_RR(Iv(refrr, refrr)))
        else:
            # QT duration
            tnet.add_constraint(qrs.start, twave.end, C.N_QT_INTERVAL)
        if idx > 0 and isinstance(obseq[idx - 1], o.PWave):
            pwave = obseq[idx - 1]
            tnet.add_constraint(
                pwave.end, twave.start,
                Iv(C.ST_INTERVAL.start, C.PQ_INTERVAL.end + C.QRS_DUR.end))
        # ST interval
        tnet.add_constraint(qrs.end, twave.start, C.ST_INTERVAL)
    except StopIteration:
        pass
def get_more_evidence():
    """
    Obtains a new piece of evidence and introduces it in the appropriate
    structures.
    """
    if BUF.get_status() is BUF.Status.STOPPED:
        return
    dtime = _TFACTOR * (T.time() - _T0) * 1000.0
    cursize = SIG.get_signal_length()
    if dtime - sp2ms(cursize) > sp2ms(_STEP):
        nchunks = int((min(ms2sp(dtime), _DURATION) - cursize) / _STEP)
        init = _OFFSET + cursize
        for i in range(len(_REC.leads)):
            fragment = _REC.signal[i, init:init + nchunks * _STEP]
            if len(fragment) < nchunks * _STEP:
                fragment = np.concatenate(
                    (fragment,
                     fragment[-1] * np.ones(nchunks * _STEP - len(fragment))))
            SIG.add_signal_fragment(fragment, _REC.leads[i])
        for ann in (a for a in _ANNOTS
                    if ((is_qrs_annotation(a) or a.code is ECGCodes.FLWAV)
                        and init <= a.time < init + nchunks * _STEP)):
            rdef = o.RDeflection()
            atime = ann.time - _OFFSET
            rdef.time.value = Iv(atime, atime)
            # The level is established according to the annotation information.
            rdef.level = {
                SIG.VALID_LEAD_NAMES[lead]: 127
                for lead in _REC.leads
            }
            rdef.level[SIG.VALID_LEAD_NAMES[_REC.leads[
                ann.chan]]] = 127 - ann.num
            rdef.freeze()
            BUF.publish_observation(rdef)
        newsize = SIG.get_signal_length()
        if newsize >= _DURATION or newsize >= len(_REC.signal[0]) - _OFFSET:
            BUF.set_status(BUF.Status.STOPPED)
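# The padding step above extends a short final fragment with its last value
# so every lead contributes exactly nchunks*_STEP samples. A small sketch of
# that rule in isolation:
import numpy as np

def pad_to(fragment, n):
    if len(fragment) < n:
        fragment = np.concatenate(
            (fragment, fragment[-1] * np.ones(n - len(fragment))))
    return fragment

print(pad_to(np.array([1.0, 2.0]), 4))  # [1. 2. 2. 2.]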
def is_afib_rhythm_lian(rrs):
    """
    Checks if an RR series matches the AF classification criteria explained in
    the Lian 2011 paper.
    """
    if len(rrs) < 3:
        return True
    elif len(rrs) > 128:
        i = len(rrs) - 128
        isafib = True
        while isafib and i > 0:
            isafib = isafib and is_afib_rhythm_lian(rrs[i:i + 128])
            i = max(0, i - 128)
        return isafib
    drr = np.diff(rrs)
    xbins = np.arange(int(np.min(rrs) - ms2sp(50)),
                      int(np.max(rrs) + ms2sp(50)), int(ms2sp(25)))
    ybins = np.arange(int(np.min(drr) - ms2sp(50)),
                      int(np.max(drr) + ms2sp(50)), int(ms2sp(25)))
    hist2d, _, _ = np.histogram2d(rrs[1:], drr, [xbins, ybins])
    thres = min(len(drr), round(_NEC(len(drr))))
    return np.count_nonzero(hist2d) >= thres
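# Worked sketch of the Lian 2011 RdR test above on synthetic series, using
# milliseconds directly instead of samples to stay self-contained: a constant
# rhythm fills a single (RR, dRR) histogram cell, an irregular one fills many.
import numpy as np

def nonempty_cells(rrs_ms):
    drr = np.diff(rrs_ms)
    xbins = np.arange(rrs_ms.min() - 50, rrs_ms.max() + 50, 25)
    ybins = np.arange(drr.min() - 50, drr.max() + 50, 25)
    hist2d, _, _ = np.histogram2d(rrs_ms[1:], drr, [xbins, ybins])
    return np.count_nonzero(hist2d)

regular = np.full(64, 800.0)  # constant 800 ms RR series
irregular = np.random.default_rng(0).uniform(400, 1200, 64)
print(nonempty_cells(regular), nonempty_cells(irregular))  # e.g. 1 vs. dozens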
def get_deflection_observations(start,
                                end,
                                lead,
                                max_level=0,
                                group=ms2sp(20)):
    """
    Obtains deflection observations present in a signal fragment,
    specified by their limits. The returned intervals are separated by levels,
    and grouped by a closeness parameter.

    Parameters
    ----------
    start:
        Start index of the signal fragment.
    end:
        End index of the fragment.
    lead:
        Lead used to obtain the energy signal where the search is performed.
    max_level:
        Energy level we want to reach in the search.
    group:
        Distance parameter. Observations with differences less than this
        value are grouped in a single observation.

    Returns
    -------
    out: dict
        Dict with one list of observations by level. The temporal variables
        of the intervals are set according to the start index.

    See Also
    --------
    wavelet_filter.get_energy_intervals
    """
    energ = sig_buf.get_energy_fragment(start, end, lead=lead)[0]
    obs = {}
    for i in range(max_level + 1):
        obs[i] = []
        for interv in get_energy_intervals(energ, level=i, group=group):
            defl = o.Deflection()
            defl.start.value = Iv(interv.start, interv.start)
            defl.end.value = Iv(interv.end, interv.end)
            defl.level[lead] = i
            obs[i].append(defl)
        # We update the time of the intervals
        changeTime(obs[i], start)
    # Now we need to remove redundant observations of upper levels
    for i in range(max_level, 0, -1):
        j = 0
        while j < len(obs[i]):
            obj = obs[i][j]
            found = False
            for upper in obs[i - 1]:
                # First upper-level observation not starting before obj
                if upper.earlystart >= obj.earlystart:
                    # If it is contained in obj, this lower-level observation
                    # is redundant and is removed.
                    if upper.lateend <= obj.lateend:
                        found = True
                        obs[i].pop(j)
                    break
            if not found:
                j += 1
    return obs
def _qrs_gconst(pattern, rdef):
    """
    Checks the general constraints of the QRS pattern transition.
    """
    # We ensure that the abstracted evidence has been observed.
    if rdef.earlystart != rdef.lateend:
        return
    # The energy level of the observed interval must be low
    hyp = pattern.hypothesis
    # First we try a guided QRS observation
    _guided_qrs_observation(hyp)
    if hyp.shape:
        hyp.freeze()
        return
    # Hypothesis initial limits
    beg = int(hyp.earlystart)
    if beg < 0:
        beg = 0
    end = int(hyp.lateend)
    # 1. Signal characterization.
    siginfo = _characterize_signal(beg, end)
    verify(siginfo is not None)
    # 2. Peak point estimation.
    peak = _find_peak(rdef, siginfo, beg, hyp.time)
    verify(peak is not None)
    # 3. QRS start and end estimation
    # For each lead, we first check if it is a paced beat, whose
    # delineation process is different. In case of failure, we perform
    # common delineation.
    limits = OrderedDict()
    for lead, sig, points, baseline, _ in siginfo:
        endpoints = _paced_qrs_delineation(sig, points, peak, baseline)
        if endpoints is None:
            endpoints = _qrs_delineation(sig, points, peak)
            if endpoints is None:
                continue
            limits[lead] = (False, endpoints)
        else:
            limits[lead] = (True, endpoints)
    # Now we combine the limits in all leads.
    start, end = _combine_limits(limits, siginfo, peak)
    verify(start is not None and end > start)
    # 4. QRS waveform extraction for each lead.
    for lead, sig, points, baseline, _ in siginfo:
        # We constrain the area delineated so far.
        sig = sig[start : end + 1]
        points = points[np.logical_and(points >= start, points <= end)] - start
        if len(points) == 0:
            continue
        if points[0] != 0:
            points = np.insert(points, 0, 0)
        if points[-1] != len(sig) - 1:
            points = np.append(points, len(sig) - 1)
        if len(points) < 3:
            continue
        # We define a distance function to evaluate the peaks
        dist = lambda p: 1.0 + 2.0 * abs(beg + start + p - rdef.earlystart) / ms2sp(150)
        dist = np.vectorize(dist)
        # We get the peak for this lead
        pks = points[sig_meas.get_peaks(sig[points])]
        if len(pks) == 0:
            continue
        peakscore = abs(sig[pks] - baseline) / dist(pks)
        peak = pks[peakscore.argmax()]
        # Now we get the shape of the QRS complex in this lead.
        shape = None
        # If there is a pace detection in this lead
        if lead in limits and limits[lead][0]:
            endpoints = limits[lead][1]
            shape = _get_paced_qrs_shape(
                sig, points, endpoints.start - start, min(endpoints.end - start, len(sig))
            )
            if shape is None:
                limits[lead] = (False, endpoints)
        if shape is None:
            shape = _get_qrs_shape(sig, points, peak, baseline)
        if shape is None:
            continue
        hyp.shape[lead] = shape
    # There must be a recognizable QRS waveform in at least one lead.
    verify(hyp.shape)
    # 5. The detected shapes may constrain the delineation area.
    llim = min(hyp.shape[lead].waves[0].l for lead in hyp.shape)
    if llim > 0:
        start = start + llim
        for lead in hyp.shape:
            hyp.shape[lead].move(-llim)
    ulim = max(hyp.shape[lead].waves[-1].r for lead in hyp.shape)
    if ulim < end - start:
        end = start + ulim
    # 6. The definitive peak is assigned to the first relevant wave
    # (each QRS shapeform has a specific peak point.)
    peak = start + min(s.waves[_reference_wave(s)].m
                       for s in hyp.shape.values())
    # 7. Segmentation points set
    hyp.paced = any(v[0] for v in limits.values())
    hyp.time.value = Iv(beg + peak, beg + peak)
    hyp.start.value = Iv(beg + start, beg + start)
    hyp.end.value = Iv(beg + end, beg + end)
    ###################################################################
    # Amplitude conditions (between 0.5mV and 6.5 mV in at least one
    # lead or an identified pattern in most leads).
    ###################################################################
    verify(
        len(hyp.shape) > len(sig_buf.get_available_leads()) / 2.0
        or ph2dg(0.5) <= max(s.amplitude for s in hyp.shape.values()) <= ph2dg(6.5)
    )
    hyp.freeze()
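# The verify() guard used throughout these general-constraint functions
# aborts the current interpretation when a condition fails. A minimal
# stand-in (the real library presumably raises its own inconsistency
# exception, caught by the interpretation engine):
class InconsistencyError(Exception):
    """Raised when a pattern constraint is violated."""

def verify(condition, message=""):
    if not condition:
        raise InconsistencyError(message)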
def _rhythmstart_gconst(pattern, _):
    """General constraints of the rhythm start pattern."""
    # We assume a starting mean rhythm of 75 bpm (mean RR of 800 ms), but the
    # deviation allows a range from 65 to 85 bpm
    pattern.hypothesis.meas = o.CycleMeasurements((ms2sp(800), ms2sp(200)),
                                                  (0, 0), (0, 0))