def eval_tach(annotations, _):
    """Evaluates the tachycardia presence"""
    lth, uth = ms2sp((4 * 60 + 30) * 1000), ms2sp(5 * 60 * 1000)
    beats = np.array([
        b.time for b in annotations
        if MIT.is_qrs_annotation(b) and lth <= b.time <= uth
    ])
    for i in range(len(beats) - 16):
        if ms2bpm(sp2ms(beats[i + 16] - beats[i]) / 16.0) > 120:
            return True
    return False
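# Below, a minimal self-contained sketch of the 17-beat sliding-window rate
# check used above. The FS value and the inlined ms2sp/sp2ms/ms2bpm helpers
# are assumptions for illustration; the real ones come from
# construe.utils.units_helper.
import numpy as np

FS = 250.0                            # assumed sampling frequency (Hz)
ms2sp = lambda ms: ms * FS / 1000.0   # milliseconds -> samples
sp2ms = lambda sp: sp * 1000.0 / FS   # samples -> milliseconds
ms2bpm = lambda ms: 60000.0 / ms      # mean RR (ms) -> beats per minute

# Synthetic beat times: one beat every 450 ms (~133 bpm) for 30 seconds.
beats = ms2sp(np.arange(0.0, 30000.0, 450.0))
print(any(ms2bpm(sp2ms(beats[i + 16] - beats[i]) / 16.0) > 120
          for i in range(len(beats) - 16)))   # True (sustained tachycardia)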
Example #2
def _scale_sample(signal, lead):
    if lead not in _CL_MAP:
        raise ValueError('Invalid lead.')
    scaler = _SCALERS[_CL_MAP[lead]]
    #The signal is converted to physical units, and the first point is (0,0)
    sig = dg2ph(signal-signal[0])
    #The features are the 4 points that best represent the signal shape.
    points = DP.arrayRDP(sig, 0.001, 5)[1:]
    if len(points) < 4:
        raise ValueError('Not enough points after path simplification')
    sample = np.concatenate((sp2ms(points), sig[points])).reshape(1, -1)
    return scaler.transform(sample)
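# Sketch of the feature layout produced above: the four points kept by the
# RDP simplification yield a (1, 8) row [t1..t4 in ms, v1..v4 in physical
# units], which is the shape a fitted scikit-learn scaler expects. The
# indices and values here are hypothetical.
import numpy as np

sp2ms = lambda sp: sp * 1000.0 / 250.0        # assuming 250 Hz
points = np.array([3, 10, 18, 27])            # hypothetical sample indices
values = np.array([0.05, 0.42, -0.31, 0.02])  # hypothetical amplitudes
sample = np.concatenate((sp2ms(points), values)).reshape(1, -1)
print(sample.shape)                           # (1, 8)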
Example #3
def best_quality_annotator(annotators, base_time=0):
    """
    Obtains the lead with the best quality, according to the indicator
    selected in acunote task #983.

    Parameters
    ----------
    annotators:
        List of annotators, from which one will be selected.
    base_time:
        Time (in samples) where the annotators started.
    """
    #First we discard empty annotators
    annotators = [ann for ann in annotators if len(ann) > 0]
    if not annotators:
        return []
    #Now we filter with a basic global frequency rule.
    #FIXME we assume annotators starting at 0, be careful!
    freqs = [
        len(ann) / (sp2ms(ann[-1].time - base_time) / 60000.0)
        for ann in annotators
    ]
    gfreqann = [
        annotators[i] for i in range(len(freqs)) if 30 <= freqs[i] <= 140
    ]
    if gfreqann:
        annotators = gfreqann
    amplitudes = [np.array([a.num for a in ann]) for ann in annotators]
    amplitudes = [amp / np.mean(amp) for amp in amplitudes]
    #Maximum amplitude
    #    return annotators[np.argmax([np.mean(amp) if len(amp) > 1 else -np.inf
    #                                                       for amp in amplitudes])]
    #Minimum amplitude variation
    #    return annotators[np.argmin([np.std(amp) if len(amp) > 1 else np.inf
    #                                                       for amp in amplitudes])]
    #Minimum amplitude variation with std windowing
    return annotators[np.argmin([
        np.std([np.std(amp[i:i + 20]) for i in range(0, len(amp), 20)])
        for amp in amplitudes
    ])]
    #Minimum amplitude variation with mean windowing
    #    return annotators[np.argmin([np.std(
    #                         [np.mean(amp[i:i+20]) for i in xrange(0,len(amp),20)])
    #                                                       for amp in amplitudes])]
    #Entropy
    #    return annotators[np.argmin([H(amp) if len(amp) > 1 else np.inf
    #                                                       for amp in amplitudes])]
    #Mismatch calculation (unreachable after the return above, so it is kept
    #disabled like the other alternative indicators)
    #    mismatchs = [mismatch(amp) if len(amp) > 1 else [0]
    #                                                       for amp in amplitudes]
    #    bst = np.argmin([min(np.where(mm > 0.95)[0]) for mm in mismatchs])
    #    return annotators[bst]
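# Standalone sketch of the active indicator above (minimum amplitude
# variation with std windowing): amplitudes are mean-normalized, the std is
# taken inside consecutive 20-sample windows, and the lead whose window stds
# fluctuate least wins. Synthetic data, for illustration only.
import numpy as np

def windowed_std_score(amp):
    amp = amp / np.mean(amp)
    return np.std([np.std(amp[i:i + 20]) for i in range(0, len(amp), 20)])

rng = np.random.default_rng(0)
clean = 1.0 + 0.05 * rng.standard_normal(200)
noisy = 1.0 + 0.40 * rng.standard_normal(200)
print(windowed_std_score(clean), windowed_std_score(noisy))
# The clean lead scores lower, so argmin would select it.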
def eval_brad(annotations, _):
    """Evaluates the bradycardia presence"""
    lth, uth = ms2sp((4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000)
    beats = np.array([b.time for b in annotations if MIT.is_qrs_annotation(b)])
    variability = np.std(np.diff(beats))
    #The default threshold is 40 bpm, but if the rhythm shows high variability,
    #we relax such threshold to 45 bpm.
    thres = 45 if variability > ms2sp(200) else 40
    lidx = bisect.bisect_left(beats, lth)
    uidx = bisect.bisect_right(beats, uth)
    for i in range(lidx, uidx - 4):
        bpm = int(ms2bpm(sp2ms(beats[i + 4] - beats[i]) / 4.0))
        if bpm <= thres:
            return True
    return False
def eval_vtach(anns, rec):
    """Evaluates the ventricular tachycardia presence"""
    lth, uth = ms2sp((4 * 60 + 45) * 1000), ms2sp(5 * 60 * 1000)
    #First we perform clustering on all beats
    qrsdur = {}
    clusters = []
    for ann in anns:
        if MIT.is_qrs_annotation(ann):
            delin = json.loads(ann.aux)
            qrs = {}
            for lead in delin:
                sidx = rec.leads.index(lead)
                qon = ann.time + delin[lead][0]
                qoff = ann.time + delin[lead][-1]
                qrs[lead] = SIG(sig=rec.signal[sidx][qon:qoff + 1])
            qrsdur[ann] = max(len(s.sig) for s in qrs.values())
            clustered = False
            for cluster in clusters:
                if _same_cluster(cluster[0], qrs):
                    cluster[1].add(ann)
                    clustered = True
                    break
            if not clustered:
                clusters.append((qrs, set([ann]), qrsdur[ann]))
    if not clusters:
        return False
    #We take as normal beats the cluster with the highest number of
    #annotations.
    nclust = max(clusters, key=lambda cl: len(cl[1]))
    beats = [
        ann for ann in anns
        if MIT.is_qrs_annotation(ann) and lth <= ann.time <= uth
    ]
    if len(beats) < 5:
        return False
    for i in range(len(beats) - 4):
        tach = ms2bpm(sp2ms(beats[i + 4].time - beats[i].time) / 4.0) >= 100
        bset = set(beats[i:i + 5])
        ventr = (np.min([qrsdur[b] for b in bset]) > ms2sp(110)
                 or any([bset.issubset(cl[1]) for cl in clusters]))
        if (tach and ventr and all([b not in nclust[1] for b in bset])):
            return True
    return False
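# Toy illustration of the 5-beat window criterion above: the window is
# flagged when the rate is >= 100 bpm, every QRS is wide (> 110 ms) or the
# window lies inside a single cluster, and no beat belongs to the dominant
# ("normal") cluster. All names and values here are hypothetical.
normal = {'n1', 'n2', 'n3'}
clusters = [normal, {'v1', 'v2', 'v3', 'v4', 'v5'}]
window = {'v1', 'v2', 'v3', 'v4', 'v5'}
qrsdur_ms = {b: 130 for b in window}     # all wide complexes
tach = True                              # assume the rate check passed
ventr = (min(qrsdur_ms[b] for b in window) > 110
         or any(window.issubset(cl) for cl in clusters))
print(tach and ventr and window.isdisjoint(normal))   # True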
Example #6
def get_qualitative_features(nclust, clust):
    """
    Obtains a *Feat* object with the computed values of the features used
    for the classification based on comparison between two clusters.

    Parameters
    ----------
    nclust:
        Cluster structure already identified as normal.
    clust:
        It can be another cluster, or a single *BeatInfo* object.
    """
    if isinstance(clust, BeatInfo):
        info = clust
        rhpos = info.pos
        pwave = 1 if sum(info.pwave.values()) > 0.1 else 0
    else:
        info = clust.info
        rhpos = info.rh
        pwave = int(info.pwave)
    cleads = set(info.qrs.shape).intersection(nclust.qrs.shape)
    if cleads:
        mxl = max(cleads, key=lambda l: info.qrs.shape[l].amplitude)
        ampdf = (float(info.qrs.shape[mxl].amplitude) /
                 nclust.qrs.shape[mxl].amplitude)
    else:
        ampdf = 1.0
    similarity = get_similarity(nclust.qrs.shape, info.qrs.shape)
    ndur = nclust.qrs.lateend - nclust.qrs.earlystart
    dur = info.qrs.lateend - info.qrs.earlystart
    durdf = dur - ndur
    ax = 0.0 if info.axis is None else info.axis
    axdf = (abs(nclust.axis - info.axis) if None not in (nclust.axis,
                                                         info.axis) else 0.0)
    rr = msec2bpm(sp2ms(info.rr))
    rrdf = rr - msec2bpm(sp2ms(nclust.rr))
    #QRS width: -1=narrow, 0=normal, 1=abnormal, 2=wide
    if dur < ms2sp(80):
        dur = -1
    elif dur < ms2sp(100):
        dur = 0
    elif dur < ms2sp(120):
        dur = 1
    else:
        dur = 2
    #QRS width difference: -1=narrower, 0=equal, 1=wider, 2=much wider
    if durdf <= ms2sp(-20):
        durdf = -1
    elif durdf < ms2sp(20):
        durdf = 0
    elif durdf < ms2sp(40):
        durdf = 1
    else:
        durdf = 2
    #Axis: -1 = Negative, 0=Balanced, 1=Positive
    if ax < -45:
        ax = -1
    elif ax < 45:
        ax = 0
    else:
        ax = 1
    #Axis difference: 0=equal, 1=different, 2=very different, 3=opposite
    if axdf < 45:
        axdf = 0
    elif axdf < 90:
        axdf = 1
    elif axdf < 135:
        axdf = 2
    else:
        axdf = 3
    #Rhythm: -1=Bradycardia, 0=Normal, 1=Tachycardia, 2=Extreme tachycardia
    if rr < 60:
        rr = -1
    elif rr < 100:
        rr = 0
    elif rr < 150:
        rr = 1
    else:
        rr = 2
    #Rhythm difference: -1=slower, 0=equal, 1=faster
    if rrdf <= -20:
        rrdf = -1
    elif rrdf < 20:
        rrdf = 0
    else:
        rrdf = 1
    #Similarity: 0=very different, 1=different, 2=similar,
    #            3=very similar, 4=identical
    if similarity < 0.25:
        similarity = 0
    elif similarity < 0.5:
        similarity = 1
    elif similarity < 0.75:
        similarity = 2
    elif similarity < 0.9:
        similarity = 3
    else:
        similarity = 4
    #Amplitude difference: -1=lower, 0=equal, 1=higher
    if ampdf < 0.75:
        ampdf = -1
    elif ampdf <= 1.25:
        ampdf = 0
    else:
        ampdf = 1
    return Feat(rr, rrdf, dur, durdf, ax, axdf, pwave, rhpos, similarity,
                ampdf)
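# Quick check of the QRS-width bins defined above, with ms2sp inlined at an
# assumed 250 Hz: a 95 ms complex falls in the "normal" bin (0) and a 130 ms
# complex in the "wide" bin (2).
ms2sp = lambda ms: ms * 250.0 / 1000.0

def qrs_width_bin(dur):
    if dur < ms2sp(80):
        return -1   # narrow
    elif dur < ms2sp(100):
        return 0    # normal
    elif dur < ms2sp(120):
        return 1    # abnormal
    return 2        # wide

print(qrs_width_bin(ms2sp(95)), qrs_width_bin(ms2sp(130)))   # 0 2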
Example #7
TFACTOR = 5000.0
KFACTOR = 12
MIN_DELAY = 1750
MAX_DELAY = int(ms2sp(20000) * TFACTOR)
searching.reasoning.SAVE_TREE = args.full_tree or args.video
searching.reasoning.MERGE_STRATEGY = not args.no_merge

#Input system configuration
IN.reset()
IN.set_record(args.r, args.a)
IN.set_offset(args.f)
IN.set_duration(args.l)
IN.set_tfactor(TFACTOR)
IN.start()
print('Preloading buffer...')
time.sleep(sp2ms(MIN_DELAY) / (1000.0 * TFACTOR))
#Load the initial evidence
IN.get_more_evidence()

#Trivial interpretation
interp = Interpretation()
#The focus is initially set on the first observation
interp.focus.push(next(obs_buffer.get_observations()), None)
##########################
### Construe searching ###
##########################
print('Starting interpretation')
t0 = time.time()
cntr = searching.Construe(interp, KFACTOR)
ltime = (cntr.last_time, t0)
#Main loop
Example #8
    except StopIteration:
        continue
    print('Interpretation results for record {0}:'.format(rec))
    print('Rhythm analysis:')
    nect = len([a for a in anns if a.aux == b'(EXT'])
    nbk = len([a for a in anns if a.aux == b'(BK'])
    ncpt = len([a for a in anns if a.aux == b'(CPT'])
    while True:
        end = next(rhythms, anns[-1])
        if start.aux in RHNAMES:
            rhctr[start.aux] += end.time - start.time
        elif start.code == ECGCodes.VFON:
            rhctr['(VFL'] += end.time - start.time
        if end.code not in (ECGCodes.RHYTHM, ECGCodes.VFON):
            break
        start = end
    for rh, samples in rhctr.most_common():
        ms = int(sp2ms(samples))
        h = int(ms / 3600000)
        ms -= h * 3600000
        m = int(ms / 60000)
        ms -= m * 60000
        s = int(ms / 1000)
        ms -= s * 1000
        print('    {0:<20} - {1:02}:{2:02}:{3:02}.{4:03}'.format(
            RHNAMES[rh], h, m, s, ms))
    print('Number of extrasystoles: {0}'.format(nect))
    print('Number of rhythm blocks: {0}'.format(nbk))
    print('Number of couplets: {0}'.format(ncpt))
    print('\n')
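# The repeated subtract-and-divide above is equivalent to chained divmod
# calls; a compact standalone version for a duration given in milliseconds:
def fmt_duration(ms):
    h, ms = divmod(int(ms), 3600000)
    m, ms = divmod(ms, 60000)
    s, ms = divmod(ms, 1000)
    return '{0:02}:{1:02}:{2:02}.{3:03}'.format(h, m, s, ms)

print(fmt_duration(3723456))   # 01:02:03.456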
Example #9
from construe.utils.units_helper import samples2msec as sp2ms
import numpy as np
import matplotlib.pyplot as plt

PATH = '/home/tomas/Dropbox/Investigacion/tese/estadias/2015_BMI/data/'
RECORDS = [l.strip() for l in open(PATH + 'RECORDS')]
ANN = '.iqrs'

plt.ioff()
for rec in RECORDS:
    try:
        annots = MIT.read_annotations(PATH + rec + ANN)
    except IOError:
        print('No results found for record ' + rec)
        continue
    rpeaks = sp2ms(
        np.array([a.time for a in annots if MIT.is_qrs_annotation(a)]))
    if len(rpeaks) < 2:
        print('No heartbeats found for record ' + rec)
        continue
    pwaves = [a for a in annots if a.code == ECGCodes.PWAVE]
    #Plot creation
    fig, host = plt.subplots()
    par1 = host.twinx()
    rrstd = []
    pwf = []
    #We create one point per minute.
    minutes = int(rpeaks[-1] / 60000)
    for m in range(minutes):
        mpks = rpeaks[np.logical_and(rpeaks > m * 60000, rpeaks <
                                     (m + 1) * 60000)]
        if len(mpks) < 2:
Example #10
def process_record_rhythm(path, ann='atr', tfactor=1.0, fr_len=23040,
                          fr_overlap=1080, fr_tlimit=np.inf, min_delay=2560,
                          max_delay=20.0, kfactor=12, initial_pos=0,
                          final_pos=np.inf, exclude_pwaves=False,
                          exclude_twaves=False, verbose=True):
    """
    This function performs a complete interpretation of a given MIT-BIH
    formatted record, using as initial evidence an external set of annotations.
    The interpretation is split into independent fragments of configurable
    length. The exploration factor is also configurable.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension)
    ann:
        Annotator used to obtain the initial evidence (default: 'atr')
    tfactor:
        Time factor to control the speed of the input signal. For example,
        if tfactor = 2.0 two seconds of new signal are added to the signal
        buffer each real second. Of course this can only be greater than 1 in
        offline interpretations.
    fr_len:
        Length in samples of each independently interpreted fragment.
    fr_overlap:
        Length in samples of the overlap between consecutive fragments, to
        prevent loss of information.
    fr_tlimit:
        Time limit **in seconds** for the interpretation of each fragment.
    min_delay:
        Minimum delay **in samples** between the acquisition time and the last
        interpretation time.
    max_delay:
        Maximum delay **in seconds** that the interpretation can spend without
        moving forward. If this threshold is exceeded, the search process
        is pruned.
    kfactor:
        Exploration factor. It is the number of interpretations expanded in
        each searching cycle.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard output
        the fragment being interpreted.

    Returns
    -------
    out:
        sortedlist of annotations resulting from the interpretation, including
        segmentation and rhythm annotations.
    """
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
        fr_overlap = 0
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not a multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.RHYTHM_KNOWLEDGE[:]
    if exclude_twaves:
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(tfactor)
    #Annotations buffer
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        time.sleep(sp2ms(min_delay)/(1000.0*tfactor))
        IN.get_more_evidence()

        #Reasoning and interpretation
        root = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
            cntr = searching.Construe(root, kfactor)
        except (StopIteration, ValueError):
            pos += fr_len - fr_overlap
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        t0 = time.time()
        ltime = (cntr.last_time, t0)
        while cntr.best is None:
            IN.get_more_evidence()
            acq_time = IN.get_acquisition_point()
            def filt(node):
                """Filter function to enforce *min_delay*"""
                if IN.BUF.get_status() is IN.BUF.Status.ACQUIRING:
                    return acq_time + node[0][2] >= min_delay
                else:
                    return True
            cntr.step(filt)
            t = time.time()
            if cntr.last_time > ltime[0]:
                ltime = (cntr.last_time, t)
            if t-ltime[1] > max_delay:
                cntr.prune()
            if t-t0 > fr_tlimit:
                cntr.best = (min(cntr.open) if len(cntr.open) > 0
                                            else min(cntr.closed))
        best_explanation = cntr.best.node
        best_explanation.recover_all()
        #End of reasoning
        #We resolve possible conflicts when joining two fragments, selecting
        #the interpretation with higher coverage.
        btime = _merge_annots(annots, best_explanation, pos) if annots else 0
        #We generate and add the annotations for the current fragment
        newanns = interp2ann(best_explanation, btime, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the currently used branch
        #and clearing the reasoning cache.
        del cntr
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(idur,
                      sp2ms(acq_time)/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #We introduce an overlap between consecutive fragments
        pos += fr_len - fr_overlap
    #Restore the previous knowledge base
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)
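# Hypothetical invocation sketch; '/data/100' and the parameter values are
# placeholders, while the keyword names are those of the signature above.
anns = process_record_rhythm('/data/100', ann='atr', tfactor=1e20,
                             fr_len=23040, fr_overlap=1080,
                             initial_pos=0, final_pos=650000)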
Example #11
def process_record_conduction(path, ann='atr', fr_len=512000, initial_pos=0,
                              final_pos=np.inf, exclude_pwaves=False,
                              exclude_twaves=False, verbose=True):
    """
    This function performs an interpretation in the conduction abstraction
    level of a given MIT-BIH formatted record, using as initial evidence an
    external set of annotations. The result is a delineation of the P waves,
    QRS complex, and T waves of each heartbeat in the initial evidence
    annotator. The interpretation is split into independent fragments of
    configurable length.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension)
    ann:
        Annotator used to obtain the initial evidence (default: 'atr')
    fr_len:
        Length in samples of each independently interpreted fragment.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard output
        the fragment being interpreted.

    Returns
    -------
    out:
        sortedlist of annotations resulting from the interpretation, including
        only segmentation annotations.
    """
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not a multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.SEGMENTATION_KNOWLEDGE[:]
    if exclude_twaves:
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(1e20)
    #Annotations buffer
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        while IN.BUF.get_status() == IN.BUF.Status.ACQUIRING:
            IN.get_more_evidence()

        #Reasoning and interpretation
        root = node = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
        except (StopIteration, ValueError):
            pos += fr_len
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        successors = {node:reasoning.firm_succ(node)}
        t0 = time.time()
        ########################
        ### Greedy searching ###
        ########################
        while True:
            try:
                node = next(successors[node])
                if node not in successors:
                    successors[node] = reasoning.firm_succ(node)
            except StopIteration:
                #If the focus contains a top-level hypothesis, then there is
                #no more evidence to explain.
                if isinstance(node.focus.top[0], o.CardiacCycle):
                    break
                else:
                    #Otherwise, we perform a backtracking operation
                    node = node.parent
            except KeyError:
                node = root
                break
        best_explanation = node
        best_explanation.recover_all()
        #End of reasoning
        #We generate and add the annotations for the current fragment
        newanns = interp2ann(best_explanation, 0, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the currently used branch
        #and clearing the reasoning cache.
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(idur,
                      sp2ms(IN.get_acquisition_point())/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #We advance to the next fragment (no overlap at this level)
        pos += fr_len
    #Restore the previous knowledge base
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)
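# Hypothetical invocation sketch for the conduction level; the record path
# is a placeholder. Only segmentation annotations are returned.
anns = process_record_conduction('/data/100', ann='atr', fr_len=512000,
                                 exclude_twaves=True)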
Example #12
interp = Interpretation()
#The focus is initially set on the first observation
interp.focus.append(next(obs_buffer.get_observations()))
########################
### PEKBFS searching ###
########################
print('Starting interpretation')
t0 = time.time()
pekbfs = searching.PEKBFS(interp, KFACTOR)
ltime = (pekbfs.last_time, t0)
while pekbfs.best is None:
    IN.get_more_evidence()
    acq_time = IN.get_acquisition_point()
    #HINT debug code
    fstr = 'Int: {0:05d} '
    for i in range(int(sp2ms(acq_time - pekbfs.last_time) / 1000.0)):
        fstr += '-'
    fstr += ' Acq: {1}'
    print(fstr.format(int(pekbfs.last_time), acq_time))
    #End of debug code
    pekbfs.step()
    if pekbfs.last_time > ltime[0]:
        ltime = (pekbfs.last_time, time.time())
    if ms2sp((time.time() - ltime[1]) * 1000.0) > MAX_DELAY:
        print('Pruning search')
        if pekbfs.open:
            prevopen = pekbfs.open
        pekbfs.prune()
print('Finished in {0:.3f} seconds'.format(time.time() - t0))
print('Created {0} interpretations'.format(interp.counter))
be = pekbfs.best
Example #13
    miss = 0
    for rec in RECORDS:
        dist[rec] = []
        REF_FILE = ANNOTS_DIR + str(rec) + '.atr'
        TEST_FILE = ANNOTS_DIR + str(rec) + '.wbr'
        reference = np.array([
            anot.time for anot in MIT.read_annotations(REF_FILE)
            if MIT.is_qrs_annotation(anot)
        ])
        test = np.array([
            anot.time for anot in MIT.read_annotations(TEST_FILE)
            if MIT.is_qrs_annotation(anot)
        ])
        #Missing beat search
        for b in reference:
            err = np.inf
            for t in test:
                bdist = t - b
                if abs(bdist) > abs(err):
                    break
                else:
                    err = bdist
            if abs(err) > ms2sp(150.0):
                print('{0}: {1}'.format(rec, b))
                miss += 1
            else:
                dist[rec].append(sp2ms(err))
        dist[rec] = np.array(dist[rec])
        print('Record {0} processed'.format(rec))
    alldist = np.concatenate(tuple(dist[r] for r in RECORDS))
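# Vectorized sketch of the nearest-beat error search above: for each
# reference beat, the signed distance (test - reference) to the closest test
# beat, assuming both arrays are sorted and test has at least two elements.
import numpy as np

def nearest_errors(reference, test):
    idx = np.clip(np.searchsorted(test, reference), 1, len(test) - 1)
    left, right = test[idx - 1], test[idx]
    pick_left = np.abs(reference - left) <= np.abs(right - reference)
    return np.where(pick_left, left - reference, right - reference)

print(nearest_errors(np.array([100, 350, 900]), np.array([110, 340, 980])))
# [ 10 -10  80]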
Example #14
        result = process_record_conduction(rname, annots, length, args.f,
                                           args.t, args.exclude_pwaves,
                                           args.exclude_twaves, args.v)
    else:
        from record_processing import process_record_rhythm
        #Merge strategy
        import construe.inference.reasoning as reasoning
        reasoning.MERGE_STRATEGY = not args.no_merge
        length = 23040 if args.l == 0 else args.l
        overl = 1080 if args.overl == -1 else args.overl
        if length <= overl:
            raise ValueError('The length of each individually interpreted '
                             'fragment has to be greater than the overlap '
                             'between consecutive fragments.')
        result = process_record_rhythm(rname, annots, args.tfactor,
                                       length, overl, args.time_limit,
                                       args.d, args.D, args.k, args.f,
                                       args.t, args.exclude_pwaves,
                                       args.exclude_twaves, args.v)
    save_annotations(result, args.r + '.' + args.o)
    print('Record ' + args.r + ' successfully processed')
    if args.v:
        from construe.model.interpretation import Interpretation
        from construe.utils.units_helper import samples2msec as sp2ms
        import construe.acquisition.record_acquisition as IN
        idur = time.time() - t0
        print('Interpretation time: {0:.03f} seconds. Global Real-time factor: '
              '{1:.03f}. Created {2} interpretations.'.format(idur,
              sp2ms(min(args.t, IN.get_record_length())-args.f)/(idur*1000.),
              Interpretation.counter))
Example #15
def _delimit_t(signal, baseline, ls_lim, ee_lim, qrs_shape):
    """
    This function performs the delineation of a possible T Wave present
    in the fragment. To obtain the endpoint of the T Wave, it uses a method
    based on the work by Zhang: 'An algorithm for robust and efficient location
    of T-wave ends in electrocardiograms'. To get the beginning, it uses a
    probabilistic approach with some basic morphology constraints. All the
    processing is performed on a simplification of the signal fragment with
    at most 7 points.
    """
    try:
        #We exclude the areas in which the slope of the signal exceeds the
        #limit.
        maxtslope = qrs_shape.maxslope * C.TQRS_MAX_DIFFR
        lidx, uidx = 0, len(signal)
        if ls_lim > 0:
            idx = np.where(
                np.max(np.abs(np.diff(signal[:ls_lim +
                                             1]))) > maxtslope)[0] + 1
            lidx = max(idx) if len(idx) > 0 else 0
        if ee_lim < len(signal) - 1:
            idx = np.where(
                np.max(np.abs(np.diff(signal[ee_lim:]))) > maxtslope
            )[0] + ee_lim
            uidx = min(idx) if len(idx) > 0 else len(signal) - 1
            if (uidx > 1 and
                    abs(signal[uidx] - baseline) > C.TWEND_BASELINE_MAX_DIFF):
                dfsign = np.sign(np.diff(signal[:uidx + 1]))
                signchange = ((np.roll(dfsign, 1) - dfsign) != 0).astype(int)
                if np.any(signchange):
                    uidx = np.where(signchange)[0][-1]
        verify(uidx >= lidx)
        signal = signal[lidx:uidx + 1]
        ls_lim -= lidx
        ee_lim -= lidx
        #Any T waveform should be representable with at most 7 points.
        points = DP.arrayRDP(signal,
                             max(ph2dg(0.02), qrs_shape.amplitude / 20.0), 7)
        n = len(points)
        verify(n >= 3)
        #1. Endpoint estimation
        epts = points[points >= ee_lim]
        verify(len(epts) > 0)
        Tend, dum = _zhang_tendpoint(signal, epts)
        #2. Onset point estimation.
        bpts = points[np.logical_and(points < Tend, points <= ls_lim)]
        score = {}
        #Range to normalize differences in the signal values
        rang = max(baseline, signal.max()) - min(signal.min(), baseline)
        #There must be one or two peaks in the T Wave.
        for i in range(len(bpts)):
            sigpt = signal[points[i:np.where(points == Tend)[0][0] + 1]]
            npks = len(get_peaks(sigpt)) if len(sigpt) >= 3 else 0
            if (npks < 1 or npks > 2 or np.ptp(sigpt) <= ph2dg(0.05)):
                continue
            bl_dist = 1.0 - np.abs(signal[bpts[i]] - baseline) / rang
            tdur = sp2ms(Tend - bpts[i])
            score[bpts[i]] = bl_dist * _check_histogram(_TDUR_HIST, tdur)
        verify(score)
        Tbeg = max(score, key=score.get)
        verify(score[Tbeg] > 0)
        verify(np.max(np.abs(np.diff(signal[Tbeg:Tend + 1]))) <= maxtslope)
        return (Iv(Tbeg + lidx, Tend + lidx), dum)
    except InconsistencyError:
        return None
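# Standalone sketch of the slope-sign-change trick used above to pull the
# T-end candidate back to the last turning point of the fragment.
import numpy as np

sig = np.array([0.0, 0.2, 0.5, 0.4, 0.3, 0.35, 0.5])
dfsign = np.sign(np.diff(sig))
signchange = ((np.roll(dfsign, 1) - dfsign) != 0).astype(int)
print(np.where(signchange)[0][-1])   # 4, the last slope sign change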
Example #16
                                           args.exclude_twaves, args.v)
    else:
        from record_processing import process_record_rhythm
        #Merge strategy
        import construe.inference.reasoning as reasoning
        reasoning.MERGE_STRATEGY = not args.no_merge
        length = 23040 if args.l == 0 else args.l
        overl = 1080 if args.overl == -1 else args.overl
        if length <= overl:
            raise ValueError('The length of each individually interpreted '
                             'fragment has to be greater than the overlap '
                             'between consecutive fragments.')
        result = process_record_rhythm(rname, annots, args.tfactor, length,
                                       overl, args.time_limit, args.d, args.D,
                                       args.k, args.f, args.t,
                                       args.exclude_pwaves,
                                       args.exclude_twaves, args.v)
    save_annotations(result, args.r + '.' + args.o)
    print('Record ' + args.r + ' successfully processed')
    if args.v:
        from construe.model.interpretation import Interpretation
        from construe.utils.units_helper import samples2msec as sp2ms
        import construe.acquisition.record_acquisition as IN
        idur = time.time() - t0
        print(
            'Interpretation time: {0:.03f} seconds. Global Real-time factor: '
            '{1:.03f}. Created {2} interpretations.'.format(
                idur,
                sp2ms(min(args.t, IN.get_record_length()) - args.f) /
                (idur * 1000.), Interpretation.counter))