ncl[1].info.qrs.tag = tag classified.append(ncl) clist.remove(ncl) #Comparative classification while clist: c, nxt = clist.pop(0) tag = comparative_classification(nxt, classified) for b in nxt.beats: b.tag = tag #The cluster representant also is assigned the classification. nxt.info.qrs.tag = tag classified.append((c, nxt)) #Afib code is now changed to normality to fit the convention for _, (beats, _) in classified: for b in (b for b in beats if isinstance(b, o.QRS)): if b.tag == AFTAG: b.tag = C.NORMAL annots = interp2annots.interp2ann(interp) #We also include the clustered artifacts. for b in interp.get_observations( o.RDeflection, filt=lambda ba: any([ ba in cl.beats and any(isinstance(b, o.QRS) for b in cl.beats) for cl in clusters.values() ])): a = MIT.MITAnnotation.MITAnnotation() a.code = b.tag a.time = b.time.start annots.add(a) MIT.save_annotations(annots, '{0}.{1}'.format(args.r, args.o))
def process_record_rhythm(path, ann='atr', tfactor=1.0, fr_len=23040,
                          fr_overlap=1080, fr_tlimit=np.inf, min_delay=2560,
                          max_delay=20.0, kfactor=12, initial_pos=0,
                          final_pos=np.inf, exclude_pwaves=False,
                          exclude_twaves=False, verbose=True):
    """
    Performs a complete interpretation of a given MIT-BIH formatted record,
    using as initial evidence an external set of annotations. The
    interpretation is split in independent fragments of configurable length.
    The exploration factor is also configurable.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension).
    ann:
        Annotator used to obtain the initial evidence (default: 'atr').
    tfactor:
        Time factor to control the speed of the input signal. For example,
        if tfactor = 2.0 two seconds of new signal are added to the signal
        buffer each real second. Of course this can only be greater than 1
        in offline interpretations.
    fr_len:
        Length in samples of each independently interpreted fragment.
    fr_overlap:
        Length in samples of the overlapping between consecutive fragments,
        to prevent loss of information.
    fr_tlimit:
        Time limit **in seconds** for the interpretation of each fragment.
    min_delay:
        Minimum delay **in samples** between the acquisition time and the
        last interpretation time.
    max_delay:
        Maximum delay **in seconds** that the interpretation can be without
        moving forward. If this threshold is exceeded, the searching process
        is pruned.
    kfactor:
        Exploration factor. It is the number of interpretations expanded in
        each searching cycle.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard output
        the fragment being interpreted.

    Returns
    -------
    out:
        Sorted list of annotations resulting from the interpretation,
        including segmentation and rhythm annotations.
    """
    #If the requested span is shorter than one fragment, shrink the fragment
    #to the span and drop the overlap.
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
        fr_overlap = 0
    #The fragment length must be a multiple of the input step size; round up.
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration: rhythm-level patterns, optionally without
    #P and/or T wave patterns. The previous knowledge base is restored at
    #the end of the function.
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.RHYTHM_KNOWLEDGE[:]
    if exclude_twaves:
        #Excluding T waves also excludes P waves (presumably because P-wave
        #abstraction depends on T-wave delineation — TODO confirm).
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(tfactor)
    #Annotations buffer (kept sorted across fragments)
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    #Baseline of the global interpretation counter, used to report how many
    #interpretations each fragment created.
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start: rewind the acquisition system to the fragment offset.
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        #Wait until (at least) min_delay samples are available before
        #starting the reasoning (sp2ms converts samples to milliseconds;
        #tfactor scales real time to signal time).
        time.sleep(sp2ms(min_delay)/(1000.0*tfactor))
        IN.get_more_evidence()
        #Reasoning and interpretation
        root = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
            cntr = searching.Construe(root, kfactor)
        except (StopIteration, ValueError):
            #No initial evidence in this fragment: advance and try the next.
            pos += fr_len - fr_overlap
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        t0 = time.time()
        #ltime tracks (last interpreted signal time, wall-clock instant when
        #it last advanced) to detect a stalled search.
        ltime = (cntr.last_time, t0)
        while cntr.best is None:
            IN.get_more_evidence()
            acq_time = IN.get_acquisition_point()
            def filt(node):
                """Filter function to enforce *min_delay*"""
                if IN.BUF.get_status() is IN.BUF.Status.ACQUIRING:
                    #node[0][2] is presumably the node's time component —
                    #TODO confirm against searching.Construe internals.
                    return acq_time + node[0][2] >= min_delay
                else:
                    return True
            cntr.step(filt)
            t = time.time()
            if cntr.last_time > ltime[0]:
                ltime = (cntr.last_time, t)
            #Prune the search if it has not moved forward for max_delay
            #wall-clock seconds.
            if t-ltime[1] > max_delay:
                cntr.prune()
            #Hard per-fragment time limit: force a best interpretation from
            #the open list (or the closed list if open is empty).
            if t-t0 > fr_tlimit:
                cntr.best = (min(cntr.open) if len(cntr.open) > 0
                             else min(cntr.closed))
        best_explanation = cntr.best.node
        best_explanation.recover_all()
        #End of reasoning
        #We resolve possible conflicts on joining two fragments, selecting
        #the interpretation with higher coverage.
        btime = _merge_annots(annots, best_explanation, pos) if annots else 0
        #We generate and add the annotations for the current fragment.
        newanns = interp2ann(best_explanation, btime, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the current used branch
        #and clearing the reasoning cache.
        del cntr
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            #NOTE(review): acq_time is assigned inside the search loop above;
            #this assumes the loop always runs at least once — confirm that
            #cntr.best is always None right after construction.
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(
                      idur, sp2ms(acq_time)/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #We introduce an overlapping between consecutive fragments.
        pos += fr_len - fr_overlap
    #Restore the previous knowledge base.
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)
tmprpks = np.concatenate((rpks, rpeaks)) rrs = (np.diff(rpeaks) if len(rpeaks) >= C.AFIB_MIN_NQRS else np.diff(tmprpks)) if is_afib_rhythm_lian(rrs): #The rhythm is assumed to be part of the afib. afib.end.cpy(nrhythm.end) interp.observations.remove(nrhythm) rpks = tmprpks else: break else: break else: break i += 1 anns = i2a.interp2ann(interp) for b in interp.get_observations(o.BeatAnn): a = MIT.MITAnnotation.MITAnnotation() a.code = MIT.ECGCodes.ARFCT a.time = b.time.start anns.add(a) #Now we select only rhythm annotations and we remove repeated annotations. anns = [a for a in anns if a.code == COD.RHYTHM] i = 1 while i < len(anns): if (anns[i].aux in (b'(N', b'(SVTA', b'(SBR', b'(AFIB', b'(T', b'(B', b'(VFL') and anns[i].aux == anns[i-1].aux): anns.pop(i) else: i += 1
def process_record_conduction(path, ann='atr', fr_len=512000, initial_pos=0,
                              final_pos=np.inf, exclude_pwaves=False,
                              exclude_twaves=False, verbose=True):
    """
    Performs an interpretation in the conduction abstraction level of a
    given MIT-BIH formatted record, using as initial evidence an external
    set of annotations. The result is a delineation of the P waves, QRS
    complex, and T waves of each heartbeat in the initial evidence
    annotator. The interpretation is split in independent fragments of
    configurable length.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension).
    ann:
        Annotator used to obtain the initial evidence (default: 'atr').
    fr_len:
        Length in samples of each independently interpreted fragment.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard output
        the fragment being interpreted.

    Returns
    -------
    out:
        Sorted list of annotations resulting from the interpretation,
        including only segmentation annotations.
    """
    #If the requested span is shorter than one fragment, shrink the fragment.
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
    #The fragment length must be a multiple of the input step size; round up.
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration: segmentation-level patterns, optionally
    #without P and/or T wave patterns. The previous knowledge base is
    #restored at the end of the function.
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.SEGMENTATION_KNOWLEDGE[:]
    if exclude_twaves:
        #Excluding T waves also excludes P waves (presumably because P-wave
        #abstraction depends on T-wave delineation — TODO confirm).
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration. The huge time factor makes acquisition effectively
    #instantaneous (fully offline processing).
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(1e20)
    #Annotations buffer (kept sorted across fragments)
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    #Baseline of the global interpretation counter, used to report how many
    #interpretations each fragment created.
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start: acquire the whole fragment before reasoning.
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        while IN.BUF.get_status() == IN.BUF.Status.ACQUIRING:
            IN.get_more_evidence()
        #Reasoning and interpretation
        root = node = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
        except (StopIteration, ValueError):
            #No initial evidence in this fragment: advance and try the next.
            pos += fr_len
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        #Cache of firm-successor generators, one per visited node.
        successors = {node: reasoning.firm_succ(node)}
        t0 = time.time()
        ########################
        ### Greedy searching ###
        ########################
        while True:
            try:
                node = next(successors[node])
                if node not in successors:
                    successors[node] = reasoning.firm_succ(node)
            except StopIteration:
                #If the focus contains a top-level hypothesis, then there is
                #no more evidence to explain.
                if isinstance(node.focus.top[0], o.CardiacCycle):
                    break
                else:
                    #In other case, we perform a backtracking operation.
                    node = node.parent
            except KeyError:
                #Backtracked past the root (no cached generator): give up
                #and take the root as the explanation.
                node = root
                break
        best_explanation = node
        best_explanation.recover_all()
        #End of reasoning
        #We generate and add the annotations for the current fragment.
        newanns = interp2ann(best_explanation, 0, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the current used branch
        #and clearing the reasoning cache.
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(
                      idur, sp2ms(IN.get_acquisition_point())/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #Move to the next fragment. NOTE(review): unlike the rhythm
        #interpretation, consecutive fragments do not overlap here.
        pos += fr_len
    #Restore the previous knowledge base.
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)