def _rdef_gconst(pattern, _):
    """
    General constraints of the R-Deflection pattern, that simply looks in
    the global annotations list for an appropriate annotation.
    """
    if ANNOTS is None:
        _load_annots()
    leads = IN.SIG.get_available_leads()
    rdef = pattern.hypothesis
    # Search interval for candidate annotations, clipped to the acquisition
    # point and shifted to absolute record positions.
    lo = min(int(rdef.earlystart), IN.get_acquisition_point()) + IN._OFFSET
    hi = min(int(rdef.lateend), IN.get_acquisition_point()) + IN._OFFSET
    probe = MITAnnotation()
    probe.time = lo
    first = ANNOTS.bisect_left(probe)
    probe.time = hi
    last = ANNOTS.bisect_right(probe)
    # At least one annotation must lie inside [lo, hi].
    verify(last > first)
    # Keep the annotation with the highest 'num' value in the interval.
    best = max(ANNOTS[first:last], key=operator.attrgetter('num'))
    t = best.time - IN._OFFSET
    # The hypothesis collapses to the exact annotation time.
    rdef.time.set(t, t)
    rdef.start.set(t, t)
    rdef.end.set(t, t)
    # Default level 127 on every lead; the annotated channel gets 127 - num.
    rdef.level = dict.fromkeys(leads, 127)
    rdef.level[leads[best.chan]] = 127 - best.num
#Trivial interpretation interp = Interpretation() #The focus is initially set in the first observation interp.focus.push(next(obs_buffer.get_observations()), None) ########################## ### Construe searching ### ########################## print('Starting interpretation') t0 = time.time() cntr = searching.Construe(interp, KFACTOR) ltime = (cntr.last_time, t0) #Main loop while cntr.best is None: IN.get_more_evidence() acq_time = IN.get_acquisition_point() #HINT debug code fstr = 'Int: {0:05d} ' for i in range(int(sp2ms(acq_time - cntr.last_time) / 1000.0)): fstr += '-' fstr += ' Acq: {1}' if interp.counter > 100: print(fstr.format(int(cntr.last_time), acq_time)) #End of debug code filt = ((lambda n: acq_time + n[0][2] >= MIN_DELAY) if obs_buffer.get_status() is obs_buffer.Status.ACQUIRING else (lambda _: True)) cntr.step(filt) if cntr.last_time > ltime[0]: ltime = (cntr.last_time, time.time()) #If the distance between acquisition time and interpretation time is
def process_record_rhythm(path, ann='atr', tfactor=1.0, fr_len=23040,
                          fr_overlap=1080, fr_tlimit=np.inf, min_delay=2560,
                          max_delay=20.0, kfactor=12, initial_pos=0,
                          final_pos=np.inf, exclude_pwaves=False,
                          exclude_twaves=False, verbose=True):
    """
    This function performs a complete interpretation of a given MIT-BIH
    formatted record, using as initial evidence an external set of
    annotations. The interpretation is split in independent fragments of
    configurable length. The exploration factor is also configurable.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension)
    ann:
        Annotator used to obtain the initial evidence (default: 'atr')
    tfactor:
        Time factor to control the speed of the input signal. For example,
        if tfactor = 2.0 two seconds of new signal are added to the signal
        buffer each real second. Of course this can only be greater than 1
        in offline interpretations.
    fr_len:
        Length in samples of each independently interpreted fragment.
    fr_overlap:
        Length in samples of the overlapping between consecutive fragments,
        to prevent loss of information.
    fr_tlimit:
        Time limit **in seconds** for the interpretation of each fragment.
    min_delay:
        Minimum delay **in samples** between the acquisition time and the
        last interpretation time.
    max_delay:
        Maximum delay **in seconds** that the interpretation can be without
        moving forward. If this threshold is exceeded, the searching
        process is pruned.
    kfactor:
        Exploration factor. It is the number of interpretations expanded in
        each searching cycle.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard
        output the fragment being interpreted.

    Returns
    -------
    out:
        sortedlist of annotations resulting from the interpretation,
        including segmentation and rhythm annotations.
    """
    #If the record is shorter than one fragment, shrink the fragment and
    #drop the overlap.
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
        fr_overlap = 0
    #Fragment length must be a multiple of the input acquisition step.
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration (restored before returning)
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.RHYTHM_KNOWLEDGE[:]
    #NOTE excluding T waves also excludes P waves (P-wave hypotheses depend
    #on the T-wave abstraction level).
    if exclude_twaves:
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(tfactor)
    #Annotations buffer
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    #Interpretation counter snapshot, used to report how many
    #interpretations each fragment created.
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        #Wait until at least min_delay samples are available before
        #starting the reasoning.
        time.sleep(sp2ms(min_delay)/(1000.0*tfactor))
        IN.get_more_evidence()
        #Reasoning and interpretation
        root = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
            cntr = searching.Construe(root, kfactor)
        except (StopIteration, ValueError):
            #No observations in this fragment: move on to the next one.
            pos += fr_len - fr_overlap
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        t0 = time.time()
        #ltime tracks (last interpretation time, wall-clock instant it
        #advanced), to detect stalled searches.
        ltime = (cntr.last_time, t0)
        while cntr.best is None:
            IN.get_more_evidence()
            acq_time = IN.get_acquisition_point()
            def filt(node):
                """Filter function to enforce *min_delay*"""
                if IN.BUF.get_status() is IN.BUF.Status.ACQUIRING:
                    return acq_time + node[0][2] >= min_delay
                else:
                    return True
            cntr.step(filt)
            t = time.time()
            if cntr.last_time > ltime[0]:
                ltime = (cntr.last_time, t)
            #Stalled for more than max_delay seconds: prune the search.
            if t-ltime[1] > max_delay:
                cntr.prune()
            #Fragment time limit exceeded: force the current best node.
            if t-t0 > fr_tlimit:
                cntr.best = (min(cntr.open) if len(cntr.open) > 0
                             else min(cntr.closed))
        best_explanation = cntr.best.node
        best_explanation.recover_all()
        #End of reasoning
        #We resolve possible conflicts on joining two fragments, selecting
        #the interpretation with higher coverage.
        btime = _merge_annots(annots, best_explanation, pos) if annots else 0
        #We generate and add the annotations for the current fragment
        newanns = interp2ann(best_explanation, btime, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the current used branch
        #and clearing the reasoning cache.
        del cntr
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(idur,
                      sp2ms(acq_time)/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #We introduce an overlapping between consecutive fragments
        pos += fr_len - fr_overlap
    #Restore the previous knowledge base
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)
def process_record_conduction(path, ann='atr', fr_len=512000, initial_pos=0,
                              final_pos=np.inf, exclude_pwaves=False,
                              exclude_twaves=False, verbose=True):
    """
    This function performs an interpretation in the conduction abstraction
    level of a given MIT-BIH formatted record, using as initial evidence an
    external set of annotations. The result is a delineation of the P
    waves, QRS complex, and T waves of each heartbeat in the initial
    evidence annotator. The interpretation is split in independent
    fragments of configurable length.

    Parameters
    ----------
    path:
        Complete name of the record to be processed (without any extension)
    ann:
        Annotator used to obtain the initial evidence (default: 'atr')
    fr_len:
        Length in samples of each independently interpreted fragment.
    initial_pos:
        Time position (in samples) where the interpretation should begin.
    final_pos:
        Time position (in samples) where the interpretation should finish.
    exclude_pwaves:
        Flag to avoid P-wave searching.
    exclude_twaves:
        Flag to avoid T-wave searching.
    verbose:
        Boolean flag. If active, the algorithm will print to standard
        output the fragment being interpreted.

    Returns
    -------
    out:
        sortedlist of annotations resulting from the interpretation,
        including only segmentation annotations.
    """
    #If the record is shorter than one fragment, shrink the fragment.
    if fr_len > final_pos-initial_pos:
        fr_len = int(final_pos-initial_pos)
    #Fragment length must be a multiple of the input acquisition step.
    if fr_len % IN._STEP != 0:
        fr_len += IN._STEP - (fr_len % IN._STEP)
        warnings.warn('Fragment length is not multiple of {0}. '
                      'Adjusted to {1}'.format(IN._STEP, fr_len))
    #Knowledge base configuration (restored before returning)
    prev_knowledge = ap.KNOWLEDGE
    curr_knowledge = ap.SEGMENTATION_KNOWLEDGE[:]
    #NOTE excluding T waves also excludes P waves (P-wave hypotheses depend
    #on the T-wave abstraction level).
    if exclude_twaves:
        curr_knowledge.remove(ap.TWAVE_PATTERN)
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    elif exclude_pwaves:
        curr_knowledge.remove(ap.PWAVE_PATTERN)
    ap.set_knowledge_base(curr_knowledge)
    #Input configuration. A huge time factor makes the acquisition run as
    #fast as possible (offline processing).
    IN.set_record(path, ann)
    IN.set_duration(fr_len)
    IN.set_tfactor(1e20)
    #Annotations buffer
    annots = sortedcontainers.SortedList()
    pos = initial_pos
    #Interpretation counter snapshot, used to report how many
    #interpretations each fragment created.
    ictr = Interpretation.counter
    while pos < min(IN.get_record_length(), final_pos):
        if verbose:
            print('Processing fragment {0}:{1}'.format(pos, pos+fr_len))
        #Input start: acquire the whole fragment before reasoning.
        IN.reset()
        IN.set_offset(pos)
        IN.start()
        while IN.BUF.get_status() == IN.BUF.Status.ACQUIRING:
            IN.get_more_evidence()
        #Reasoning and interpretation
        root = node = Interpretation()
        try:
            root.focus.push(next(IN.BUF.get_observations()), None)
        except (StopIteration, ValueError):
            #No observations in this fragment: move on to the next one.
            pos += fr_len
            if verbose:
                print('No evidence found in this fragment. Skipping.')
            continue
        #Cache of firm-successor generators for each visited node.
        successors = {node:reasoning.firm_succ(node)}
        t0 = time.time()
        ########################
        ### Greedy searching ###
        ########################
        while True:
            try:
                node = next(successors[node])
                if node not in successors:
                    successors[node] = reasoning.firm_succ(node)
            except StopIteration:
                #If the focus contains a top-level hypothesis, then there is
                #no more evidence to explain.
                if isinstance(node.focus.top[0], o.CardiacCycle):
                    break
                else:
                    #In other case, we perform a backtracking operation
                    node = node.parent
            except KeyError:
                #Backtracked past the root: accept the root interpretation.
                node = root
                break
        best_explanation = node
        best_explanation.recover_all()
        #End of reasoning
        #We generate and add the annotations for the current fragment
        newanns = interp2ann(best_explanation, 0, pos, pos == initial_pos)
        annots.update(newanns)
        #We go to the next fragment after deleting the current used branch
        #and clearing the reasoning cache.
        del root
        reasoning.reset()
        if verbose:
            idur = time.time() - t0
            print('Fragment finished in {0:.03f} seconds. Real-time factor: '
                  '{1:.03f}. Created {2} interpretations.'.format(idur,
                      sp2ms(IN.get_acquisition_point())/(idur*1000.),
                      Interpretation.counter-ictr))
        ictr = Interpretation.counter
        #Fragments are interpreted with no overlapping in this level.
        pos += fr_len
    #Restore the previous knowledge base
    ap.set_knowledge_base(prev_knowledge)
    return _clean_artifacts_redundancy(annots)