def enable_hidden_transitions(net, marking, activated_transitions,
                              visited_transitions, all_visited_markings,
                              hidden_transitions_to_enable, t):
    """
    Actually enable hidden transitions on the Petri net

    Parameters
    -----------
    net
        Petri net
    marking
        Current marking
    activated_transitions
        All activated transitions during the replay
    visited_transitions
        All visited transitions by the recursion
    all_visited_markings
        All visited markings
    hidden_transitions_to_enable
        List of hidden transition to enable
    t
        Transition against we should check if they are enabled

    Returns
    -----------
    list
        [marking, activated_transitions, visited_transitions,
        all_visited_markings] after firing the hidden transitions that
        could be fired
    """
    # One cursor per group of hidden transitions: j_indexes[g] is the next
    # position to try inside group g.
    j_indexes = [0] * len(hidden_transitions_to_enable)
    # Round-robin over the groups (z % len(...) selects the current group).
    # The huge range bound is only a safety cap; the real exits are the
    # break statements below.
    for z in range(10000000):
        something_changed = False
        for k in range(
                j_indexes[z % len(hidden_transitions_to_enable)],
                len(hidden_transitions_to_enable[
                        z % len(hidden_transitions_to_enable)])):
            # NOTE(review): the element is fetched through the j_indexes
            # cursor, not through k; k only bounds how many steps this
            # pass over the group may take.
            t3 = hidden_transitions_to_enable[
                z % len(hidden_transitions_to_enable)][j_indexes[
                z % len(hidden_transitions_to_enable)]]
            if not t3 == t:
                if semantics.is_enabled(t3, net, marking):
                    if t3 not in visited_transitions:
                        # Fire the hidden transition and record the step.
                        marking = semantics.execute(t3, net, marking)
                        activated_transitions.append(t3)
                        visited_transitions.add(t3)
                        all_visited_markings.append(marking)
                        something_changed = True
            # Advance the cursor of the current group.
            j_indexes[z % len(hidden_transitions_to_enable)] = j_indexes[
                z % len(hidden_transitions_to_enable)] + 1
            # Stop as soon as the target transition t becomes enabled.
            if semantics.is_enabled(t, net, marking):
                break
        if semantics.is_enabled(t, net, marking):
            break
        # No group made progress during this full round: fixpoint reached.
        if not something_changed:
            break
    return [
        marking, activated_transitions, visited_transitions,
        all_visited_markings
    ]
def get_visible_transitions_eventually_enabled_by_marking(net, marking):
    """
    Get visible transitions eventually enabled by marking (passing possibly
    through hidden transitions)

    Parameters
    ----------
    net
        Petri net
    marking
        Current marking

    Returns
    ----------
    visible_transitions
        Set of labeled transitions that are enabled now or become enabled
        after firing one or more hidden (label-less) transitions
    """
    all_enabled_transitions = list(semantics.enabled_transitions(net, marking))
    visible_transitions = set()
    visited_transitions = set()
    # BUG FIX: the previous version iterated with
    # "for i in range(len(all_enabled_transitions))"; range() evaluates the
    # length once, so transitions appended to the worklist below (the ones
    # that only become enabled after firing a hidden transition) were never
    # examined. A while-loop re-reads the growing length on each iteration.
    i = 0
    while i < len(all_enabled_transitions):
        t = all_enabled_transitions[i]
        if t not in visited_transitions:
            if t.label is not None:
                # Labeled transition: visible, keep it.
                visible_transitions.add(t)
            else:
                # Hidden transition: fire it on a copy of the marking and
                # enqueue whatever becomes enabled afterwards.
                marking_copy = copy(marking)
                if semantics.is_enabled(t, net, marking_copy):
                    new_marking = semantics.execute(t, net, marking_copy)
                    new_enabled_transitions = list(
                        semantics.enabled_transitions(net, new_marking))
                    all_enabled_transitions = all_enabled_transitions + new_enabled_transitions
            visited_transitions.add(t)
        i += 1
    return visible_transitions
def is_sequence_possible(net, transition_sequence):
    """
    Helper function used to run a sequence of transitions.
    Returns [a,b,c] where
    a = True if sequence of transitions is possible in net
    b = the index of the transition that is not possible if a = False
    c = label of transition if a transition was not found in the network
    else False
    """
    # Start from the net's current marking.
    current_marking = get_marking(net)

    # Map each requested transition name to its transition object.
    transition_dict = {
        transition.name: transition
        for transition in net.transitions
        if transition.name in transition_sequence
    }

    for index, transition_label in enumerate(transition_sequence):
        # Transition not found in the network at all.
        if transition_label not in transition_dict:
            return [False, 0, transition_label]
        transition = transition_dict[transition_label]
        # Transition exists but is not enabled here: sequence impossible.
        if not semantics.is_enabled(transition, net, current_marking):
            return [False, index, False]
        # Fire the transition to reach the next marking.
        current_marking = semantics.execute(transition, net, current_marking)

    # Every transition in the sequence could be fired.
    return [True, 0, False]
def get_visible_transitions_eventually_enabled_by_marking(net, marking):
    """
    Get visible transitions eventually enabled by marking (passing possibly
    through hidden transitions)

    Parameters
    ----------
    net
        Petri net
    marking
        Current marking
    """
    def _sorted_enabled(mark):
        # Deterministic order: name first, object identity as tie-breaker.
        return sorted(semantics.enabled_transitions(net, mark),
                      key=lambda trans: (str(trans.name), id(trans)))

    # Worklist of transitions still to examine, plus the marking from which
    # each one was discovered (later discoveries overwrite earlier ones).
    queue = _sorted_enabled(marking)
    marking_of = {}
    for trans in queue:
        marking_of[trans] = marking

    visible_transitions = set()
    seen = set()
    idx = 0
    while idx < len(queue):
        candidate = queue[idx]
        mark_copy = copy(marking_of[candidate])
        # Deduplicate on the (transition, marking) pair, not the transition
        # alone, so the same transition can be explored from new markings.
        key = repr([candidate, mark_copy])
        if key not in seen:
            if candidate.label is not None:
                # Labeled transition: visible, keep it.
                visible_transitions.add(candidate)
            elif semantics.is_enabled(candidate, net, mark_copy):
                # Hidden transition: fire it and enqueue everything enabled
                # in the resulting marking.
                successor_marking = semantics.execute(candidate, net,
                                                      mark_copy)
                for follower in _sorted_enabled(successor_marking):
                    queue.append(follower)
                    marking_of[follower] = successor_marking
            seen.add(key)
        idx = idx + 1
    return visible_transitions
def tr_vlist(vlist, net, im, fm, tmap, bmap, parameters=None):
    """
    Visit a variant using the backwards token basedr eplay

    Parameters
    ------------
    vlist
        Variants list
    net
        Petri net
    im
        Initial marking
    fm
        Final marking
    tmap
        Transition map (labels to list of transitions)
    bmap
        B-map
    parameters
        Possible parameters of the execution

    Returns
    -------------
    visited_transitions
        List of visited transitions during the replay
    is_fit
        Indicates if the replay was successful or not
    """
    if parameters is None:
        parameters = {}

    stop_immediately_unfit = exec_utils.get_param_value(
        Parameters.STOP_IMMEDIATELY_UNFIT, parameters, False)

    m = copy(im)
    # Token counters for the classic token-based-replay fitness formula.
    tokens_counter = Counter()
    tokens_counter["missing"] = 0
    tokens_counter["remaining"] = 0
    tokens_counter["consumed"] = 0
    tokens_counter["produced"] = 0

    # Tokens of the initial marking count as produced.
    for p in m:
        tokens_counter["produced"] += m[p]

    visited_transitions = []
    transitions_with_problems = []

    is_fit = True
    replay_interrupted = False
    for act in vlist:
        if act in tmap:
            # NOTE(review): activities missing from tmap are skipped
            # silently; only mapped activities can interrupt the replay.
            rep_ok = False
            for t in tmap[act]:
                if is_enabled(t, net, m):
                    # Transition directly enabled: fire it.
                    m, tokens_counter = execute_tr(m, t, tokens_counter)
                    visited_transitions.append(t)
                    rep_ok = True
                    continue
                elif len(tmap[act]) == 1:
                    # Single candidate but not enabled: try to reach its
                    # input marking via the backwards exploration.
                    back_res = explore_backwards([(get_bmap(
                        net, t.in_marking, bmap), copy(t.in_marking),
                        list())], set(), net, m, bmap)
                    if back_res is not None:
                        rep_ok = True
                        # Fire the backwards-found transitions, then t.
                        for t2 in back_res:
                            m, tokens_counter = execute_tr(
                                m, t2, tokens_counter)
                        visited_transitions = visited_transitions + back_res
                        m, tokens_counter = execute_tr(m, t, tokens_counter)
                        visited_transitions.append(t)
                    else:
                        # No way to enable t: force-fire it anyway and
                        # record the non-conformance.
                        is_fit = False
                        transitions_with_problems.append(t)
                        m, tokens_counter = execute_tr(m, t, tokens_counter)
                        visited_transitions.append(t)
                        if stop_immediately_unfit:
                            rep_ok = False
                            break
                        else:
                            rep_ok = True
            if not rep_ok:
                is_fit = False
                replay_interrupted = True
                break

    if not m == fm:
        # Reached marking differs from the final marking: account for
        # remaining (surplus) and missing (deficit) tokens.
        is_fit = False
        diff1 = m - fm
        diff2 = fm - m
        for p in diff1:
            if diff1[p] > 0:
                tokens_counter["remaining"] += diff1[p]
        for p in diff2:
            if diff2[p] > 0:
                tokens_counter["missing"] += diff2[p]

    # Tokens still sitting on final-marking places count as consumed.
    for p in fm:
        tokens_counter["consumed"] += m[p]

    # Standard token-based-replay fitness: average of the missing/consumed
    # and remaining/produced complements.
    trace_fitness = 0.5 * (1.0 - float(tokens_counter["missing"]) / float(
        tokens_counter["consumed"])) + 0.5 * (1.0 - float(
        tokens_counter["remaining"]) / float(tokens_counter["produced"]))

    enabled_transitions_in_marking = get_visible_transitions_eventually_enabled_by_marking(
        net, m)

    return {
        "activated_transitions": visited_transitions,
        "trace_is_fit": is_fit,
        "replay_interrupted": replay_interrupted,
        "transitions_with_problems": transitions_with_problems,
        "activated_transitions_labels":
            [x.label for x in visited_transitions],
        "missing_tokens": tokens_counter["missing"],
        "consumed_tokens": tokens_counter["consumed"],
        "produced_tokens": tokens_counter["produced"],
        "remaining_tokens": tokens_counter["remaining"],
        "trace_fitness": trace_fitness,
        "enabled_transitions_in_marking": enabled_transitions_in_marking
    }
def enhance(self, log_wrapper: LogWrapper):
    """
    Enhance a given petri net based on an event log.

    Replays every trace of the log on the net, records the time deltas
    between successive activations of each place, and fits a LinearDecay
    function per place from those deltas.

    :param log_wrapper: Event log under consideration as LogWrapper
    :return: None
    """
    """ Standard Enhancement """
    beta = float(10)
    reactivation_deltas = {}
    for place in self.net.places:
        reactivation_deltas[str(place)] = list()

    log_wrapper.iterator_reset()
    last_activation = {}
    pbar = tqdm(total=log_wrapper.num_traces,
                desc="Replay for Process Model Enhancement")
    while log_wrapper.iterator_hasNext():
        trace = log_wrapper.iterator_next()
        pbar.update(1)
        # Reset last-activation times: initially-marked places start at the
        # trace's first timestamp, all others are unset (-1).
        for place in self.net.places:
            if place in self.initial_marking:
                last_activation[str(place)] = trace[0]['time:timestamp']
            else:
                last_activation[str(place)] = -1

        """ Replay and estimate parameters """
        places_shortest_path_by_hidden = get_places_shortest_path_by_hidden(
            self.net, self.MAX_REC_DEPTH)
        marking = copy(self.initial_marking)
        for event in trace:
            if event[self.activity_key] in self.trans_map.keys():
                activated_places = []
                toi = self.trans_map[event[self.activity_key]]
                """ If Transition of interest is not enabled yet, then go through hidden"""
                if not semantics.is_enabled(toi, self.net, marking):
                    _, _, act_trans, _ = apply_hidden_trans(
                        toi, self.net, copy(marking),
                        places_shortest_path_by_hidden, [], 0, set(),
                        [copy(marking)])
                    for act_tran in act_trans:
                        for arc in act_tran.out_arcs:
                            activated_places.append(arc.target)
                        marking = semantics.execute(
                            act_tran, self.net, marking)
                """ If Transition of interest is STILL not enabled yet, then naively add missing token to fulfill firing rule """
                if not semantics.is_enabled(toi, self.net, marking):
                    for arc in toi.in_arcs:
                        if arc.source not in marking:
                            marking[arc.source] += 1
                """ Fire transition of interest """
                for arc in toi.out_arcs:
                    activated_places.append(arc.target)
                marking = semantics.execute(toi, self.net, marking)

                """ Marking is gone - transition could not be fired ..."""
                if marking is None:
                    # BUG FIX: concatenating the Transition object itself
                    # raised TypeError instead of the intended ValueError;
                    # str(toi) matches the sibling decay_replay method.
                    raise ValueError("Invalid Marking - Transition " +
                                     str(toi) + " could not be fired.")

                """ Update Time Recordings """
                for activated_place in activated_places:
                    if last_activation[str(activated_place)] != -1:
                        time_delta = time_delta_seconds(
                            last_activation[str(activated_place)],
                            event['time:timestamp'])
                        if time_delta > 0:
                            # BUG FIX: previously appended to
                            # reactivation_deltas[str(place)], i.e. the stale
                            # loop variable from the reset loop above, so all
                            # deltas landed on one arbitrary place. The delta
                            # belongs to the place that was just activated.
                            reactivation_deltas[str(
                                activated_place)].append(time_delta)
                    last_activation[str(
                        activated_place)] = event['time:timestamp']
    pbar.close()

    """ Calculate decay function parameter """
    for place in self.net.places:
        if len(reactivation_deltas[str(place)]) > 1:
            # Enough observations: alpha from the mean reactivation delta.
            self.decay_functions[str(place)] = LinearDecay(
                alpha=1 / np.mean(reactivation_deltas[str(place)]),
                beta=beta)
        else:
            # Fallback: alpha from the longest trace duration in the log.
            self.decay_functions[str(place)] = LinearDecay(
                alpha=1 / log_wrapper.max_trace_duration, beta=beta)

    """ Get resource keys to store """
    self.resource_keys = log_wrapper.getResourceKeys()
def decay_replay(self, log_wrapper: LogWrapper, resources: list = None):
    """
    Decay Replay on given event log.

    :param log_wrapper: Input event log as LogWrapper to be replayed.
    :param resources: Resource keys to count (must have been counted during Petri net enhancement already!), as a list
    :return: list of timed state samples as JSON, list of timed state sample objects
    """
    tss = list()
    tss_objs = list()

    # Per-place state vectors, keyed by str(place), rebuilt per trace.
    decay_values = {}
    token_counts = {}
    marks = {}
    last_activation = {}

    """ Initialize Resource Counter """
    count_resources = False
    resource_counter = None
    if log_wrapper.resource_keys is not None:
        count_resources = True
        resource_counter = dict()
        for key in log_wrapper.resource_keys.keys():
            resource_counter[key] = 0
    """ ---> """

    log_wrapper.iterator_reset()
    pbar = tqdm(total=log_wrapper.num_traces,
                desc="Decay Replay on Event Log")
    while log_wrapper.iterator_hasNext():
        trace = log_wrapper.iterator_next()
        pbar.update(1)
        # Fresh per-trace copy of the (zeroed) resource counters.
        resource_count = copy(resource_counter)

        """ Reset all counts for the next trace """
        # Initially-marked places start at the trace's first timestamp,
        # all other places are unset (-1).
        for place in self.net.places:
            if place in self.initial_marking:
                last_activation[str(place)] = trace[0]['time:timestamp']
            else:
                last_activation[str(place)] = -1

        for place in self.net.places:
            decay_values[str(place)] = 0.0
            token_counts[str(place)] = 0.0
            marks[str(place)] = 0.0
        """ ----------------------------------> """

        places_shortest_path_by_hidden = get_places_shortest_path_by_hidden(
            self.net, self.MAX_REC_DEPTH)
        marking = copy(self.initial_marking)

        """ Initialize counts based on initial marking """
        for place in marking:
            decay_values[str(place)] = self.decay_functions[str(
                place)].decay(t=0)
            token_counts[str(place)] += 1
            marks[str(place)] = 1
        """ ----------------------------------> """

        """ Replay """
        time_recent = None
        init_time = None
        for event_id in range(len(trace)):
            event = trace[event_id]

            if event_id == 0:
                init_time = event['time:timestamp']

            # Keep the previous event's time to detect the first event.
            time_past = time_recent
            time_recent = event['time:timestamp']

            if event[self.activity_key] in self.trans_map.keys():
                activated_places = list()
                toi = self.trans_map[event[self.activity_key]]
                """ If Transition of interest is not enabled yet, then go through hidden"""
                if not semantics.is_enabled(toi, self.net, marking):
                    _, _, act_trans, _ = apply_hidden_trans(
                        toi, self.net, copy(marking),
                        places_shortest_path_by_hidden, [], 0, set(),
                        [copy(marking)])
                    for act_tran in act_trans:
                        for arc in act_tran.out_arcs:
                            activated_places.append(arc.target)
                        marking = semantics.execute(
                            act_tran, self.net, marking)

                """ If Transition of interest is STILL not enabled yet, then naively add missing token to fulfill firing rule"""
                if not semantics.is_enabled(toi, self.net, marking):
                    for arc in toi.in_arcs:
                        if arc.source not in marking:
                            marking[arc.source] += 1

                """ Fire transition of interest """
                for arc in toi.out_arcs:
                    activated_places.append(arc.target)
                marking = semantics.execute(toi, self.net, marking)

                """ Marking is gone - transition could not be fired ..."""
                if marking is None:
                    raise ValueError("Invalid Marking - Transition '" +
                                     str(toi) + "' could not be fired.")
                """ ----->"""

                """ Update Time Recordings """
                for activated_place in activated_places:
                    last_activation[str(
                        activated_place)] = event['time:timestamp']

                """ Count Resources"""
                if count_resources and resources is not None:
                    for resource_key in resources:
                        if resource_key in event.keys():
                            val = resource_key + "_:_" + event[resource_key]
                            if val in resource_count.keys():
                                resource_count[val] += 1

                """ Update Vectors and create TimedStateSamples """
                # Only from the second event onwards (time_past is set).
                if time_past is not None:
                    decay_values, token_counts = self.__updateVectors(
                        decay_values=decay_values,
                        last_activation=last_activation,
                        token_counts=token_counts,
                        activated_places=activated_places,
                        current_time=time_recent)

                    # The sample is labeled with the next event's activity
                    # (None at the end of the trace).
                    next_event_id = self.__findNextEventId(event_id, trace)
                    if next_event_id is not None:
                        next_event = trace[next_event_id][
                            self.activity_key]
                    else:
                        next_event = None

                    if count_resources:
                        timedstatesample = TimedStateSample(
                            time_delta_seconds(init_time, time_recent),
                            copy(decay_values), copy(token_counts),
                            copy(marking), copy(self.place_list),
                            resource_count=copy(resource_count),
                            resource_indices=log_wrapper.getResourceKeys())
                    else:
                        timedstatesample = TimedStateSample(
                            time_delta_seconds(init_time, time_recent),
                            copy(decay_values), copy(token_counts),
                            copy(marking), copy(self.place_list))
                    timedstatesample.setLabel(next_event)
                    tss.append(timedstatesample.export())
                    tss_objs.append(timedstatesample)
    pbar.close()
    return tss, tss_objs
def apply_trace(trace, net, initial_marking, final_marking, trans_map,
                enable_pltr_fitness, place_fitness, transition_fitness,
                notexisting_activities_in_model,
                places_shortest_path_by_hidden, consider_remaining_in_fitness,
                activity_key="concept:name",
                try_to_reach_final_marking_through_hidden=True,
                stop_immediately_unfit=False,
                walk_through_hidden_trans=True,
                post_fix_caching=None,
                marking_to_activity_caching=None,
                is_reduction=False,
                thread_maximum_ex_time=MAX_DEF_THR_EX_TIME,
                enable_postfix_cache=False,
                enable_marktoact_cache=False,
                cleaning_token_flood=False,
                s_components=None):
    """
    Apply the token replaying algorithm to a trace

    Parameters
    ----------
    trace
        Trace in the event log
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    trans_map
        Map between transitions labels and transitions
    enable_pltr_fitness
        Enable fitness calculation at place/transition level
    place_fitness
        Current dictionary of places associated with unfit traces
    transition_fitness
        Current dictionary of transitions associated with unfit traces
    notexisting_activities_in_model
        Map that stores the notexisting activities in the model
    places_shortest_path_by_hidden
        Shortest paths between places by hidden transitions
    consider_remaining_in_fitness
        Boolean value telling if the remaining tokens should be considered in fitness evaluation
    activity_key
        Name of the attribute that contains the activity
    try_to_reach_final_marking_through_hidden
        Boolean value that decides if we shall try to reach the final marking through hidden transitions
    stop_immediately_unfit
        Boolean value that decides if we shall stop immediately when a non-conformance is detected
    walk_through_hidden_trans
        Boolean value that decides if we shall walk through hidden transitions in order to enable visible transitions
    post_fix_caching
        Stores the post fix caching object
    marking_to_activity_caching
        Stores the marking-to-activity cache
    is_reduction
        Expresses if the token-based replay is called in a reduction attempt
    thread_maximum_ex_time
        Alignment threads maximum allowed execution time
    enable_postfix_cache
        Enables postfix cache
    enable_marktoact_cache
        Enables marking to activity cache
    cleaning_token_flood
        Decides if a cleaning of the token flood shall be operated
    s_components
        S-components of the Petri net (if workflow net)

    Returns
    ----------
    list
        [is_fit, trace_fitness, act_trans, transitions_with_problems,
        marking_before_cleaning, enabled transitions in that marking,
        missing, consumed, remaining, produced]
    """
    trace_activities = [event[activity_key] for event in trace]
    act_trans = []
    transitions_with_problems = []
    vis_mark = []
    activating_transition_index = {}
    activating_transition_interval = []
    used_postfix_cache = False
    marking = copy(initial_marking)
    vis_mark.append(marking)
    missing = 0
    consumed = 0
    # Tokens of the initial marking count as produced from the start.
    sum_tokens_im = 0
    for place in initial_marking:
        sum_tokens_im = sum_tokens_im + initial_marking[place]
    sum_tokens_fm = 0
    for place in final_marking:
        sum_tokens_fm = sum_tokens_fm + final_marking[place]
    produced = sum_tokens_im
    current_event_map = {}
    current_remaining_map = {}
    for i in range(len(trace)):
        # Postfix cache: if the remaining activity suffix was already replayed
        # from this very marking, reuse the cached result and stop.
        if enable_postfix_cache and (
                str(trace_activities) in post_fix_caching.cache and
                hash(marking)
                in post_fix_caching.cache[str(trace_activities)]):
            trans_to_act = post_fix_caching.cache[str(trace_activities)][hash(
                marking)]["trans_to_activate"]
            for z in range(len(trans_to_act)):
                t = trans_to_act[z]
                act_trans.append(t)
            used_postfix_cache = True
            marking = post_fix_caching.cache[str(trace_activities)][hash(
                marking)]["final_marking"]
            break
        else:
            prev_len_activated_transitions = len(act_trans)
            # Marking-to-activity cache: reuse the stored transitions/markings
            # if (marking, activity, previous activity) was seen before.
            if enable_marktoact_cache and (
                    hash(marking) in marking_to_activity_caching.cache and
                    trace[i][activity_key]
                    in marking_to_activity_caching.cache[hash(marking)] and
                    trace[i - 1][activity_key] ==
                    marking_to_activity_caching.cache[hash(marking)][
                        trace[i][activity_key]]["previousActivity"]):
                this_end_marking = marking_to_activity_caching.cache[hash(
                    marking)][trace[i][activity_key]]["end_marking"]
                this_act_trans = marking_to_activity_caching.cache[hash(
                    marking)][trace[i]
                              [activity_key]]["this_activated_transitions"]
                this_vis_markings = marking_to_activity_caching.cache[hash(
                    marking)][trace[i][activity_key]]["this_visited_markings"]
                act_trans = act_trans + this_act_trans
                vis_mark = vis_mark + this_vis_markings
                marking = copy(this_end_marking)
            else:
                if trace[i][activity_key] in trans_map:
                    current_event_map.update(trace[i])
                    t = trans_map[trace[i][activity_key]]
                    # Try to enable t by firing hidden transitions first.
                    if walk_through_hidden_trans and not semantics.is_enabled(
                            t, net, marking):
                        visited_transitions = set()
                        prev_len_activated_transitions = len(act_trans)
                        [net, marking, act_trans,
                         vis_mark] = apply_hidden_trans(
                             t, net, marking, places_shortest_path_by_hidden,
                             act_trans, 0, visited_transitions, vis_mark)
                    is_initially_enabled = True
                    old_marking_names = [x.name for x in list(marking.keys())]
                    if not semantics.is_enabled(t, net, marking):
                        # t is still not enabled: insert the missing tokens
                        # and record the non-conformance.
                        is_initially_enabled = False
                        transitions_with_problems.append(t)
                        if stop_immediately_unfit:
                            missing = missing + 1
                            break
                        [m, tokens_added] = add_missing_tokens(t, marking)
                        missing = missing + m
                        if enable_pltr_fitness:
                            for place in tokens_added.keys():
                                if place in place_fitness:
                                    place_fitness[place][
                                        "underfed_traces"].add(trace)
                            if trace not in transition_fitness[t][
                                    "underfed_traces"]:
                                transition_fitness[t]["underfed_traces"][
                                    trace] = list()
                            transition_fitness[t]["underfed_traces"][
                                trace].append(current_event_map)
                    elif enable_pltr_fitness:
                        if trace not in transition_fitness[t]["fit_traces"]:
                            transition_fitness[t]["fit_traces"][trace] = list()
                        transition_fitness[t]["fit_traces"][trace].append(
                            current_event_map)
                    c = get_consumed_tokens(t)
                    p = get_produced_tokens(t)
                    consumed = consumed + c
                    produced = produced + p
                    if semantics.is_enabled(t, net, marking):
                        marking = semantics.execute(t, net, marking)
                        act_trans.append(t)
                        vis_mark.append(marking)
                    if not is_initially_enabled and cleaning_token_flood:
                        # here, a routine for cleaning token flood shall go
                        new_marking_names = [
                            x.name for x in list(marking.keys())
                        ]
                        new_marking_names_diff = [
                            x for x in new_marking_names
                            if x not in old_marking_names
                        ]
                        new_marking_names_inte = [
                            x for x in new_marking_names
                            if x in old_marking_names
                        ]
                        # Remove surplus tokens that share an S-component with
                        # a newly-added place, tracking them as remaining.
                        for p1 in new_marking_names_inte:
                            for p2 in new_marking_names_diff:
                                for comp in s_components:
                                    if p1 in comp and p2 in comp:
                                        place_to_delete = [
                                            place
                                            for place in list(marking.keys())
                                            if place.name == p1
                                        ]
                                        if len(place_to_delete) == 1:
                                            del marking[place_to_delete[0]]
                                            if not place_to_delete[
                                                    0] in current_remaining_map:
                                                current_remaining_map[
                                                    place_to_delete[0]] = 0
                                            current_remaining_map[
                                                place_to_delete[
                                                    0]] = current_remaining_map[
                                                        place_to_delete[0]] + 1
                        pass
                else:
                    # Activity has no transition in the model: record it.
                    if not trace[i][
                            activity_key] in notexisting_activities_in_model:
                        notexisting_activities_in_model[trace[i]
                                                        [activity_key]] = {}
                    notexisting_activities_in_model[
                        trace[i][activity_key]][trace] = current_event_map
        # Consume the current activity from the suffix and update the caches'
        # bookkeeping structures.
        del trace_activities[0]
        if len(trace_activities) < MAX_POSTFIX_SUFFIX_LENGTH:
            activating_transition_index[str(trace_activities)] = {
                "index": len(act_trans),
                "marking": hash(marking)
            }
        if i > 0:
            activating_transition_interval.append([
                trace[i][activity_key], prev_len_activated_transitions,
                len(act_trans), trace[i - 1][activity_key]
            ])
        else:
            activating_transition_interval.append([
                trace[i][activity_key], prev_len_activated_transitions,
                len(act_trans), ""
            ])

    if try_to_reach_final_marking_through_hidden and not used_postfix_cache:
        # First strategy: fire the hidden transitions suggested for reaching
        # the final marking, up to MAX_IT_FINAL1 rounds.
        for i in range(MAX_IT_FINAL1):
            if not break_condition_final_marking(marking, final_marking):
                hidden_transitions_to_enable = get_req_transitions_for_final_marking(
                    marking, final_marking, places_shortest_path_by_hidden)

                for group in hidden_transitions_to_enable:
                    for t in group:
                        if semantics.is_enabled(t, net, marking):
                            marking = semantics.execute(t, net, marking)
                            act_trans.append(t)
                            vis_mark.append(marking)
                    if break_condition_final_marking(marking, final_marking):
                        break
            else:
                break

        # if i > DebugConst.REACH_ITF1:
        #    DebugConst.REACH_ITF1 = i

        # try to reach the final marking in a different fashion, if not already reached
        if not break_condition_final_marking(marking, final_marking):
            if len(final_marking) == 1:
                sink_place = list(final_marking)[0]

                # Collect hidden-transition paths from marked places to the
                # sink, shortest first.
                connections_to_sink = []
                for place in marking:
                    if place in places_shortest_path_by_hidden and sink_place in places_shortest_path_by_hidden[
                            place]:
                        connections_to_sink.append([
                            place,
                            places_shortest_path_by_hidden[place][sink_place]
                        ])
                connections_to_sink = sorted(connections_to_sink,
                                             key=lambda x: len(x[1]))

                for i in range(MAX_IT_FINAL2):
                    for j in range(len(connections_to_sink)):
                        for z in range(len(connections_to_sink[j][1])):
                            t = connections_to_sink[j][1][z]
                            if semantics.is_enabled(t, net, marking):
                                marking = semantics.execute(t, net, marking)
                                act_trans.append(t)
                                vis_mark.append(marking)
                                continue
                            else:
                                break

        # if i > DebugConst.REACH_ITF2:
        #    DebugConst.REACH_ITF2 = i

    if break_condition_final_marking(marking, final_marking):
        # Final marking reached: its tokens count as consumed.
        consumed = consumed + sum_tokens_fm

    marking_before_cleaning = copy(marking)

    # Whatever tokens are left beyond the final marking are "remaining".
    remaining = 0
    for p in marking:
        if p in final_marking:
            marking[p] = max(0, marking[p] - final_marking[p])
            if enable_pltr_fitness:
                if marking[p] > 0:
                    if p in place_fitness:
                        if trace not in place_fitness[p]["underfed_traces"]:
                            place_fitness[p]["overfed_traces"].add(trace)
        remaining = remaining + marking[p]
    # Tokens removed by the token-flood cleaning also count as remaining.
    for p in current_remaining_map:
        if enable_pltr_fitness:
            if p in place_fitness:
                if trace not in place_fitness[p][
                        "underfed_traces"] and trace not in place_fitness[p][
                            "overfed_traces"]:
                    place_fitness[p]["overfed_traces"].add(trace)
        remaining = remaining + current_remaining_map[p]
    if consider_remaining_in_fitness:
        is_fit = (missing == 0) and (remaining == 0)
    else:
        is_fit = (missing == 0)

    if consumed > 0 and produced > 0:
        # trace_fitness = (1.0 - float(missing) / float(consumed)) * (1.0 - float(remaining) / float(produced))
        trace_fitness = 0.5 * (1.0 - float(missing) / float(consumed)) + 0.5 * (
            1.0 - float(remaining) / float(produced))
    else:
        trace_fitness = 1.0

    if is_fit:
        # Populate both caches only for fitting traces.
        for suffix in activating_transition_index:
            if suffix not in post_fix_caching.cache:
                post_fix_caching.cache[suffix] = {}
            if activating_transition_index[suffix][
                    "marking"] not in post_fix_caching.cache[suffix]:
                post_fix_caching.cache[suffix][activating_transition_index[suffix]["marking"]] = \
                    {"trans_to_activate": act_trans[activating_transition_index[suffix]["index"]:],
                     "final_marking": marking}
        for trans in activating_transition_interval:
            activity = trans[0]
            start_marking_index = trans[1]
            end_marking_index = trans[2]
            previous_activity = trans[3]
            if end_marking_index < len(vis_mark):
                start_marking_object = vis_mark[start_marking_index]
                start_marking_hash = hash(start_marking_object)
                end_marking_object = vis_mark[end_marking_index]
                if activity in trans_map:
                    this_activated_trans = act_trans[
                        start_marking_index:end_marking_index]
                    this_visited_markings = vis_mark[start_marking_index +
                                                     1:end_marking_index + 1]

                    if start_marking_hash not in marking_to_activity_caching.cache:
                        marking_to_activity_caching.cache[
                            start_marking_hash] = {}
                    if activity not in marking_to_activity_caching.cache[
                            start_marking_hash]:
                        marking_to_activity_caching.cache[start_marking_hash][
                            activity] = {
                                "start_marking": start_marking_object,
                                "end_marking": end_marking_object,
                                "this_activated_transitions":
                                    this_activated_trans,
                                "this_visited_markings": this_visited_markings,
                                "previousActivity": previous_activity
                            }

    return [
        is_fit, trace_fitness, act_trans, transitions_with_problems,
        marking_before_cleaning,
        get_visible_transitions_eventually_enabled_by_marking(
            net, marking_before_cleaning), missing, consumed, remaining,
        produced
    ]
def apply_hidden_trans(t, net, marking, places_shortest_paths_by_hidden,
                       act_tr, rec_depth, visit_trans, vis_mark):
    """
    Apply hidden transitions in order to enable a given transition

    Parameters
    ----------
    t
        Transition to eventually enable
    net
        Petri net
    marking
        Marking
    places_shortest_paths_by_hidden
        Shortest paths between places connected by hidden transitions
    act_tr
        All activated transitions
    rec_depth
        Current recursion depth
    visit_trans
        All visited transitions by hiddenTrans method
    vis_mark
        All visited markings

    Returns
    ----------
    list
        [net, marking, act_tr, vis_mark], possibly updated by firing hidden
        transitions
    """
    # Stop on too-deep recursion or when t was already handled.
    if rec_depth >= MAX_REC_DEPTH_HIDTRANSENABL or t in visit_trans:
        return [net, marking, act_tr, vis_mark]
    # if rec_depth > DebugConst.REACH_MRH:
    #    DebugConst.REACH_MRH = rec_depth
    visit_trans.add(t)
    marking_at_start = copy(marking)
    places_with_missing = get_places_with_missing_tokens(t, marking)
    hidden_transitions_to_enable = get_hidden_transitions_to_enable(
        marking, places_with_missing, places_shortest_paths_by_hidden)

    if hidden_transitions_to_enable:
        # First attempt: fire the candidate hidden transitions directly.
        [marking, act_tr, visit_trans,
         vis_mark] = enable_hidden_transitions(net, marking, act_tr,
                                               visit_trans, vis_mark,
                                               hidden_transitions_to_enable,
                                               t)
        if not semantics.is_enabled(t, net, marking):
            # Second attempt: recompute the candidates on the new marking and
            # recursively try to enable each of them before firing it.
            hidden_transitions_to_enable = get_hidden_transitions_to_enable(
                marking, places_with_missing,
                places_shortest_paths_by_hidden)
            for z in range(len(hidden_transitions_to_enable)):
                for k in range(len(hidden_transitions_to_enable[z])):
                    t4 = hidden_transitions_to_enable[z][k]
                    if not t4 == t:
                        if t4 not in visit_trans:
                            if not semantics.is_enabled(t4, net, marking):
                                [net, marking, act_tr,
                                 vis_mark] = apply_hidden_trans(
                                     t4, net, marking,
                                     places_shortest_paths_by_hidden, act_tr,
                                     rec_depth + 1, visit_trans, vis_mark)
                            if semantics.is_enabled(t4, net, marking):
                                marking = semantics.execute(t4, net, marking)
                                act_tr.append(t4)
                                visit_trans.add(t4)
                                vis_mark.append(marking)
        if not semantics.is_enabled(t, net, marking):
            # Last attempt: if the marking changed at all, retry the whole
            # procedure one level deeper.
            if not (marking_at_start == marking):
                [net, marking, act_tr, vis_mark] = apply_hidden_trans(
                    t, net, marking, places_shortest_paths_by_hidden, act_tr,
                    rec_depth + 1, visit_trans, vis_mark)

    return [net, marking, act_tr, vis_mark]
def apply_trace(trace, net, initial_marking, final_marking, trans_map, enable_place_fitness, place_fitness,
                places_shortest_path_by_hidden, consider_remaining_in_fitness, activity_key="concept:name",
                try_to_reach_final_marking_through_hidden=True, stop_immediately_unfit=False,
                walk_through_hidden_trans=True, post_fix_caching=None,
                marking_to_activity_caching=None):
    """
    Apply the token replaying algorithm to a trace

    Parameters
    ----------
    trace
        Trace in the event log
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    trans_map
        Map between transitions labels and transitions
    enable_place_fitness
        Enable fitness calculation at place level
    place_fitness
        Current dictionary of places associated with unfit traces
    places_shortest_path_by_hidden
        Shortest paths between places by hidden transitions
    consider_remaining_in_fitness
        Boolean value telling if the remaining tokens should be considered in fitness evaluation
    activity_key
        Name of the attribute that contains the activity
    try_to_reach_final_marking_through_hidden
        Boolean value that decides if we shall try to reach the final marking through hidden transitions
    stop_immediately_unfit
        Boolean value that decides if we shall stop immediately when a non-conformance is detected
    walk_through_hidden_trans
        Boolean value that decides if we shall walk through hidden transitions in order to enable visible transitions
    post_fix_caching
        Stores the post fix caching object
    marking_to_activity_caching
        Stores the marking-to-activity cache

    Returns
    -------
    list
        [is_fit, trace_fitness, act_trans, transitions_with_problems,
         marking_before_cleaning, enabled_transitions_in_marking]
    """
    # Remaining activities of the trace; consumed from the front as events are
    # replayed, and used (stringified) as the postfix-cache key
    trace_activities = [event[activity_key] for event in trace]
    act_trans = []
    transitions_with_problems = []
    vis_mark = []
    # suffix -> {"index": ..., "marking": hash} recorded while replaying; only
    # written back to the postfix cache at the end if the trace turned out fit
    activating_transition_index = {}
    # Per-event intervals [activity, start_idx, end_idx, previous_activity]
    # used to populate the marking-to-activity cache for fit traces
    activating_transition_interval = []
    used_postfix_cache = False
    marking = copy(initial_marking)
    vis_mark.append(marking)
    # Token counters for the fitness formula (missing/consumed/remaining/produced)
    missing = 0
    consumed = 0
    produced = 0
    for i in range(len(trace)):
        # Postfix cache hit: the remaining suffix from this exact marking was
        # replayed before on a fit trace — reuse its transitions and final
        # marking and stop replaying event by event
        if ENABLE_POSTFIX_CACHE and (str(trace_activities) in post_fix_caching.cache and
                                     hash(marking) in post_fix_caching.cache[str(trace_activities)]):
            trans_to_act = post_fix_caching.cache[str(trace_activities)][hash(marking)]["trans_to_activate"]
            for z in range(len(trans_to_act)):
                t = trans_to_act[z]
                act_trans.append(t)
            used_postfix_cache = True
            marking = post_fix_caching.cache[str(trace_activities)][hash(marking)]["final_marking"]
            break
        else:
            prev_len_activated_transitions = len(act_trans)
            # Marking-to-activity cache hit: this (marking, activity,
            # previous activity) combination was seen before — replay its
            # recorded effect instead of firing transitions again.
            # NOTE(review): trace[i - 1] wraps to the last event when i == 0;
            # presumably acceptable since "previousActivity" then rarely
            # matches — confirm against the caching object's semantics
            if ENABLE_MARKTOACT_CACHE and (hash(marking) in marking_to_activity_caching.cache and
                                           trace[i][activity_key] in marking_to_activity_caching.cache[hash(marking)]
                                           and trace[i - 1][activity_key] ==
                                           marking_to_activity_caching.cache[hash(marking)][trace[i][activity_key]]
                                           ["previousActivity"]):
                this_end_marking = marking_to_activity_caching.cache[hash(marking)][trace[i][activity_key]][
                    "end_marking"]
                this_act_trans = marking_to_activity_caching.cache[hash(marking)][trace[i][activity_key]][
                    "this_activated_transitions"]
                this_vis_markings = marking_to_activity_caching.cache[hash(marking)][trace[i][activity_key]][
                    "this_visited_markings"]
                act_trans = act_trans + this_act_trans
                vis_mark = vis_mark + this_vis_markings
                marking = copy(this_end_marking)
            else:
                # Normal replay of the current event (skipped entirely when the
                # activity has no corresponding transition in the model)
                if trace[i][activity_key] in trans_map:
                    t = trans_map[trace[i][activity_key]]
                    # Try to enable t by walking through hidden transitions first
                    if walk_through_hidden_trans and not semantics.is_enabled(t, net, marking):
                        visited_transitions = set()
                        prev_len_activated_transitions = len(act_trans)
                        [net, marking, act_trans, vis_mark] = apply_hidden_trans(t, net, marking,
                                                                                 places_shortest_path_by_hidden,
                                                                                 act_trans, 0, visited_transitions,
                                                                                 vis_mark)
                    # Still not enabled: record the non-conformance and insert
                    # the missing tokens so that replay can continue
                    if not semantics.is_enabled(t, net, marking):
                        transitions_with_problems.append(t)
                        if stop_immediately_unfit:
                            missing = missing + 1
                            break
                        [m, tokens_added] = add_missing_tokens(t, marking)
                        missing = missing + m
                        if enable_place_fitness:
                            for place in tokens_added.keys():
                                if place in place_fitness:
                                    place_fitness[place]["underfed_traces"].add(trace)
                    c = get_consumed_tokens(t)
                    p = get_produced_tokens(t)
                    consumed = consumed + c
                    produced = produced + p
                    if semantics.is_enabled(t, net, marking):
                        marking = semantics.execute(t, net, marking)
                        act_trans.append(t)
                        vis_mark.append(marking)
            # The current event has been handled: shorten the suffix and record
            # caching information for this position
            del trace_activities[0]
            if len(trace_activities) < MAX_POSTFIX_SUFFIX_LENGTH:
                activating_transition_index[str(trace_activities)] = {"index": len(act_trans),
                                                                      "marking": hash(marking)}
            if i > 0:
                activating_transition_interval.append(
                    [trace[i][activity_key], prev_len_activated_transitions, len(act_trans),
                     trace[i - 1][activity_key]])
            else:
                activating_transition_interval.append(
                    [trace[i][activity_key], prev_len_activated_transitions, len(act_trans),
                     ""])
    # After the trace: try to close the gap to the final marking by firing
    # hidden transitions (bounded number of iterations)
    if try_to_reach_final_marking_through_hidden and not used_postfix_cache:
        for i in range(MAX_IT_FINAL):
            # NOTE(review): this `i` shadows the event-loop index above (the
            # event loop has finished, so only readability is affected)
            if not break_condition_final_marking(marking, final_marking):
                hidden_transitions_to_enable = get_req_transitions_for_final_marking(marking, final_marking,
                                                                                     places_shortest_path_by_hidden)
                for group in hidden_transitions_to_enable:
                    for t in group:
                        if semantics.is_enabled(t, net, marking):
                            marking = semantics.execute(t, net, marking)
                            act_trans.append(t)
                            vis_mark.append(marking)
                    if break_condition_final_marking(marking, final_marking):
                        break
            else:
                break
        # try to reach the final marking in a different fashion, if not already reached:
        # walk toward the single sink place along the shortest hidden-transition paths
        if not break_condition_final_marking(marking, final_marking):
            if len(final_marking) == 1:
                sink_place = list(final_marking)[0]
                connections_to_sink = []
                for place in marking:
                    if place in places_shortest_path_by_hidden and sink_place in places_shortest_path_by_hidden[place]:
                        connections_to_sink.append([place, places_shortest_path_by_hidden[place][sink_place]])
                # Prefer the shortest paths first
                connections_to_sink = sorted(connections_to_sink, key=lambda x: len(x[1]))
                for i in range(MAX_IT_FINAL):
                    # NOTE(review): no break-condition check here — all
                    # MAX_IT_FINAL iterations run even once the final marking
                    # is reached (further firings are then simply disabled)
                    for j in range(len(connections_to_sink)):
                        for z in range(len(connections_to_sink[j][1])):
                            t = connections_to_sink[j][1][z]
                            if semantics.is_enabled(t, net, marking):
                                marking = semantics.execute(t, net, marking)
                                act_trans.append(t)
                                vis_mark.append(marking)
                                continue
                            else:
                                # Path blocked: abandon this path's remainder
                                break
    # "Clean" the marking: subtract the final marking's tokens; whatever is
    # left counts as remaining tokens for the fitness formula
    marking_before_cleaning = copy(marking)
    remaining = 0
    for p in marking:
        if p in final_marking:
            marking[p] = max(0, marking[p] - final_marking[p])
            if enable_place_fitness:
                if marking[p] > 0:
                    if p in place_fitness:
                        if trace not in place_fitness[p]["underfed_traces"]:
                            place_fitness[p]["overfed_traces"].add(trace)
        remaining = remaining + marking[p]
    if consider_remaining_in_fitness:
        is_fit = (missing == 0) and (remaining == 0)
    else:
        is_fit = (missing == 0)
    # Token-based fitness: (1 - missing/consumed) * (1 - remaining/produced)
    if consumed > 0 and produced > 0:
        trace_fitness = (1.0 - float(missing) / float(consumed)) * (
                1.0 - float(remaining) / float(produced))
    else:
        trace_fitness = 1.0
    # Only fit traces feed the caches, so cached entries never replay
    # non-conforming behavior
    if is_fit:
        for suffix in activating_transition_index:
            if suffix not in post_fix_caching.cache:
                post_fix_caching.cache[suffix] = {}
            if activating_transition_index[suffix]["marking"] not in post_fix_caching.cache[suffix]:
                post_fix_caching.cache[suffix][activating_transition_index[suffix]["marking"]] = \
                    {"trans_to_activate": act_trans[activating_transition_index[suffix]["index"]:],
                     "final_marking": marking}
        for trans in activating_transition_interval:
            activity = trans[0]
            start_marking_index = trans[1]
            end_marking_index = trans[2]
            previous_activity = trans[3]
            if end_marking_index < len(vis_mark):
                start_marking_object = vis_mark[start_marking_index]
                start_marking_hash = hash(start_marking_object)
                end_marking_object = vis_mark[end_marking_index]
                if activity in trans_map:
                    this_activated_trans = act_trans[start_marking_index:end_marking_index]
                    this_visited_markings = vis_mark[start_marking_index + 1:end_marking_index + 1]
                    if start_marking_hash not in marking_to_activity_caching.cache:
                        marking_to_activity_caching.cache[start_marking_hash] = {}
                    if activity not in marking_to_activity_caching.cache[start_marking_hash]:
                        marking_to_activity_caching.cache[start_marking_hash][activity] = {
                            "start_marking": start_marking_object, "end_marking": end_marking_object,
                            "this_activated_transitions": this_activated_trans,
                            "this_visited_markings": this_visited_markings,
                            "previousActivity": previous_activity}
    return [is_fit, trace_fitness, act_trans, transitions_with_problems, marking_before_cleaning,
            get_visible_transitions_eventually_enabled_by_marking(net, marking_before_cleaning)]