def marking_flow_petri(net, im, return_eventually_enabled=False, parameters=None):
    """
    Construct the marking flow of a Petri net: explore the state space starting
    from the initial marking and record, for every reachable marking, which
    transitions enter and leave it.

    Parameters
    -----------------
    net
        Petri net
    im
        Initial marking
    return_eventually_enabled
        Return the eventually enabled (visible) transitions per marking
    parameters
        Parameters of the algorithm, including:
            Parameters.MAX_ELAB_TIME -> maximum execution time (seconds)

    Returns
    -----------------
    incoming_transitions
        Dictionary marking -> set of transitions entering it
    outgoing_transitions
        Dictionary marking -> {transition: successor marking}
    eventually_enabled
        Dictionary marking -> eventually enabled visible transitions
        (populated only when return_eventually_enabled is True)
    """
    if parameters is None:
        parameters = {}

    # set a maximum execution time of 1 day (it can be changed by providing the parameter)
    max_exec_time = exec_utils.get_param_value(Parameters.MAX_ELAB_TIME, parameters, 86400)

    start_time = time.time()

    incoming_transitions = {im: set()}
    outgoing_transitions = {}
    eventually_enabled = {}

    # stack of markings still to be expanded
    active = [im]
    while active:
        if (time.time() - start_time) >= max_exec_time:
            # interrupt the execution and return the (partial) result gathered so far
            return incoming_transitions, outgoing_transitions, eventually_enabled
        m = active.pop()
        enabled_transitions = semantics.enabled_transitions(net, m)
        if return_eventually_enabled:
            eventually_enabled[m] = align_utils.get_visible_transitions_eventually_enabled_by_marking(net, m)
        outgoing_transitions[m] = {}
        for t in enabled_transitions:
            nm = semantics.weak_execute(t, m)
            outgoing_transitions[m][t] = nm
            if nm not in incoming_transitions:
                incoming_transitions[nm] = set()
                # enqueue only markings discovered for the first time; otherwise an
                # already-explored marking could be re-enqueued over and over,
                # which would make the exploration loop forever on cyclic nets
                if nm not in active:
                    active.append(nm)
            incoming_transitions[nm].add(t)

    return incoming_transitions, outgoing_transitions, eventually_enabled
def calculate_precision_with_tbr(self, net, im, fm, log, parameters=None):
    """
    Calculate the ETConformance precision of a model against a log, replaying
    the log prefixes on the net with token-based replay and counting the
    escaping edges.

    Parameters
    -------------
    net
        Petri net
    im
        Initial marking
    fm
        Final marking
    log
        Event log
    parameters
        Parameters of the algorithm (mutated: the token-replay flags
        enable_parameters_precision / consider_remaining_in_fitness are set)

    Returns
    -------------
    precision
        Precision value (defaults to 1.0 when no transition is ever activated)
    """
    from pm4py.evaluation.precision import utils as precision_utils
    if parameters is None:
        parameters = {}
    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    sum_at = 0.0  # overall number of activated transitions
    sum_ee = 0.0  # overall number of escaping edges
    prefixes, prefix_count = precision_utils.get_log_prefixes(log)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys)
    variants = log_variants_filter.get_variants_from_log_trace_idx(fake_log, parameters=parameters)
    var_list = [[x, y] for x, y in variants.items()]
    # configure the token-based replay to report the enabled transitions per marking
    parameters["enable_parameters_precision"] = True
    parameters["consider_remaining_in_fitness"] = False
    aligned_traces = self.perform_tbr_net_variants(net, im, fm, var_list=var_list, parameters=parameters)
    # also the empty prefix should be counted: compare the transitions enabled
    # in the initial marking against the start activities of the log
    start_activities = set(x.split(",")[0] for x in variants)
    trans_en_ini_marking = set(
        [x.label for x in get_visible_transitions_eventually_enabled_by_marking(net, im)])
    diff = trans_en_ini_marking.difference(start_activities)
    sum_at += len(log) * len(trans_en_ini_marking)
    sum_ee += len(log) * len(diff)
    for i in range(len(aligned_traces)):
        if aligned_traces[i]["trace_is_fit"]:
            log_transitions = set(prefixes[prefixes_keys[i]])
            activated_transitions_labels = set(
                [x for x in aligned_traces[i]["enabled_transitions_in_marking_labels"] if x != "None"])
            sum_at += len(activated_transitions_labels) * prefix_count[prefixes_keys[i]]
            escaping_edges = activated_transitions_labels.difference(log_transitions)
            sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]
    if sum_at > 0:
        precision = 1 - float(sum_ee) / float(sum_at)
    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> Activity key

    Returns
    ----------
    precision
        The ETConformance precision value
    """
    if parameters is None:
        parameters = {}

    cleaning_token_flood = exec_utils.get_param_value(
        Parameters.CLEANING_TOKEN_FLOOD, parameters, False)
    token_replay_variant = exec_utils.get_param_value(
        Parameters.TOKEN_REPLAY_VARIANT, parameters, executor.Variants.TOKEN_REPLAY)
    activity_key = exec_utils.get_param_value(
        Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)

    # when no activated transitions are found (not even in the initial
    # marking), precision defaults to 1.0
    precision = 1.0
    escaping_total = 0
    activated_total = 0

    replay_parameters = {
        token_replay.Parameters.CONSIDER_REMAINING_IN_FITNESS: False,
        token_replay.Parameters.TRY_TO_REACH_FINAL_MARKING_THROUGH_HIDDEN: False,
        token_replay.Parameters.STOP_IMMEDIATELY_UNFIT: True,
        token_replay.Parameters.WALK_THROUGH_HIDDEN_TRANS: True,
        token_replay.Parameters.CLEANING_TOKEN_FLOOD: cleaning_token_flood,
        token_replay.Parameters.ACTIVITY_KEY: activity_key
    }

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefix_list = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefix_list, activity_key=activity_key)

    replay_results = executor.apply(fake_log, net, marking, final_marking,
                                    variant=token_replay_variant,
                                    parameters=replay_parameters)

    # the empty prefix must also be counted: compare the transitions enabled
    # in the initial marking with the start activities observed in the log
    start_activities = set(get_start_activities(log, parameters=parameters))
    enabled_at_start = {
        t.label
        for t in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    activated_total += len(log) * len(enabled_at_start)
    escaping_total += len(log) * len(enabled_at_start.difference(start_activities))

    # weight each fitting prefix by its multiplicity in the log
    for prefix, replay_result in zip(prefix_list, replay_results):
        if not replay_result["trace_is_fit"]:
            continue
        observed = set(prefixes[prefix])
        modeled = {
            t.label
            for t in replay_result["enabled_transitions_in_marking"]
            if t.label is not None
        }
        activated_total += len(modeled) * prefix_count[prefix]
        escaping_total += len(modeled.difference(observed)) * prefix_count[prefix]

    if activated_total > 0:
        precision = 1 - float(escaping_total) / float(activated_total)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get Align-ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> Activity key
            "debug_level" -> 0 (silent), >0 summary prints, >1 per-prefix prints

    Returns
    ----------
    precision
        Precision value (defaults to 1.0 when no transition is ever activated)

    Raises
    ----------
    Exception
        If the net is not relaxed sound w.r.t. the initial/final marking
    """
    if parameters is None:
        parameters = {}

    # debug_level controls the amount of diagnostic printing (see docstring)
    debug_level = parameters[
        "debug_level"] if "debug_level" in parameters else 0

    activity_key = exec_utils.get_param_value(
        Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions (not even by looking at the initial marking) are found
    precision = 1.0
    sum_ee = 0   # overall number of escaping edges
    sum_at = 0   # overall number of activated transitions
    unfit = 0    # number of prefixes (weighted) with no alignment marking

    # the alignment-based stop markings are only meaningful on relaxed sound nets
    if not petri.check_soundness.check_relaxed_soundness_net_in_fin_marking(
            net, marking, final_marking):
        raise Exception(
            "trying to apply Align-ETConformance on a Petri net that is not a relaxed sound net!!"
        )

    prefixes, prefix_count = precision_utils.get_log_prefixes(
        log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    # align each prefix against the net, then map the stop markings of the
    # synchronous product back onto markings of the original net
    align_stop_marking = align_fake_log_stop_marking(fake_log, net, marking,
                                                     final_marking, parameters=parameters)
    all_markings = transform_markings_from_sync_to_original_net(
        align_stop_marking, net, parameters=parameters)

    for i in range(len(prefixes)):
        markings = all_markings[i]

        # markings is None when no alignment marking was obtained for the prefix
        if markings is not None:
            log_transitions = set(prefixes[prefixes_keys[i]])
            activated_transitions_labels = set()
            for m in markings:
                # add to the set of activated transitions in the model the activated transitions
                # for each prefix
                activated_transitions_labels = activated_transitions_labels.union(
                    x.label for x in utils.
                    get_visible_transitions_eventually_enabled_by_marking(
                        net, m) if x.label is not None)
            escaping_edges = activated_transitions_labels.difference(
                log_transitions)

            # weight counts by the multiplicity of the prefix in the log
            sum_at += len(activated_transitions_labels) * prefix_count[
                prefixes_keys[i]]
            sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]

            if debug_level > 1:
                print("")
                print("prefix=", prefixes_keys[i])
                print("log_transitions=", log_transitions)
                print("activated_transitions=", activated_transitions_labels)
                print("escaping_edges=", escaping_edges)
        else:
            unfit += prefix_count[prefixes_keys[i]]

    if debug_level > 0:
        print("\n")
        print("overall unfit", unfit)
        print("overall activated transitions", sum_at)
        print("overall escaping edges", sum_ee)

    # fix: also the empty prefix should be counted!
    start_activities = set(get_start_activities(log, parameters=parameters))
    trans_en_ini_marking = set([
        x.label for x in get_visible_transitions_eventually_enabled_by_marking(
            net, marking)
    ])
    diff = trans_en_ini_marking.difference(start_activities)
    sum_at += len(log) * len(trans_en_ini_marking)
    sum_ee += len(log) * len(diff)
    # end fix

    if sum_at > 0:
        precision = 1 - float(sum_ee) / float(sum_at)

    return precision
def tr_vlist(vlist, net, im, fm, tmap, bmap, parameters=None):
    """
    Visit a variant using the backwards token-based replay

    Parameters
    ------------
    vlist
        Variants list
    net
        Petri net
    im
        Initial marking
    fm
        Final marking
    tmap
        Transition map (labels to list of transitions)
    bmap
        B-map
    parameters
        Possible parameters of the execution

    Returns
    -------------
    result
        Dictionary with the visited transitions, fitness information and
        token counts (missing/consumed/produced/remaining) of the replay
    """
    if parameters is None:
        parameters = {}

    # when True, give up on the first activity that cannot be replayed
    stop_immediately_unfit = exec_utils.get_param_value(
        Parameters.STOP_IMMEDIATELY_UNFIT, parameters, False)

    m = copy(im)
    # token counters for the classic token-based-replay fitness formula
    tokens_counter = Counter()
    tokens_counter["missing"] = 0
    tokens_counter["remaining"] = 0
    tokens_counter["consumed"] = 0
    tokens_counter["produced"] = 0

    # the tokens of the initial marking count as produced
    for p in m:
        tokens_counter["produced"] += m[p]

    visited_transitions = []
    transitions_with_problems = []

    is_fit = True
    replay_interrupted = False
    for act in vlist:
        if act in tmap:
            rep_ok = False
            for t in tmap[act]:
                if is_enabled(t, net, m):
                    m, tokens_counter = execute_tr(m, t, tokens_counter)
                    visited_transitions.append(t)
                    rep_ok = True
                    # NOTE(review): `continue` moves on to the next candidate
                    # transition with the same label, so several of them could
                    # fire for one activity occurrence — confirm this is the
                    # intended semantics (vs. `break` after the first firing)
                    continue
                elif len(tmap[act]) == 1:
                    # the label maps to a single transition: try to enable it by
                    # exploring backwards which transitions produce its input marking
                    back_res = explore_backwards([(get_bmap(
                        net, t.in_marking, bmap), copy(t.in_marking), list())],
                        set(), net, m, bmap)
                    if back_res is not None:
                        rep_ok = True
                        # fire the enabling sequence first, then the transition itself
                        for t2 in back_res:
                            m, tokens_counter = execute_tr(
                                m, t2, tokens_counter)
                        visited_transitions = visited_transitions + back_res
                        m, tokens_counter = execute_tr(m, t, tokens_counter)
                        visited_transitions.append(t)
                    else:
                        # no enabling sequence found: force-fire the transition
                        # (missing tokens are accounted for by execute_tr)
                        is_fit = False
                        transitions_with_problems.append(t)
                        m, tokens_counter = execute_tr(m, t, tokens_counter)
                        visited_transitions.append(t)
                        if stop_immediately_unfit:
                            rep_ok = False
                            break
                        else:
                            rep_ok = True
            if not rep_ok:
                is_fit = False
                replay_interrupted = True
                break

    # compare the reached marking with the final marking; surplus tokens are
    # "remaining", shortfall tokens are "missing"
    if not m == fm:
        is_fit = False
        diff1 = m - fm
        diff2 = fm - m
        for p in diff1:
            if diff1[p] > 0:
                tokens_counter["remaining"] += diff1[p]
        for p in diff2:
            if diff2[p] > 0:
                tokens_counter["missing"] += diff2[p]

    # the tokens left on the final-marking places count as consumed
    # NOTE(review): this reads m[p] (reached marking) rather than fm[p]
    # (final marking); the two differ for unfit traces — confirm intended
    for p in fm:
        tokens_counter["consumed"] += m[p]

    # standard token-based replay fitness: average of the missing/consumed
    # and remaining/produced ratios
    trace_fitness = 0.5 * (1.0 - float(tokens_counter["missing"]) / float(
        tokens_counter["consumed"])) + 0.5 * (1.0 - float(
        tokens_counter["remaining"]) / float(tokens_counter["produced"]))

    enabled_transitions_in_marking = get_visible_transitions_eventually_enabled_by_marking(
        net, m)

    return {
        "activated_transitions": visited_transitions,
        "trace_is_fit": is_fit,
        "replay_interrupted": replay_interrupted,
        "transitions_with_problems": transitions_with_problems,
        "activated_transitions_labels": [x.label for x in visited_transitions],
        "missing_tokens": tokens_counter["missing"],
        "consumed_tokens": tokens_counter["consumed"],
        "produced_tokens": tokens_counter["produced"],
        "remaining_tokens": tokens_counter["remaining"],
        "trace_fitness": trace_fitness,
        "enabled_transitions_in_marking": enabled_transitions_in_marking
    }
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get Align-ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key

    Returns
    ----------
    precision
        Precision value (1.0 when no transition is ever activated)

    Raises
    ----------
    Exception
        If the net is not a relaxed sound workflow net
    """
    if parameters is None:
        parameters = {}

    activity_key = parameters.get(PARAM_ACTIVITY_KEY,
                                  log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    escaping_total = 0
    activated_total = 0

    # alignments require a relaxed sound workflow net
    if not (petri.check_soundness.check_wfnet(net)
            and petri.check_soundness.check_relaxed_soundness_net_in_fin_marking(
                net, marking, final_marking)):
        raise Exception(
            "trying to apply Align-ETConformance on a Petri net that is not a relaxed sound workflow net!!"
        )

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefix_list = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefix_list, activity_key=activity_key)

    # align each prefix and project the stop markings of the synchronous
    # product back onto the original net
    align_stop_marking = align_fake_log_stop_marking(fake_log, net, marking,
                                                     final_marking, parameters=parameters)
    all_markings = transform_markings_from_sync_to_original_net(
        align_stop_marking, net, parameters=parameters)

    for prefix, stop_marking in zip(prefix_list, all_markings):
        observed = set(prefixes[prefix])
        modeled = {
            t.label
            for t in utils.get_visible_transitions_eventually_enabled_by_marking(net, stop_marking)
            if t.label is not None
        }
        # weight counts by the multiplicity of the prefix in the log
        activated_total += len(modeled) * prefix_count[prefix]
        escaping_total += len(modeled.difference(observed)) * prefix_count[prefix]

    # the empty prefix should also be counted: compare the transitions enabled
    # in the initial marking against the start activities of the log
    start_activities = set(
        start_activities_filter.get_start_activities(log, parameters=parameters))
    enabled_at_start = {
        t.label
        for t in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    activated_total += len(log) * len(enabled_at_start)
    escaping_total += len(log) * len(enabled_at_start.difference(start_activities))

    if activated_total > 0:
        precision = 1 - float(escaping_total) / float(activated_total)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key

    Returns
    ----------
    precision
        Precision value (1.0 when no transition is ever activated)
    """
    if parameters is None:
        parameters = {}

    cleaning_token_flood = parameters.get("cleaning_token_flood", False)
    activity_key = parameters.get(PARAM_ACTIVITY_KEY,
                                  log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    escaping_total = 0
    activated_total = 0

    replay_parameters = {
        "consider_remaining_in_fitness": False,
        "try_to_reach_final_marking_through_hidden": False,
        "stop_immediately_unfit": True,
        "walk_through_hidden_trans": True,
        "cleaning_token_flood": cleaning_token_flood,
        PARAM_ACTIVITY_KEY: activity_key
    }

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefix_list = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefix_list, activity_key=activity_key)

    replay_results = token_replay.apply(fake_log, net, marking, final_marking,
                                        parameters=replay_parameters)

    # the empty prefix should also be counted: compare the transitions enabled
    # in the initial marking against the start activities of the log
    start_activities = set(
        start_activities_filter.get_start_activities(log, parameters=parameters))
    enabled_at_start = {
        t.label
        for t in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    activated_total += len(log) * len(enabled_at_start)
    escaping_total += len(log) * len(enabled_at_start.difference(start_activities))

    # weight each fitting prefix by its multiplicity in the log
    for prefix, replay_result in zip(prefix_list, replay_results):
        if not replay_result["trace_is_fit"]:
            continue
        observed = set(prefixes[prefix])
        modeled = {
            t.label
            for t in replay_result["enabled_transitions_in_marking"]
            if t.label is not None
        }
        activated_total += len(modeled) * prefix_count[prefix]
        escaping_total += len(modeled.difference(observed)) * prefix_count[prefix]

    if activated_total > 0:
        precision = 1 - float(escaping_total) / float(activated_total)

    return precision