def calculate_precision_with_tbr(self, net, im, fm, log, parameters=None):
    """
    Calculate the ETConformance precision of a log against a Petri net,
    using token-based replay over the distinct log prefixes.

    Parameters
    ------------
    net
        Petri net
    im
        Initial marking
    fm
        Final marking
    log
        Event log
    parameters
        Parameters of the algorithm

    Returns
    ------------
    precision
        ETConformance precision value (1 - escaping edges / activated transitions)
    """
    from pm4py.evaluation.precision import utils as precision_utils

    if parameters is None:
        parameters = {}

    # Default when no transitions are activated at all; previously `precision`
    # was only bound inside the `if sum_at > 0` branch, so an empty/unfit log
    # raised NameError on return.
    precision = 1.0
    sum_at = 0.0
    sum_ee = 0.0

    prefixes, prefix_count = precision_utils.get_log_prefixes(log)
    prefixes_keys = list(prefixes.keys())
    # one fake trace per distinct prefix; replay each prefix once and weight
    # the result by how often the prefix occurs in the log
    fake_log = precision_utils.form_fake_log(prefixes_keys)

    # NOTE(review): log_variants_filter is assumed to be imported at module
    # level elsewhere in this file — confirm
    variants = log_variants_filter.get_variants_from_log_trace_idx(fake_log, parameters=parameters)
    var_list = [[x, y] for x, y in variants.items()]

    parameters["enable_parameters_precision"] = True
    parameters["consider_remaining_in_fitness"] = False

    aligned_traces = self.perform_tbr_net_variants(net, im, fm, var_list=var_list, parameters=parameters)

    for i in range(len(aligned_traces)):
        if aligned_traces[i]["trace_is_fit"]:
            # activities that actually follow this prefix in the log
            log_transitions = set(prefixes[prefixes_keys[i]])
            # labels enabled in the model after replaying the prefix; the replay
            # result serializes missing labels as the string "None"
            activated_transitions_labels = set(
                x for x in aligned_traces[i]["enabled_transitions_in_marking_labels"] if x != "None")
            sum_at += len(activated_transitions_labels) * prefix_count[prefixes_keys[i]]
            # escaping edges: enabled in the model but never observed in the log
            escaping_edges = activated_transitions_labels.difference(log_transitions)
            sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]

    if sum_at > 0:
        precision = 1 - float(sum_ee) / float(sum_at)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get Align-ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key
    """
    if parameters is None:
        parameters = {}

    activity_key = parameters[
        PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY

    precision = 0.0
    sum_ee = 0
    sum_at = 0

    # Align-ETConformance is only defined for relaxed sound workflow nets
    if not (petri.check_soundness.check_wfnet(net) and
            petri.check_soundness.check_relaxed_soundness_net_in_fin_marking(
                net, marking, final_marking)):
        raise Exception(
            "trying to apply Align-ETConformance on a Petri net that is not a relaxed sound workflow net!!")

    # map place names back to place objects of the original net, used below to
    # rebuild a marking of the original net from the sync-net search result
    places_corr = {p.name: p for p in net.places}

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    # one fake trace per distinct prefix; each is aligned once and weighted by
    # its frequency in the log
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    # NOTE(review): raises ValueError if fake_log is empty (empty input log) — confirm callers guard this
    max_trace_length = max(len(x) for x in fake_log)

    for i in range(len(fake_log)):
        trace = fake_log[i]
        # build the synchronous product net between the prefix trace and the model
        sync_net, sync_initial_marking, sync_final_marking = build_sync_net(
            trace, net, marking, final_marking)
        # stop marking: the trace-side places (name[1] == SKIP) of the final
        # marking — the search can stop once the whole prefix has been replayed
        stop_marking = petri.petrinet.Marking()
        for pl, count in sync_final_marking.items():
            if pl.name[1] == utils.SKIP:
                stop_marking[pl] = count
        cost_function = utils.construct_standard_cost_function(sync_net, utils.SKIP)
        res = __search(sync_net, sync_initial_marking, sync_final_marking,
                       stop_marking, cost_function, utils.SKIP, max_trace_length)
        # project the reached sync-net marking onto the original net: keep only
        # model-side places (name[0] == SKIP) and translate them via places_corr
        atm = petri.petrinet.Marking()
        for pl, count in res.items():
            if pl.name[0] == utils.SKIP:
                atm[places_corr[pl.name[1]]] = count
        # activities that actually follow this prefix in the log
        log_transitions = set(prefixes[prefixes_keys[i]])
        # visible labels the model can (eventually) enable from the reached marking
        activated_transitions_labels = set(
            x.label for x in utils.get_visible_transitions_eventually_enabled_by_marking(net, atm)
            if x.label is not None)
        sum_at += len(activated_transitions_labels) * prefix_count[prefixes_keys[i]]
        # escaping edges: enabled in the model but never observed after the prefix
        escaping_edges = activated_transitions_labels.difference(log_transitions)
        sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]

    if sum_at > 0:
        precision = 1 - float(sum_ee) / float(sum_at)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log_skeleton
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> Activity key
    """
    parameters = {} if parameters is None else parameters

    cleaning_token_flood = exec_utils.get_param_value(
        Parameters.CLEANING_TOKEN_FLOOD, parameters, False)
    token_replay_variant = exec_utils.get_param_value(
        Parameters.TOKEN_REPLAY_VARIANT, parameters, executor.Variants.TOKEN_REPLAY)
    activity_key = exec_utils.get_param_value(
        Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    total_escaping = 0
    total_activated = 0

    replay_parameters = {
        token_replay.Parameters.CONSIDER_REMAINING_IN_FITNESS: False,
        token_replay.Parameters.TRY_TO_REACH_FINAL_MARKING_THROUGH_HIDDEN: False,
        token_replay.Parameters.STOP_IMMEDIATELY_UNFIT: True,
        token_replay.Parameters.WALK_THROUGH_HIDDEN_TRANS: True,
        token_replay.Parameters.CLEANING_TOKEN_FLOOD: cleaning_token_flood,
        token_replay.Parameters.ACTIVITY_KEY: activity_key
    }

    # one fake trace per distinct log prefix; results are weighted by frequency
    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    replay_results = executor.apply(fake_log, net, marking, final_marking,
                                    variant=token_replay_variant,
                                    parameters=replay_parameters)

    # fix: also the empty prefix should be counted!
    start_activities = set(get_start_activities(log, parameters=parameters))
    initially_enabled = {
        x.label
        for x in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    escaping_at_start = initially_enabled.difference(start_activities)
    total_activated += len(log) * len(initially_enabled)
    total_escaping += len(log) * len(escaping_at_start)
    # end fix

    for prefix_key, replay_result in zip(prefixes_keys, replay_results):
        if not replay_result["trace_is_fit"]:
            continue
        # activities that actually follow this prefix in the log
        observed_follow_ups = set(prefixes[prefix_key])
        # visible labels enabled in the model after replaying the prefix
        enabled_labels = {
            x.label
            for x in replay_result["enabled_transitions_in_marking"]
            if x.label is not None
        }
        weight = prefix_count[prefix_key]
        total_activated += len(enabled_labels) * weight
        total_escaping += len(enabled_labels.difference(observed_follow_ups)) * weight

    if total_activated > 0:
        precision = 1 - float(total_escaping) / float(total_activated)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get Align-ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> Activity key
            "debug_level" -> 0 (silent), 1 (summary), >1 (per-prefix trace)
    """
    if parameters is None:
        parameters = {}

    debug_level = parameters["debug_level"] if "debug_level" in parameters else 0

    activity_key = exec_utils.get_param_value(
        Parameters.ACTIVITY_KEY, parameters, log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions (not even by looking at the initial marking) are found
    precision = 1.0
    sum_ee = 0
    sum_at = 0
    # number of log traces (weighted by prefix frequency) whose prefix could not be aligned
    unfit = 0

    # Align-ETConformance requires a relaxed sound net
    if not petri.check_soundness.check_relaxed_soundness_net_in_fin_marking(
            net, marking, final_marking):
        raise Exception(
            "trying to apply Align-ETConformance on a Petri net that is not a relaxed sound net!!")

    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    # one fake trace per distinct prefix; each is aligned once, then weighted
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    # align each fake prefix trace, then map the reached sync-net markings
    # back to markings of the original net (None entry = prefix not alignable)
    align_stop_marking = align_fake_log_stop_marking(fake_log, net, marking, final_marking,
                                                     parameters=parameters)
    all_markings = transform_markings_from_sync_to_original_net(align_stop_marking, net,
                                                                parameters=parameters)

    for i in range(len(prefixes)):
        markings = all_markings[i]
        if markings is not None:
            # activities that actually follow this prefix in the log
            log_transitions = set(prefixes[prefixes_keys[i]])
            activated_transitions_labels = set()
            for m in markings:
                # add to the set of activated transitions in the model the activated transitions
                # for each prefix
                activated_transitions_labels = activated_transitions_labels.union(
                    x.label for x in utils.get_visible_transitions_eventually_enabled_by_marking(net, m)
                    if x.label is not None)
            # escaping edges: enabled in the model but never observed after the prefix
            escaping_edges = activated_transitions_labels.difference(log_transitions)
            sum_at += len(activated_transitions_labels) * prefix_count[prefixes_keys[i]]
            sum_ee += len(escaping_edges) * prefix_count[prefixes_keys[i]]
            if debug_level > 1:
                print("")
                print("prefix=", prefixes_keys[i])
                print("log_transitions=", log_transitions)
                print("activated_transitions=", activated_transitions_labels)
                print("escaping_edges=", escaping_edges)
        else:
            unfit += prefix_count[prefixes_keys[i]]

    if debug_level > 0:
        print("\n")
        print("overall unfit", unfit)
        print("overall activated transitions", sum_at)
        print("overall escaping edges", sum_ee)

    # fix: also the empty prefix should be counted!
    start_activities = set(get_start_activities(log, parameters=parameters))
    trans_en_ini_marking = set([
        x.label
        for x in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    ])
    diff = trans_en_ini_marking.difference(start_activities)
    sum_at += len(log) * len(trans_en_ini_marking)
    sum_ee += len(log) * len(diff)
    # end fix

    if sum_at > 0:
        precision = 1 - float(sum_ee) / float(sum_at)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key
    """
    parameters = {} if parameters is None else parameters

    cleaning_token_flood = parameters.get("cleaning_token_flood", False)
    activity_key = parameters.get(PARAM_ACTIVITY_KEY, log_lib.util.xes.DEFAULT_NAME_KEY)

    precision = 0.0
    total_escaping = 0
    total_activated = 0

    replay_parameters = {
        "consider_remaining_in_fitness": False,
        "try_to_reach_final_marking_through_hidden": False,
        "stop_immediately_unfit": True,
        "walk_through_hidden_trans": True,
        "cleaning_token_flood": cleaning_token_flood,
        PARAM_ACTIVITY_KEY: activity_key
    }

    # one fake trace per distinct log prefix; results are weighted by frequency
    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    replay_results = token_replay.apply(fake_log, net, marking, final_marking,
                                        parameters=replay_parameters)

    for prefix_key, replay_result in zip(prefixes_keys, replay_results):
        if not replay_result["trace_is_fit"]:
            continue
        # activities that actually follow this prefix in the log
        observed_follow_ups = set(prefixes[prefix_key])
        # visible labels enabled in the model after replaying the prefix
        enabled_labels = {
            x.label
            for x in replay_result["enabled_transitions_in_marking"]
            if x.label is not None
        }
        weight = prefix_count[prefix_key]
        total_activated += len(enabled_labels) * weight
        total_escaping += len(enabled_labels.difference(observed_follow_ups)) * weight

    if total_activated > 0:
        precision = 1 - float(total_escaping) / float(total_activated)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get Align-ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key
    """
    parameters = {} if parameters is None else parameters

    activity_key = parameters.get(PARAM_ACTIVITY_KEY, log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    total_escaping = 0
    total_activated = 0

    # Align-ETConformance is only defined for relaxed sound workflow nets
    net_is_usable = (petri.check_soundness.check_wfnet(net)
                     and petri.check_soundness.check_relaxed_soundness_net_in_fin_marking(
                         net, marking, final_marking))
    if not net_is_usable:
        raise Exception(
            "trying to apply Align-ETConformance on a Petri net that is not a relaxed sound workflow net!!")

    # one fake trace per distinct log prefix; each is aligned once, weighted by frequency
    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    # align the fake prefixes, then translate the reached sync-net markings
    # back into markings of the original net
    align_stop_marking = align_fake_log_stop_marking(fake_log, net, marking, final_marking,
                                                     parameters=parameters)
    all_markings = transform_markings_from_sync_to_original_net(align_stop_marking, net,
                                                                parameters=parameters)

    for prefix_key, reached_marking in zip(prefixes_keys, all_markings):
        # activities that actually follow this prefix in the log
        observed_follow_ups = set(prefixes[prefix_key])
        # visible labels the model can (eventually) enable from the reached marking
        enabled_labels = {
            x.label
            for x in utils.get_visible_transitions_eventually_enabled_by_marking(net, reached_marking)
            if x.label is not None
        }
        weight = prefix_count[prefix_key]
        total_activated += len(enabled_labels) * weight
        total_escaping += len(enabled_labels.difference(observed_follow_ups)) * weight

    # fix: also the empty prefix should be counted!
    start_activities = set(
        start_activities_filter.get_start_activities(log, parameters=parameters))
    initially_enabled = {
        x.label
        for x in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    total_activated += len(log) * len(initially_enabled)
    total_escaping += len(log) * len(initially_enabled.difference(start_activities))
    # end fix

    if total_activated > 0:
        precision = 1 - float(total_escaping) / float(total_activated)

    return precision
def apply(log, net, marking, final_marking, parameters=None):
    """
    Get ET Conformance precision

    Parameters
    ----------
    log
        Trace log
    net
        Petri net
    marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters of the algorithm, including:
            pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key
    """
    parameters = {} if parameters is None else parameters

    cleaning_token_flood = parameters.get("cleaning_token_flood", False)
    token_replay_variant = parameters.get(PARAMETER_TOKEN_REPLAY_VARIANT,
                                          DEFAULT_TOKEN_REPLAY_VARIANT)
    activity_key = parameters.get(PARAM_ACTIVITY_KEY, log_lib.util.xes.DEFAULT_NAME_KEY)

    # default value for precision, when no activated transitions
    # (not even by looking at the initial marking) are found
    precision = 1.0
    total_escaping = 0
    total_activated = 0

    replay_parameters = {
        "consider_remaining_in_fitness": False,
        "try_to_reach_final_marking_through_hidden": False,
        "stop_immediately_unfit": True,
        "walk_through_hidden_trans": True,
        "cleaning_token_flood": cleaning_token_flood,
        PARAM_ACTIVITY_KEY: activity_key
    }

    # one fake trace per distinct log prefix; results are weighted by frequency
    prefixes, prefix_count = precision_utils.get_log_prefixes(log, activity_key=activity_key)
    prefixes_keys = list(prefixes.keys())
    fake_log = precision_utils.form_fake_log(prefixes_keys, activity_key=activity_key)

    replay_results = token_replay.apply(fake_log, net, marking, final_marking,
                                        variant=token_replay_variant,
                                        parameters=replay_parameters)

    # fix: also the empty prefix should be counted!
    start_activities = set(get_start_activities(log, parameters=parameters))
    initially_enabled = {
        x.label
        for x in get_visible_transitions_eventually_enabled_by_marking(net, marking)
    }
    escaping_at_start = initially_enabled.difference(start_activities)
    total_activated += len(log) * len(initially_enabled)
    total_escaping += len(log) * len(escaping_at_start)
    # end fix

    for prefix_key, replay_result in zip(prefixes_keys, replay_results):
        if not replay_result["trace_is_fit"]:
            continue
        # activities that actually follow this prefix in the log
        observed_follow_ups = set(prefixes[prefix_key])
        # visible labels enabled in the model after replaying the prefix
        enabled_labels = {
            x.label
            for x in replay_result["enabled_transitions_in_marking"]
            if x.label is not None
        }
        weight = prefix_count[prefix_key]
        total_activated += len(enabled_labels) * weight
        total_escaping += len(enabled_labels.difference(observed_follow_ups)) * weight

    if total_activated > 0:
        precision = 1 - float(total_escaping) / float(total_activated)

    return precision