def test_footprints_net(self):
    """Smoke test: discover footprints from log and alpha-mined net, run every conformance variant."""
    from pm4py.algo.discovery.alpha import algorithm as alpha_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
    net, im, fm = alpha_miner.apply(log)

    # Footprints computed three ways: over the whole log, trace-by-trace, and from the model.
    whole_log_fp = footprints_discovery.apply(log, variant=footprints_discovery.Variants.ENTIRE_EVENT_LOG)
    per_trace_fp = footprints_discovery.apply(log)
    model_fp = footprints_discovery.apply(net, im)

    # Exercise all four conformance variants; results are only checked for not raising.
    diag_log = footprints_conformance.apply(whole_log_fp, model_fp)
    diag_traces = footprints_conformance.apply(per_trace_fp, model_fp)
    diag_log_ext = footprints_conformance.apply(whole_log_fp, model_fp,
                                               variant=footprints_conformance.Variants.LOG_EXTENSIVE)
    diag_traces_ext = footprints_conformance.apply(per_trace_fp, model_fp,
                                                   variant=footprints_conformance.Variants.TRACE_EXTENSIVE)
def test_footprints_tree(self):
    """Smoke test: discover footprints from log and inductive-mined tree, run every conformance variant."""
    from pm4py.algo.discovery.inductive import algorithm as inductive_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
    tree = inductive_miner.apply_tree(log)

    # Footprints computed three ways: over the whole log, trace-by-trace, and from the tree.
    whole_log_fp = footprints_discovery.apply(log, variant=footprints_discovery.Variants.ENTIRE_EVENT_LOG)
    per_trace_fp = footprints_discovery.apply(log)
    model_fp = footprints_discovery.apply(tree)

    # Exercise all four conformance variants; results are only checked for not raising.
    diag_log = footprints_conformance.apply(whole_log_fp, model_fp)
    diag_traces = footprints_conformance.apply(per_trace_fp, model_fp)
    diag_log_ext = footprints_conformance.apply(whole_log_fp, model_fp,
                                               variant=footprints_conformance.Variants.LOG_EXTENSIVE)
    diag_traces_ext = footprints_conformance.apply(per_trace_fp, model_fp,
                                                   variant=footprints_conformance.Variants.TRACE_EXTENSIVE)
def test_footprints_net(self):
    """Smoke test: footprints conformance of an alpha-mined Petri net against the log."""
    from pm4py.algo.discovery.alpha import algorithm as alpha_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
    net, im, fm = alpha_miner.apply(log)

    # Per-trace footprints of the log versus footprints of the model.
    log_footprints = footprints_discovery.apply(log)
    net_footprints = footprints_discovery.apply(net, im)
    diagnostics = footprints_conformance.apply(log_footprints, net_footprints)
def test_footprints_tree(self):
    """Smoke test: footprints conformance of an inductive-mined process tree against the log."""
    from pm4py.algo.discovery.inductive import algorithm as inductive_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
    tree = inductive_miner.apply_tree(log)

    # Per-trace footprints of the log versus footprints of the model.
    log_footprints = footprints_discovery.apply(log)
    tree_footprints = footprints_discovery.apply(tree)
    diagnostics = footprints_conformance.apply(log_footprints, tree_footprints)
def test_footprints_tree_df(self):
    """Smoke test: footprints conformance starting from a pandas dataframe instead of an XES log."""
    from pm4py.algo.discovery.inductive import algorithm as inductive_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    # Load the CSV and normalize its timestamp columns before any discovery.
    df = pd.read_csv(os.path.join("input_data", "running-example.csv"))
    df = dataframe_utils.convert_timestamp_columns_in_df(df)

    # The tree is mined from the converted event log; the footprints are taken
    # directly from the dataframe.
    log = converter.apply(df)
    tree = inductive_miner.apply_tree(log)
    df_footprints = footprints_discovery.apply(df)
    tree_footprints = footprints_discovery.apply(tree)
    diagnostics = footprints_conformance.apply(df_footprints, tree_footprints)
def test_footprints_tree_df(self):
    """Smoke test: footprints conformance starting from a dataframe imported via csv_import_adapter."""
    from pm4py.algo.discovery.inductive import algorithm as inductive_miner
    from pm4py.algo.discovery.footprints import algorithm as footprints_discovery
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance

    df = csv_import_adapter.import_dataframe_from_path(
        os.path.join("input_data", "running-example.csv"))

    # The tree is mined from the converted event log; the footprints are taken
    # directly from the dataframe.
    log = converter.apply(df)
    tree = inductive_miner.apply_tree(log)
    df_footprints = footprints_discovery.apply(df)
    tree_footprints = footprints_discovery.apply(tree)
    diagnostics = footprints_conformance.apply(df_footprints, tree_footprints)
def conformance_diagnostics_footprints(*args) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
    """
    Provide conformance checking diagnostics using footprints

    Parameters
    ----------------
    args
        Provided argument:
        - The first argument is supposed to be an event log (or the footprints discovered from
          the event log)
        - The other arguments are supposed to be the process model (or the footprints discovered
          from the process model)

    Returns
    ----------------
    diagnostics
        Conformance diagnostics: a list of per-trace diagnostic dictionaries when the log
        footprints were computed trace-by-trace, otherwise a single diagnostic dictionary
        for the entire log
    """
    # Normalize both sides to footprints: first positional argument is the log side,
    # the remaining arguments together describe the model side.
    fp1 = __convert_to_fp(args[0])
    fp2 = __convert_to_fp(args[1:])
    from pm4py.algo.conformance.footprints import algorithm as footprints_conformance
    # A list means trace-by-trace footprints -> per-trace extensive comparison;
    # a single dict means whole-log footprints -> log-level extensive comparison.
    if isinstance(fp1, list):
        return footprints_conformance.apply(fp1, fp2, variant=footprints_conformance.Variants.TRACE_EXTENSIVE)
    else:
        return footprints_conformance.apply(fp1, fp2, variant=footprints_conformance.Variants.LOG_EXTENSIVE)
def execute_script():
    """Example: footprints discovery/conformance on the receipt log, then visualize the table."""
    # Import the log.
    event_log = importer.apply(
        os.path.join("..", "tests", "input_data", "receipt.xes"))

    # Mine the model from an auto-filtered copy of the log (the original log is
    # kept intact for the conformance step).
    filtered = variants_filter.apply_auto_filter(deepcopy(event_log))
    tree = inductive_miner.apply_tree(filtered)
    print(tree)

    # Convert the process tree into a Petri net.
    net, im, fm = converter.apply(tree)

    # Footprints: one list entry per case of the log, plus the footprints of the
    # tree and of the Petri net.
    log_footprints = footprints_discovery.apply(event_log)
    tree_footprints = footprints_discovery.apply(tree)
    net_footprints = footprints_discovery.apply(net, im)

    print(len(tree_footprints["sequence"]), len(tree_footprints["parallel"]),
          len(net_footprints["sequence"]), len(net_footprints["parallel"]))
    print(tree_footprints["sequence"] == net_footprints["sequence"]
          and tree_footprints["parallel"] == net_footprints["parallel"])

    # Footprints conformance checking; show the first anomalous trace
    # (deviations contained in the trace but not allowed by the model).
    diagnostics = footprints_conformance.apply(log_footprints, net_footprints)
    for trace_diag in diagnostics:
        if trace_diag:
            print(trace_diag)
            break

    # Footprints over the entire log (relations taken across the whole log
    # rather than case-by-case), then visualize the footprint table.
    entire_log_footprints = footprints_discovery.apply(
        event_log, variant=footprints_discovery.Variants.ENTIRE_EVENT_LOG)
    gviz = fp_visualizer.apply(entire_log_footprints, net_footprints,
                               parameters={"format": "svg"})
    fp_visualizer.view(gviz)
def execute_script():
    """
    Example: relate footprints-conformance deviations on the receipt log to
    throughput times, both per deviating path and per variant, then visualize
    the deviations on the process tree.
    """
    log = xes_importer.apply(
        os.path.join("..", "tests", "input_data", "receipt.xes"))
    # Median case duration of the whole log, used as the baseline column in the tables below.
    throughput_time = case_statistics.get_median_caseduration(log)
    variants, variants_times = variants_filter.get_variants_along_with_case_durations(
        log)
    dfg = dfg_discovery.apply(log)
    # The model is mined from an auto-filtered copy; conformance is checked against the full log.
    filtered_log = variants_filter.apply_auto_filter(deepcopy(log))
    # filtered_log = log
    tree = inductive_miner.apply_tree(filtered_log)
    # Log footprints over the entire log vs. footprints of the mined tree.
    fp_log = fp_discovery.apply(log, variant=fp_discovery.Variants.ENTIRE_EVENT_LOG)
    fp_model = fp_discovery.apply(tree)
    # conf is the set of deviating (source, target) paths.
    conf = fp_conformance.apply(fp_log, fp_model)
    # Rank deviating paths by how often they occur in the DFG (ties broken by activity names).
    conf_occ = sorted([(x, dfg[x]) for x in conf],
                      key=lambda y: (y[1], y[0][0], y[0][1]),
                      reverse=True)
    print(
        "source activity\t\ttarget activity\t\toccurrences\t\tthroughput time log\t\tthroughput time traces with path"
    )
    # Top-10 deviating paths: compare overall median duration with the median
    # duration of only the traces containing that path.
    for i in range(min(10, len(conf_occ))):
        path = conf_occ[i][0]
        occ = conf_occ[i][1]
        red_log = paths_filter.apply(log, [path])
        red_throughput_time = case_statistics.get_median_caseduration(red_log)
        print("%s\t\t%s\t\t%d\t\t%s\t\t%s" %
              (path[0], path[1], occ, human_readable_stat(throughput_time),
               human_readable_stat(red_throughput_time)))
    # Rank variants by number of cases (ties broken by variant key).
    variants_length = sorted([(x, len(variants[x])) for x in variants.keys()],
                             key=lambda y: (y[1], y[0]),
                             reverse=True)
    print(
        "\nvariant\t\toccurrences\t\tthroughput time log\t\tthroughput time traces with path"
    )
    # Top-10 variants: check each variant's footprints against the model and
    # report its median duration.
    for i in range(min(10, len(variants_length))):
        var = variants_length[i][0]
        # Truncate the variant key for display.
        vark = str(var)
        if len(vark) > 10:
            vark = vark[:10]
        occ = variants_length[i][1]
        fp_log_var = fp_discovery.apply(
            variants[var], variant=fp_discovery.Variants.ENTIRE_EVENT_LOG)
        conf_var = fp_conformance.apply(fp_log_var, fp_model)
        # A variant fits when it produces no deviating paths.
        is_fit = str(len(conf_var) == 0)
        var_throughput = case_statistics.get_median_caseduration(variants[var])
        print("%s\t\t%d\t\t%s\t\t%s\t\t%s" %
              (vark, occ, is_fit, throughput_time,
               human_readable_stat(var_throughput)))
    # print(conf_occ)
    # Color the tree nodes involved in deviations and render the tree as SVG.
    conf_colors = tree_visualization.apply(tree, conf)
    if True:
        gviz = pt_visualizer.apply(
            tree,
            parameters={
                "format": "svg",
                pt_visualizer.Variants.WO_DECORATION.value.Parameters.COLOR_MAP:
                    conf_colors,
                pt_visualizer.Variants.WO_DECORATION.value.Parameters.ENABLE_DEEPCOPY:
                    False
            })
        pt_visualizer.view(gviz)
fitness_token_imdf[logName] = \ fitness_evaluator.apply(log, inductive_model, inductive_im, inductive_fm, parameters=parameters, variant=fitness_evaluator.Variants.TOKEN_BASED)[ 'perc_fit_traces'] print( str(time.time()) + " fitness_token_inductive for " + logName + " succeeded! " + str(fitness_token_imdf[logName])) t2 = time.time() times_tokenreplay_imdf[logName] = t2 - t1 t1 = time.time() fp_log = footprints_discovery.apply(log, parameters=parameters) fp_tree = footprints_discovery.apply(tree, parameters=parameters) conf = footprints_conformance.apply( fp_log, fp_tree, variant=footprints_conformance.Variants.TRACE_EXTENSIVE, parameters=parameters) # fitness_fp = float(len([x for x in conf if len(x) == 0])) / float(len(conf)) * 100.0 if conf else 0.0 fitness_fp = float(len([x for x in conf if x["is_footprints_fit"]])) / float( len(conf)) * 100.0 if conf else 0.0 t2 = time.time() fitness_footprints_imdf[logName] = fitness_fp times_footprints_imdf[logName] = t2 - t1 if ENABLE_ALIGNMENTS: t1 = time.time() fitness_align_imdf[logName] = \ fitness_evaluator.apply(log, inductive_model, inductive_im, inductive_fm, variant=fitness_evaluator.Variants.ALIGNMENT_BASED, parameters=parameters)[