def execute_script(variant="frequency"):
    # read the log using the nonstandard importer (faster)
    log_path = os.path.join("..", "tests", "input_data", "receipt.xes")
    log = xes_importer.import_log(log_path, variant="nonstandard")
    # apply the Inductive Miner on the log
    net, initial_marking, final_marking = inductive_miner.apply(log)
    # find the shortest paths in the net
    spaths = get_shortest_paths(net)
    # then we start to decorate the net:
    # we decide whether to decorate it with frequency or performance,
    # and we choose the aggregation measure (sum, min, max, mean, median, stdev)
    aggregation_measure = "mean"
    if variant == "frequency":
        aggregation_measure = "sum"
    # compute the DFG
    dfg = dfg_factory.apply(log, variant=variant)
    # count the occurrences of each activity in the log
    activities_count = attributes_filter.get_attribute_values(log, "concept:name")
    # calculate the decoration statistics on the Petri net using the greedy algorithm
    aggregated_statistics = get_decorations_from_dfg_spaths_acticount(net, dfg, spaths, activities_count,
                                                                      variant=variant,
                                                                      aggregation_measure=aggregation_measure)
    # build the Graphviz visualization
    gviz = pn_vis_factory.apply(net, initial_marking, final_marking, variant=variant,
                                aggregated_statistics=aggregated_statistics, parameters={"format": "svg"})
    # show the visualization on screen
    pn_vis_factory.view(gviz)
def handle_uploaded_file(f, algorithm):
    logPath2 = '/home/pm4py_test/logs/log.xes'
    logPath1 = '/home/pm4py_test/media/log.xes'
    # os.remove('static/proc/model.png')
    fs = FileSystemStorage()
    fs.save('log.xes', f)
    move_file(logPath1, logPath2)
    log = xes_importer.import_log(logPath2)
    if algorithm == "alpha":
        print(algorithm)
        # this branch previously called the heuristics miner; the Alpha Miner is used here
        # (assumes alpha_miner is imported alongside the other miners)
        net, initial_marking, final_marking = alpha_miner.apply(log)
    elif algorithm == "inductive":
        print(algorithm)
        net, initial_marking, final_marking = inductive_miner.apply(log)
    elif algorithm == "heuristics":
        print(algorithm)
        net, initial_marking, final_marking = heuristics_miner.apply(log)
    else:
        print('error !!!')
        print(algorithm)
        return
    gviz = vis_factory.apply(net, initial_marking, final_marking)
    vis_factory.view(gviz)
    pngUris = glob.glob('/home/pm4py_test/*.png')
    gvUris = glob.glob('/home/pm4py_test/*.gv')
    modelUri = '/home/pm4py_test/proc/static/proc/model.png'
    print(pngUris)
    print(gvUris)
    os.remove(gvUris[0])
    move_file(pngUris[0], modelUri)
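# handle_uploaded_file above relies on a move_file helper that is not shown here;
# a minimal sketch of such a helper (name and signature assumed from the calls above),
# based on shutil:
import shutil


def move_file(src_path, dst_path):
    # move the file at src_path to dst_path (overwrites an existing file on most platforms)
    shutil.move(src_path, dst_path)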
def execute_script():
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_import.apply(log_path)
    net, i_m, f_m = alpha_miner.apply(log)
    gviz = pn_vis_factory.apply(net, i_m, f_m, parameters={"format": "svg", "debug": True})
    pn_vis_factory.view(gviz)
def execute_script():
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_importer.apply(log_path)
    dfg = dfg_factory.apply(log)
    dfg_gv = dfg_vis_fact.apply(dfg, log, parameters={"format": "svg"})
    dfg_vis_fact.view(dfg_gv)
    net, im, fm = dfg_conv_factory.apply(dfg)
    gviz = pn_vis_factory.apply(net, im, fm, parameters={"format": "svg"})
    pn_vis_factory.view(gviz)
def test_partially_ordered_trace_net_creation(self):
    partially_ordered_trace_net, initial_marking, final_marking = utils.construct_partially_ordered_trace_net(
        self.log[0])
    pn_vis_factory.view(
        pn_vis_factory.apply(partially_ordered_trace_net, parameters={"format": "svg"}))
    self.assertEqual(len(self.trace_net.transitions), len(partially_ordered_trace_net.transitions))
    self.assertEqual(len(self.trace_net.places), len(partially_ordered_trace_net.places))
def execute_script():
    log = xes_importer.apply(
        os.path.join("..", "tests", "compressed_input_data", "09_a32f0n00.xes.gz"))
    heu_net = heuristics_miner.apply_heu(
        log, parameters={"dependency_thresh": 0.99})
    gviz = hn_vis_factory.apply(heu_net, parameters={"format": "svg"})
    hn_vis_factory.view(gviz)
    net, im, fm = heuristics_miner.apply(
        log, parameters={"dependency_thresh": 0.99})
    gviz2 = petri_vis_factory.apply(net, im, fm, parameters={"format": "svg"})
    petri_vis_factory.view(gviz2)
def visualize_as_petri_net(net, initial_marking, final_marking, path=''):
    if len(path) > 0:
        parameters = {
            pn_visualizer.Variants.WO_DECORATION.value.Parameters.FORMAT: "svg"
        }
        gviz = pn_visualizer.apply(net, initial_marking, final_marking, parameters=parameters)
        pn_visualizer.save(gviz, path)
    else:
        gviz = pn_vis_factory.apply(net, initial_marking, final_marking)
        pn_vis_factory.view(gviz)
    return net, initial_marking, final_marking
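# Hypothetical usage of the helper above; the discovery call is only an example
# (any pm4py algorithm returning a net plus initial/final marking would work):
# net, im, fm = inductive_miner.apply(log)
# visualize_as_petri_net(net, im, fm, path="model.svg")  # save the SVG to disk
# visualize_as_petri_net(net, im, fm)                    # or view it on screen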
def execute_script():
    log = xes_importer.apply(
        os.path.join("..", "tests", "input_data", "receipt.xes"))
    log = sorting.sort_timestamp(log)
    net, im, fm = inductive_miner.apply(log)
    log1 = EventLog(log[:500])
    log2 = EventLog(log[len(log) - 500:])
    statistics = element_usage_comparison.compare_element_usage_two_logs(
        net, im, fm, log1, log2)
    gviz = pn_vis_factory.apply(net, im, fm, variant="frequency",
                                aggregated_statistics=statistics, parameters={"format": "svg"})
    pn_vis_factory.view(gviz)
def execute_script():
    # import the log
    log_path = os.path.join("..", "tests", "input_data", "receipt.xes")
    log = xes_importer.import_log(log_path)
    # apply the Inductive Miner
    net, initial_marking, final_marking = inductive_miner.apply(log)
    # get the performance-decorated visualization
    variant = "performance"
    parameters_viz = {"aggregationMeasure": "mean", "format": "svg"}
    gviz = pn_vis_factory.apply(net, initial_marking, final_marking, log=log, variant=variant,
                                parameters=parameters_viz)
    pn_vis_factory.view(gviz)
def generate_petri_net_visual(net, initial_marking, final_marking, xes_log):
    try:
        # visualize the Petri net with the graphviz library, passing the net, the initial
        # and final marking and the log, then view it
        parameters = {"format": "png"}
        gviz = pn_vis_factory.apply(net, initial_marking, final_marking, parameters=parameters,
                                    variant="frequency", log=xes_log)
        pn_vis_factory.view(gviz)
        print("By what name do you want to save the petri net image?")
        filename = str(input())
        pn_vis_factory.save(gviz, filename + ".png")
    except TypeError:
        print("Please check input values")
def import_csv_file(filename):
    filename = os.path.basename(filename)
    file_path = global_util.get_full_path_input_file(filename)
    event_stream = csv_importer.import_event_stream(file_path)
    # dataframe = csv_import_adapter.import_dataframe_from_path(file_path, sep=",")
    log = conversion_factory.apply(
        event_stream, parameters={constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "日期和时间"})
    net, initial_marking, final_marking = alpha_miner.apply(log)
    gviz = visualizer.apply(net, initial_marking, final_marking)
    visualizer.view(gviz)
def execute_script():
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_importer.import_log(log_path)
    net, marking, final_marking = alpha_factory.apply(log)
    for place in marking:
        print("initial marking " + place.name)
    for place in final_marking:
        print("final marking " + place.name)
    gviz = pn_vis_factory.apply(net, marking, final_marking, parameters={"format": "svg"})
    pn_vis_factory.view(gviz)
    print("started token replay")
    aligned_traces = token_replay.apply(log, net, marking, final_marking)
    fit_traces = [x for x in aligned_traces if x['trace_is_fit']]
    perc_fitness = 0.00
    if len(aligned_traces) > 0:
        perc_fitness = len(fit_traces) / len(aligned_traces)
    print("perc_fitness=", perc_fitness)
def import_csv_file(filename):
    filename = os.path.basename(filename)
    # resolve the full path of the log file
    file_path = global_util.get_full_path_input_file(filename)
    # import the file in CSV format, obtaining an event stream
    event_stream = csv_importer.import_event_stream(file_path)
    # dataframe = csv_import_adapter.import_dataframe_from_path(file_path, sep=",")
    # convert the event stream into an XES-style event log
    log = conversion_factory.apply(event_stream,
                                   parameters={constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "日期和时间"})
    # apply a miner to the log; here we simply apply the Alpha Miner
    net, initial_marking, final_marking = alpha_miner.apply(log)
    # build the visualization of the analysis result
    gviz = visualizer.apply(net, initial_marking, final_marking)
    # show the result
    visualizer.view(gviz)
def execute_script():
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_importer.import_log(log_path)
    net, marking, final_marking = inductive_factory.apply(log)
    for place in marking:
        print("initial marking " + place.name)
    for place in final_marking:
        print("final marking " + place.name)
    gviz = pn_vis_factory.apply(net, marking, final_marking, parameters={"format": "svg", "debug": True})
    pn_vis_factory.view(gviz)
    if True:
        fit_traces = []
        for i in range(0, len(log)):
            try:
                print("\n", i, [x["concept:name"] for x in log[i]])
                cf_result = pm4py.algo.conformance.alignments.versions.state_equation_a_star.apply(
                    log[i], net, marking, final_marking)['alignment']
                if cf_result is None:
                    print("alignment is none!")
                else:
                    is_fit = True
                    for couple in cf_result:
                        print(couple)
                        if not (couple[0] == couple[1] or couple[0] == ">>" and couple[1] is None):
                            is_fit = False
                    print("isFit = " + str(is_fit))
                    if is_fit:
                        fit_traces.append(log[i])
            except TypeError:
                print("EXCEPTION ", i)
                traceback.print_exc()
        print(fit_traces)
        print(len(fit_traces))
def startSimulator(env, df2, tns):
    # use iloc to access elements (iat or at would also work for matrix-style access)
    sfc_sim = str(df2.iloc[0]['SFC'])[:6]
    sfc_act = int(sfc_sim)
    sfc_gen = int(sfc_generator())
    print(sfc_act)
    print(sfc_gen)
    merged_sfc = str(sfc_act) + str(sfc_gen)
    print(f'Starting simulation for the SFC {merged_sfc}')
    # print(f'Processed at the resources {resource_list}')
    df_construct = {}
    new_df = pd.DataFrame()
    # print(new_df)
    for index, row in df2.iterrows():
        # print(index, row['OPERATION'], row['PROCESSING_TIME_SECS'], row['WAITING_TIME_SECS'])
        if index + 1 > tns:
            break
        row['SFC'] = merged_sfc
        row['case:concept:name'] = row['WORK_CENTER']
        row['concept:name'] = row['OPERATION']
        row['org:resource'] = row['RESRCE']
        # row['DATE_TIME'] = f"{datetime.now():%d-%m-%Y %H:%M:%S}"
        row['DATE_TIME'] = f"{datetime.fromtimestamp(env.now):%d-%m-%Y %H:%M:%S}"
        row['time:timestamp'] = row['DATE_TIME']
        curr_row = pd.DataFrame([row])
        pt_time = int(row['PROCESSING_TIME_SECS'])
        print(pt_time)
        yield env.timeout(pt_time)
        new_df = new_df.append([curr_row], ignore_index=True)
        # print(new_df)
    # Petri net representation of the simulated log
    log = conversion_factory.apply(new_df)
    print(log)
    net, initial_marking, final_marking = alpha_miner.apply(log)
    gviz = vis_factory.apply(net, initial_marking, final_marking)
    vis_factory.view(gviz)
from pm4py.algo.simulation.tree_generator import factory as pt_gen
from pm4py.objects.conversion.process_tree import factory as pt_conv
from pm4py.visualization.petrinet import factory as pn_viz
from pm4py.visualization.process_tree import factory as pt_viz
from pm4py.objects.process_tree import util as pt_util
from pm4py.objects.petri import utils as pn_util
import time
from pm4py.objects.petri.exporter import factory as pn_exp

if __name__ == '__main__':
    pt = pt_gen.apply()
    gviz = pt_viz.apply(pt, parameters={'format': 'svg'})
    pt_viz.view(gviz)
    time.sleep(1)
    pt = pt_util.fold(pt)
    gviz = pt_viz.apply(pt, parameters={'format': 'svg'})
    pt_viz.view(gviz)
    time.sleep(1)
    pn, ini, fin = pt_conv.apply(pt)
    gviz = pn_viz.apply(pn, ini, fin, parameters={"format": "svg"})
    pn_viz.view(gviz)
    time.sleep(1)
    pn_exp.apply(pn, ini, 'C:/Users/zelst/Desktop/translation_test.pnml', final_marking=fin)
def apply(trace, petri_net, initial_marking, final_marking, parameters=None, debug_print=False,
          derive_heuristic=True, dijkstra=False, recalculate_heuristic_open_set=True):
    start_time = time.time()
    duration_solving_lps_total = 0
    activity_key = DEFAULT_NAME_KEY if parameters is None or PARAMETER_CONSTANT_ACTIVITY_KEY not in parameters else \
        parameters[pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY]
    incremental_trace = Trace()
    # create empty closed and open set
    open_set = []
    closed_set = set()
    first_event = True
    alignment = None
    visited_states_total = 0
    traversed_arcs_total = 0
    queued_states_total = 0
    intermediate_results = []
    heuristic_computation_time_total = 0
    number_solved_lps_total = 0
    for event in trace:
        start_time_trace = time.time()
        incremental_trace.append(event)
        if debug_print:
            print(incremental_trace)
        if first_event:
            # activity_key: :class:`str` key of the attribute of the events that defines the activity name
            trace_net, trace_im, trace_fm = petri.utils.construct_trace_net(incremental_trace,
                                                                            activity_key=activity_key)
            sync_prod, sync_im, sync_fm = petri.synchronous_product.construct(trace_net, trace_im, trace_fm,
                                                                              petri_net, initial_marking,
                                                                              final_marking, SKIP)
            first_event = False
        else:
            sync_prod, sync_fm = petri.synchronous_product.extend_trace_net_of_synchronous_product_net(
                sync_prod, event, sync_fm, SKIP, activity_key)
        if debug_print:
            gviz = pn_vis_factory.apply(sync_prod, sync_im, sync_fm,
                                        parameters={"debug": True, "format": "svg"})
            pn_vis_factory.view(gviz)
        cost_function = alignments.utils.construct_standard_cost_function(sync_prod, SKIP)
        prefix_alignment, open_set, closed_set, duration_solving_lps, number_solved_lps = __search(
            sync_prod, sync_im, sync_fm, cost_function, SKIP, open_set, closed_set,
            derive_heuristic=derive_heuristic, dijkstra=dijkstra,
            recalculate_heuristic_open_set=recalculate_heuristic_open_set)
        duration_solving_lps_total += duration_solving_lps
        alignment = prefix_alignment
        # update statistic values
        visited_states_total += prefix_alignment['visited_states']
        traversed_arcs_total += prefix_alignment['traversed_arcs']
        queued_states_total += prefix_alignment['queued_states']
        heuristic_computation_time_total += duration_solving_lps
        number_solved_lps_total += number_solved_lps
        res = {'trace_length': len(incremental_trace),
               'alignment': prefix_alignment['alignment'],
               'cost': prefix_alignment['cost'],
               'visited_states': prefix_alignment['visited_states'],
               'queued_states': prefix_alignment['queued_states'],
               'traversed_arcs': prefix_alignment['traversed_arcs'],
               'total_computation_time': time.time() - start_time_trace,
               'heuristic_computation_time': duration_solving_lps,
               'number_solved_lps': number_solved_lps}
        intermediate_results.append(res)
        if debug_print:
            print(prefix_alignment)
            print_alignment(prefix_alignment)
            print("cost: ", prefix_alignment["cost"])
            print(open_set)
            print(closed_set)
            print("\n\n---------------------------------------------------\n\n")
    duration_total = time.time() - start_time
    res = {'alignment': alignment['alignment'],
           'cost': alignment['cost'],
           'visited_states': visited_states_total,
           'queued_states': queued_states_total,
           'traversed_arcs': traversed_arcs_total,
           'total_computation_time': duration_total,
           'heuristic_computation_time': heuristic_computation_time_total,
           'number_solved_lps': number_solved_lps_total,
           'intermediate_results': intermediate_results}
    return res
from pm4py.visualization.process_tree import factory as pt_viz
from pm4py.algo.simulation.tree_generator import factory as pt_gen
from pm4py.objects.process_tree import util as pt_util
from pm4py.objects.conversion.process_tree import factory as pt_conv
from pm4py.visualization.petrinet import factory as pn_viz
from pm4py.objects.petri import utils as pn_util
import time

if __name__ == "__main__":
    pt = pt_gen.apply()
    pt_viz.view(pt_viz.apply(pt, parameters={"format": "svg"}))
    time.sleep(1)
    pn, im, fm = pt_conv.apply(pt)
    pn_viz.view(pn_viz.apply(pn, parameters={'format': 'svg'}))
    time.sleep(1)
    pt = pt_util.fold(pt)
    pt_viz.view(pt_viz.apply(pt, parameters={"format": "svg"}))
def plot_petri_net(path_to_petri_net):
    net, im, fm = petri.importer.pnml.import_net(path_to_petri_net)
    gviz = petri_net_visualization_factory.apply(net, im, fm, parameters={"debug": False, "format": "svg"})
    petri_net_visualization_factory.view(gviz)
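# Hypothetical usage of plot_petri_net; "model.pnml" is only a placeholder for a
# PNML file exported elsewhere:
# plot_petri_net("model.pnml")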
def draw_lock_pn4pt(tree, parameters=None):
    net, initial_marking, final_marking = pt_to_lock_net.apply(
        tree, parameters)
    gviz = pn_vis_factory.apply(net)
    pn_vis_factory.view(gviz)
import os

from tests.translucent_event_log_new.objects.tel.importer.xes.utils import log_to_tel
from pm4py.algo.discovery.alpha import factory as alpha_miner
from tests.translucent_event_log_new.objects.tel.utils import tel_set_enabled
from pm4py.visualization.transition_system import factory as vis_factory
from pm4py.visualization.petrinet import factory as petri_factory
from tests.translucent_event_log_new.algo.discover_automaton import utils
from datetime import timedelta
from tests.translucent_event_log_new.algo.discover_petrinet import state_based_region as sb

input_file_path = os.path.join("input_data", "running-example_tel.xes")
log = import_tel(input_file_path)
tel = tel_set_enabled(log)

auto = utils.discover_annotated_automaton(tel)
gviz = vis_factory.apply(auto)
vis_factory.view(gviz)  # show automaton

auto = utils.discover_annotated_automaton(tel, parameters={'sfreq_thresh': 2, 'afreq_thresh': 3})
gviz = vis_factory.apply(auto)
vis_factory.view(gviz)  # show automaton

nett, im, fm = sb.petri_net_synthesis(auto)
gviz = petri_factory.apply(nett, im, fm)
petri_factory.view(gviz)
import os

from pm4py.objects.log.importer.csv import factory as csv_importer
from pm4py.objects.conversion.log import factory as conversion_factory
from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.visualization.petrinet import factory as vis_factory
from tests.translucent_event_log_new.algo.discover_petrinet.alpha_revise import trans_alpha

event_stream = csv_importer.import_event_stream(
    os.path.join("input_data", "sample.csv"))
log = conversion_factory.apply(event_stream)

net, im, fm = trans_alpha(log)
nett, imm, fmm = alpha_miner.apply(log)

gviz = vis_factory.apply(net, im, fm)
gvizz = vis_factory.apply(nett, imm, fmm)
vis_factory.view(gviz)
vis_factory.view(gvizz)
def __calculate_prefix_alignment_for_next_event(process_net, sync_net, initial_marking, final_marking,
                                                marking_after_prefix_alignment, cost_function, skip,
                                                prefix_alignment, trace, activity_key, window_size,
                                                debug_print=False):
    start_time = time.time()
    event_to_align = trace[len(trace) - 1]
    activity_name = event_to_align.get_dict()[activity_key]
    if debug_print:
        print("Next Event: ", activity_name)

    if window_size > 0:
        prefix_alignment_reverted = []
        marking_after_prefix_alignment = initial_marking
        cost_so_far = 0
        upper_limit_for_search = 1999
        if len(prefix_alignment) > 0:
            upper_limit_for_search = prefix_alignment[-1]['cost_so_far'] + 1999
            # revert the prefix alignment by window_size moves
            prefix_alignment_reverted = prefix_alignment[:-window_size]
            if len(prefix_alignment_reverted) > 0:
                marking_after_prefix_alignment = prefix_alignment_reverted[-1]["marking_after_transition"]
                cost_so_far = prefix_alignment_reverted[-1]['cost_so_far']
                # cost for log move = 1000 plus 999 to allow executing up to 999 arbitrary silent transitions
            else:
                marking_after_prefix_alignment = initial_marking
                cost_so_far = 0
        if debug_print:
            print("START FROM SCRATCH Reverted Marking")
            gviz = petri_net_visualization_factory.apply(
                sync_net, marking_after_prefix_alignment, final_marking,
                parameters={'debug': True, "format": "svg"})
            petri_net_visualization_factory.view(gviz)
        res = __search(sync_net, marking_after_prefix_alignment, final_marking, cost_function, skip,
                       cost_so_far, upper_limit_for_search=upper_limit_for_search)
        return {'alignment': prefix_alignment_reverted + res['alignment'],
                'cost': res['cost'],
                'visited_states': res['visited_states'],
                'queued_states': res['queued_states'],
                'traversed_arcs': res['traversed_arcs'],
                'total_computation_time': time.time() - start_time,
                'heuristic_computation_time': res['heuristic_computation_time'],
                'number_solved_lps': res['number_solved_lps']}

    if len(prefix_alignment) > 0:
        cost_so_far = prefix_alignment[-1]['cost_so_far']
        upper_limit_for_search = prefix_alignment[-1]['cost_so_far'] + 1999
    else:
        cost_so_far = 0
        upper_limit_for_search = math.inf

    # check if there is a model move/synchronous move transition labelled equally to event_to_align
    for t in process_net.transitions:
        if t.label == activity_name:
            # there is a corresponding transition in the process net
            synchronous_move_transition = None
            for t_s in sync_net.transitions:
                if t_s.label[0] == t_s.label[1] == activity_name and \
                        t_s in enabled_transitions(sync_net, marking_after_prefix_alignment):
                    # there is a corresponding enabled synchronous move transition in the synchronous product net
                    synchronous_move_transition = t_s
                    break
            if synchronous_move_transition:
                # ADD SYNCHRONOUS MOVE
                if debug_print:
                    print("ADD SYNCHRONOUS MOVE ")
                new_marking = petri.semantics.execute(synchronous_move_transition, sync_net,
                                                      marking_after_prefix_alignment)
                cost_of_synchronous_move = cost_function[synchronous_move_transition]
                if len(prefix_alignment) > 0:
                    cost_prefix_alignment = cost_so_far + cost_of_synchronous_move
                else:
                    # first step in the alignment
                    cost_prefix_alignment = cost_of_synchronous_move
                # add the synchronous move to the alignment
                prefix_alignment = prefix_alignment + [
                    {"marking_before_transition": marking_after_prefix_alignment,
                     "label": synchronous_move_transition.label,
                     "name": synchronous_move_transition.name,
                     "cost_so_far": cost_so_far + cost_function[synchronous_move_transition],
                     "marking_after_transition": new_marking}]
                return {'alignment': prefix_alignment,
                        'cost': cost_prefix_alignment,
                        'visited_states': 0,
                        'queued_states': 0,
                        'traversed_arcs': 0,
                        'total_computation_time': time.time() - start_time,
                        'heuristic_computation_time': 0,
                        'number_solved_lps': 0}
            else:
                # USE A* TO FIND A NEW OPTIMAL ALIGNMENT
                if debug_print:
                    print("START FROM SCRATCH -> A*")
                res = __search(sync_net, initial_marking, final_marking, cost_function, skip, 0,
                               upper_limit_for_search=upper_limit_for_search)
                return {'alignment': res['alignment'],
                        'cost': res['cost'],
                        'visited_states': res['visited_states'],
                        'queued_states': res['queued_states'],
                        'traversed_arcs': res['traversed_arcs'],
                        'total_computation_time': time.time() - start_time,
                        'heuristic_computation_time': res['heuristic_computation_time'],
                        'number_solved_lps': res['number_solved_lps']}

    # no corresponding transition found -> ADD LOG MOVE
    if debug_print:
        print("ADD LOG MOVE")
    for t in sync_net.transitions:
        if is_log_move(t, skip) and t.label[0] == activity_name and \
                petri.semantics.is_enabled(t, sync_net, marking_after_prefix_alignment):
            new_marking = petri.semantics.execute(t, sync_net, marking_after_prefix_alignment)
            prefix_alignment = prefix_alignment + [
                {"marking_before_transition": marking_after_prefix_alignment,
                 "label": t.label,
                 "name": t.name,
                 "cost_so_far": 1000 + cost_so_far,
                 "marking_after_transition": new_marking}]
            return {'alignment': prefix_alignment,
                    'cost': 1000 + cost_so_far,
                    'visited_states': 0,
                    'queued_states': 0,
                    'traversed_arcs': 0,
                    'total_computation_time': time.time() - start_time,
                    'heuristic_computation_time': 0,
                    'number_solved_lps': 0}
    raise Exception('No corresponding log move transition found in sync net')
#-----------------
from pm4py.objects.log.importer.csv import factory as csv_importer

excellentLog1A = csv_importer.import_event_stream('Excellent1A_fixed.csv')

from pm4py.objects.conversion.log import factory as conversion_factory

log1 = conversion_factory.apply(excellentLog1A)

# dfg1 was used below but never defined; it is assumed here to come from the DFG discovery factory
from pm4py.algo.discovery.dfg import factory as dfg_factory

dfg1 = dfg_factory.apply(log1)

from pm4py.visualization.dfg import factory as dfg_vis_factory

gviz = dfg_vis_factory.apply(dfg1, log=log1, variant="frequency")
dfg_vis_factory.view(gviz)

from pm4py.objects.conversion.dfg import factory as dfg_mining_factory

net, im, fm = dfg_mining_factory.apply(dfg1)

from pm4py.visualization.petrinet import factory as pn_vis_factory

gviz = pn_vis_factory.apply(net, im, fm)
pn_vis_factory.view(gviz)

from pm4py.evaluation.replay_fitness import factory as replay_factory

fitness_alpha = replay_factory.apply(log1, net, im, fm)

from pm4py.algo.conformance.alignments import factory as align_factory

alignments = align_factory.apply(log1, net, im, fm)
print(alignments)

#excellentLog1A = excellentLog1A.sort_values(by=['org:resource','case','time:timestamp'])
def apply(trace, petri_net, initial_marking, final_marking, window_size=0, parameters=None, debug_print=False):
    start_time = time.time()
    activity_key = DEFAULT_NAME_KEY if parameters is None or PARAMETER_CONSTANT_ACTIVITY_KEY not in parameters else \
        parameters[pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY]
    incremental_trace = Trace()
    first_event = True
    prefix_alignment = []
    visited_states_total = 0
    traversed_arcs_total = 0
    queued_states_total = 0
    intermediate_results = []
    heuristic_computation_time_total = 0
    number_solved_lps_total = 0
    current_marking = None
    for event in trace:
        start_time_trace = time.time()
        incremental_trace.append(event)
        if debug_print:
            trace_as_string = ""
            for e in incremental_trace:
                trace_as_string += "," + e[activity_key]
            print(trace_as_string)
            print("event", len(incremental_trace), "/", len(trace), "\n")
        if first_event:
            # activity_key: :class:`str` key of the attribute of the events that defines the activity name
            trace_net, trace_im, trace_fm = petri.utils.construct_trace_net(
                incremental_trace, activity_key=activity_key)
            sync_prod, sync_im, sync_fm = petri.synchronous_product.construct(
                trace_net, trace_im, trace_fm, petri_net, initial_marking, final_marking, SKIP)
            first_event = False
            current_marking = sync_im
        else:
            sync_prod, sync_fm = petri.synchronous_product.extend_trace_net_of_synchronous_product_net(
                sync_prod, event, sync_fm, SKIP, activity_key)
        if debug_print:
            gviz = pn_vis_factory.apply(sync_prod, sync_im, sync_fm,
                                        parameters={"debug": True, "format": "svg"})
            pn_vis_factory.view(gviz)
        cost_function = alignments.utils.construct_standard_cost_function(sync_prod, SKIP)
        if not current_marking:
            current_marking = sync_im
        res = __calculate_prefix_alignment_for_next_event(
            petri_net, sync_prod, sync_im, sync_fm, current_marking, cost_function, SKIP,
            prefix_alignment, incremental_trace, activity_key, window_size, debug_print=debug_print)
        prefix_alignment = res['alignment']
        # update statistic values
        visited_states_total += res['visited_states']
        traversed_arcs_total += res['traversed_arcs']
        queued_states_total += res['queued_states']
        heuristic_computation_time_total += res['heuristic_computation_time']
        number_solved_lps_total += res['number_solved_lps']
        intermediate_res = {'trace_length': len(incremental_trace),
                            'alignment': res['alignment'],
                            'cost': res['cost'],
                            'visited_states': res['visited_states'],
                            'queued_states': res['queued_states'],
                            'traversed_arcs': res['traversed_arcs'],
                            'total_computation_time': time.time() - start_time_trace,
                            'heuristic_computation_time': res['heuristic_computation_time'],
                            'number_solved_lps': res['number_solved_lps']}
        intermediate_results.append(intermediate_res)
        current_marking = res['alignment'][-1]['marking_after_transition']
        if debug_print:
            print_alignment(res)
            print("\n Marking:")
            print(current_marking)
    duration_total = time.time() - start_time
    return {'alignment': res['alignment'],
            'cost': res['cost'],
            'visited_states': visited_states_total,
            'queued_states': queued_states_total,
            'traversed_arcs': traversed_arcs_total,
            'total_computation_time': duration_total,
            'heuristic_computation_time': heuristic_computation_time_total,
            'number_solved_lps': number_solved_lps_total,
            'intermediate_results': intermediate_results}
#Topic:
#-----------------------------
#libraries
import os  # needed for os.path.join below; this import was missing

from pm4py.algo.discovery.alpha import factory as alpha_miner
from pm4py.objects.log.importer.xes import factory as importer
from pm4py.visualization.petrinet import factory as visualizer
from pm4py.objects.log.importer.csv import factory as csv_importer

event_stream = csv_importer.import_event_stream(
    os.path.join("pmdata/", "running-example.csv"))
event_stream

event_stream_length = len(event_stream)
print(event_stream_length)

for event in event_stream:
    print(event)

from pm4py.objects.conversion.log import factory as conversion_factory

log = conversion_factory.apply(event_stream)

from pm4py.objects.log.exporter.csv import factory as csv_exporter

csv_exporter.export(event_stream, "data/outputFile1.csv")

#log = importer.apply('pmdata/running-example.xes')
net, initial_marking, final_marking = alpha_miner.apply(log)
gviz = visualizer.apply(net, initial_marking, final_marking)
visualizer.view(gviz)
def execute_script():
    time1 = time.time()
    dataframe = csv_import_adapter.import_dataframe_from_path_wo_timeconversion(
        inputLog, sep=SEP, quotechar=QUOTECHAR)
    time2 = time.time()
    print("time2 - time1: " + str(time2 - time1))
    parameters_filtering = {
        constants.PARAMETER_CONSTANT_CASEID_KEY: CASEID_GLUE,
        constants.PARAMETER_CONSTANT_ACTIVITY_KEY: ACTIVITY_KEY
    }
    if enable_auto_filter:
        dataframe = auto_filter.apply_auto_filter(
            dataframe, parameters=parameters_filtering)
    else:
        dataframe = attributes_filter.apply_auto_filter(
            dataframe, parameters=parameters_filtering)
    time3 = time.time()
    print("time3 - time2: " + str(time3 - time2))
    if enable_filtering_on_cases:
        dataframe = case_filter.filter_on_ncases(dataframe, case_id_glue=CASEID_GLUE,
                                                 max_no_cases=max_no_cases)
    time4 = time.time()
    dataframe = csv_import_adapter.convert_caseid_column_to_str(
        dataframe, case_id_glue=CASEID_GLUE)
    dataframe = csv_import_adapter.convert_timestamp_columns_in_df(
        dataframe, timest_columns=TIMEST_COLUMNS, timest_format=TIMEST_FORMAT)
    time6 = time.time()
    print("time6 - time4: " + str(time6 - time4))
    # dataframe = dataframe.sort_values('time:timestamp')
    time7 = time.time()
    print("time7 - time6: " + str(time7 - time6))
    # show the filtered dataframe on the screen
    activities_count = attributes_filter.get_attribute_values(
        dataframe, attribute_key=ACTIVITY_KEY)
    [dfg_frequency, dfg_performance] = df_statistics.get_dfg_graph(dataframe, measure="both",
                                                                   perf_aggregation_key="median",
                                                                   case_id_glue=CASEID_GLUE,
                                                                   activity_key=ACTIVITY_KEY,
                                                                   timestamp_key=TIMEST_KEY)
    if enable_filtering_df:
        print("len dfg_frequency 0=", len(dfg_frequency))
        dfg_frequency = dfg_filtering.apply(
            dfg_frequency, {"noiseThreshold": filtering_df_noise})
        print("len dfg_frequency 1=", len(dfg_frequency))
    time8 = time.time()
    print("time8 - time7: " + str(time8 - time7))
    gviz = dfg_vis_factory.apply(dfg_frequency, activities_count=activities_count,
                                 parameters={"format": "svg"})
    dfg_vis_factory.view(gviz)
    net, initial_marking, final_marking = inductive_factory.apply_dfg(dfg_frequency)
    # net, initial_marking, final_marking = alpha_factory.apply_dfg(dfg_frequency)
    spaths = get_shortest_paths(net)
    time9 = time.time()
    print("time9 - time8: " + str(time9 - time8))
    aggregated_statistics = get_decorations_from_dfg_spaths_acticount(
        net, dfg_performance, spaths, activities_count, variant="performance")
    gviz = pn_vis_factory.apply(net, initial_marking, final_marking, variant="performance",
                                aggregated_statistics=aggregated_statistics,
                                parameters={"format": "svg"})
    time10 = time.time()
    print("time10 - time9: " + str(time10 - time9))
    print("time10 - time1: " + str(time10 - time1))
    pn_vis_factory.view(gviz)