def create_log(G, conn_comp, timestamps, max_comp_len=50, include_loops=False):
    """
    Build an EventLog from the connected components of a graph.

    Each connected component with at most ``max_comp_len`` nodes becomes one
    trace. Self-loop edges are removed from a working copy of the subgraph so
    that a topological sort is possible; nodes are appended as events in
    topological order.

    Parameters
    ----------
    G
        Directed graph whose node identifiers have the shape "type=value"
    conn_comp
        List of connected components (collections of node identifiers)
    timestamps
        Dictionary mapping the "value" part of a node id to a timestamp
    max_comp_len
        Components larger than this are skipped (default 50)
    include_loops
        If True, a node that had a self-loop is appended twice

    Returns
    -------
    log
        Event log sorted by timestamp
    """
    log = EventLog()
    for i, comp in enumerate(conn_comp):
        if len(comp) > max_comp_len:
            # component too large: skip it entirely
            continue
        trace = Trace()
        trace.attributes["concept:name"] = str(i)
        SG = G.subgraph(comp)
        # work on a copy so removing self-loops does not touch the original
        SGG = networkx.DiGraph(SG)
        for u, v in list(SGG.edges):
            if u == v:
                SGG.remove_edge(u, v)
        for n in networkx.topological_sort(SGG):
            # split "type=value" once instead of four times per node
            parts = n.split("=")
            node_type, node_value = parts[0], parts[1]
            # self-loop info is taken from the ORIGINAL subgraph
            selfloop = 1 if (n, n) in SG.edges else 0
            payload = {
                'time:timestamp': timestamps[node_value],
                'concept:name': node_type,
                'value': node_value,
                'typevalue': n,
                'selfloop': selfloop
            }
            trace.append(Event(payload))
            if include_loops and selfloop:
                # duplicate the event to materialize the self-loop
                trace.append(Event(dict(payload)))
        log.append(trace)
    log = sorting.sort_timestamp_log(log, "time:timestamp")
    return log
def apply_from_variants_list(var_list, parameters=None):
    """
    Discovers the log skeleton from the variants list

    Parameters
    ---------------
    var_list
        Variants list
    parameters
        Parameters

    Returns
    ---------------
    model
        Log skeleton model
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes.DEFAULT_NAME_KEY)
    variant_delimiter = exec_utils.get_param_value(Parameters.PARAMETER_VARIANT_DELIMITER, parameters,
                                                   constants.DEFAULT_VARIANT_SEP)
    # materialize each variant (first element of the couple) as a trace
    log = EventLog()
    for couple in var_list:
        variant = couple[0]
        trace = Trace()
        for activity in variant.split(variant_delimiter):
            trace.append(Event({activity_key: activity}))
        log.append(trace)
    return apply(log, parameters=parameters)
def apply(df, parameters=None):
    """
    Convert a dataframe into a log containing 1 case per variant (only control-flow
    perspective is considered)

    Parameters
    -------------
    df
        Dataframe
    parameters
        Parameters of the algorithm

    Returns
    -------------
    log
        Event log
    """
    from pm4py.statistics.traces.pandas import case_statistics
    if parameters is None:
        parameters = {}
    variant_stats = case_statistics.get_variant_statistics(df, parameters=parameters)
    # resolve the activity key from the parameters, falling back to the default
    if pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters:
        activity_key = parameters[pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY]
    else:
        activity_key = xes.DEFAULT_NAME_KEY
    log = EventLog()
    for stat in variant_stats:
        trace = Trace()
        for act in stat['variant'].split(","):
            evt = Event()
            evt[activity_key] = act
            trace.append(evt)
        log.append(trace)
    return log
def import_tel_from_yawl(input_file_path):
    '''
    Imports translucent event log from yawl logging

    Parameters
    ----------
    :param input_file_path: input file path of yawl logging

    Returns
    --------
    :return: translucent event log (only complete)
    '''
    log = import_tel(input_file_path)
    new_log = EventLog()
    # set of activity names currently scheduled (i.e. enabled) but not yet completed
    # NOTE(review): this set is NOT reset between traces, so enablement can leak
    # from one trace into the next — confirm whether this is intended
    s = set()
    for trace in log:
        new_trace = Trace()
        ci = trace.attributes['concept:name']
        for event in trace:
            # only consider events belonging to the trace's own case instance
            if event['lifecycle:instance'] == ci:
                if event['lifecycle:transition'] == 'schedule':
                    s.add(event['concept:name'])
                elif event['lifecycle:transition'] == 'complete':
                    # record the set of enabled activities at completion time;
                    # only 'complete' events are kept in the output log
                    event.set_enabled(frozenset(s))
                    new_trace.append(event)
                    s.remove(event['concept:name'])
        new_log.append(new_trace)
    return new_log
def project(log: EventLog, cut: Cut, activity_key: str) -> List[EventLog]:
    """
    Project a log onto a loop cut: the first group of the cut is the "do"
    part, all remaining groups are "redo" parts.

    Parameters
    ----------
    log
        Event log to project
    cut
        Cut: cut[0] is the set of "do" activities, cut[1:] the "redo" groups
    activity_key
        Attribute key used to read the activity of an event

    Returns
    -------
    logs
        [do_log, redo_log_1, ..., redo_log_n]
    """
    do = cut[0]
    redo = cut[1:]
    do_log = EventLog()
    # BUG FIX: "[EventLog()] * len(redo)" puts the SAME EventLog object in
    # every slot, so all redo groups would share one log; build independent
    # instances instead.
    redo_logs = [EventLog() for _ in redo]
    for t in log:
        do_trace = Trace()
        redo_trace = Trace()
        for e in t:
            if e[activity_key] in do:
                do_trace.append(e)
                # a do-event closes any pending redo sub-trace
                if len(redo_trace) > 0:
                    redo_logs = _append_trace_to_redo_log(redo_trace, redo_logs, redo, activity_key)
                    redo_trace = Trace()
            else:
                redo_trace.append(e)
                # a redo-event closes any pending do sub-trace
                if len(do_trace) > 0:
                    do_log.append(do_trace)
                    do_trace = Trace()
        # flush whatever is still open at the end of the trace
        if len(redo_trace) > 0:
            redo_logs = _append_trace_to_redo_log(redo_trace, redo_logs, redo, activity_key)
        do_log.append(do_trace)
    logs = [do_log]
    logs.extend(redo_logs)
    return logs
def get_log_with_log_prefixes(log, parameters=None):
    """
    Gets an extended log that contains, in order, all the prefixes for a case of the
    original log

    Parameters
    --------------
    log
        Original log
    parameters
        Possible parameters of the algorithm

    Returns
    -------------
    all_prefixes_log
        Log with all the prefixes
    change_indexes
        Indexes of the extended log where there was a change between cases
    """
    all_prefixes_log = EventLog()
    change_indexes = []
    for trace in log:
        prefix = Trace()
        # the empty prefix comes first, then one growing prefix per event
        all_prefixes_log.append(deepcopy(prefix))
        for event in trace:
            prefix.append(event)
            all_prefixes_log.append(deepcopy(prefix))
        # every prefix of this case maps to the index of its full trace
        change_indexes.append([len(all_prefixes_log) - 1] * len(trace))
    return all_prefixes_log, change_indexes
def list_of_str_to_trace(activities: List[str]) -> Trace:
    """Wrap a list of activity labels into a Trace of bare events."""
    trace = Trace()
    for label in activities:
        event = Event()
        event["concept:name"] = label
        trace.append(event)
    return trace
def get_log_with_log_prefixes(log, parameters=None):
    """
    Gets an extended log that contains, in order, all the prefixes for a case of the
    original log

    Parameters
    --------------
    log
        Original log
    parameters
        Possible parameters of the algorithm

    Returns
    -------------
    all_prefixes_log
        Log with all the prefixes
    """
    all_prefixes_log = EventLog()
    for trace in log:
        prefix = Trace()
        # the empty prefix comes first, then one growing prefix per event
        all_prefixes_log.append(deepcopy(prefix))
        for event in trace:
            prefix.append(event)
            all_prefixes_log.append(deepcopy(prefix))
    return all_prefixes_log
def apply(df, parameters=None):
    """
    Convert a dataframe into a log containing 1 case per variant (only control-flow
    perspective is considered)

    Parameters
    -------------
    df
        Dataframe
    parameters
        Parameters of the algorithm

    Returns
    -------------
    log
        Event log
    """
    if parameters is None:
        parameters = {}
    variant_stats = case_statistics.get_variant_statistics(df, parameters=parameters)
    log = EventLog()
    for stat in variant_stats:
        # one trace per variant; activities are comma-separated in the variant string
        trace = Trace()
        for act in stat['variant'].split(","):
            evt = Event()
            evt[xes.DEFAULT_NAME_KEY] = act
            trace.append(evt)
        log.append(trace)
    return log
def apply_tree_variants(variants, parameters=None):
    """
    Apply the IM algorithm to a dictionary of variants obtaining a process tree

    Parameters
    ----------
    variants
        Variants
    parameters
        Parameters of the algorithm, including:
            Parameters.ACTIVITY_KEY -> attribute of the log_skeleton to use as activity name
            (default concept:name)

    Returns
    ----------
    process_tree
        Process tree
    """
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters,
                                              xes_constants.DEFAULT_NAME_KEY)
    # turn each variant string into a one-trace representation of that variant
    log = EventLog()
    for variant in list(variants.keys()):
        trace = Trace()
        for act in variant.split(constants.DEFAULT_VARIANT_SEP):
            trace.append(Event({activity_key: act}))
        log.append(trace)
    return apply_tree(log, parameters=parameters)
def create_trace(labels: List[str]) -> Trace:
    """Build a Trace whose events carry the given labels as concept:name."""
    trace = Trace()
    for name in labels:
        event = Event()
        event["concept:name"] = name
        trace.append(event)
    return trace
def apply(bytes, parameters=None):
    """
    Apply the deserialization to the bytes produced by Pyarrow serialization

    Parameters
    --------------
    bytes
        Bytes
    parameters
        Parameters of the algorithm

    Returns
    --------------
    deser
        Deserialized object
    """
    if parameters is None:
        parameters = {}
    buffer = pyarrow.py_buffer(bytes)
    list_objs = pyarrow.deserialize(buffer)
    # list_objs layout: [0]=log attributes, [1]=extensions, [2]=omni-present,
    # [3]=classifiers, [4]=per-trace attributes, [5]=per-trace event dicts
    log = EventLog(attributes=list_objs[0], extensions=list_objs[1],
                   omni_present=list_objs[2], classifiers=list_objs[3])
    for trace_attributes, trace_events in zip(list_objs[4], list_objs[5]):
        trace = Trace(attributes=trace_attributes)
        for event_dict in trace_events:
            trace.append(Event(event_dict))
        log.append(trace)
    return log
def generate_log(pt, no_traces=100):
    """
    Generate a log out of a process tree

    Parameters
    ------------
    pt
        Process tree
    no_traces
        Number of traces contained in the process tree

    Returns
    ------------
    log
        Trace log object
    """
    log = TraceLog()
    for idx in range(no_traces):
        # execute the tree once and project the execution sequence to labels
        execution_sequence = execute(pt)
        labels = pt_util.project_execution_sequence_to_labels(execution_sequence)
        trace = Trace()
        trace.attributes[xes.DEFAULT_NAME_KEY] = str(idx)
        for label in labels:
            event = Event()
            event[xes.DEFAULT_NAME_KEY] = label
            trace.append(event)
        log.append(trace)
    return log
def variant_to_trace(variant, parameters=None):
    """
    Convert a variant into a Trace.

    Parameters
    ----------
    variant
        Either a tuple/list of activity names, or a single string of
        activities separated by the variant delimiter
    parameters
        Parameters of the algorithm (activity key, variant delimiter)

    Returns
    -------
    trace
        Trace with one event per activity (empty for unsupported types,
        matching the original fall-through behavior)
    """
    if parameters is None:
        parameters = {}

    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters,
                                              xes_constants.DEFAULT_NAME_KEY)
    variant_delimiter = exec_utils.get_param_value(Parameters.PARAMETER_VARIANT_DELIMITER, parameters,
                                                   constants.DEFAULT_VARIANT_SEP)
    from pm4py.objects.log.log import Trace, Event

    # isinstance instead of exact "type(x) is" checks: also accepts
    # subclasses (backward-compatible generalization); the two identical
    # append loops are collapsed into one.
    if isinstance(variant, (tuple, list)):
        activities = variant
    elif isinstance(variant, str):
        activities = variant.split(variant_delimiter)
    else:
        activities = []

    trace = Trace()
    for act in activities:
        trace.append(Event({activity_key: act}))
    return trace
def list_to_xes(log):
    """Convert a list of ", "-separated activity strings into an EventLog."""
    traces = []
    for entry in log:
        trace = Trace()
        for label in entry.split(", "):
            event = Event()
            event["concept:name"] = label
            trace.append(event)
        traces.append(trace)
    return EventLog(traces)
def create_event_log(log):
    """
    Build an EventLog from a ", "-separated string of traces, where every
    character of a trace string is one activity.
    """
    traces = []
    for trace_str in log.split(", "):
        trace = Trace()
        for ch in trace_str:
            event = Event()
            event["concept:name"] = ch
            trace.append(event)
        traces.append(trace)
    return EventLog(traces)
def calculate_prefix_alignments_from_scratch(trace, petri_net, initial_marking, final_marking, dijkstra: bool):
    '''
    This method calculates for a trace prefix alignments by starting A* every
    time from scratch, e.g, for <e_1, e2, .. e_n>, this methods calculates first
    an alignment for <e_1>, afterwards <e_1,e_2>, ... and so on
    :return:
    '''
    # statistics that are both reported per prefix and summed over all prefixes
    stat_keys = ('visited_states', 'queued_states', 'traversed_arcs',
                 'total_computation_time', 'heuristic_computation_time',
                 'number_solved_lps')
    totals = {key: 0 for key in stat_keys}
    intermediate_results = []
    incremental_trace = Trace()
    for event in trace:
        incremental_trace.append(event)
        # full A* run on the current prefix
        res = state_equation_a_star_apply(incremental_trace, petri_net,
                                          initial_marking, final_marking,
                                          dijkstra=dijkstra)
        intermediate_result = {
            'trace_length': len(incremental_trace),
            'alignment': res['alignment'],
            'cost': res['cost'],
        }
        for key in stat_keys:
            intermediate_result[key] = res[key]
            totals[key] += res[key]
        intermediate_results.append(intermediate_result)
    # the last prefix result carries the aggregated statistics
    res['intermediate_results'] = intermediate_results
    for key in stat_keys:
        res[key] = totals[key]
    return res
def apply(tree, parameters=None):
    """
    Performs an extensive playout of the process tree

    Parameters
    -------------
    tree
        Process tree
    parameters
        Possible parameters, including:
        - Parameters.MAX_TRACE_LENGTH => maximum length of a trace (default: min_allowed_trace_length)
        - Parameters.MAX_LOOP_OCC => maximum number of occurrences for a loop (default: MAX_TRACE_LENGTH)
        - Parameters.ACTIVITY_KEY => activity key
        - Parameters.MAX_LIMIT_NUM_TRACES => maximum number to the limit of traces; the playout shall stop
        when the number is reached (default: sys.maxsize)

    Returns
    -------------
    log_skeleton
        Event log_skeleton
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes_constants.DEFAULT_NAME_KEY)
    # to save memory in the returned log_skeleton, allocate each activity once. to know the list of activities of the
    # process tree, use the footprints module
    fp_tree = fp_discovery.apply(tree, parameters=parameters)
    activities = fp_tree["activities"]
    # one shared Event object per activity, reused across all traces below
    activities = {act: Event({activity_key: act}) for act in activities}
    min_allowed_trace_length = bottomup_discovery.get_min_trace_length(tree, parameters=parameters)
    max_trace_length = exec_utils.get_param_value(Parameters.MAX_TRACE_LENGTH, parameters, min_allowed_trace_length)
    max_loop_occ = exec_utils.get_param_value(Parameters.MAX_LOOP_OCC, parameters, int(max_trace_length/2))
    max_limit_num_traces = exec_utils.get_param_value(Parameters.MAX_LIMIT_NUM_TRACES, parameters, 100000)
    return_set_strings = exec_utils.get_param_value(Parameters.RETURN_SET_STRINGS, parameters, False)
    # playout is computed bottom-up over the tree's nodes
    bottomup = bottomup_discovery.get_bottomup_nodes(tree, parameters=parameters)
    min_rem_dict = bottomup_discovery.get_min_rem_dict(tree, parameters=parameters)
    playout_dictio = {}
    for i in range(len(bottomup)):
        get_playout(bottomup[i], playout_dictio, max_trace_length, max_loop_occ, min_rem_dict, max_limit_num_traces)
    tree_playout_traces = playout_dictio[tree][TRACES]
    if return_set_strings:
        # caller asked for the raw set of trace strings, not an EventLog
        return tree_playout_traces
    log = EventLog()
    for tr0 in tree_playout_traces:
        trace = Trace()
        for act in tr0:
            # NOTE: appends the shared Event instance allocated above
            trace.append(activities[act])
        log.append(trace)
    return log
def apply(df, parameters=None):
    """
    Convert a dataframe into a log containing N case per variant (only control-flow
    perspective is considered)

    Parameters
    -------------
    df
        Dataframe
    parameters
        Parameters of the algorithm

    Returns
    -------------
    log
        Event log
    """
    from pm4py.statistics.traces.pandas import case_statistics
    if parameters is None:
        parameters = {}
    # if True, also return the mapping variant -> indexes of its traces in the log
    return_variants = parameters[RETURN_VARIANTS] if RETURN_VARIANTS in parameters else False
    case_glue = parameters[
        pm4_constants.PARAMETER_CONSTANT_CASEID_KEY] if pm4_constants.PARAMETER_CONSTANT_CASEID_KEY in parameters else pm4_constants.CASE_CONCEPT_NAME
    activity_key = parameters[
        pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY] if pm4_constants.PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else xes.DEFAULT_NAME_KEY
    variant_stats = case_statistics.get_variant_statistics(df, parameters=parameters)
    log = EventLog()
    all_variants_log = {}
    for vd in variant_stats:
        variant = vd['variant'].split(",")
        # number of cases that exhibited this variant
        variant_count = vd[case_glue]
        trace = Trace()
        for activity in variant:
            event = Event()
            event[activity_key] = activity
            trace.append(event)
        all_variants_log[vd['variant']] = []
        for i in range(variant_count):
            # NOTE(review): the SAME Trace object is appended variant_count
            # times (no copy) — mutating one occurrence mutates all; confirm
            # this sharing is intended
            log.append(trace)
            all_variants_log[vd['variant']].append(len(log) - 1)
    if return_variants:
        return log, all_variants_log
    return log
def acyclic_net_variants(net, initial_marking, final_marking, activity_key=xes_util.DEFAULT_NAME_KEY):
    """
    Given an acyclic accepting Petri net, initial and final marking extracts a set of variants (in form of traces)
    replayable on the net.
    Warning: this function is based on a marking exploration. If the accepting Petri net contains loops, the method
    will not work properly as it stops the search if a specific marking has already been encountered.

    Parameters
    ----------
    :param net: An acyclic workflow net
    :param initial_marking: The initial marking of the net.
    :param final_marking: The final marking of the net.
    :param activity_key: activity key to use

    Returns
    -------
    :return: variants: :class:`list` Set of variants - in the form of Trace objects - obtainable executing the net
    """
    # frontier of (marking, partial trace of labels) pairs still to expand
    active = {(initial_marking, ())}
    visited = set()
    variants = set()
    while active:
        curr_marking, curr_partial_trace = active.pop()
        curr_pair = (curr_marking, curr_partial_trace)
        enabled_transitions = petri.semantics.enabled_transitions(net, curr_marking)
        for transition in enabled_transitions:
            if transition.label is not None:
                # visible transition: extend the partial trace with its label
                next_partial_trace = curr_partial_trace + (transition.label,)
            else:
                # silent (tau) transition: marking changes, trace does not
                next_partial_trace = curr_partial_trace
            next_marking = petri.semantics.execute(transition, net, curr_marking)
            next_pair = (next_marking, next_partial_trace)
            if next_marking == final_marking:
                # reached the final marking: the accumulated labels form a variant
                variants.add(next_partial_trace)
            else:
                # If the next marking is not in visited, if the next marking+partial trace
                # is different from the current one+partial trace
                if next_pair not in visited and curr_pair != next_pair:
                    active.add(next_pair)
        visited.add(curr_pair)
    # materialize each label tuple as a Trace object
    trace_variants = []
    for variant in variants:
        trace = Trace()
        for activity_label in variant:
            trace.append(Event({activity_key: activity_label}))
        trace_variants.append(trace)
    return trace_variants
def get_log(self):
    """
    Collect the non-empty grid cells into an EventLog, one trace per cell.

    Returns
    -------
    log
        EventLog where each trace copies a node's events and carries the
        node's case id as concept:name
    """
    log = EventLog()
    for i in range(self.__height):
        for j in range(self.__width):
            node = self.__array[i][j]
            # "is not None" instead of "!= None": identity comparison is the
            # correct test for None (PEP 8)
            if node is not None:
                new_trace = Trace()
                new_trace.attributes['concept:name'] = node.caseid
                for event in node.trace:
                    new_trace.append(event)
                log.append(new_trace)
    return log
def form_log_from_dictio_couple(first_cases_repr, second_cases_repr, enable_multiplier=False):
    """
    Form a log from a couple of dictionary, to use for root cause analysis

    Parameters
    -------------
    first_cases_repr
        First cases representation
    second_cases_repr
        Second cases representation
    enable_multiplier
        Enable balancing of classes

    Returns
    ------------
    log
        Trace log object
    """
    log = EventLog()
    if enable_multiplier:
        # replicate the smaller class so both classes are (roughly) balanced
        multiplier_first = int(max(float(len(second_cases_repr)) / float(len(first_cases_repr)), 1))
        multiplier_second = int(max(float(len(first_cases_repr)) / float(len(second_cases_repr)), 1))
    else:
        multiplier_first = 1
        multiplier_second = 1
    # first all copies of the first representation, then all of the second;
    # each case representation becomes a one-event trace
    for cases_repr, multiplier in ((first_cases_repr, multiplier_first),
                                   (second_cases_repr, multiplier_second)):
        for _ in range(multiplier):
            for case_repr in cases_repr:
                trace = Trace()
                trace.append(Event(case_repr))
                log.append(trace)
    return log
def generate_log(pt0, actdict, no_traces=100):
    """
    Generate a log out of a process tree

    Parameters
    ------------
    pt0
        Process tree
    actdict
        Activity dictionary passed to the execution semantics
    no_traces
        Number of traces contained in the process tree

    Returns
    ------------
    log
        Trace log object
    """
    # different taus must give different IDs in log generation, so we cannot
    # use the default process tree class: GenerationTree handles that.
    # Deep-copy first so the caller's tree is never mutated.
    pt = GenerationTree(deepcopy(pt0))
    log = EventLog()
    # assigns to each event an increased timestamp, starting from this epoch offset
    curr_timestamp = 10000000
    for i in range(no_traces):
        ex_seq = execute(pt, actdict)
        ex_seq_labels = pt_util.project_execution_sequence_to_labels(ex_seq)
        trace = Trace()
        trace.attributes[xes.DEFAULT_NAME_KEY] = str(i)
        for label in ex_seq_labels:
            event = Event()
            event[xes.DEFAULT_NAME_KEY] = label
            event[xes.DEFAULT_TIMESTAMP_KEY] = datetime.datetime.fromtimestamp(curr_timestamp)
            trace.append(event)
            curr_timestamp = curr_timestamp + 1
        log.append(trace)
    return log
def filter_log_by_paths(log, paths, variants, vc, threshold, attribute_key="concept:name"):
    """
    Keep only paths which number of occurrences is above the threshold (or they belong to the first variant)

    Parameters
    ----------
    log
        Log
    paths
        Dictionary of paths associated with their count
    variants
        (If specified) Dictionary with variant as the key and the list of traces as the value
    vc
        List of variant names along with their count
    threshold
        Cutting threshold (remove paths which number of occurrences is below the threshold)
    attribute_key
        (If specified) Specify the attribute key to use (default concept:name)

    Returns
    ----------
    filtered_log
        Filtered log_skeleton
    """
    filtered_log = EventLog()
    # fvft: first trace of the most frequent variant; its paths are always kept
    fvft = variants[vc[0][0]][0]
    fvp = set()
    for i in range(0, len(fvft) - 1):
        # a "path" is the pair of consecutive attribute values, comma-joined
        path = fvft[i][attribute_key] + "," + fvft[i + 1][attribute_key]
        fvp.add(path)
    for trace in log:
        new_trace = Trace()
        # jj tracks the last inner index visited; used below to decide whether
        # the final event still needs to be appended
        jj = 0
        if len(trace) > 0:
            # the first event of a trace is always kept
            new_trace.append(trace[0])
            for j in range(1, len(trace) - 1):
                jj = j
                # NOTE(review): this guard can never fire since j < len(trace)-1
                # by construction — presumably defensive leftover; confirm
                if j >= len(trace):
                    break
                if attribute_key in trace[j] and attribute_key in trace[j + 1]:
                    path = trace[j][attribute_key] + "," + trace[j + 1][attribute_key]
                    if path in paths:
                        # keep the pair if the path belongs to the first variant
                        # or occurs at least `threshold` times
                        if path in fvp or paths[path] >= threshold:
                            new_trace.append(trace[j])
                            new_trace.append(trace[j + 1])
        if len(trace) > 1 and not jj == len(trace):
            # append the last event (not covered by the pairwise loop above)
            new_trace.append(trace[-1])
        if len(new_trace) > 0:
            # carry over the original trace attributes
            for attr in trace.attributes:
                new_trace.attributes[attr] = trace.attributes[attr]
            filtered_log.append(new_trace)
    return filtered_log
def filter_log_by_attributes_threshold(log, attributes, variants, vc, threshold, attribute_key=xes.DEFAULT_NAME_KEY):
    """
    Keep only attributes which number of occurrences is above the threshold (or they
    belong to the first variant)

    Parameters
    ----------
    log
        Log
    attributes
        Dictionary of attributes associated with their count
    variants
        (If specified) Dictionary with variant as the key and the list of traces as the value
    vc
        List of variant names along with their count
    threshold
        Cutting threshold (remove attributes which number of occurrences is below the threshold)
    attribute_key
        (If specified) Specify the activity key in the log (default concept:name)

    Returns
    ----------
    filtered_log
        Filtered log
    """
    filtered_log = EventLog()
    # attribute values occurring in the first trace of the most frequent variant
    fva = [x[attribute_key] for x in variants[vc[0][0]][0] if attribute_key in x]
    for trace in log:
        new_trace = Trace()
        for j in range(len(trace)):
            if attribute_key not in trace[j]:
                continue
            attribute_value = trace[j][attribute_key]
            if attribute_value not in attributes:
                continue
            # keep the event if its value belongs to the first variant (for the
            # default key) or occurs at least `threshold` times overall
            in_first_variant = attribute_value in fva and attribute_key == xes.DEFAULT_NAME_KEY
            if in_first_variant or attributes[attribute_value] >= threshold:
                new_trace.append(trace[j])
        if len(new_trace) > 0:
            for attr in trace.attributes:
                new_trace.attributes[attr] = trace.attributes[attr]
            filtered_log.append(new_trace)
    return filtered_log
def apply_from_variant(variant, dfg, sa, ea, parameters=None):
    """
    Turn a variant (delimited string, or an iterable of activities) into a
    Trace and delegate to apply_trace.
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters,
                                              xes_constants.DEFAULT_NAME_KEY)
    variant_delimiter = exec_utils.get_param_value(Parameters.PARAMETER_VARIANT_DELIMITER, parameters,
                                                   constants.DEFAULT_VARIANT_SEP)
    # strings are split on the delimiter; anything else is used as-is
    if type(variant) is str:
        activities = variant.split(variant_delimiter)
    else:
        activities = variant
    trace = Trace()
    for activity in activities:
        trace.append(Event({activity_key: activity}))
    return apply_trace(trace, dfg, sa, ea, parameters=parameters)
def detect_change_scope(align, subtree, trace, ret_tuple_as_trans_desc):
    """
    Return the change scope in the alignment

    Parameters
    ------------
    align
        alignment on the original process tree of one trace
    subtree
        the subtree that need to be detected
    trace
        the original trace
    ret_tuple_as_trans_desc
        True or False

    Returns
    -----------
    index_anchor
        `list()` Store the index of anchor in alignments e.g,[1, 3, 5, 9]
    """
    # index walks the alignment; e_index walks the original trace in lockstep
    # (model moves do not consume a trace event, so e_index skips them)
    scope, index, e_index = Scope(), 0, 0
    children = pt_mani_utils.non_none_leaves_labels(subtree)
    while True:
        if index == len(align):
            break
        if align_utils.is_node_start(align[index], subtree, ret_tuple_as_trans_desc):
            # anchor found: remember where the subtree execution starts
            scope.index_append(index)
            new_trace = Trace()
            # collect the trace events covered until the subtree's end marker
            while not align_utils.is_node_end(align[index], subtree, ret_tuple_as_trans_desc):
                # keep the event if it is a log move, or a synchronous move on a
                # label belonging to the subtree (i.e. not a pure model move)
                if align_utils.is_log_move(align[index], ret_tuple_as_trans_desc) or \
                        (align_utils.check_model_label_belong_to_subtree(align[index], children,
                                                                         ret_tuple_as_trans_desc) and
                         not align_utils.is_model_move(align[index], ret_tuple_as_trans_desc)):
                    new_trace.append(trace[e_index])
                # advance the trace pointer unless this alignment step is a model move
                e_index = e_index + 1 if not align_utils.is_model_move(align[index],
                                                                      ret_tuple_as_trans_desc) else e_index
                index += 1
            scope.traces_append(new_trace)
        # advance past the current alignment step (the end marker, or any step
        # outside the subtree), keeping e_index in sync
        e_index = e_index + 1 if not align_utils.is_model_move(align[index],
                                                              ret_tuple_as_trans_desc) else e_index
        index += 1
    return scope
def execute_script():
    """Build one 4-event trace and append 10000 deep copies of it to a log."""
    log = EventLog()
    template = Trace()
    for name in ("A", "B", "C", "D"):
        event = Event()
        event["concept:name"] = name
        template.append(event)
    for _ in range(10000):
        log.append(deepcopy(template))
    print(len(log))
def apply(tree, no, prob, has_empty_trace=False):
    """
    Returns non-fitting EventLog with fixed traces randomly created by the process tree.

    Parameters
    -----------
    tree
        Process Tree
    no
        Number of traces that will be in the event log
    prob
        Randomness of the traces
    has_empty_trace
        True, when the event log has empty trace

    Returns
    ------------
    EventLog
        Non-fitting event log
    """
    log, non_fit_traces = generate_log(tree, no), list()
    label_num = pt_mani_utils.non_none_leaves_number(tree)
    # materialize the traces into a mutable work list
    traces = list(map(lambda t: t, log))
    while len(traces) > 0:
        trace = traces.pop()
        non_fit_t = Trace(attributes=log.attributes)
        for event in trace:
            # with probability `prob`, perturb this event
            if random.random() < prob:
                index = random.randint(0, 2)
                if index == 1:  # add a new event after the original one
                    non_fit_t.append(event)
                    new_event = Event()
                    new_event[
                        xes.DEFAULT_NAME_KEY] = pt_gene_utils.get_cur_label(
                            label_num + 1)
                    non_fit_t.append(new_event)
                elif index == 2:  # replace with other event
                    new_event = Event()
                    new_event[
                        xes.DEFAULT_NAME_KEY] = pt_gene_utils.get_cur_label(
                            random.randint(1, label_num))
                    non_fit_t.append(new_event)
                # index == 0: the event is dropped (nothing appended)
            else:
                non_fit_t.append(event)
        if not has_empty_trace and len(non_fit_t) == 0:
            # empty traces are not allowed: generate a replacement and retry
            traces.append(generate_log(tree, 1)[0])
        else:
            non_fit_traces.append(non_fit_t)
    return EventLog(non_fit_traces,
                    attributes=log.attributes,
                    classifiers=log.classifiers,
                    omni_present=log.omni_present,
                    extensions=log.extensions)
def apply_from_variants_list(var_list, model, parameters=None):
    """
    Performs conformance checking using the log skeleton, applying it from a list of variants

    Parameters
    --------------
    var_list
        List of variants
    model
        Log skeleton model
    parameters
        Parameters

    Returns
    --------------
    conformance_dictio
        Dictionary containing, for each variant, the result of log skeleton checking
    """
    if parameters is None:
        parameters = {}
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes.DEFAULT_NAME_KEY)
    variant_delimiter = exec_utils.get_param_value(Parameters.PARAMETER_VARIANT_DELIMITER, parameters,
                                                   constants.DEFAULT_VARIANT_SEP)
    conformance_output = {}
    for couple in var_list:
        variant = couple[0]
        # build a one-off trace for the variant and check it against the model
        trace = Trace()
        for activity in variant.split(variant_delimiter):
            trace.append(Event({activity_key: activity}))
        conformance_output[variant] = apply_trace(trace, model, parameters=parameters)
    return conformance_output