Ejemplo n.º 1
0
def execute_script():
    """Round-trip every XES log found in ``xesinput``.

    For each log: import it, export it to ``xescert_exportlogs``, derive an
    activity classifier key (falling back to ``concept:name``), render a
    frequency DFG into ``xescert_images`` and finally re-import the exported
    file to check it is readable.
    """
    log_input_directory = "xesinput"
    all_logs_names = os.listdir(log_input_directory)
    all_logs_names = [log for log in all_logs_names if ".xe" in log]

    for logName in all_logs_names:
        # os.path.join keeps paths portable (original used "\\" literals,
        # which only work on Windows)
        log_path = os.path.join(log_input_directory, logName)
        log = xes_importer.apply(log_path)
        print("\n\n")
        print("log loaded")
        print("Number of traces - ", len(log))
        event_log = log_conversion.apply(
            log, variant=log_conversion.TO_EVENT_STREAM)
        print("Number of events - ", len(event_log))
        print("Classifiers ", log.classifiers)
        exp_log_name = os.path.join("xescert_exportlogs", "exp_" + logName)
        print("exporting log", exp_log_name)
        xes_exporter.apply(log, exp_log_name)
        print("exported log", exp_log_name)

        log, classifier_attr_key = insert_classifier.search_act_class_attr(log)

        classifiers = list(log.classifiers.keys())
        if classifier_attr_key is None and classifiers:
            try:
                print(classifiers)
                log, classifier_attr_key = insert_classifier.insert_activity_classifier_attribute(
                    log, classifiers[0])
                print(classifier_attr_key)
            # narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # are not swallowed
            except Exception:
                print("exception in handling classifier")

        if classifier_attr_key is None:
            classifier_attr_key = "concept:name"

        if len(event_log) > 0 and classifier_attr_key in event_log[0]:
            parameters = {
                constants.PARAMETER_CONSTANT_ACTIVITY_KEY: classifier_attr_key
            }

            dfg = dfg_algorithm.apply(log, parameters=parameters)
            gviz = dfg_vis.apply(dfg,
                                 log=log,
                                 variant="frequency",
                                 parameters=parameters)
            # dfg_vis.view(gviz)

            dfg_vis.save(gviz,
                         os.path.join("xescert_images",
                                      logName.replace("xes", "png")))

        print("Reimporting log file just exported - ", exp_log_name)

        log = xes_importer.apply(exp_log_name)
        print("log loaded", exp_log_name)
        print("Number of traces - ", len(log))
        event_log = log_conversion.apply(
            log, variant=log_conversion.TO_EVENT_STREAM)
        print("Number of events - ", len(event_log))
        print("Classifiers ", log.classifiers)
Ejemplo n.º 2
0
def _encode_log_si_ts(log, activity_names, all_ts):
    """Encode each case of *log* as [case id, si+timestamp features..., label].

    Shared by the training and test passes of main_ts (the original
    duplicated this loop verbatim for both logs).
    """
    encoded = []
    for case in log:
        row = [case.attributes["concept:name"]]
        enc = encode_trace_simple_index_with_timestamp(
            case, activity_names, all_ts)
        row.extend(np.reshape(enc, -1))
        row.append(case.attributes["label"])
        encoded.append(row)
    return encoded


def main_ts():
    """Build simple-index + timestamp encodings of the BPI 2011 logs.

    Returns:
        (df_train, df_test): encoded pandas DataFrames, also written to
        ``train_si_ts.csv`` and ``test_si_ts.csv``.
    """
    # specify path of training and test sets
    train_path = '../data/BPIChallenge2011_training_0-80.xes'
    test_path = '../data/BPIChallenge2011_testing_80-100.xes'

    # import training and test logs
    train_log = xes_importer.apply(train_path)
    test_log = xes_importer.apply(test_path)

    # unique activities over the whole dataset, in first-occurrence order.
    # dict.fromkeys is O(n); the original sorted(set(x), key=x.index)
    # produced the same order in O(n^2).
    activity_names = list(dict.fromkeys(
        get_activity_names(train_log) + get_activity_names(test_log)))

    # unique timestamps over the whole dataset, first-occurrence order
    all_ts = list(dict.fromkeys(get_ts_list(train_log) + get_ts_list(test_log)))

    print('Number of features: %d' % len(activity_names))
    print('Feature list: ', activity_names)
    print("\n")

    # obtain timestamp encoding for each case in both logs
    enc_si_ts_train = _encode_log_si_ts(train_log, activity_names, all_ts)
    enc_si_ts_test = _encode_log_si_ts(test_log, activity_names, all_ts)

    print("First case with timestamp informations: \n", enc_si_ts_train[0])

    # create training dataframe with timestamp encoding
    df_train = pd.DataFrame(data=enc_si_ts_train, columns=compute_columns())
    print(df_train.head())
    df_train.to_csv('train_si_ts.csv')

    # create test dataframe with timestamp encoding
    df_test = pd.DataFrame(data=enc_si_ts_test, columns=compute_columns())
    print(df_test.head())
    df_test.to_csv('test_si_ts.csv')

    return df_train, df_test
Ejemplo n.º 3
0
 def test_importing_xes(self):
     """Import the running example with both XES importer variants."""
     from pm4py.objects.log.importer.xes import importer as xes_importer
     source = os.path.join("input_data", "running-example.xes")
     for importer_variant in (xes_importer.Variants.ITERPARSE,
                              xes_importer.Variants.LINE_BY_LINE):
         log = xes_importer.apply(source, variant=importer_variant)
Ejemplo n.º 4
0
def _encode_log_si(log, activity_names):
    """Encode each case of *log* as [case id, simple-index features..., label].

    Shared by the training and test passes of main_si (the original
    duplicated this loop verbatim for both logs).
    """
    rows = []
    for case in log:
        row = [case.attributes["concept:name"]]
        row.extend(encode_trace_simple_index(case, activity_names))
        row.append(case.attributes["label"])
        rows.append(row)
    return rows


def main_si():
    """Build simple-index encodings of the BPI 2011 train/test logs.

    Returns:
        (train_df, test_df): encoded pandas DataFrames, also written to
        ``train_si.csv`` and ``test_si.csv``.
    """
    # ----------->specify path of training and test sets
    train_path = '../data/BPIChallenge2011_training_0-80.xes'
    test_path = '../data/BPIChallenge2011_testing_80-100.xes'

    # import training and test logs
    train_log = xes_importer.apply(train_path)
    test_log = xes_importer.apply(test_path)

    # unique activities over the whole dataset, in first-occurrence order.
    # dict.fromkeys is O(n); the original sorted(set(x), key=x.index)
    # produced the same order in O(n^2).
    activity_names = list(dict.fromkeys(
        get_activity_names(train_log) + get_activity_names(test_log)))

    # obtain simple index encoding for each case in both logs
    train_encoded_log = _encode_log_si(train_log, activity_names)
    test_encoded_log = _encode_log_si(test_log, activity_names)

    print('Number of features: %d' % len(activity_names))
    print('Feature list: ', activity_names)
    print("\n")

    # check if all the cases have only 20 events
    print(train_encoded_log[:4])
    print(len(train_encoded_log))
    for encoded_row in train_encoded_log:
        print(len(encoded_row))

    # create the dataframe for training set
    train_df = pd.DataFrame(data=train_encoded_log, columns=compute_columns())
    print(train_df.head())
    train_df.to_csv("train_si.csv")

    # create the dataframe for test set
    test_df = pd.DataFrame(data=test_encoded_log, columns=compute_columns())
    print(test_df.head())
    test_df.to_csv("test_si.csv")

    return train_df, test_df
Ejemplo n.º 5
0
 def test_importExportXEStoXES(self):
     """Export the running example and verify the re-import keeps all traces."""
     # to avoid static method warnings in tests,
     # that by construction of the unittest package have to be expressed in such way
     self.dummy_variable = "dummy_value"
     source_path = os.path.join(INPUT_DATA_DIR, "running-example.xes")
     exported_path = os.path.join(OUTPUT_DATA_DIR,
                                  "running-example-exported.xes")
     log = xes_importer.apply(source_path)
     xes_exporter.apply(log, exported_path)
     log_imported_after_export = xes_importer.apply(exported_path)
     self.assertEqual(len(log), len(log_imported_after_export))
     os.remove(exported_path)
Ejemplo n.º 6
0
def load_data():
    """Load and return example data.

    Returns:
        log (List[List[str]]): example log.

    """
    # resolve <repo>/data relative to this file's own location
    this_file = os.path.abspath(__file__)
    code_dir = os.path.dirname(os.path.dirname(this_file))
    data_dir = os.path.join(code_dir, "data")

    # load the first 2000 traces of the example log
    variant = xes_importer.Variants.ITERPARSE
    parameters = {variant.value.Parameters.MAX_TRACES: 2000}
    log = xes_importer.apply(
        os.path.join(data_dir, "BPI_Challenge_2012.xes"),
        variant=variant,
        parameters=parameters,
    )

    # flatten each trace to its sequence of activity names
    return [[event["concept:name"] for event in trace] for trace in log]
Ejemplo n.º 7
0
 def test_log_skeleton(self):
     """Discover a log skeleton and run conformance on the same log."""
     from pm4py.algo.discovery.log_skeleton import algorithm as lsk_discovery
     from pm4py.algo.conformance.log_skeleton import algorithm as lsk_conformance
     log = xes_importer.apply(
         os.path.join("input_data", "running-example.xes"))
     model = lsk_discovery.apply(log)
     conf = lsk_conformance.apply(log, model)
Ejemplo n.º 8
0
    def import_log(self, complete_filename, filename):
        """Import an event log and compute basic statistics on it.

        Args:
            complete_filename: full path of the log file.
            filename: display name of the log file (used in the summary line).
        """
        self.current_log = LogInfo(complete_filename, filename)
        if '.xes' in complete_filename:
            # Assume that it is a XES file
            variant = xes_importer.Variants.ITERPARSE
            parameters = {variant.value.Parameters.TIMESTAMP_SORT: True}
            self.current_log.log = xes_importer.apply(complete_filename,
                                                      variant=variant,
                                                      parameters=parameters)
            self.current_log.first_traces = log_converter.apply(
                EventLog(self.current_log.log[0:self.MAX_TRACES]),
                variant=log_converter.Variants.TO_DATA_FRAME)

            self.current_log.median_case_duration = case_statistics.get_median_caseduration(
                self.current_log.log,
                parameters={
                    case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"
                })
            self.current_log.median_case_duration_in_hours = self.current_log.median_case_duration / 60 / 60
            self.current_log.total_of_cases = len(self.current_log.log)
            # count the events across all traces; the original used
            # len(self.current_log.log), which is the number of cases again
            self.current_log.total_of_events = sum(
                len(trace) for trace in self.current_log.log)
            print(
                f'Log [{filename}] - total of cases [{self.current_log.total_of_cases}] - median case duration '
                f'[{self.current_log.median_case_duration / 60 / 60}hrs]')
Ejemplo n.º 9
0
def compute_alignment(xes_file, pnml_file):
    """
    Compute alignments for event log with given model.
    Save alignments results and all metrics during computation in a csv file.

    Parameters
    ----------
    xes_file : .xes  file
               The xes file of the event log
    pnml_file : .pnml file
                The petri net model
    """

    event_log = xes_importer.apply(xes_file)
    model_net, model_im, model_fm = petri_importer.apply(pnml_file)
    # get log name and model name from the actual function arguments
    # (the original referenced undefined names log_path/model_path -> NameError)
    log_name = Path(xes_file).stem
    model_name = Path(pnml_file).stem
    # define the column name in result file
    field_names = [
        'case_id', 'total', 'heuristic', 'queue', 'states', 'arcs', 'sum',
        'num_insert', 'num_removal', 'num_update', 'simple_lp', 'complex_lp',
        'restart', 'split_num', 'trace_length', 'alignment_length', 'cost',
        'alignment'
    ]
    rows = []
    trace_variant_lst = {}
    # iterate every case in log
    for case_index in tqdm(range(len(event_log))):
        events_lst = []
        for event in event_log[case_index]:
            events_lst.append(event['concept:name'])
        trace_str = ''.join(events_lst)
        # if the sequence of events is met for the first time
        if trace_str not in trace_variant_lst:
            # construct synchronous product net
            sync_product = SynchronousProduct(event_log[case_index], model_net,
                                              model_im, model_fm)
            initial_marking, final_marking, cost_function, incidence_matrix, trace_sync, trace_log \
                = sync_product.construct_sync_product(event_log[case_index], model_net, model_im, model_fm)
            # compute alignment with split-pint-based algorithm + caching strategy + reopen method
            start_time = timeit.default_timer()
            ali_with_split_astar = AlignmentWithCacheReopenAstar(
                initial_marking, final_marking, cost_function,
                incidence_matrix, trace_sync, trace_log)
            alignment_result = ali_with_split_astar.search()
            # get the total computation time
            alignment_result['total'] = timeit.default_timer() - start_time
            alignment_result['case_id'] = event_log[case_index].attributes[
                'concept:name']
            trace_variant_lst[trace_str] = alignment_result
        else:
            alignment_result = trace_variant_lst[trace_str]
            alignment_result['case_id'] = event_log[case_index].attributes[
                'concept:name']
        # store a copy: the cached dict gets its case_id overwritten for
        # later cases sharing the same variant. Collecting rows and building
        # the frame once also replaces DataFrame.append (removed in pandas 2).
        rows.append(dict(alignment_result))
    df = pd.DataFrame(rows, columns=field_names)
    # The name of result csv file is of the form: 'log_name + model_name + algorithm type.csv'
    df.to_csv('../results/log=' + log_name + '&model=' + model_name +
              '&algorithm=cache_reopen_astar' + '.csv',
              index=False)
Ejemplo n.º 10
0
 def test_concurrent_activities_xes(self):
     """Compute concurrent activities statistics on an interval event log."""
     from pm4py.statistics.concurrent_activities.log import get
     interval_log = xes_importer.apply(
         os.path.join("input_data", "interval_event_log.xes"))
     conc_act = get.apply(
         interval_log,
         parameters={get.Parameters.START_TIMESTAMP_KEY: "start_timestamp"})
Ejemplo n.º 11
0
def read_xes(filename, p=1, n_DPI=False):
    '''
    Read an event log in XES format.

    filename = filename in xes format
    p = percentage of traces % to exploit from the log
    n_DPI = if set, keep only the first n_DPI variants

    Returns the (possibly filtered) log object and the list of variants.
    '''
    log = xes_importer.apply(filename)
    if p < 1:
        log = variants_filter.filter_log_variants_percentage(log, percentage=p)
    # variants = variants_filter.get_variants(log)
    # collect the variant identifiers reported by the statistics module
    VARIANT = [v['variant']
               for v in case_statistics.get_variant_statistics(log)]
    # VARIANT = list(variants.keys())

    if n_DPI:
        # restrict the log to the first n_DPI variants
        VARIANT = VARIANT[:n_DPI]
        log = variants_filter.apply(log, VARIANT)
    total_events = sum(len(trace) for trace in log)
    print('=' * 100, '\n=READ THE XES FILE\n'
          'length of log', len(log), '\nlength of event', total_events,
          '\nnumber of variants : {}'.format(len(VARIANT)))
    return log, VARIANT
Ejemplo n.º 12
0
def read_xes(path: str) -> obj.EventStream:
    """
    Reads the .xes file, extracts the events and preprocesses data for CDESF.

    Parameters
    --------------------------------------
    path: str
        File path and name

    Returns
    --------------------------------------
    An event stream
    """
    # import the log with traces sorted by timestamp
    importer_variant = xes_importer.Variants.ITERPARSE
    log_xes = xes_importer.apply(
        path,
        variant=importer_variant,
        parameters={importer_variant.value.Parameters.TIMESTAMP_SORT: True})

    # flatten the log into an event stream keyed on the standard XES attributes
    conversion_parameters = {
        constants.PARAMETER_CONSTANT_CASEID_KEY: "case:concept:name",
        constants.PARAMETER_CONSTANT_ACTIVITY_KEY: "concept:name",
        constants.PARAMETER_CONSTANT_TIMESTAMP_KEY: "time:timestamp",
    }
    return log_converter.apply(
        log_xes,
        variant=log_converter.Variants.TO_EVENT_STREAM,
        parameters=conversion_parameters)
Ejemplo n.º 13
0
def execute_script():
    """Filter the running example down to traces with more than five events."""
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_importer.apply(log_path)
    print(len(log))
    log2 = func.filter_(lambda trace: len(trace) > 5, log)
    print(type(log2))
    print(len(log2))
Ejemplo n.º 14
0
def execute_script():
    """Mine a process tree and render it with both visualizer APIs."""
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    tree = inductive_miner.apply_tree(importer.apply(log_path))
    gviz1 = pt_vis_factory.apply(tree, parameters={"format": "svg"})
    # pt_vis_factory.view(gviz1)
    fmt_key = pt_visualizer.Variants.WO_DECORATION.value.Parameters.FORMAT
    gviz2 = pt_visualizer.apply(tree, parameters={fmt_key: "svg"})
    pt_visualizer.view(gviz2)
Ejemplo n.º 15
0
def evaluate_logwithmodel(logpath):
    """
    Evaluate a model mined from the given event log with pm4py measures.

    Parameters:
        logpath (str): Path of event log

    Returns:
        fitness (float): Fitness value measured using pm4py
        precision (float): Precision value measured using pm4py
        simplicity (float): Simplicity value measured using pm4py
        generalization (float): Generalization value measured using pm4py
    """
    xes_log = importer.apply(logpath)
    net, initial_marking, final_marking = inductive_miner.apply(xes_log)

    # token-based fitness and ETConformance precision
    fitness = replay_fitness_evaluator.apply(
        xes_log, net, initial_marking, final_marking,
        variant=replay_fitness_evaluator.Variants.TOKEN_BASED)
    prec = precision_evaluator.apply(
        xes_log, net, initial_marking, final_marking,
        variant=precision_evaluator.Variants.ETCONFORMANCE_TOKEN)
    # structural and generalization measures on the mined net
    simp = simplicity_evaluator.apply(net)
    gen = generalization_evaluator.apply(xes_log, net, initial_marking,
                                         final_marking)

    return (round(fitness['log_fitness'], 3), round(prec, 3),
            round(simp, 3), round(gen, 3))
Ejemplo n.º 16
0
 def test_playout_tree_basic(self):
     """Play out a process tree discovered by the inductive miner."""
     from pm4py.algo.discovery.inductive import algorithm as inductive_miner
     from pm4py.simulation.tree_playout import algorithm as tree_playout
     log = xes_importer.apply(
         os.path.join("input_data", "running-example.xes"))
     tree = inductive_miner.apply_tree(log)
     new_log = tree_playout.apply(tree)
Ejemplo n.º 17
0
 def test_sojourn_time_xes(self):
     """Compute sojourn times on an interval event log."""
     from pm4py.statistics.sojourn_time.log import get
     interval_log = xes_importer.apply(
         os.path.join("input_data", "interval_event_log.xes"))
     soj_time = get.apply(
         interval_log,
         parameters={get.Parameters.START_TIMESTAMP_KEY: "start_timestamp"})
Ejemplo n.º 18
0
 def test_importingPetriLogAlignment(self):
     """Align every trace of the running example against its Petri net and
     require every alignment to be perfectly fitting."""
     # to avoid static method warnings in tests,
     # that by construction of the unittest package have to be expressed in such way
     self.dummy_variable = "dummy_value"
     imported_petri1, marking1, fmarking1 = petri_importer.apply(
         os.path.join(INPUT_DATA_DIR, "running-example.pnml"))
     log = xes_importer.apply(
         os.path.join(INPUT_DATA_DIR, "running-example.xes"))
     final_marking = petri.petrinet.Marking()
     # build the final marking from the sink places (no outgoing arcs)
     for p in imported_petri1.places:
         if not p.out_arcs:
             final_marking[p] = 1
     for trace in log:
         cf_result = align_alg.apply(
             trace,
             imported_petri1,
             marking1,
             final_marking,
             variant=align_alg.VERSION_DIJKSTRA_NO_HEURISTICS)['alignment']
         is_fit = True
         # a move is fitting if it is synchronous (log label == model label)
         # or a silent model move (">>" on the log side, None model label)
         for couple in cf_result:
             if not (couple[0] == couple[1]
                     or couple[0] == ">>" and couple[1] is None):
                 is_fit = False
         if not is_fit:
             raise Exception("should be fit")
Ejemplo n.º 19
0
 def test_efg_xes(self):
     """Compute the eventually-follows graph on an interval event log."""
     from pm4py.statistics.eventually_follows.log import get
     interval_log = xes_importer.apply(
         os.path.join("input_data", "interval_event_log.xes"))
     efg = get.apply(
         interval_log,
         parameters={get.Parameters.START_TIMESTAMP_KEY: "start_timestamp"})
Ejemplo n.º 20
0
 def test_48(self):
     """Run the classic alpha miner on the running example."""
     import os
     from pm4py.objects.log.importer.xes import importer as xes_importer
     from pm4py.algo.discovery.alpha import algorithm as alpha_miner
     activity_key = alpha_miner.Variants.ALPHA_VERSION_CLASSIC.value.Parameters.ACTIVITY_KEY
     log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
     net, initial_marking, final_marking = alpha_miner.apply(
         log, parameters={activity_key: "concept:name"})
def execute_script():
    """Discover a model, align the log against it and show the visualization."""
    log_path = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = importer.apply(log_path)
    net, im, fm = inductive_miner.apply(log)
    aligned_traces = alignments.apply(log, net, im, fm)
    fmt_key = visualizer.Variants.CLASSIC.value.Parameters.FORMAT
    gviz = visualizer.apply(log, aligned_traces, parameters={fmt_key: "svg"})
    visualizer.view(gviz)
Ejemplo n.º 22
0
    def test_58(self):
        """Align the running example with custom move costs.

        Visible transitions get move-on-model cost 1000 and sync cost 0;
        hidden transitions get move-on-model cost 1.
        """
        import os
        from pm4py.objects.log.importer.xes import importer as xes_importer
        from pm4py.algo.discovery.inductive import algorithm as inductive_miner

        log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))

        net, initial_marking, final_marking = inductive_miner.apply(log)

        from pm4py.algo.conformance.alignments import algorithm as alignments

        model_cost_function = dict()
        sync_cost_function = dict()
        for t in net.transitions:
            # if the label is not None, we have a visible transition
            if t.label is not None:
                # associate cost 1000 to each move-on-model associated to visible transitions
                model_cost_function[t] = 1000
                # associate cost 0 to each move-on-log
                sync_cost_function[t] = 0
            else:
                # associate cost 1 to each move-on-model associated to hidden transitions
                model_cost_function[t] = 1

        parameters = {}
        parameters[
            alignments.Variants.VERSION_STATE_EQUATION_A_STAR.value.Parameters.PARAM_MODEL_COST_FUNCTION] = model_cost_function
        parameters[
            alignments.Variants.VERSION_STATE_EQUATION_A_STAR.value.Parameters.PARAM_SYNC_COST_FUNCTION] = sync_cost_function

        # store the result under a distinct name: the original assigned it to
        # "alignments", shadowing the module imported above
        aligned_traces = alignments.apply_log(log, net, initial_marking, final_marking, parameters=parameters)
Ejemplo n.º 23
0
 def test_playout(self):
     """Play out a Petri net discovered by the alpha miner."""
     from pm4py.algo.discovery.alpha import algorithm as alpha_miner
     from pm4py.simulation.playout import simulator
     log = xes_importer.apply(
         os.path.join("input_data", "running-example.xes"))
     net, im, fm = alpha_miner.apply(log)
     log2 = simulator.apply(net, im, fm)
Ejemplo n.º 24
0
    def test_61(self):
        """Learn and visualize a decision tree over a log feature matrix."""
        import os
        from pm4py.objects.log.importer.xes import importer as xes_importer
        from pm4py.objects.log.util import get_log_representation
        from pm4py.objects.log.util import get_class_representation
        from sklearn import tree
        from pm4py.visualization.decisiontree import visualizer as dectree_visualizer

        log = xes_importer.apply(os.path.join("input_data", "roadtraffic50traces.xes"))

        # explicit feature selection: one string event attribute, one numeric
        str_trace_attributes = []
        str_event_attributes = ["concept:name"]
        num_trace_attributes = []
        num_event_attributes = ["amount"]
        data, feature_names = get_log_representation.get_representation(
            log, str_trace_attributes, str_event_attributes,
            num_trace_attributes, num_event_attributes)

        # then overwrite with the default representation
        data, feature_names = get_log_representation.get_default_representation(log)

        # class labels from trace duration (threshold 2 * 8640000 seconds)
        target, classes = get_class_representation.get_class_representation_by_trace_duration(log, 2 * 8640000)

        clf = tree.DecisionTreeClassifier()
        clf.fit(data, target)

        gviz = dectree_visualizer.apply(clf, feature_names, classes)
Ejemplo n.º 25
0
 def test_alignment(self):
     """Compute alignments, alignment-based fitness and precision on the
     running example."""
     from pm4py.algo.discovery.alpha import algorithm as alpha_miner
     from pm4py.algo.conformance.alignments import algorithm as alignments
     from pm4py.evaluation.replay_fitness import evaluator as rp_fitness_evaluator
     from pm4py.evaluation.precision import evaluator as precision_evaluator
     log = xes_importer.apply(
         os.path.join("input_data", "running-example.xes"))
     net, im, fm = alpha_miner.apply(log)
     # alignments with both available variants
     aligned_traces = alignments.apply(
         log, net, im, fm,
         variant=alignments.Variants.VERSION_STATE_EQUATION_A_STAR)
     aligned_traces = alignments.apply(
         log, net, im, fm,
         variant=alignments.Variants.VERSION_DIJKSTRA_NO_HEURISTICS)
     # alignment-based fitness
     fitness = rp_fitness_evaluator.apply(
         log, net, im, fm,
         variant=rp_fitness_evaluator.Variants.ALIGNMENT_BASED)
     evaluation = rp_fitness_evaluator.evaluate(
         aligned_traces,
         variant=rp_fitness_evaluator.Variants.ALIGNMENT_BASED)
     # alignment-based precision; the original passed the fitness
     # evaluator's Variants enum to the precision evaluator
     precision = precision_evaluator.apply(
         log, net, im, fm,
         variant=precision_evaluator.Variants.ALIGN_ETCONFORMANCE)
Ejemplo n.º 26
0
def execute_script():
    """Run two Montecarlo simulations over a DFG-derived Petri net: one with
    the arrival rate inferred from the log, one with a user-specified rate."""
    log = xes_importer.apply(
        os.path.join("..", "tests", "input_data", "running-example.xes"))
    frequency_dfg = dfg_miner.apply(log, variant=dfg_miner.Variants.FREQUENCY)
    net, im, fm = dfg_conv.apply(frequency_dfg)
    # perform the Montecarlo simulation with the arrival rate inferred by the log (the simulation lasts 5 secs)
    sim_params = montecarlo_simulation.Variants.PETRI_SEMAPH_FIFO.value.Parameters
    parameters = {
        sim_params.TOKEN_REPLAY_VARIANT: Variants.BACKWARDS,
        sim_params.PARAM_ENABLE_DIAGNOSTICS: False,
        sim_params.PARAM_MAX_THREAD_EXECUTION_TIME: 5,
    }
    log, res = montecarlo_simulation.apply(log,
                                           net,
                                           im,
                                           fm,
                                           parameters=parameters)
    print(
        "\n(Montecarlo - Petri net) case arrival ratio inferred from the log")
    print(res["median_cases_ex_time"])
    print(res["total_cases_time"])
    # perform the Montecarlo simulation with the arrival rate specified (the simulation lasts 5 secs)
    parameters[sim_params.PARAM_CASE_ARRIVAL_RATIO] = 60
    log, res = montecarlo_simulation.apply(log,
                                           net,
                                           im,
                                           fm,
                                           parameters=parameters)
    print(
        "\n(Montecarlo - Petri net) case arrival ratio specified by the user")
    print(res["median_cases_ex_time"])
    print(res["total_cases_time"])
Ejemplo n.º 27
0
 def test_tokenreplay(self):
     """Token replay plus token-based fitness, precision and generalization."""
     from pm4py.algo.discovery.alpha import algorithm as alpha_miner
     from pm4py.algo.conformance.tokenreplay import algorithm as token_replay
     from pm4py.evaluation.replay_fitness import evaluator as rp_fitness_evaluator
     from pm4py.evaluation.precision import evaluator as precision_evaluator
     from pm4py.evaluation.generalization import evaluator as generalization_evaluation
     log = xes_importer.apply(
         os.path.join("input_data", "running-example.xes"))
     net, im, fm = alpha_miner.apply(log)
     # replay with both variants; the backwards result is kept
     replayed_traces = token_replay.apply(
         log, net, im, fm, variant=token_replay.Variants.TOKEN_REPLAY)
     replayed_traces = token_replay.apply(
         log, net, im, fm, variant=token_replay.Variants.BACKWARDS)
     fitness = rp_fitness_evaluator.apply(
         log, net, im, fm,
         variant=rp_fitness_evaluator.Variants.TOKEN_BASED)
     evaluation = rp_fitness_evaluator.evaluate(
         replayed_traces, variant=rp_fitness_evaluator.Variants.TOKEN_BASED)
     precision = precision_evaluator.apply(
         log, net, im, fm,
         variant=precision_evaluator.Variants.ETCONFORMANCE_TOKEN)
     generalization = generalization_evaluation.apply(
         log, net, im, fm,
         variant=generalization_evaluation.Variants.GENERALIZATION_TOKEN)
Ejemplo n.º 28
0
def execute_script():
    """Mine the receipt log and render the Petri net with performance and
    frequency decorations."""
    # import the log
    log_path = os.path.join("..", "tests", "input_data", "receipt.xes")
    log = xes_importer.apply(log_path)
    # apply Inductive Miner
    net, initial_marking, final_marking = inductive_miner.apply(log)

    def render(variant, parameters_viz):
        # shared visualization step for both decorations
        gviz = pn_vis.apply(net,
                            initial_marking,
                            final_marking,
                            log=log,
                            variant=variant,
                            parameters=parameters_viz)
        pn_vis.view(gviz)

    # performance decoration with mean aggregation
    render(pn_vis.Variants.PERFORMANCE, {
        pn_vis.Variants.PERFORMANCE.value.Parameters.AGGREGATION_MEASURE:
        "mean",
        pn_vis.Variants.PERFORMANCE.value.Parameters.FORMAT: "svg"
    })
    # do another visualization with frequency
    render(pn_vis.Variants.FREQUENCY,
           {pn_vis.Variants.FREQUENCY.value.Parameters.FORMAT: "svg"})
Ejemplo n.º 29
0
 def test_alignment(self):
     """Alignments via state-equation A* and Dijkstra on the running example."""
     from pm4py.algo.discovery.alpha import algorithm as alpha_miner
     from pm4py.algo.conformance.alignments import algorithm as alignments
     log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
     net, im, fm = alpha_miner.apply(log)
     for align_variant in (alignments.Variants.VERSION_STATE_EQUATION_A_STAR,
                           alignments.Variants.VERSION_DIJKSTRA_NO_HEURISTICS):
         aligned_traces = alignments.apply(log, net, im, fm, variant=align_variant)
Ejemplo n.º 30
0
 def test_performance_spectrum(self):
     """Performance spectrum computed on both a log object and a dataframe."""
     from pm4py.algo.discovery.performance_spectrum import algorithm as pspectrum
     activities = ["register request", "decide"]
     log = xes_importer.apply(os.path.join("input_data", "running-example.xes"))
     ps = pspectrum.apply(log, activities)
     df = pd.read_csv(os.path.join("input_data", "running-example.csv"))
     df = dataframe_utils.convert_timestamp_columns_in_df(df)
     ps = pspectrum.apply(df, activities)