def test_classifiers1documentation(self):
        """Smoke-test the classifier documentation examples.

        Mines receipt.xes twice with the Alpha Miner: once using the
        XES-declared "Activity classifier", once using a hand-built
        "customClassifier" event attribute (concept:name concatenated
        with lifecycle:transition).
        """
        import os
        from pm4py.log.importer import xes as xes_importer
        from pm4py.log.util import insert_classifier
        from pm4py.algo.alpha import factory as alpha_miner

        # os.path.join keeps the path portable (the original hard-coded
        # a Windows-only "\\" separator); imports above are deduplicated
        log_path = os.path.join("inputData", "receipt.xes")

        log = xes_importer.import_from_file_xes(log_path)

        # insert the XES classifier as an event attribute and mine with it
        log, activity_key = insert_classifier.insert_classifier_attribute(
            log, "Activity classifier")

        net, initial_marking, final_marking = alpha_miner.apply(
            log, activity_key=activity_key)

        # reload the log and mine again with a manually built classifier
        log = xes_importer.import_from_file_xes(log_path)

        for trace in log:
            for event in trace:
                event["customClassifier"] = event["concept:name"] + event[
                    "lifecycle:transition"]

        net, initial_marking, final_marking = alpha_miner.apply(
            log, activity_key="customClassifier")
# Example #2
# 0
 def obtainPetriNetThroughAlphaMiner(self, logName):
     """Import the log at *logName* (XES or CSV) and mine it with Alpha Miner.

     Returns a (traceLog, net, initial_marking, final_marking) tuple.
     """
     # decide the importer from the file extension; endswith() avoids the
     # false positives of the original substring test (e.g. "a.xes.csv")
     if logName.endswith(".xes"):
         traceLog = xes_importer.import_from_file_xes(logName)
     else:
         eventLog = csv_importer.import_from_path(logName)
         traceLog = log_transform.transform_event_log_to_trace_log(eventLog)
     net, marking, fmarking = alpha_factory.apply(traceLog)
     return traceLog, net, marking, fmarking
 def test_alphadoc1(self):
     """Smoke-test the Alpha Miner documentation example end-to-end:
     import a log, mine a Petri net, build its graphviz visualization."""
     import os
     from pm4py.log.importer import xes as xes_importer
     from pm4py.algo.alpha import factory as alpha_miner
     from pm4py.models.petri import visualize as pn_viz

     # portable path (original hard-coded a Windows-only "\\" separator)
     log = xes_importer.import_from_file_xes(
         os.path.join("inputData", "running-example.xes"))
     net, initial_marking, final_marking = alpha_miner.apply(log)
     gviz = pn_viz.graphviz_visualization(net,
                                          initial_marking=initial_marking,
                                          final_marking=final_marking)
# Example #4
# 0
	def test_docmeasures11(self):
		"""Compute fitness, precision, generalization, simplicity and the
		combined evaluation for Alpha and Inductive models of receipt.xes."""
		import os
		from pm4py.log.importer import xes as xes_importer
		from pm4py.algo.alpha import factory as alpha_miner
		from pm4py.algo.inductive import factory as inductive_miner
		from pm4py.evaluation.replay_fitness import factory as replay_factory
		from pm4py.evaluation.precision import factory as precision_factory
		from pm4py.evaluation.generalization import factory as generalization_factory
		from pm4py.evaluation.simplicity import factory as simplicity_factory
		from pm4py.evaluation import factory as evaluation_factory

		# portable path (original hard-coded a Windows-only "\\" separator)
		log = xes_importer.import_from_file_xes(
			os.path.join('inputData', 'receipt.xes'))

		# discover one model per algorithm
		alpha_petri, alpha_initial_marking, alpha_final_marking = alpha_miner.apply(log)
		inductive_petri, inductive_initial_marking, inductive_final_marking = inductive_miner.apply(log)

		# token-replay fitness of each model against the log
		fitness_alpha = replay_factory.apply(log, alpha_petri, alpha_initial_marking, alpha_final_marking)
		fitness_inductive = replay_factory.apply(log, inductive_petri, inductive_initial_marking, inductive_final_marking)

		# precision of each model
		precision_alpha = precision_factory.apply(log, alpha_petri, alpha_initial_marking, alpha_final_marking)
		precision_inductive = precision_factory.apply(log, inductive_petri, inductive_initial_marking, inductive_final_marking)

		# generalization of each model
		generalization_alpha = generalization_factory.apply(log, alpha_petri, alpha_initial_marking, alpha_final_marking)
		generalization_inductive = generalization_factory.apply(log, inductive_petri, inductive_initial_marking,
																inductive_final_marking)

		# simplicity depends only on the net structure, not on the log
		simplicity_alpha = simplicity_factory.apply(alpha_petri)
		simplicity_inductive = simplicity_factory.apply(inductive_petri)

		# combined evaluation (all measures at once)
		alpha_evaluation_result = evaluation_factory.apply(log, alpha_petri, alpha_initial_marking, alpha_final_marking)
		inductive_evaluation_result = evaluation_factory.apply(log, inductive_petri, inductive_initial_marking,
															   inductive_final_marking)
 def test_alignment_alpha(self):
     """Mine running-example.xes with Alpha and check every trace aligns
     perfectly (only synchronous moves or silent model moves)."""
     traceLog = xes_importer.import_from_file_xes(
         os.path.join(INPUT_DATA_DIR, "running-example.xes"))
     net, marking, fmarking = alpha_factory.apply(traceLog)
     # rebuild the final marking from sink places (no outgoing arcs)
     final_marking = petri.petrinet.Marking()
     for p in net.places:
         if not p.out_arcs:
             final_marking[p] = 1
     for trace in traceLog:
         cfResult = align.versions.state_equation_a_star.apply(
             trace, net, marking, final_marking)['alignment']
         # fit = every move is either synchronous (labels equal) or a
         # silent model move (">>", None); `is None` replaces the
         # non-idiomatic `== None` of the original
         isFit = True
         for couple in cfResult:
             if not (couple[0] == couple[1]
                     or couple[0] == ">>" and couple[1] is None):
                 isFit = False
         if not isFit:
             raise Exception("should be fit")
# Example #6
# 0
def get_process_schema():
    """
    Gets the process model in the specified format (e.g. SVG)

    Argument parameters:
        process -> (MANDATORY) Name of the process to consider
        decreasingfactor -> Filtering factor that is passed to the algorithms
        format -> Format of the diagram that is returned
        activitykey -> Activity key (if not specified, then concept:name)
        timestampkey -> Timestamp key (if not specified, then time:timestamp)
        decreasingfactor -> Decreasing factor for the filtering algorithm
        discoveryalgorithm -> Applied discovery algorithm (Alpha, Inductive)
        replayenabled -> Is replay enabled?
        replaymeasure -> Measure to show in the replay (frequency/performance)
    :return: the diagram encoded in base64, or "" on error/unknown process
    """

    # read the requested process name
    process = request.args.get('process', type=str)
    # read the activity key
    activity_key = request.args.get('activitykey', default=None, type=str)
    # read the timestamp key
    timestamp_key = request.args.get('timestampkey',
                                     default="time:timestamp",
                                     type=str)
    # read the decreasing factor
    decreasingFactor = request.args.get('decreasingfactor',
                                        default=0.6,
                                        type=float)
    # read the image format
    imageFormat = request.args.get('format', default='svg', type=str)
    # specification of process discovery algorithm
    discoveryAlgorithm = request.args.get('discoveryalgorithm',
                                          default='inductive',
                                          type=str)
    # replay enabled
    replayEnabled = request.args.get('replayenabled', default=True, type=bool)
    # replay measure
    replayMeasure = request.args.get('replaymeasure',
                                     default="frequency",
                                     type=str)

    # acquire the semaphore as we want to access the logs
    # without disturbing other requests
    shared.sem.acquire()
    # track ownership so the finally clause can release the semaphore exactly
    # once; the original code leaked it (deadlocking all later requests) when
    # an exception fired between acquire() and the in-branch release()
    sem_acquired = True

    try:
        # if the specified process is in memory, then proceed
        if process in shared.trace_logs:
            # retrieve the log
            original_log = shared.trace_logs[process]
            original_log, classifier_key = insert_classifier.search_and_insert_event_classifier_attribute(
                original_log)
            if activity_key is None:
                activity_key = classifier_key
            if activity_key is None:
                activity_key = "concept:name"
            # release the semaphore before the heavy mining work
            shared.sem.release()
            sem_acquired = False
            # apply automatically a filter
            log = auto_filter.apply_auto_filter(
                copy(original_log),
                decreasingFactor=decreasingFactor,
                activity_key=activity_key)
            # apply a process discovery algorithm
            if discoveryAlgorithm == "dfg":
                # gets the number of occurrences of the single attributes in the filtered log
                filtered_log_activities_count = activities_module.get_activities_from_log(
                    log)
                # gets an intermediate log that is the original log restricted to the list
                # of attributes that appears in the filtered log
                intermediate_log = activities_module.filter_log_by_specified_attributes(
                    original_log, filtered_log_activities_count)
                # gets the number of occurrences of the single attributes in the intermediate log
                activities_count = activities_module.get_activities_from_log(
                    intermediate_log)
                # calculate DFG of the filtered log and of the intermediate log
                dfg_filtered_log = dfg_factory.apply(log,
                                                     variant=replayMeasure)
                dfg_intermediate_log = dfg_factory.apply(intermediate_log,
                                                         variant=replayMeasure)
                # replace edges values in the filtered DFG from the one found in the intermediate log
                dfg_filtered_log = dfg_replacement.replace_values(
                    dfg_filtered_log, dfg_intermediate_log)
                # retrieve the diagram in base64
                diagram = dfg_visualize.return_diagram_as_base64(
                    activities_count,
                    dfg_filtered_log,
                    format=imageFormat,
                    measure=replayMeasure)
            else:
                if discoveryAlgorithm == "inductive":
                    net, initial_marking, final_marking = inductive_factory.apply(
                        log, activity_key=activity_key)
                elif discoveryAlgorithm == "alpha":
                    net, initial_marking, final_marking = alpha_factory.apply(
                        log, activity_key=activity_key)
                else:
                    # unknown algorithm: raise a clear error (the original
                    # fell through to a NameError on 'net'); the except
                    # below logs it and the endpoint still returns ""
                    raise ValueError("unsupported discovery algorithm: " +
                                     str(discoveryAlgorithm))
                if replayEnabled:
                    # do the replay
                    [traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] =\
                        token_replay.apply_log(original_log, net, initial_marking, final_marking, activity_key=activity_key)
                    element_statistics = performance_map.single_element_statistics(
                        original_log,
                        net,
                        initial_marking,
                        activatedTransitions,
                        activity_key=activity_key,
                        timestamp_key=timestamp_key)
                    aggregated_statistics = performance_map.aggregate_statistics(
                        element_statistics, measure=replayMeasure)
                    # return the diagram in base64
                    diagram = return_diagram_as_base64(
                        net,
                        format=imageFormat,
                        initial_marking=initial_marking,
                        final_marking=final_marking,
                        decorations=aggregated_statistics)
                else:
                    # return the diagram in base64
                    diagram = return_diagram_as_base64(
                        net,
                        format=imageFormat,
                        initial_marking=initial_marking,
                        final_marking=final_marking)
            return diagram
        else:
            # unknown process: release the semaphore and fall through to ""
            shared.sem.release()
            sem_acquired = False
    except Exception as e:
        # manage exception
        logging.error("exception calculating process schema: " + str(e))
        logging.error("traceback: " + traceback.format_exc())
    finally:
        # safety net: never exit while still holding the semaphore
        if sem_acquired:
            shared.sem.release()

    return ""
# Example #7
# 0
        # NOTE(review): fragment of a larger loop body — logName, logFolder,
        # pnmlFolder and logNamePrefix come from the enclosing scope, which
        # is outside this view; presumably one iteration per log file.
        print("\nelaborating " + logName)

        logPath = os.path.join(logFolder, logName)
        log = xes_importer.import_from_file_xes(logPath)

        # try to derive the activity key from an XES event classifier
        log, classifier_key = insert_classifier.search_and_insert_event_classifier_attribute(
            log)

        print("loaded log")

        # fall back to concept:name when the log declares no classifier
        activity_key = "concept:name"
        if not classifier_key is None:
            activity_key = classifier_key

        # mine the Alpha model, export it as PNML, and time the discovery
        t1 = time.time()
        alpha_model, alpha_initial_marking, alpha_final_marking = alpha.apply(
            log, activity_key=activity_key)
        pnml_exporter.export_petri_to_pnml(
            alpha_model, alpha_initial_marking,
            os.path.join(pnmlFolder, logNamePrefix + "_alpha.pnml"))
        t2 = time.time()
        print("time interlapsed for calculating Alpha Model", (t2 - t1))

        # mine the Inductive model, export it as PNML, and time the discovery
        t1 = time.time()
        inductive_model, inductive_initial_marking, inductive_final_marking = inductive.apply(
            log, activity_key=activity_key)
        pnml_exporter.export_petri_to_pnml(
            inductive_model, inductive_initial_marking,
            os.path.join(pnmlFolder, logNamePrefix + "_inductive.pnml"))
        t2 = time.time()
        print("time interlapsed for calculating Inductive Model", (t2 - t1))
import os, sys, inspect
currentdir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from pm4py.algo.alpha import factory as alpha_factory
from pm4py.log.importer import xes as xes_importer
from pm4py.models.petri import visualize as pn_viz
from pm4py.models import petri
from pm4py.algo.tokenreplay.versions import token_replay
import time

# Demo script: mine running-example.xes with Alpha, visualize the net,
# then token-replay (up to) the first 100 traces.
# portable path (original hard-coded Windows-only "\\" separators);
# os is imported at the top of this file
log = xes_importer.import_from_file_xes(
    os.path.join('..', 'tests', 'inputData', 'running-example.xes'))
#log = xes_importer.import_from_path_xes('a32f0n00.xes')
net, marking, final_marking = alpha_factory.apply(log)
for place in marking:
    print("initial marking " + place.name)
# rebuild the final marking from sink places (no outgoing arcs),
# overriding the one returned by the Alpha factory
final_marking = petri.petrinet.Marking()
for p in net.places:
    if not p.out_arcs:
        final_marking[p] = 1
for place in final_marking:
    print("final marking " + place.name)
gviz = pn_viz.graphviz_visualization(net,
                                     initial_marking=marking,
                                     final_marking=final_marking)
gviz.view()
# cap the log at 100 traces to keep the replay demo fast
log = log[0:min(100, len(log))]
time0 = time.time()
print("started token replay")