def apply(log, petri_net, initial_marking, final_marking, parameters=None):
    """
    Apply token replay fitness evaluation

    Parameters
    -----------
    log
        Trace log
    petri_net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters

    Returns
    -----------
    dictionary
        Containing two keys (percFitTraces and averageFitness)
    """

    if parameters is None:
        parameters = {}
    activity_key = parameters[PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY
    [traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] =\
        token_replay.apply(log, petri_net, initial_marking, final_marking, activity_key=activity_key)

    return get_fitness(traceIsFit, traceFitnessValue)
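
# Illustrative usage sketch, assuming an event log `log` and a Petri net `net`
# with its `initial_marking`/`final_marking` are already available (e.g. obtained
# from an importer and a discovery algorithm):
#
#     fitness = apply(log, net, initial_marking, final_marking)
#     print("fit traces (%):", fitness["percFitTraces"])
#     print("average fitness:", fitness["averageFitness"])
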
def get_decorations(log, net, initial_marking, final_marking, parameters=None, measure="frequency"):
    """
    Calculate decorations in order to annotate the Petri net

    Parameters
    -----------
    log
        Trace log
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters associated with the algorithm
    measure
        Measure to represent on the process model (frequency/performance)

    Returns
    ------------
    decorations
        Decorations to put on the process model
    """
    if parameters is None:
        parameters = {}

    activity_key = parameters[
        PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY
    timestamp_key = parameters[PARAM_TIMESTAMP_KEY] if PARAM_TIMESTAMP_KEY in parameters else "time:timestamp"

    # do the replay
    [traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] = \
        token_replay.apply(log, net, initial_marking, final_marking, activity_key=activity_key)

    element_statistics = performance_map.single_element_statistics(log, net, initial_marking,
                                                                   activatedTransitions,
                                                                   activity_key=activity_key,
                                                                   timestamp_key=timestamp_key)
    aggregated_statistics = performance_map.aggregate_statistics(element_statistics, measure=measure)

    return aggregated_statistics
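
# Illustrative usage sketch, assuming `log`, `net`, `initial_marking` and
# `final_marking` are already available; the decorations are assumed here to be
# returned as a dictionary keyed by model elements:
#
#     decorations = get_decorations(log, net, initial_marking, final_marking,
#                                   measure="performance")
#     for element, decoration in decorations.items():
#         print(element, decoration)
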
def apply(log, petri_net, initial_marking, final_marking, parameters=None):
    """
    Calculates generalization on the provided log and Petri net.

    The approach has been suggested by the paper
    Buijs, Joos CAM, Boudewijn F. van Dongen, and Wil MP van der Aalst. "Quality dimensions in process discovery:
    The importance of fitness, precision, generalization and simplicity."
    International Journal of Cooperative Information Systems 23.01 (2014): 1440001.

    A token replay is applied and, for each transition, we can measure the number of occurrences
    in the replay. The following formula is applied for generalization

        generalization = 1 - (\sum_{t \in transitions} \sqrt{1 / n_occ_replay(t)}) / |transitions|

    Parameters
    -----------
    log
        Trace log
    petri_net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Algorithm parameters

    Returns
    -----------
    generalization
        Generalization measure
    """
    if parameters is None:
        parameters = {}
    activity_key = parameters[
        PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY
    [traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] =\
        token_replay.apply(log, petri_net, initial_marking, final_marking, activity_key=activity_key)

    return get_generalization(petri_net, activatedTransitions)
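
# Worked example of the formula above with hypothetical replay counts: a rarely
# fired transition contributes a large sqrt(1/n) term and lowers the score.
#
#     import math
#     n_occ_replay = {"a": 100, "b": 10, "c": 1}
#     generalization = 1.0 - sum(math.sqrt(1.0 / n) for n in n_occ_replay.values()) / len(n_occ_replay)
#     print(generalization)  # approximately 0.53
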
"""final_marking = petri.petrinet.Marking()
for p in net.places:
    if not p.out_arcs:
        final_marking[p] = 1"""
for place in final_marking:
    print("final marking " + place.name)
gviz = pn_viz.graphviz_visualization(net,
                                     initial_marking=marking,
                                     final_marking=final_marking,
                                     debug=True)
gviz.view()
time0 = time.time()
print("started token replay")
parameters = {"enable_placeFitness": True}
[traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] = \
 token_factory.apply(log, net, marking, final_marking, parameters=parameters)
print(
    "underfed places: ",
    sorted([
        place.name for place in placeFitness.keys()
        if len(placeFitness[place]['underfedTraces']) > 0
    ]))
print(
    "overfed places: ",
    sorted([
        place.name for place in placeFitness.keys()
        if len(placeFitness[place]['overfedTraces']) > 0
    ]))
time1 = time.time()
print("time interlapsed", (time1 - time0))
fitTraces = [x for x in traceIsFit if x]
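# illustrative follow-up: report how many traces replayed without problems
print("fit traces:", len(fitTraces), "out of", len(traceIsFit))
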
def apply_token_replay(log,
                       net,
                       initial_marking,
                       final_marking,
                       parameters=None):
    """
    Calculates all metrics based on token-based replay and returns a unified dictionary

    Parameters
    -----------
    log
        Trace log
    net
        Petri net
    initial_marking
        Initial marking
    final_marking
        Final marking
    parameters
        Parameters

    Returns
    -----------
    dictionary
        Dictionary containing fitness, precision, generalization and simplicity, along with the weighted average of these metrics
    """
    if parameters is None:
        parameters = {}
    activity_key = parameters[
        PARAM_ACTIVITY_KEY] if PARAM_ACTIVITY_KEY in parameters else log_lib.util.xes.DEFAULT_NAME_KEY
    fitness_weight = parameters[
        PARAM_FITNESS_WEIGHT] if PARAM_FITNESS_WEIGHT in parameters else 0.25
    precision_weight = parameters[
        PARAM_PRECISION_WEIGHT] if PARAM_PRECISION_WEIGHT in parameters else 0.25
    simplicity_weight = parameters[
        PARAM_SIMPLICITY_WEIGHT] if PARAM_SIMPLICITY_WEIGHT in parameters else 0.25
    generalization_weight = parameters[
        PARAM_GENERALIZATION_WEIGHT] if PARAM_GENERALIZATION_WEIGHT in parameters else 0.25

    sum_of_weights = (fitness_weight + precision_weight + simplicity_weight +
                      generalization_weight)
    fitness_weight = fitness_weight / sum_of_weights
    precision_weight = precision_weight / sum_of_weights
    simplicity_weight = simplicity_weight / sum_of_weights
    generalization_weight = generalization_weight / sum_of_weights

    [traceIsFit, traceFitnessValue, activatedTransitions, placeFitness, reachedMarkings, enabledTransitionsInMarkings] =\
        token_replay.apply(log, net, initial_marking, final_marking, activity_key=activity_key)

    parameters = {}
    parameters["activity_key"] = activity_key

    fitness = fitness_token_based.get_fitness(traceIsFit, traceFitnessValue)
    precision = precision_token_based.apply(log,
                                            net,
                                            initial_marking,
                                            final_marking,
                                            parameters=parameters)
    generalization = generalization_token_based.get_generalization(
        net, activatedTransitions)
    simplicity = simplicity_arc_degree.apply(net)

    dictionary = {}
    dictionary["fitness"] = fitness
    dictionary["precision"] = precision
    dictionary["generalization"] = generalization
    dictionary["simplicity"] = simplicity
    metricsAverageWeight = fitness_weight * fitness["averageFitness"] + precision_weight * precision\
                           + generalization_weight * generalization + simplicity_weight * simplicity
    dictionary["metricsAverageWeight"] = metricsAverageWeight

    return dictionary
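
# Illustrative usage sketch, assuming `log`, `net`, `initial_marking` and
# `final_marking` are already available; the weights are renormalized inside the
# function, so only their ratios matter:
#
#     weights = {PARAM_FITNESS_WEIGHT: 0.4, PARAM_PRECISION_WEIGHT: 0.3,
#                PARAM_GENERALIZATION_WEIGHT: 0.2, PARAM_SIMPLICITY_WEIGHT: 0.1}
#     evaluation = apply_token_replay(log, net, initial_marking, final_marking,
#                                     parameters=weights)
#     print(evaluation["metricsAverageWeight"])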