Code example #1
def setup(log, env, no_traces, net, initial_marking):
    """

    :param log: The input events in the form of a log
    :param env: The simulation environment
    :param no_traces: Number of traces that needs to be generated, given by the user
    :param net: The petrinet representing the process model
    :param initial_marking: The initial marking of the activities of the model
    """
    user_yn = input(
        "Do you want to configure the average arrival rate of the cases? Enter y or n "
    )
    if user_yn.lower() == "y":
        case_arrival_time = float(
            input("Enter in seconds the case arrival time "))

    else:
        case_arrival_time = get_case_arrival_avg(log)

    casegen = methods.Trace(env)

    # Create more cases while the simulation is running
    for i in range(1, no_traces + 1):
        yield env.timeout(case_arrival_time)
        env.process(
            simulation(env, 'Case %d' % i, casegen, net, initial_marking,
                       no_traces))
Code example #2
def execute_script():
    log_name = os.path.join("..", "tests", "input_data", "running-example.xes")
    log = xes_importer.apply(log_name, variant="nonstandard")
    print("imported log")
    # obtain a simple, sound workflow net, containing only visible unique transitions,
    # applying the Alpha Miner to some of the top variants (methods by Ale)
    activity_key = "concept:name"
    parameters = {PARAMETER_CONSTANT_ACTIVITY_KEY: activity_key, PARAMETER_CONSTANT_ATTRIBUTE_KEY: activity_key}
    net, initial_marking, final_marking = simple_extraction_factory.apply(log, classic_output=True,
                                                                          parameters=parameters)
    print("obtained model")
    # build a visualization of the output Petri net (viewing is disabled here)
    gviz = pn_vis_factory.apply(net, initial_marking, final_marking)
    del gviz
    # pn_vis_factory.view(gviz)
    # gets the average time between case starts
    avg_time_starts = get_case_arrival_avg(log)
    print("avg_time_starts real=", avg_time_starts)
    # gets the stochastic distribution associated to the Petri net and the log
    smap = stochastic_map.get_map_from_log_and_net(log, net, initial_marking, final_marking,
                                                   force_distribution="EXPONENTIAL")
    print("smap=", smap)

    perf_bound_obj = LpPerfBounds(net, initial_marking, final_marking, smap, avg_time_starts)
    net1, imarking1, fmarking1 = perf_bound_obj.get_net()
    gviz = pn_vis_factory.apply(net1, imarking1, fmarking1)

    for var in perf_bound_obj.var_corr:
        corr = perf_bound_obj.var_corr[var]

        minimum = perf_bound_obj.solve_problem(var, maximize=False)
        maximum = perf_bound_obj.solve_problem(var, maximize=True)

        print(var, minimum[corr], maximum[corr])
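
As a side note, the case arrival average measured by get_case_arrival_avg is simply the mean gap between consecutive case start timestamps. The small, self-contained sketch below (made-up timestamps, not part of the script above) shows that average, and how an exponential distribution with the same mean could be used to draw synthetic inter-arrival times, in the spirit of the forced EXPONENTIAL distribution:

import random

case_start_times = [0.0, 40.0, 95.0, 180.0]  # hypothetical case start timestamps (seconds)
gaps = [b - a for a, b in zip(case_start_times, case_start_times[1:])]
avg_time_starts = sum(gaps) / len(gaps)      # the quantity get_case_arrival_avg estimates

# sample a synthetic inter-arrival time from an exponential distribution with that mean
next_gap = random.expovariate(1.0 / avg_time_starts)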
Code example #3
def gerar_estatisticas_model_from_log_eventos(eventLog):

    parameters_stats = {
        case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"
    }

    # (number of cases in the event log)
    all_case_durations = case_statistics.get_all_casedurations(
        eventLog, parameters=parameters_stats)
    # (duration of the fastest case)
    min_case_duration = min(all_case_durations)
    # (duration of the slowest case)
    max_case_duration = max(all_case_durations)
    # (median case duration)
    median_case_duration = case_statistics.get_median_caseduration(
        eventLog, parameters=parameters_stats)

    parameters_arrival = {
        case_arrival.Parameters.TIMESTAMP_KEY: "time:timestamp"
    }

    # (average time between the arrivals of two consecutive cases)
    case_arrival_ratio = case_arrival.get_case_arrival_avg(
        eventLog, parameters=parameters_arrival)
    # (average time between the completions of two consecutive cases)
    case_dispersion_ratio = case_arrival.get_case_dispersion_avg(
        eventLog, parameters=parameters_arrival)

    previsoes = gerar_previsoes_modelo_from_log_eventos(eventLog)

    return ModeloEstatisticas(qtde_casos=len(all_case_durations),
                              caso_dur_min=min_case_duration,
                              caso_dur_max=max_case_duration,
                              caso_dur_media=median_case_duration,
                              taxa_chegada_casos=case_arrival_ratio,
                              taxa_dispersao_casos=case_dispersion_ratio,
                              previsoes_termino=previsoes)
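
The two ratios computed above differ only in which timestamps are compared: the arrival ratio averages the gaps between consecutive case starts, while the dispersion ratio averages the gaps between consecutive case completions. A tiny illustrative sketch with made-up timestamps (not part of the function above):

case_starts = sorted([0.0, 60.0, 150.0])    # hypothetical start timestamp of each case (seconds)
case_ends = sorted([300.0, 330.0, 500.0])   # hypothetical completion timestamp of each case (seconds)

# corresponds to get_case_arrival_avg
arrival_avg = sum(b - a for a, b in zip(case_starts, case_starts[1:])) / (len(case_starts) - 1)
# corresponds to get_case_dispersion_avg
dispersion_avg = sum(b - a for a, b in zip(case_ends, case_ends[1:])) / (len(case_ends) - 1)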
Code example #4
def apply(log, net, im, fm, parameters=None):
    """
    Performs a Monte Carlo simulation of an accepting Petri net without duplicate transitions and where the preset is always
    distinct from the postset (FIFO variant; the semaphores pile up if waiting is needed, and the first in is the first to win
    the semaphore)

    Parameters
    -------------
    log
        Event log
    net
        Accepting Petri net without duplicate transitions and where the preset is always distinct from the postset
    im
        Initial marking
    fm
        Final marking
    parameters
        Parameters of the algorithm:
            PARAM_NUM_SIMULATIONS => (default: 100)
            PARAM_FORCE_DISTRIBUTION => Force a particular stochastic distribution (e.g. normal) when the stochastic map
            is discovered from the log (default: None; no distribution is forced)
            PARAM_ENABLE_DIAGNOSTICS => Enable the printing of diagnostics (default: True)
            PARAM_DIAGN_INTERVAL => Interval of time in which diagnostics of the simulation are printed (default: 32)
            PARAM_CASE_ARRIVAL_RATIO => Case arrival ratio of new cases (default: None; inferred from the log)
            PARAM_PROVIDED_SMAP => Stochastic map that is used in the simulation (default: None; inferred from the log)
            PARAM_MAP_RESOURCES_PER_PLACE => Specification of the number of resources available per place
            (default: None; each place gets the default number of resources)
            PARAM_DEFAULT_NUM_RESOURCES_PER_PLACE => Default number of resources per place when not specified
            (default: 1; each place gets 1 resource and has to wait for the resource to finish)
            PARAM_SMALL_SCALE_FACTOR => Scale factor for the sleeping time of the actual simulation
            (default: 864000.0, i.e. 10 days)
            PARAM_MAX_THREAD_EXECUTION_TIME => Maximum execution time per thread (default: 60.0, 1 minute)

    Returns
    ------------
    simulated_log
        Simulated event log
    simulation_result
        Result of the simulation:
            Outputs.OUTPUT_PLACES_INTERVAL_TREES => interval trees that associate to each place the times in which it was occupied.
            Outputs.OUTPUT_TRANSITIONS_INTERVAL_TREES => interval trees that associate to each transition the intervals of time
            in which it could not fire because some token was in the output place.
            Outputs.OUTPUT_CASES_EX_TIME => Throughput time of the cases included in the simulated log
            Outputs.OUTPUT_MEDIAN_CASES_EX_TIME => Median of the throughput times
            Outputs.OUTPUT_CASE_ARRIVAL_RATIO => Case arrival ratio that was specified in the simulation
            Outputs.OUTPUT_TOTAL_CASES_TIME => Total time occupied by cases of the simulated log
    """
    if parameters is None:
        parameters = {}

    from intervaltree import IntervalTree, Interval

    timestamp_key = exec_utils.get_param_value(
        Parameters.TIMESTAMP_KEY, parameters,
        xes_constants.DEFAULT_TIMESTAMP_KEY)
    no_simulations = exec_utils.get_param_value(
        Parameters.PARAM_NUM_SIMULATIONS, parameters, 100)
    force_distribution = exec_utils.get_param_value(
        Parameters.PARAM_FORCE_DISTRIBUTION, parameters, None)
    enable_diagnostics = exec_utils.get_param_value(
        Parameters.PARAM_ENABLE_DIAGNOSTICS, parameters, True)
    diagn_interval = exec_utils.get_param_value(
        Parameters.PARAM_DIAGN_INTERVAL, parameters, 32.0)
    case_arrival_ratio = exec_utils.get_param_value(
        Parameters.PARAM_CASE_ARRIVAL_RATIO, parameters, None)
    smap = exec_utils.get_param_value(Parameters.PARAM_PROVIDED_SMAP,
                                      parameters, None)
    resources_per_places = exec_utils.get_param_value(
        Parameters.PARAM_MAP_RESOURCES_PER_PLACE, parameters, None)
    default_num_resources_per_places = exec_utils.get_param_value(
        Parameters.PARAM_DEFAULT_NUM_RESOURCES_PER_PLACE, parameters, 1)
    small_scale_factor = exec_utils.get_param_value(
        Parameters.PARAM_SMALL_SCALE_FACTOR, parameters, 864000)
    max_thread_exec_time = exec_utils.get_param_value(
        Parameters.PARAM_MAX_THREAD_EXECUTION_TIME, parameters, 60.0)

    if case_arrival_ratio is None:
        case_arrival_ratio = case_arrival.get_case_arrival_avg(
            log, parameters=parameters)
    if resources_per_places is None:
        resources_per_places = {}

    logging.basicConfig()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)

    places_interval_trees = {}
    transitions_interval_trees = {}
    cases_ex_time = []
    list_cases = {}

    for place in net.places:
        # assign a semaphore to each place.
        if place in resources_per_places:
            place.semaphore = Semaphore(resources_per_places[place])
        else:
            # if the user does not specify the number of resources per place,
            # the default number is used
            place.semaphore = Semaphore(default_num_resources_per_places)
        place.assigned_time = []
        places_interval_trees[place] = IntervalTree()
    for trans in net.transitions:
        transitions_interval_trees[trans] = IntervalTree()

    # when the user does not specify any map from transitions to random variables,
    # a replay operation is performed
    if smap is None:
        if enable_diagnostics:
            logger.info(str(time()) + " started the replay operation.")
        if force_distribution is not None:
            smap = replay.get_map_from_log_and_net(
                log,
                net,
                im,
                fm,
                force_distribution=force_distribution,
                parameters=parameters)
        else:
            smap = replay.get_map_from_log_and_net(log,
                                                   net,
                                                   im,
                                                   fm,
                                                   parameters=parameters)
        if enable_diagnostics:
            logger.info(str(time()) + " ended the replay operation.")

    # the start timestamp is set to 1000000 instead of 0 to avoid problems with 32 bit machines
    start_time = 1000000
    threads = []
    for i in range(no_simulations):
        list_cases[i] = Trace()
        t = SimulationThread(i, net, im, fm, smap, start_time,
                             places_interval_trees, transitions_interval_trees,
                             cases_ex_time, list_cases, enable_diagnostics,
                             diagn_interval, small_scale_factor,
                             max_thread_exec_time)
        t.start()
        threads.append(t)
        start_time = start_time + case_arrival_ratio
        # wait a scaled-down inter-arrival time before starting the next thread
        sleep(case_arrival_ratio / small_scale_factor)

    for t in threads:
        t.join()

    i = 0
    while i < len(threads):
        if threads[i].terminated_correctly is False:
            del list_cases[threads[i].id]
            del threads[i]
            del cases_ex_time[i]
            continue
        i = i + 1

    if enable_diagnostics:
        logger.info(str(time()) + " ended the Monte Carlo simulation.")

    log = EventLog(list(list_cases.values()))
    min_timestamp = log[0][0][timestamp_key].timestamp()
    max_timestamp = max(y[timestamp_key].timestamp() for x in log for y in x)

    transitions_interval_trees = {
        t.name: y
        for t, y in transitions_interval_trees.items()
    }

    return log, {
        Outputs.OUTPUT_PLACES_INTERVAL_TREES.value: places_interval_trees,
        Outputs.OUTPUT_TRANSITIONS_INTERVAL_TREES.value:
        transitions_interval_trees,
        Outputs.OUTPUT_CASES_EX_TIME.value: cases_ex_time,
        Outputs.OUTPUT_MEDIAN_CASES_EX_TIME.value: median(cases_ex_time),
        Outputs.OUTPUT_CASE_ARRIVAL_RATIO.value: case_arrival_ratio,
        Outputs.OUTPUT_TOTAL_CASES_TIME.value: max_timestamp - min_timestamp
    }
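
The trees returned under OUTPUT_PLACES_INTERVAL_TREES are intervaltree.IntervalTree objects holding, per place, the intervals in which the place was occupied during the simulation. A minimal sketch of how such a tree can be queried, independent of the simulator and with made-up interval bounds:

from intervaltree import Interval, IntervalTree

tree = IntervalTree()
tree.add(Interval(1000000.0, 1000600.0))  # place occupied for 600 s by one simulated case
tree.add(Interval(1000300.0, 1000900.0))  # overlapping occupation by another case

# number of simulated cases occupying the place at a given instant
busy_at = len(tree[1000400.0])            # -> 2
# overall span covered by the recorded occupations
span = tree.end() - tree.begin()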
Code example #5
def apply(log, net, im, fm, parameters=None):
    """
    Performs a Monte Carlo simulation of the Petri net

    Parameters
    -------------
    log
        Event log
    net
        Petri net
    im
        Initial marking
    fm
        Final marking
    parameters
        Parameters of the algorithm

    Returns
    ------------
    simulated_log
        Simulated event log
    simulation_result
        Result of the simulation
    """
    if parameters is None:
        parameters = {}

    parameters["business_hours"] = True
    no_simulations = parameters[
        "no_simulations"] if "no_simulations" in parameters else 100
    force_distribution = parameters[
        "force_distribution"] if "force_distribution" in parameters else None

    case_arrival_ratio = parameters[
        "case_arrival_ratio"] if "case_arrival_ratio" in parameters else case_arrival.get_case_arrival_avg(
            log, parameters=parameters)

    places_interval_trees = {}
    transitions_interval_trees = {}
    cases_ex_time = []
    list_cases = {}

    for place in net.places:
        place.semaphore = Semaphore(1)
        place.assigned_time = -1
        places_interval_trees[place] = IntervalTree()
    for trans in net.transitions:
        transitions_interval_trees[trans] = IntervalTree()

    if force_distribution is not None:
        map = mapping.get_map_from_log_and_net(
            log,
            net,
            im,
            fm,
            force_distribution=force_distribution,
            parameters=parameters)
    else:
        map = mapping.get_map_from_log_and_net(log,
                                               net,
                                               im,
                                               fm,
                                               parameters=parameters)

    start_time = 1000000
    threads = []
    for i in range(no_simulations):
        list_cases[i] = Trace()
        t = SimulationThread(i, net, im, fm, map, start_time,
                             places_interval_trees, transitions_interval_trees,
                             cases_ex_time, list_cases)
        t.start()
        threads.append(t)
        start_time = start_time + case_arrival_ratio

    for t in threads:
        t.join()

    log = EventLog(list(list_cases.values()))
    min_timestamp = log[0][0]['time:timestamp'].timestamp()
    max_timestamp = max(y['time:timestamp'].timestamp() for x in log
                        for y in x)

    transitions_interval_trees = {
        t.name: y
        for t, y in transitions_interval_trees.items()
    }

    return log, {
        "places_interval_trees": places_interval_trees,
        "transitions_interval_trees": transitions_interval_trees,
        "cases_ex_time": cases_ex_time,
        "median_cases_ex_time": median(cases_ex_time),
        "input_case_arrival_ratio": case_arrival_ratio,
        "total_time": max_timestamp - min_timestamp
    }
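
The semaphore-per-place construction in both simulators serializes access to each place: with Semaphore(1), concurrently simulated cases have to take turns occupying the place. A self-contained sketch of that mechanism using only the standard library (the durations are made up and scaled down, much as the simulators do with their scale factor):

from threading import Semaphore, Thread
from time import sleep

place_semaphore = Semaphore(1)  # one resource per place, as in the code above

def occupy_place(case_id, duration):
    place_semaphore.acquire()      # block until the place is free
    try:
        sleep(duration)            # scaled-down "time spent" in the place
    finally:
        place_semaphore.release()  # free the place for the next case

threads = [Thread(target=occupy_place, args=(i, 0.01)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()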
Code example #6
 def test_case_arrival(self):
     from pm4py.statistics.traces.log import case_arrival
     log = self.get_log()
     case_arrival.get_case_arrival_avg(log)
     case_arrival.get_case_dispersion_avg(log)
Code example #7
File: pm1.py  Project: Godcomplex11/DU
gviz = dectree_visualizer.apply(clf, feature_names, classes)

#%% Statistics

from pm4py.statistics.traces.log import case_statistics
all_case_durations = case_statistics.get_all_casedurations(
    log, parameters={case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"})

all_case_durations

from pm4py.statistics.traces.log import case_statistics
median_case_duration = case_statistics.get_median_caseduration(
    log, parameters={case_statistics.Parameters.TIMESTAMP_KEY: "time:timestamp"})
median_case_duration

# Case Arrival
from pm4py.statistics.traces.log import case_arrival
case_arrival_ratio = case_arrival.get_case_arrival_avg(
    log, parameters={case_arrival.Parameters.TIMESTAMP_KEY: "time:timestamp"})
case_arrival_ratio

from pm4py.statistics.traces.log import case_arrival
case_dispersion_ratio = case_arrival.get_case_dispersion_avg(
    log, parameters={case_arrival.Parameters.TIMESTAMP_KEY: "time:timestamp"})
case_dispersion_ratio


# Performance Spectrum
from pm4py.statistics.performance_spectrum import algorithm as performance_spectrum
ps = performance_spectrum.apply(
    log, ["register request", "decide"],
    parameters={performance_spectrum.Parameters.ACTIVITY_KEY: "concept:name",
                performance_spectrum.Parameters.TIMESTAMP_KEY: "time:timestamp"})
ps

# Business Hours
from pm4py.util.business_hours import BusinessHours
from datetime import datetime
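
The snippet ends right after these two imports. Using them, a hedged usage sketch of BusinessHours follows, assuming the older pm4py API in which the constructor takes worktiming/weekends keyword arguments and the object exposes getseconds() (treat the exact signature as an assumption):

st = datetime.fromtimestamp(100000000)
et = datetime.fromtimestamp(200000000)
# business seconds between st and et, counting 07:00-17:00 on weekdays only (assumed API)
bh_object = BusinessHours(st, et, worktiming=[7, 17], weekends=[6, 7])
worked_seconds = bh_object.getseconds()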