    def test_mistake_hooks(self):
        construct_universe()
        num_traces_to_make = 5
        while ExperimentAdvice.invoked_count != num_traces_to_make:
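            # Each hook handed to run_model is a
            # (FuzzerClass, trigger, args, kwargs) tuple.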

            run_model(
                [
                    (ExperimentAdvice, 'all actors', list(), dict()),
                    (CompetenceModel, 'get_next_task', list(), dict()),
                ],
                num_start_messages=1,
                num_ticks=100)

            if not ExperimentAdvice.invoked:
                action_log.pop()
                if ('fuzzed tasks' in experimental_environment and
                        experimental_environment['fuzzed tasks'][-1] == []):
                    experimental_environment['fuzzed tasks'].pop()
            new_trace()
            reset_universe()

        # We have one too many traces, so we have to delete the one made by the last new_trace call.
        action_log.pop()

        generate_XES()
        generate_CSV()

    def test_massive_data_gen(self):
        construct_universe()
        ticks = 350

        iterations = 5
        # Bind the fuzzing hooks and the run length once, so each call to
        # `simulate` replays the same configuration.
        simulate = partial(
            run_model,
            [
                (CompetenceModel, 'get_next_task',
                 [0.05 * iterations, ticks / 2 / iterations], dict()),
                (ExperimentAdvice, 'all workflows', list(), dict()),
                (NewTasksOnEnd, 'end', list(), dict()),
            ],
            num_start_messages=1,
            num_ticks=ticks)

        self.play_simulation(simulate, discard_work=True)
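        # Keep replaying until the log holds at least 10,000 actions;
        # discard_work=False presumably preserves traces from earlier runs.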
        while len(action_log) < 10000:
            self.play_simulation(simulate, discard_work=False)
            print(len(action_log))

        # We stopped partway through a simulation, so unless the final action
        # in the last trace is one the simulation can legitimately end on, we
        # remove that trace (and its fuzzing side effects).
        try:
            if action_log[-1][-1] not in ("A_declined",
                                          "W_beoordelen_fraude_complete",
                                          "W_wijzigen_contractgegevens_schedule"):
                action_log.pop()
                if len(experimental_environment["fuzzed tasks"]) != len(action_log):
                    experimental_environment["fuzzed tasks"].pop()
        except (IndexError, KeyError):
            # Nothing recorded yet, or no fuzzed-task bookkeeping to trim.
            pass

        generate_XES(log_path="10000.xes")
        generate_CSV(csv_path="10000.csv")
Example #3
            return behaviour_function(attribute, actor)

        super(ChangingBehaviour, self).__init__(select_behaviour, "",
                                                times_to_trigger)


def remove_previous_action(_, __):
    action_log[-1].pop()


SkippingAdvice = partial(ExperimentAdvice, remove_previous_action, 'skipping')
RepeatingAdvice = partial(ExperimentAdvice, manually_log, 'repeating')
AlternatingBehaviour = partial(ChangingBehaviour,
                               [(remove_previous_action, "skipping"),
                                (manually_log, "repeating")])
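# Each partial pre-binds a mutation function and a label, so the resulting
# callables can be handed to run_model like any other fuzzer class.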

bpi_13_experimental_frontend.action_log = []

construct_universe()

while len(action_log) < 25:
    run_model(FuzzerClass=AlternatingBehaviour,
              num_ticks=250,
              num_start_messages=1)
    new_trace()
    print(len(action_log))

# Drop the surplus empty trace opened by the final new_trace call.
action_log.pop()
generate_XES()
generate_CSV()
Example #4
    def test_replay_data_generation(self):
        construct_universe()
        ticks = 50000
        min_dataset_required = 1000  # We want at least 1000 traces.
        iterations = 1

        class MonteCarlo:
            # Traces collected for the Monte Carlo replay experiment.
            traces = []

            class Governor:
                # Opens a fresh trace whenever a simulation run starts.
                def encore(self, _attribute, _actor, _res):
                    MonteCarlo.traces.append(list())

            class Collector:
                # Records the name of every workflow step into the open trace.
                def encore(self, attribute, _actor, _res):
                    MonteCarlo.traces[-1].append(attribute.__name__)

        simulate = partial(
            run_model,
            ([
                # (ActAbovePermissions, 'get_next_task', list(), dict()),
                # (CompetenceModel, 'get_next_task', [0.05*iterations, ticks/2/iterations], dict()),
                # (ExperimentAdvice, 'all workflows', list(), dict()),
                (NewTasksOnEnd, 'end', list(), dict()),
                (MonteCarlo.Governor, 'start', list(), dict()),
                (MonteCarlo.Collector, 'all workflows', list(), dict()),
            ]),
            num_start_messages=1,
            num_ticks=ticks)
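        # The hooks commented out above are alternative fuzzers kept for
        # reference; only NewTasksOnEnd and the two MonteCarlo recorders run.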

        self.play_simulation(simulate)
        while len(MonteCarlo.traces) < min_dataset_required:
            self.play_simulation(simulate, discard_work=False)
            # Drop empty traces left by runs that produced no events.
            MonteCarlo.traces = [t for t in MonteCarlo.traces if t != []]
            try:
                # Drop the last trace if it did not end on a terminal action.
                if MonteCarlo.traces[-1][-1] not in (
                        "A_declined", "W_beoordelen_fraude_complete",
                        "W_wijzigen_contractgegevens_schedule"):
                    MonteCarlo.traces.pop()
            except IndexError:
                pass

            print(len(MonteCarlo.traces))

        # We also stopped partway through a simulation, so unless the final
        # action in the last trace is one the simulation can legitimately end
        # on, we remove that trace (and its fuzzing side effects).
        try:
            if action_log[-1][-1] not in ("A_declined",
                                          "W_beoordelen_fraude_complete",
                                          "W_wijzigen_contractgegevens_schedule"):
                action_log.pop()
                if len(experimental_environment["fuzzed tasks"]) != len(action_log):
                    experimental_environment["fuzzed tasks"].pop()
        except (IndexError, KeyError):
            pass

        def completes_trace(incomplete, sample):
            # True iff `incomplete` occurs as a (not necessarily contiguous)
            # subsequence of `sample`.
            inc_position = 0
            for event in sample:
                if event == incomplete[inc_position]:
                    inc_position += 1
                if inc_position == len(incomplete):
                    return True
            return False
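        # e.g. completes_trace(["A", "C"], ["A", "B", "C"]) is True, while
        # completes_trace(["C", "A"], ["A", "B", "C"]) is False.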

        # Pick a random trace and punch gaps in it until only three events
        # remain; the Monte Carlo samples will be used to "complete" it.
        to_complete = copy(choice(MonteCarlo.traces))

        num_gaps = len(to_complete) - 3
        for _ in range(num_gaps):
            to_complete.remove(choice(to_complete))

        completes_sample = partial(completes_trace, to_complete)

        # Tuples are hashable, so completing traces can key the histogram.
        # (list(...) keeps this working on Python 3, where map is lazy.)
        MonteCarlo.traces = list(map(tuple, MonteCarlo.traces))

        histogram = {
            trace: MonteCarlo.traces.count(trace)
            for trace in MonteCarlo.traces if completes_sample(trace)
        }

        # Draw a completing trace with probability proportional to its count
        # (a cumulative-sum roulette wheel over the histogram). randint is
        # inclusive on both ends, hence the -1 to avoid biasing the last entry.
        lim = randint(0, sum(histogram.values()) - 1)
        total = 0
        for completing_trace, count in histogram.items():
            total += count
            if total > lim:
                break

        print("We complete the trace: \n\t" + str(to_complete) +
              "\n...with the trace...\n\t" + str(completing_trace))
    def test_competence_application(self):
        construct_universe()
        run_model([(CompetenceModel, 'get_next_task', list(), dict())],
                  num_start_messages=1,
                  num_ticks=150)