Example #1
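The snippets below are shown without their import headers. A minimal sketch of the imports the stormpy example functions assume (random is only needed for the maze examples); get_example_path in the test snippets appears to be a helper from the stormpy test suite and is not reproduced here:

import random

import stormpy
import stormpy.examples
import stormpy.examples.files
import stormpy.simulator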
def example_simulator_01():
    path = stormpy.examples.files.prism_dtmc_die
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    final_outcomes = dict()
    for n in range(1000):
        while not simulator.is_done():
            observation = simulator.step()
        if observation not in final_outcomes:
            final_outcomes[observation] = 1
        else:
            final_outcomes[observation] += 1
        simulator.restart()
    print(final_outcomes)

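    # Rebuild the model with state valuations and switch the simulator to
    # program-level observations: steps now report variable valuations
    # instead of state indices.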
    options = stormpy.BuilderOptions([])
    options.set_build_state_valuations()
    model = stormpy.build_sparse_model_with_options(prism_program, options)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    simulator.set_observation_mode(stormpy.simulator.SimulatorObservationMode.PROGRAM_LEVEL)
    final_outcomes = dict()
    for n in range(1000):
        while not simulator.is_done():
            observation = simulator.step()
        if observation not in final_outcomes:
            final_outcomes[observation] = 1
        else:
            final_outcomes[observation] += 1
        simulator.restart()
    print(", ".join([f"{str(k)}: {v}" for k,v in final_outcomes.items()]))
Example #2
def example_simulator_02():
    path = stormpy.examples.files.prism_mdp_maze
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    # 5 paths of at most 20 steps.
    paths = []
    for m in range(5):
        state, reward, labels = simulator.restart()
        path = [f"{state}"]
        for n in range(20):
            actions = simulator.available_actions()
            select_action = random.randint(0,len(actions)-1)
            #print(f"Randomly select action nr: {select_action} from actions {actions}")
            path.append(f"--act={actions[select_action]}-->")
            state, reward, labels = simulator.step(actions[select_action])
            #print(state)
            path.append(f"{state}")
            if simulator.is_done():
                #print("Trapped!")
                break
        paths.append(path)
    for path in paths:
        print(" ".join(path))

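    # Rebuild with state valuations and choice labels; program-level
    # observations and global action names make the printed paths readable.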
    options = stormpy.BuilderOptions()
    options.set_build_state_valuations()
    options.set_build_choice_labels(True)
    model = stormpy.build_sparse_model_with_options(prism_program, options)
    print(model)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    simulator.set_observation_mode(stormpy.simulator.SimulatorObservationMode.PROGRAM_LEVEL)
    simulator.set_action_mode(stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    # 5 paths of at most 20 steps.
    paths = []
    for m in range(5):
        state, reward, labels = simulator.restart()
        path = [f"{state}"]
        for n in range(20):
            actions = simulator.available_actions()
            select_action = random.randint(0,len(actions)-1)
            #print(f"Randomly select action nr: {select_action} from actions {actions}")
            path.append(f"--act={actions[select_action]}-->")
            state, reward, labels = simulator.step(actions[select_action])
            #print(state)
            path.append(f"{state}")
            if simulator.is_done():
                #print("Trapped!")
                break
        paths.append(path)
    for path in paths:
        print(" ".join(path))
Example #3
def example_simulator_01():
    path = stormpy.examples.files.prism_mdp_maze
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    # 5 paths of at most 20 steps.
    paths = []
    for m in range(5):
        state = simulator.restart()
        path = [f"{state}"]
        for n in range(20):
            actions = simulator.available_actions()
            select_action = random.randint(0,len(actions)-1)
            #print(f"Randomly select action nr: {select_action} from actions {actions}")
            path.append(f"--act={actions[select_action]}-->")
            state = simulator.step(actions[select_action])
            #print(state)
            path.append(f"{state}")
            if simulator.is_done():
                #print("Trapped!")
                break
        paths.append(path)
    for path in paths:
        print(" ".join(path))
Example #4
def example_simulator_03():
    path = stormpy.examples.files.prism_mdp_firewire
    prism_program = stormpy.parse_prism_program(path)
    prism_program = stormpy.preprocess_symbolic_input(
        prism_program, [], "delay=10,fast=0.8")[0].as_prism_program()

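    # Simulate directly on the (preprocessed) PRISM program instead of a built
    # model; actions are selected via their global names.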
    simulator = stormpy.simulator.create_simulator(prism_program, seed=42)
    simulator.set_action_mode(
        stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for n in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            observation, reward, labels = simulator.step(actions[0])
        if observation not in final_outcomes:
            final_outcomes[observation] = 1
        else:
            final_outcomes[observation] += 1
        simulator.restart()
Example #5
def example_simulator_04():
    path = stormpy.examples.files.prism_mdp_coin_2_2
    prism_program = stormpy.parse_prism_program(path)
    #prism_program = stormpy.preprocess_symbolic_input(prism_program, [], "delay=10,fast=0.8")[0].as_prism_program()
    new_prism_program = prism_program.label_unlabelled_commands(dict())

    simulator = stormpy.simulator.create_simulator(new_prism_program, seed=42)
    simulator.set_action_mode(
        stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for n in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            print(actions)
            observation, reward, labels = simulator.step(actions[0])
            print(labels)
        if observation not in final_outcomes:
            final_outcomes[observation] = 1
        else:
            final_outcomes[observation] += 1
        simulator.restart()

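    # Suggest a fresh label tau_<module name> for every unlabeled command, so
    # that each command shows up under its own global action name.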
    suggestions = dict()
    for m in prism_program.modules:
        for c in m.commands:
            if not c.is_labeled:
                suggestions[c.global_index] = "tau_" + str(m.name)

    new_prism_program = prism_program.label_unlabelled_commands(suggestions)
    simulator = stormpy.simulator.create_simulator(new_prism_program, seed=42)
    simulator.set_action_mode(
        stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for n in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            print(actions)
            observation, reward, labels = simulator.step(actions[0])
        if observation not in final_outcomes:
            final_outcomes[observation] = 1
        else:
            final_outcomes[observation] += 1
        simulator.restart()
Example #6
    def test_negative_values(self):
        prism_program = stormpy.parse_prism_program(
            get_example_path("dtmc", "negativevals.pm"))
        prism_program = stormpy.preprocess_symbolic_input(
            prism_program, [], "")[0].as_prism_program()

        simulator = stormpy.simulator.create_simulator(prism_program, seed=42)
        simulator.set_action_mode(
            stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
        state, rew, labels = simulator.restart()
        assert state["s"] == -1
        assert int(state["s"]) == -1
Example #7
class TestSparseSimulator:
    # The original snippet places this body directly at class level; it most
    # likely belongs inside a test method, so a placeholder name is used here.
    def test_dtmc_die_simulation(self):
        path = stormpy.examples.files.prism_dtmc_die
        prism_program = stormpy.parse_prism_program(path)

        model = stormpy.build_model(prism_program)
        simulator = stormpy.simulator.create_simulator(model, seed=42)
        final_outcomes = dict()
        for n in range(7):
            while not simulator.is_done():
                observation, reward, labels = simulator.step()
            assert len(labels) == 2
            assert "done" in labels
            if observation not in final_outcomes:
                final_outcomes[observation] = 1
            else:
                final_outcomes[observation] += 1
            simulator.restart()
Example #8
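Examples #8-#10 are excerpts from a larger monitoring script: monitor (Example #9) drives either filtering (Example #8) or unfolding (Example #10). A minimal sketch of the imports and module-level names they rely on; the helpers listed in the trailing comment are defined elsewhere in that script and are not reproduced here:

import csv
import logging
import os
import time
from collections.abc import Iterable

from tqdm import tqdm

import stormpy
import stormpy as sp
import stormpy.pomdp

logger = logging.getLogger(__name__)

# build_model, analyse_model, ForwardFilteringOptions and UnfoldingOptions are
# helpers / option classes defined elsewhere in the same script.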
def filtering(simulator,
              tracker,
              trace_length,
              convex_reduction,
              stats_file,
              verbose,
              observation_valuations=None,
              terminate_on_deadline=True,
              deadline=None):
    """
    Run forward filtering along a randomly simulated trace and log per-step statistics.

    :param simulator: The simulator that spits out the observations.
    :param tracker: The tracker that keeps track of the state estimation.
    :param trace_length: How many steps to take.
    :param convex_reduction: If True, apply the (convex-hull) reduction after each step (recommended).
    :param stats_file: The file to write all the statistics to.
    :param verbose: If True, print some additional information.
    :param observation_valuations: Valuations used to pretty-print observations in verbose mode.
    :param terminate_on_deadline: If True, abort the trace once a step exceeds the deadline.
    :param deadline: Time budget per step, in milliseconds.
    :return: True if the full trace was processed, False if tracking failed or the deadline was exceeded.
    """
    #TODO make stats files optional.
    observation, _ = simulator.restart()
    tracker.reset(observation)

    with open(stats_file, 'w') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Index", "Observation", "Risk", "TrackTime", "ReduceTime",
            "TotalTime", "NrBeliefsBR", "NrBeliefsAR", "Dimension", "TimedOut"
        ])
        iterator = tqdm(range(trace_length))
        for i in iterator:
            observation, _ = simulator.random_step()
            if verbose:
                hl_obs = observation_valuations.get_string(
                    observation, pretty=True)[1:-1].replace("\t", " ")
                print(f"Observe {hl_obs}")
                for belief in tracker.obtain_beliefs():
                    print(belief)
            start_time = time.monotonic()
            passed = tracker.track(observation)
            if not passed:
                writer.writerow([i, observation, 0, 0, 0, 0, 0, 0, 0, True])
                file.flush()

                iterator.close()
                return False
            risk = tracker.obtain_current_risk()
            end_time = time.monotonic()
            track_time = end_time - start_time
            start_time = time.monotonic()
            sizeBR = tracker.size()
            if convex_reduction:
                tracker.reduce()
            end_time = time.monotonic()
            reduce_time = end_time - start_time
            timed_out = tracker.reduction_timed_out()
            total_time = track_time + reduce_time
            if deadline is not None and total_time * 1000 > deadline:
                timed_out = True
            writer.writerow([
                i, observation, risk, track_time, reduce_time, total_time,
                sizeBR,
                tracker.size(),
                tracker.dimension(), timed_out
            ])
            file.flush()
            if deadline and terminate_on_deadline and timed_out:
                iterator.close()
                return False
        iterator.close()
    return True
Example #9
def monitor(path,
            risk_property,
            constants,
            trace_length,
            options,
            verbose=False,
            simulator_seed=0,
            promptness_deadline=10000,
            model_id="no_id_given"):
    """
    Monitor a simulated trace of the model, assessing the risk after every observation.

    :param path: The path to the model file.
    :param risk_property: The property that describes the risk.
    :param constants: Values for constants that appear in the model.
    :param trace_length: How long the traces should be.
    :param options: Options that configure the monitoring method (forward filtering or unfolding).
    :param verbose: If True, print some additional information.
    :param simulator_seed: A single seed or an iterable of seeds for the simulator.
    :param promptness_deadline: Time budget per computation step, in milliseconds.
    :param model_id: A name used for creating the statistics files.
    """
    start_time = time.monotonic()
    use_forward_filtering = isinstance(options, ForwardFilteringOptions)
    use_unfolding = isinstance(options, UnfoldingOptions)
    if not use_forward_filtering and not use_unfolding:
        raise RuntimeError("Unknown type of options, method cannot be deduced")
    assert not (use_forward_filtering and use_unfolding)

    logger.info("Parse MDP representation...")
    prism_program = sp.parse_prism_program(path)
    prop = sp.parse_properties_for_prism_program(risk_property,
                                                 prism_program)[0]
    prism_program, props = sp.preprocess_symbolic_input(
        prism_program, [prop], constants)
    prop = props[0]
    prism_program = prism_program.as_prism_program()
    raw_formula = prop.raw_formula

    logger.info("Construct MDP representation...")
    model = build_model(prism_program,
                        raw_formula,
                        exact_arithmetic=options.exact_arithmetic)
    if verbose:
        print(model)
    assert model.has_observation_valuations()
    logger.info("Compute risk per state")
    risk_assessment = analyse_model(model, prop).get_values()

    # The seed can be given as a single value or as an iterable of seeds.
    if isinstance(simulator_seed, Iterable):
        simulator_seed_range = simulator_seed
    else:
        simulator_seed_range = range(simulator_seed, simulator_seed + 1)

    logger.info("Initialize simulator...")
    simulator = sp.simulator.create_simulator(model)
    if use_forward_filtering:
        logger.info("Initialize tracker...")
        tracker = stormpy.pomdp.create_nondeterminstic_belief_tracker(
            model, promptness_deadline, promptness_deadline)
        tracker.set_risk(risk_assessment)
    if use_unfolding:
        expr_manager = stormpy.ExpressionManager()
        unfolder = stormpy.pomdp.create_observation_trace_unfolder(
            model, risk_assessment, expr_manager)

    initialize_time = time.monotonic() - start_time
    #stormpy.export_to_drn(model, "model.drn")
    stats_folder = f"stats/{model_id}-{options.method_id}/"
    if not os.path.isdir(stats_folder):
        os.makedirs(stats_folder)
    else:
        raise RuntimeWarning(
            f"We are writing to an existing folder '{stats_folder}'.")
    with open(os.path.join(stats_folder, "stats.out"), 'w') as file:
        file.write(f"states={model.nr_states}\n")
        file.write(f"transitions={model.nr_transitions}\n")
        file.write(f"init_time={initialize_time}\n")
        file.write(f"promptness_deadline={promptness_deadline}\n")

    for seed in tqdm(simulator_seed_range):
        simulator.set_seed(seed)
        logger.info("Restart simulator...")
        observation, _ = simulator.restart()

        stats_file = f"{stats_folder}/stats-{model_id}-{options.method_id}-{seed}.csv"
        if use_forward_filtering:
            filtering(simulator,
                      tracker,
                      trace_length,
                      options.convex_hull_reduction,
                      stats_file,
                      deadline=promptness_deadline,
                      verbose=verbose,
                      observation_valuations=model.observation_valuations)
        if use_unfolding:
            unfolding(simulator,
                      unfolder,
                      trace_length,
                      stats_file,
                      use_ovi=not options.exact_arithmetic,
                      deadline=promptness_deadline)
Example #10
def unfolding(simulator,
              unfolder,
              trace_length,
              stats_file,
              deadline=None,
              terminate_on_deadline=True,
              use_ovi=False):
    """
    Unfold the observation trace into an MDP after every step and model check the risk.

    :param simulator: The simulator that spits out the observations.
    :param unfolder: The unfolder that extends the MDP built from the observation trace.
    :param trace_length: How many steps to take.
    :param stats_file: The file to write all the statistics to.
    :param deadline: Time budget per computation step, in milliseconds.
    :param terminate_on_deadline: If True, abort the trace once a step exceeds the deadline.
    :param use_ovi: Whether to use optimistic value iteration (OVI) for model checking.
    :return: True if the full trace was processed, False if the deadline was exceeded.
    """
    #TODO make the stats_file optional
    observation, _ = simulator.restart()
    unfolder.reset(observation)
    env = stormpy.Environment()
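    # Optionally switch the model checker to optimistic value iteration (OVI)
    # with a coarse precision.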
    if use_ovi:
        env.solver_environment.minmax_solver_environment.method = stormpy.MinMaxMethod.optimistic_value_iteration
        env.solver_environment.minmax_solver_environment.precision = stormpy.Rational(
            "0.01")

    with open(stats_file, 'w') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Index", "Observation", "Risk", "UnfTime", "McTime", "TotalTime",
            "MdpStates", "MdpTransitions", "TimedOut"
        ])
        for i in tqdm(range(trace_length)):
            observation, _ = simulator.random_step()
            start_time = time.monotonic()
            mdp = unfolder.extend(observation)
            end_time = time.monotonic()
            unfold_time = end_time - start_time
            #stormpy.export_to_drn(mdp,"unrolling.out")
            prop = sp.parse_properties("Pmax=? [F \"_goal\"]")[0]
            #
            #mdpl = stormpy.build_model_from_drn("unrolling.out")
            timeout = False
            start_time = time.monotonic()
            stormpy.reset_timeout()
            stormpy.set_timeout(int(deadline / 1000))
            stormpy.install_signal_handlers()
            try:
                result = stormpy.model_checking(mdp,
                                                prop,
                                                environment=env,
                                                only_initial_states=True)
                risk = result.at(0)
            except RuntimeError:
                # The check hit the timeout; no risk value is available for this step.
                risk = None
                timeout = True
            stormpy.reset_timeout()
            end_time = time.monotonic()
            mc_time = end_time - start_time
            total_time = unfold_time + mc_time
            if deadline and total_time * 1000 > deadline:
                timeout = True
            writer.writerow([
                i, observation, risk, unfold_time, mc_time, total_time,
                mdp.nr_states, mdp.nr_transitions, timeout
            ])
            file.flush()
            if terminate_on_deadline and timeout:
                return False
    return True