def example_simulator_01():
    """Simulate the Knuth-Yao die DTMC 1000 times and tally final outcomes.

    Runs the experiment twice: first observing raw state indices, then —
    after rebuilding the model with state valuations — observing
    program-level valuations.
    """
    path = stormpy.examples.files.prism_dtmc_die
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    final_outcomes = dict()
    for _ in range(1000):
        while not simulator.is_done():
            observation = simulator.step()
        # Only the terminal observation of each run is counted.
        final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
        simulator.restart()
    print(final_outcomes)

    # Rebuild with state valuations so the simulator can report
    # program-level observations instead of raw state indices.
    options = stormpy.BuilderOptions([])
    options.set_build_state_valuations()
    model = stormpy.build_sparse_model_with_options(prism_program, options)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    simulator.set_observation_mode(stormpy.simulator.SimulatorObservationMode.PROGRAM_LEVEL)
    final_outcomes = dict()
    for _ in range(1000):
        while not simulator.is_done():
            observation = simulator.step()
        final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
        simulator.restart()
    print(", ".join([f"{str(k)}: {v}" for k, v in final_outcomes.items()]))
def _sample_random_paths(simulator, num_paths, max_steps):
    """Sample ``num_paths`` uniformly random walks of at most ``max_steps`` steps.

    Each returned path is a list alternating state strings and
    ``--act=...-->`` action markers; a walk stops early when the
    simulator reports it is done (e.g. trapped in an absorbing state).
    """
    paths = []
    for _ in range(num_paths):
        state, reward, labels = simulator.restart()
        path = [f"{state}"]
        for _ in range(max_steps):
            actions = simulator.available_actions()
            # Uniformly random choice among the currently available actions.
            select_action = random.randint(0, len(actions) - 1)
            path.append(f"--act={actions[select_action]}-->")
            state, reward, labels = simulator.step(actions[select_action])
            path.append(f"{state}")
            if simulator.is_done():
                break
        paths.append(path)
    return paths


def example_simulator_02():
    """Sample random paths through the maze MDP, first with state-index
    observations, then with program-level observations and global action names.
    """
    path = stormpy.examples.files.prism_mdp_maze
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    # 5 paths of at most 20 steps.
    for path in _sample_random_paths(simulator, 5, 20):
        print(" ".join(path))

    # Rebuild with valuations and choice labels so the simulator can report
    # program-level states and named actions.
    options = stormpy.BuilderOptions()
    options.set_build_state_valuations()
    options.set_build_choice_labels(True)
    model = stormpy.build_sparse_model_with_options(prism_program, options)
    print(model)
    simulator = stormpy.simulator.create_simulator(model, seed=42)
    simulator.set_observation_mode(stormpy.simulator.SimulatorObservationMode.PROGRAM_LEVEL)
    simulator.set_action_mode(stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    # 5 paths of at most 20 steps.
    for path in _sample_random_paths(simulator, 5, 20):
        print(" ".join(path))
def example_simulator_01():
    """Sample five random paths (at most 20 steps each) through the maze MDP
    and print each one as a state/action trace.
    """
    # NOTE(review): this function name duplicates the earlier
    # example_simulator_01 — if both live in one module, this later
    # definition shadows the earlier one; confirm they belong to
    # separate example files.
    path = stormpy.examples.files.prism_mdp_maze
    prism_program = stormpy.parse_prism_program(path)

    model = stormpy.build_model(prism_program)
    simulator = stormpy.simulator.create_simulator(model, seed=42)

    # 5 paths of at most 20 steps.
    paths = []
    for _ in range(5):
        state = simulator.restart()
        trace = [f"{state}"]
        for _ in range(20):
            actions = simulator.available_actions()
            choice = random.randint(0, len(actions) - 1)
            trace.append(f"--act={actions[choice]}-->")
            state = simulator.step(actions[choice])
            trace.append(f"{state}")
            if simulator.is_done():
                break
        paths.append(trace)

    for trace in paths:
        print(" ".join(trace))
def example_simulator_04():
    """Demonstrate labelling of unlabelled PRISM commands before simulation.

    First simulates the consensus model after mapping every unlabelled
    command to the default label, then relabels each unlabelled command
    as ``tau_<module>`` and simulates again.
    """
    path = stormpy.examples.files.prism_mdp_coin_2_2
    prism_program = stormpy.parse_prism_program(path)

    # An empty suggestion dict gives every unlabelled command the default label.
    new_prism_program = prism_program.label_unlabelled_commands(dict())
    simulator = stormpy.simulator.create_simulator(new_prism_program, seed=42)
    simulator.set_action_mode(stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for _ in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            print(actions)
            # Always take the first available action.
            observation, reward, labels = simulator.step(actions[0])
            print(labels)
        final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
        simulator.restart()

    # Relabel each unlabelled command with a module-specific tau action.
    suggestions = {
        c.global_index: "tau_" + str(m.name)
        for m in prism_program.modules
        for c in m.commands
        if not c.is_labeled
    }
    new_prism_program = prism_program.label_unlabelled_commands(suggestions)
    simulator = stormpy.simulator.create_simulator(new_prism_program, seed=42)
    simulator.set_action_mode(stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for _ in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            print(actions)
            observation, reward, labels = simulator.step(actions[0])
        final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
        simulator.restart()
class TestSparseSimulator:
    """Tests for the sparse-model simulator on the Knuth-Yao die DTMC."""

    def test_die_simulation(self):
        """Run 7 simulations and check the labels of each terminal state.

        Previously these statements sat directly in the class body and
        executed at import time; wrapping them in a test method lets
        pytest discover and run them normally.
        """
        path = stormpy.examples.files.prism_dtmc_die
        prism_program = stormpy.parse_prism_program(path)
        model = stormpy.build_model(prism_program)
        simulator = stormpy.simulator.create_simulator(model, seed=42)
        final_outcomes = dict()
        for _ in range(7):
            while not simulator.is_done():
                observation, reward, labels = simulator.step()
            # Terminal states of the die model carry exactly two labels,
            # one of which is "done".
            assert len(labels) == 2
            assert "done" in labels
            final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
            simulator.restart()
def example_simulator_03():
    """Simulate the firewire MDP with constants fixed by preprocessing.

    The undefined constants (delay, fast) are set via symbolic-input
    preprocessing, then 5 runs are simulated by always taking the first
    available (globally named) action, tallying terminal observations.
    """
    path = stormpy.examples.files.prism_mdp_firewire
    prism_program = stormpy.parse_prism_program(path)
    # Instantiate the model's undefined constants before simulation.
    prism_program = stormpy.preprocess_symbolic_input(
        prism_program, [], "delay=10,fast=0.8")[0].as_prism_program()
    simulator = stormpy.simulator.create_simulator(prism_program, seed=42)
    simulator.set_action_mode(stormpy.simulator.SimulatorActionMode.GLOBAL_NAMES)
    final_outcomes = dict()
    for _ in range(5):
        while not simulator.is_done():
            actions = simulator.available_actions()
            # Always take the first available action.
            observation, reward, labels = simulator.step(actions[0])
        final_outcomes[observation] = final_outcomes.get(observation, 0) + 1
        simulator.restart()