import pytest

from compiler_gym.envs import LlvmEnv


def test_change_benchmark_mid_episode(env: LlvmEnv):
    """Test that changing the benchmark while in an episode has no effect until
    the next call to reset()."""
    env.reset(benchmark="benchmark://cbench-v1/crc32")
    assert env.benchmark == "benchmark://cbench-v1/crc32"
    env.benchmark = "benchmark://cbench-v1/dijkstra"
    assert env.benchmark == "benchmark://cbench-v1/crc32"
    env.reset()
    assert env.benchmark == "benchmark://cbench-v1/dijkstra"
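

# The `env` argument in these tests is assumed to be supplied by a pytest
# fixture. A minimal sketch of one such fixture, assuming the standard
# compiler_gym "llvm-v0" entry point; the fixture actually used by the
# original suite is not shown in this excerpt.
import compiler_gym


@pytest.fixture(scope="function")
def env() -> LlvmEnv:
    # Create a fresh LLVM environment for each test and close it afterwards.
    with compiler_gym.make("llvm-v0") as env:
        yield env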


def test_same_reward_after_reset(env: LlvmEnv):
    """Check that running the same action after calling reset() produces
    same reward.
    """
    env.reward_space = "IrInstructionCount"
    env.benchmark = "cbench-v1/dijkstra"

    action = env.action_space.flags.index("-instcombine")
    env.reset()

    _, reward_a, _, _ = env.step(action)
    assert reward_a, "Sanity check that action produces a reward"

    env.reset()
    _, reward_b, _, _ = env.step(action)
    assert reward_a == reward_b
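

# A hedged generalization of the determinism property tested above, not part
# of the original suite: replaying a fixed sequence of action flags after
# reset() should accumulate the same total reward each time.
def replay_episode_reward(env: LlvmEnv, flags) -> float:
    """Reset `env` and return the cumulative reward of applying `flags`.

    Assumes a reward space has already been set, so that step() returns a
    numeric reward.
    """
    env.reset()
    total = 0.0
    for flag in flags:
        _, reward, done, _ = env.step(env.action_space.flags.index(flag))
        total += reward
        if done:  # the backend service may end an episode early
            break
    return total


# Example usage, mirroring the single-action test above ("-mem2reg" is just an
# illustrative pass name):
#
#     env.reward_space = "IrInstructionCount"
#     assert replay_episode_reward(env, ["-mem2reg", "-instcombine"]) == \
#         replay_episode_reward(env, ["-mem2reg", "-instcombine"])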


def test_set_benchmark_invalid_type(env: LlvmEnv):
    with pytest.raises(TypeError) as ctx:
        env.benchmark = 10
    assert str(ctx.value) == "Unsupported benchmark type: int"
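

# For contrast with the TypeError above, a companion sketch (not from the
# original suite) of assignment forms the benchmark setter does accept: a URI
# string or a Benchmark instance. The use of env.datasets.benchmark() to
# resolve a Benchmark object is an assumption about the compiler_gym API.
from compiler_gym.datasets import Benchmark


def sketch_set_benchmark_valid_types(env: LlvmEnv):
    env.benchmark = "benchmark://cbench-v1/crc32"  # fully-qualified URI string
    env.reset()
    assert env.benchmark == "benchmark://cbench-v1/crc32"

    # Assumption: env.datasets.benchmark() resolves a URI to a Benchmark.
    benchmark: Benchmark = env.datasets.benchmark("benchmark://cbench-v1/dijkstra")
    env.benchmark = benchmark  # Benchmark instance
    env.reset()
    assert env.benchmark == "benchmark://cbench-v1/dijkstra"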