# Example #1
        benchmark(lambda: env.reward[reward_space])


@pytest.mark.parametrize(
    "make_env",
    [
        lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/crc32"),
        lambda: gym.make("llvm-autophase-ic-v0", benchmark="cbench-v1/jpeg-d"),
        # TODO: Example service does not yet support fork() operator.
        # lambda: gym.make("example-cc-v0"),
        # lambda: gym.make("example-py-v0"),
    ],
    ids=["llvm;fast-benchmark", "llvm;slow-benchmark"],
)
def test_fork(benchmark, make_env):
    """Benchmark the latency of forking an environment and closing the fork.

    Parametrized over a fast and a slow LLVM benchmark program so fork()
    cost can be compared across program sizes. `benchmark` is the
    pytest-benchmark fixture; it repeatedly times the given callable.
    """
    with make_env() as env:
        env.reset()
        benchmark(lambda: env.fork().close())


if __name__ == "__main__":
    # Persist pytest-benchmark results under a per-user tmp directory,
    # sort the report by test name, and abort on the first failure.
    pytest_args = [
        f"--benchmark-storage=/tmp/compiler_gym_{getuser()}/pytest_benchmark",
        "--benchmark-save=bench_test",
        "--benchmark-sort=name",
        "-x",
    ]
    main(extra_pytest_args=pytest_args, debug_level=0)
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""

import numpy as np

from compiler_gym.envs import CompilerEnv
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main

# Load the shared LLVM environment fixtures (env, benchmark_name, etc.).
pytest_plugins = ["tests.envs.llvm.fixtures"]


def test_step(env: CompilerEnv, action_name: str):
    """Apply a single named action and validate the step() result tuple."""
    env.reward_space = "IrInstructionCount"
    env.observation_space = "Autophase"
    env.reset(benchmark="cBench-v0/crc32")

    # Resolve the human-readable action name to an action, then take one step.
    action = env.action_space.from_string(action_name)
    observation, reward, done, _ = env.step(action)

    # The Autophase observation is a fixed-length numpy feature vector.
    assert isinstance(observation, np.ndarray)
    assert observation.shape == (AUTOPHASE_FEATURE_DIM,)
    assert isinstance(reward, float)
    assert isinstance(done, bool)


# Allow running this test module directly, outside of pytest collection.
if __name__ == "__main__":
    main()
# Example #3
    env.reset(benchmark_name)
    action = env.action_space.flags.index(action_name)
    benchmark(env.step, action)


def test_observation(benchmark, env: CompilerEnv, fast_benchmark_name,
                     observation_space):
    """Benchmark the latency of a single observation-space lookup."""
    env.reset(fast_benchmark_name)

    def fetch_observation():
        return env.observation[observation_space]

    benchmark(fetch_observation)


def test_reward(benchmark, env: CompilerEnv, benchmark_name, reward_space):
    """Benchmark the latency of a single reward-space lookup."""
    env.reset(benchmark_name)

    def fetch_reward():
        return env.reward[reward_space]

    benchmark(fetch_reward)


def test_fork(benchmark, env: CompilerEnv, benchmark_name):
    """Benchmark forking the environment and immediately closing the fork."""
    env.reset(benchmark_name)

    def fork_and_close():
        env.fork().close()

    benchmark(fork_and_close)


if __name__ == "__main__":
    # Store benchmark results in a shared tmp directory, suppress verbose
    # service logging, and stop on the first failure.
    pytest_args = [
        "--benchmark-storage=/tmp/compiler_gym/benchmarks",
        "--benchmark-save=bench_test",
        "-x",
    ]
    main(extra_pytest_args=pytest_args, verbose_service_logging=False)