Example #1
def _init_worker(env_name: str, n: int, outfile: Path):
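    # Measure environment construction + teardown time: get_runtimes() (assumed to be
    # defined alongside this snippet) times each call, and the results are written to
    # `outfile` in batches of at most 100 measurements.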
    with open(outfile, "w") as f:
        for _ in range(0, n, min(100, n)):
            runtimes = get_runtimes(
                lambda: compiler_gym.make(env_name).close(), min(100, n)
            )
            print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
Example #2
def test_induced_remainder(backend):
    with compiler_gym.make("loop_tool-v0") as env:
        env.observation_space = "loop_tree"
        # reset
        env.reset(
            benchmark=env.datasets.benchmark(
                uri=f"benchmark://loop_tool-{backend}-v0/1024"),
            action_space="simple",
        )
        # action toggle_mode
        env.step(0)
        # action up
        env.step(1)
        # action toggle_mode
        env.step(0)
        # action up
        env.step(1)
        # action up
        o = env.step(1)
        expected = f"""
for a in 341 r 1 : L0 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
 for a' in 3 : L1
  for a'' in 1 : L2
   %0[a] <- read()
  for a'' in 1 : L4
   %1[a] <- read()
  for a'' in 1 : L6
   %2[a] <- add(%0, %1)
  for a'' in 1 : L8
   %3[a] <- write(%2)
"""
        lines = o[0].strip().split("\n")
        out = "\n".join(line.rstrip() for line in lines)
        assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
Example #3
def test_thread_removal(backend):
    with compiler_gym.make("loop_tool-v0") as env:
        env.observation_space = "loop_tree"
        # reset
        env.reset(
            benchmark=env.datasets.benchmark(
                uri=f"benchmark://loop_tool-{backend}-v0/1024"),
            action_space="simple",
        )
        # action toggle_thread
        o = env.step(3)
        expected = """
for a in 1024 : L0
 for a' in 1 : L1
  for a'' in 1 : L2
   %0[a] <- read()
  for a'' in 1 : L4
   %1[a] <- read()
  for a'' in 1 : L6
   %2[a] <- add(%0, %1)
  for a'' in 1 : L8
   %3[a] <- write(%2)
"""
        lines = o[0].strip().split("\n")
        out = "\n".join(line.rstrip() for line in lines)
        assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
Example #4
    def make_env(self, benchmark: Union[str, Benchmark]) -> LlvmEnv:
        env: LlvmEnv = compiler_gym.make("llvm-v0")

        # TODO(cummins): This does not work with custom benchmarks, as the URI
        # will not be known to the new environment.
        if str(benchmark).startswith("file:///"):
            benchmark = env.make_benchmark(Path(benchmark[len("file:///"):]))

        env.benchmark = benchmark

        if self.value == OptimizationTarget.CODESIZE:
            env.reward_space = "IrInstructionCountOz"
        elif self.value == OptimizationTarget.BINSIZE:
            env.reward_space = "ObjectTextSizeOz"
        elif self.value == OptimizationTarget.RUNTIME:
            env = RuntimePointEstimateReward(env,
                                             warmup_count=0,
                                             runtime_count=3)
        else:
            assert False, f"Unknown OptimizationTarget: {self.value}"

        # Wrap the env to ignore errors during search.
        env = JustKeepGoingEnv(env)

        return env
Example #5
def test_benchmarks_uris_list():
    cfg = Benchmarks(uris=["benchmark://cbench-v1/qsort"])
    assert cfg.uris == ["benchmark://cbench-v1/qsort"]

    with compiler_gym.make("llvm-v0") as env:
        assert list(cfg.benchmarks_iterator(env)) == ["benchmark://cbench-v1/qsort"]
        assert isinstance(list(cfg.benchmarks_iterator(env))[0], Benchmark)
        assert list(cfg.benchmark_uris_iterator(env)) == ["benchmark://cbench-v1/qsort"]
Example #6
    def final_reward(self, env: LlvmEnv, runtime_count: int = 30) -> float:
        """Compute the final reward of the environment.

        Note that this may modify the environment state. You should call
        :code:`reset()` before continuing to use the environment after this.
        """
        # Reapply the environment state in a retry loop.
        actions = list(env.actions)
        env.reset()
        for i in range(1, 5 + 1):
            _, _, done, info = env.step(actions)
            if not done:
                break
            logger.warning(
                "Attempt %d to apply actions during final reward failed: %s",
                i,
                info.get("error_details"),
            )
        else:
            raise ValueError("Failed to replay environment's actions")

        if self.value == OptimizationTarget.CODESIZE:
            return env.observation.IrInstructionCountOz() / max(
                env.observation.IrInstructionCount(), 1)

        if self.value == OptimizationTarget.BINSIZE:
            return env.observation.ObjectTextSizeOz() / max(
                env.observation.ObjectTextSizeBytes(), 1)

        if self.value == OptimizationTarget.RUNTIME:
            with _RUNTIME_LOCK:
                with compiler_gym.make("llvm-v0",
                                       benchmark=env.benchmark) as new_env:
                    new_env.reset()
                    new_env.runtime_observation_count = runtime_count
                    new_env.runtime_warmup_count = 0
                    new_env.apply(env.state)
                    final_runtimes = new_env.observation.Runtime()
                    assert len(final_runtimes) == runtime_count

                    new_env.reset()
                    new_env.send_param("llvm.apply_baseline_optimizations",
                                       "-O3")
                    o3_runtimes = new_env.observation.Runtime()
                    assert len(o3_runtimes) == runtime_count

                logger.debug("O3 runtimes: %s", o3_runtimes)
                logger.debug("Final runtimes: %s", final_runtimes)
                speedup = np.median(o3_runtimes) / max(
                    np.median(final_runtimes), 1e-12)
                logger.debug("Speedup: %.4f", speedup)

                return speedup

        assert False, f"Unknown OptimizationTarget: {self.value}"
Example #7
def test_testing_config():
    cfg = Testing(**OmegaConf.create("""\
timeout_hours: 12
runs_per_benchmark: 6
benchmarks:
    - dataset: benchmark://cbench-v1
      max_benchmarks: 5
"""))
    assert cfg.timeout_hours == 12
    with compiler_gym.make("llvm-v0") as env:
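        # 5 benchmarks (max_benchmarks) x 6 runs_per_benchmark = 30 URIs expected.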
        assert len(list(cfg.benchmark_uris_iterator(env))) == 5 * 6
Example #8
def test_run_random_walk_smoke_test():
    FLAGS.unparse_flags()
    FLAGS(["argv0"])
    with capture_output() as out:
        with compiler_gym.make("llvm-autophase-ic-v0") as env:
            env.benchmark = "cbench-v1/crc32"
            run_random_walk(env=env, step_count=5)

    print(out.stdout)
    # Note the ".*" before and after the step count to ignore the shell
    # formatting.
    assert re.search(r"Completed .*5.* steps in ", out.stdout)
Example #9
    def make_env(self) -> CompilerEnv:
        """Construct a compiler environment from the given config."""
        env = compiler_gym.make(self.id)
        if self.observation_space:
            env.observation_space = self.observation_space
        if self.reward_space:
            env.reward_space = self.reward_space
        env = TimeLimit(env, max_episode_steps=self.max_episode_steps)
        for wrapper in self.wrappers:
            env = wrapper.wrap(env)
        # Wrap the env to ignore errors during search.
        env = JustKeepGoingEnv(env)
        return env
Example #10
def run_one_sweep(
    device: str,
    k: int,
    vectorize: int = 1,
    linear: bool = False,
    logdir: Optional[Path] = None,
):
    """Run a single sweep."""
    logdir = logdir or create_user_logs_dir("loop_tool_sweep")
    logfile = logdir / f"k{k}-v{vectorize}-{device}-{'linear' if linear else 'log'}.txt"
    print("Logging results to", logfile)
    print()
    print("Device", "K", "Inner", "Vec.", "FLOPS", sep="\t")
    with open(logfile, "w") as f:
        print("device", "k", "inner", "vectorize", "flops", sep=",", file=f)

    def log(k, inner, vectorize, flops):
        print(device.upper(), k, inner, vectorize, flops, sep="\t", flush=True)
        with open(logfile, "a") as f:
            print(device, k, inner, vectorize, flops, sep=",", file=f)

    actions = [3, 0, 1, 3, 0]
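    # Action indices follow the "simple" action space used in the tests above:
    # 0 = toggle_mode, 1 = up, 3 = toggle_thread.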
    k *= 1024  # raw number of elements

    with compiler_gym.make("loop_tool-v0") as env:
        env.reset(
            benchmark=env.datasets.benchmark(
                uri=f"benchmark://loop_tool-{device}-v0/{k}"),
            action_space="simple",
        )
        if vectorize - 1:
            vs = [1] * (vectorize - 1)
            actions += vs + [0, 1, 0] + vs + [0, 2, 0]
        for a in actions:
            wrapped_step(env, a)

        if linear:
            for i in range(k // (vectorize * 1024)):
                step_count = 1022 if i == 0 else 1023
                flops = flops_after_steps(env, step_count)
                log(k, (i + 1) * 1024, vectorize, flops)
        else:  # linear=False (log)
            inner = 1
            step = 512
            wrapped_step(env, [1] * (step - 1))
            inner += step - 1
            while inner * vectorize <= k:
                flops = flops_after_steps(env, step)
                inner += step
                log(k, inner, vectorize, flops)
                step *= 2
Example #11
def get_benchmarks():
    benchmarks = []
    with compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin) as env:
        env.reset()
        if FLAGS.gcc_benchmark == ["all"]:
            for dataset in env.datasets:
                benchmarks += islice(dataset.benchmark_uris(), 50)
        elif FLAGS.gcc_benchmark:
            for uri in FLAGS.gcc_benchmark:
                benchmarks.append(env.datasets.benchmark(uri).uri)
        else:
            benchmarks = list(
                env.datasets["benchmark://chstone-v0"].benchmark_uris())
    benchmarks.sort()
    return benchmarks
Example #12
def _reset_worker(
    num_measurements_per_benchmark: int,
    benchmarks: List[str],
    env_name: str,
    outfile: Path,
):
    with compiler_gym.make(env_name) as env:
        with open(outfile, "w") as f:
            for benchmark in benchmarks:
                env.reset(benchmark=benchmark)
                runtimes = get_runtimes(
                    lambda: env.reset(benchmark=benchmark),
                    num_measurements_per_benchmark,
                )
                print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes), file=f)
Example #13
def test_rand(backend):
    with compiler_gym.make("loop_tool-v0") as env:
        env.observation_space = "flops"
        env.reset(
            benchmark=env.datasets.benchmark(
                uri=f"benchmark://loop_tool-{backend}-v0/128"),
            action_space="simple",
        )
        best = 0
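        # Random search: sample 10 actions, tracking the best FLOPS observed so far.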
        for i in range(10):
            a = env.action_space.sample()
            o = env.step(a)
            flops = o[0]
            if flops > best:
                best = flops
                print(best)
Example #14
def _step_worker(
    num_measurements_per_benchmark: int,
    benchmarks: List[str],
    env_name: str,
    seed: int,
    step_outfile: Path,
    batched_outfile: Path,
):
    def get_step_times(env: CompilerEnv, num_steps: int, batched=False):
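        # In batched mode: keep retrying until a full batch of sampled actions
        # completes without ending the episode, then report the mean per-step time.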
        while batched:
            # Run all actions in a single step().
            steps = [env.action_space.sample() for _ in range(num_steps)]
            with Timer() as timer:
                _, _, done, _ = env.step(steps)
            if not done:
                return [timer.time / num_steps] * num_steps
            env.reset()

        # Run each action as a step().
        runtimes = []
        while len(runtimes) < num_steps:
            with Timer() as timer:
                _, _, done, _ = env.step(env.action_space.sample())
            if done:
                env.reset()
            else:
                runtimes.append(timer.time)
        return runtimes

    with compiler_gym.make(env_name) as env:
        with open(step_outfile, "w") as f:
            for i, benchmark in enumerate(benchmarks, start=seed):
                env.reset(benchmark=benchmark)
                env.seed(i)
                runtimes = get_step_times(env, num_measurements_per_benchmark)
                print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes),
                      file=f)

        with open(batched_outfile, "w") as f:
            for i, benchmark in enumerate(benchmarks, start=seed):
                env.reset(benchmark=benchmark)
                env.seed(i)
                runtimes = get_step_times(env,
                                          num_measurements_per_benchmark,
                                          batched=True)
                print("\n".join(f"{x:.8f} {benchmark}" for x in runtimes),
                      file=f)
Example #15
def get_benchmarks(env_name: str, n: int, seed: int,
                   outdir: Path) -> List[str]:
    """Get `n` benchmarks from all datasets.

    If the dataset is smaller than `n`, benchmarks are repeated. If the dataset
    is larger than `n`, `n` random unique programs are sampled.
    """
    benchmarks = []
    with compiler_gym.make(env_name) as env:
        datasets = sorted(list(env.datasets))
        benchmarks_per_dataset = int(ceil(n / len(datasets)))

        for ds in datasets:
            logger.info(
                "Enumerating %s benchmarks from dataset %s ...",
                benchmarks_per_dataset,
                ds,
            )
            if ds.size == 0 or ds.size > benchmarks_per_dataset:
                rng = np.random.default_rng(seed)
                uniq_bm_uris = set()
                benchmarks_from_dataset = []
                while len(benchmarks_from_dataset) < benchmarks_per_dataset:
                    bm = ds.random_benchmark(rng)
                    if bm.uri in uniq_bm_uris:
                        continue
                    uniq_bm_uris.add(bm.uri)
                    # Start an environment to check that the benchmark can be
                    # initialized.
                    try:
                        env.reset(benchmark=bm)
                    except (BenchmarkInitError, ValueError, TimeoutError):
                        continue
                    benchmarks_from_dataset.append(bm.uri)
                benchmarks += benchmarks_from_dataset
            else:
                bms = list(ds.benchmark_uris())
                bms *= int(ceil(benchmarks_per_dataset / len(bms)))
                benchmarks += bms[:benchmarks_per_dataset]

    benchmarks = sorted(benchmarks)
    with open(outdir / "benchmarks.txt", "w") as f:
        for bm in benchmarks:
            print(bm, file=f)
    return benchmarks
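A hypothetical invocation of the helper above; the output directory is illustrative:

outdir = Path("/tmp/op_benchmarks")  # illustrative location
outdir.mkdir(parents=True, exist_ok=True)
benchmarks = get_benchmarks(env_name="llvm-v0", n=100, seed=0, outdir=outdir)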
Example #16
def test_basic(backend):
    with compiler_gym.make("loop_tool-v0") as env:
        env.observation_space = "flops"
        env.reset(
            benchmark=env.datasets.benchmark(
                uri=f"benchmark://loop_tool-{backend}-v0/1024"),
            action_space="simple",
        )
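        # Fixed action walk in the "simple" action space; per the annotated tests
        # above, action 0 toggles the cursor mode and action 1 moves up.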
        env.step(0)
        env.step(1)
        env.step(0)
        env.step(1)
        env.step(1)
        env.step(0)
        env.step(1)
        env.step(0)
        o = env.step(1)
        print(o)
Example #17
def test_autotune():
    with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
        env.reset(benchmark="benchmark://cbench-v1/crc32")

        autotuner = Autotuner(
            algorithm="random",
            optimization_target="codesize",
            search_time_seconds=3,
        )

        result = autotuner(env)
        print(result)
        assert result.benchmark == "benchmark://cbench-v1/crc32"
        assert result.walltime >= 3
        assert result.commandline == env.commandline()
        assert env.episode_reward >= 0
        assert env.benchmark == "benchmark://cbench-v1/crc32"
        assert env.reward_space == "IrInstructionCount"
Example #18
def _observations_worker(
    observation_space: str,
    num_measurements_per_benchmark: int,
    benchmarks: List[str],
    env_name: str,
    outfile: Path,
):
    with compiler_gym.make(env_name) as env:
        with open(outfile, "w") as f:
            for benchmark in benchmarks:
                env.reset(benchmark=benchmark)
                if "llvm-" in env_name and observation_space == "Runtime":
                    if not env.observation.IsRunnable():
                        return []
                    env.runtime_observation_count = 1
                    env.runtime_warmups_count = 0
                runtimes = get_runtimes(
                    lambda: env.observation[observation_space],
                    num_measurements_per_benchmark,
                )
                print("\n".join(f"{x:.8f}" for x in runtimes), file=f, flush=True)
Example #19
def test_validation_benchmarks_uris_list():
    cfg = Validation(**OmegaConf.create("""\
benchmarks:
    - uris:
        - benchmark://cbench-v1/qsort
    - dataset: benchmark://cbench-v1
      max_benchmarks: 2
"""))

    with compiler_gym.make("llvm-v0") as env:
        assert list(cfg.benchmarks_iterator(env)) == [
            "benchmark://cbench-v1/qsort",
            "benchmark://cbench-v1/adpcm",
            "benchmark://cbench-v1/bitcount",
        ]
        bm = list(cfg.benchmarks_iterator(env))[0]
        print(type(bm).__name__)
        assert isinstance(bm, Benchmark)
        assert list(cfg.benchmark_uris_iterator(env)) == [
            "benchmark://cbench-v1/qsort",
            "benchmark://cbench-v1/adpcm",
            "benchmark://cbench-v1/bitcount",
        ]
Example #20
def run_search(search: str, benchmark: str, seed: int) -> SearchResult:
    """Run a search and return the search class instance."""
    with GCC_ENV_CONSTRUCTOR_LOCK:
        env = compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin)

    try:
        random.seed(seed)
        np.random.seed(seed)

        env.reset(benchmark=benchmark)
        env.step(env.action_space["-Os"])
        baseline_size = objective(env)
        env.reset(benchmark=benchmark)
        best_size = _SEARCH_FUNCTIONS[search](env)
    finally:
        env.close()

    return SearchResult(
        search=search,
        benchmark=benchmark,
        best_size=best_size,
        baseline_size=baseline_size,
    )
Example #21
def test_compiler_gym_make():
    """Test that compiler_gym.make() is equivalent to gym.make()."""
    with compiler_gym.make("llvm-v0") as env:
        assert isinstance(env, LlvmEnv)
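For comparison, a minimal sketch of the gym.make() side of that equivalence, assuming (as the docstring above implies) that importing compiler_gym registers its environments with the gym registry:

import gym

import compiler_gym  # noqa: F401 -- importing registers the CompilerGym environments
from compiler_gym.envs import LlvmEnv


def test_gym_make():
    with gym.make("llvm-v0") as env:
        assert isinstance(env, LlvmEnv)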
Example #22
def make_env() -> compiler_gym.envs.CompilerEnv:
    env = compiler_gym.make("llvm-v0", observation_space="InstCount", reward_space="IrInstructionCountOz")
    env = stepWrapper(env)
    #env = observationWrapper(env)
    return env
Example #23
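# A generator used as a test fixture (the usual pytest fixture pattern; any decorator
# is outside this excerpt): yields a fresh "llvm-v0" environment and closes it when
# the generator is exhausted.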
def env():
    with compiler_gym.make("llvm-v0") as env:
        yield env
Example #24
import compiler_gym
import examples.loop_optimizations_service as loop_optimizations_service  # noqa Register environments.

with compiler_gym.make(
        "loops-opt-py-v0",
        benchmark="loops-opt-v0/add",
        observation_space="AutophaseDict",
        reward_space="runtime",
) as env:
    compiler_gym.set_debug_level(4)  # TODO: check why this has no effect

    observation = env.reset()
    print("observation: ", observation)

    print()

    observation, reward, done, info = env.step(env.action_space.sample())
    print("observation: ", observation)
    print("reward: ", reward)
    print("done: ", done)
    print("info: ", info)

    env.close()

    # TODO: implement write_bitcode(..) or write_ir(..)
    # env.write_bitcode("/tmp/output.bc")
Example #25
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This script demonstrates how the example services defined in this directory
can be used as gym environments. Usage:

    $ bazel run -c opt //examples/example_unrolling_service:example
"""
import compiler_gym
import examples.example_unrolling_service as unrolling_service  # noqa Register environments.

with compiler_gym.make(
        "unrolling-py-v0",
        benchmark="unrolling-v0/offsets1",
        observation_space="features",
        reward_space="runtime",
) as env:
    compiler_gym.set_debug_level(4)  # TODO: check why this has no effect

    observation = env.reset()
    print("observation: ", observation)

    print()

    observation, reward, done, info = env.step(env.action_space.sample())
    print("observation: ", observation)
    print("reward: ", reward)
    print("done: ", done)
    print("info: ", info)
Example #26
import logging
from pathlib import Path
from threading import Lock
from typing import Dict

from flask import Flask
from flask_cors import CORS
from pydantic import BaseModel

import compiler_gym
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.truncate import truncate

app = Flask("compiler_gym")
CORS(app)

resource_dir: Path = (Path(__file__).parent /
                      "frontends/compiler_gym/build").absolute()

logger = logging.getLogger(__name__)

# A single compiler environment that is used to serve all endpoints.
env: LlvmEnv = compiler_gym.make("llvm-v0")
env_lock = Lock()


class StateToVisualize(BaseModel):
    """Encapsulates the state to visualize in the frontend."""

    instcount: Dict[str, int]
    autophase: Dict[str, int]
    # The reward signal measures how "good" the previous action was. Over time
    # the sequence of actions that produces the highest cumulative reward is the
    # best:
    reward: float


class StepRequest(BaseModel):