Code example #1
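The snippets on this page are excerpted from larger scripts and omit their imports, as well as file-local helpers such as get_benchmarks, chunkify, and the _*_worker functions. A best-guess shared import block, assuming the two utilities resolve to their usual locations in the CompilerGym tree (the module paths are assumptions, not shown in the excerpts):

from itertools import islice, product
from math import ceil
from multiprocessing import cpu_count
from pathlib import Path
from typing import List, Optional

import compiler_gym
from absl import app, flags  # used by example #6

# Assumed module paths within CompilerGym:
from compiler_gym.util.executor import Executor
from compiler_gym.util.runfiles_path import create_user_logs_dir

FLAGS = flags.FLAGS  # example #6 reads its settings from absl flags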
def observations(
    env: str = "llvm-autophase-ic-v0",
    observation_spaces: List[str] = [
        "Ir",
        "InstCount",
        "Autophase",
        "Inst2vec",
        "Programl",
        "IrInstructionCount",
        "ObjectTextSizeBytes",
        "Runtime",
    ],
    n: int = int(1e6),
    num_benchmarks: int = int(1e3),
    j: int = cpu_count(),
    seed: int = 0xCC,
    outdir: Optional[Path] = None,
):
    """Benchmark the environment observation spaces."""
    executor = Executor(type="local", cpus=j)
    outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
    benchmarks = get_benchmarks(env_name=env,
                                n=min(n, num_benchmarks),
                                seed=seed,
                                outdir=outdir)
    with executor.get_executor(logs_dir=outdir) as session:
        _observations(
            session=session,
            env_name=env,
            benchmarks=benchmarks,
            j=j,
            outdir=outdir,
            observation_spaces=observation_spaces,
            n=n,
        )
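The sharding helper chunkify is not included in these excerpts. A minimal sketch of the behavior the call sites imply (split a list into at most n_chunks contiguous, roughly equal chunks); the upstream implementation may differ:

from math import ceil
from typing import Iterator, List


def chunkify(items: List[str], n_chunks: int) -> Iterator[List[str]]:
    # Hypothetical reconstruction: yield at most n_chunks contiguous chunks.
    chunk_size = max(1, int(ceil(len(items) / n_chunks)))
    for i in range(0, len(items), chunk_size):
        yield items[i : i + chunk_size]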
Code example #2
def step(
        n: int = int(1e6),
        num_benchmarks: int = int(1e3),
        env: str = "llvm-autophase-ic-v0",
        j: int = cpu_count(),
        seed: int = 0xCC,
        outdir: Optional[Path] = None,
):
    """Benchmark the env.step() operator."""
    executor = Executor(type="local", cpus=j)
    outdir = Path(outdir or create_user_logs_dir("op_benchmarks"))
    benchmarks = get_benchmarks(env_name=env,
                                n=min(n, num_benchmarks),
                                seed=seed,
                                outdir=outdir)
    with executor.get_executor(logs_dir=outdir) as session:
        _step(
            session=session,
            outdir=outdir,
            benchmarks=benchmarks,
            n=n,
            j=j,
            env_name=env,
            seed=seed,
        )
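For reference, the two entry points above could be exercised directly with something like the following; all values here are illustrative smoke-test settings, far below the defaults:

if __name__ == "__main__":
    from pathlib import Path

    # ~1,000 measurements spread over 10 benchmarks, 4 worker processes.
    outdir = Path("/tmp/op_benchmarks")
    step(n=1_000, num_benchmarks=10, j=4, outdir=outdir)
    observations(
        observation_spaces=["Ir", "Autophase"],
        n=1_000,
        num_benchmarks=10,
        j=4,
        outdir=outdir,
    )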
Code example #3
def _reset(
    benchmarks: List[str],
    n: int,
    outdir: Path,
    env_name: str,
    j: int,
    session: Executor,
):
    """Shard the benchmarks across up to j workers and submit one reset-timing job per shard."""
    outdir.mkdir(exist_ok=True, parents=True)
    num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
    for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
        session.submit(
            _reset_worker,
            num_measurements_per_benchmark=num_measurements_per_benchmark,
            benchmarks=benchmarks_chunk,
            env_name=env_name,
            outfile=outdir / f".op:2:reset-shard-{i:02d}.txt",
        )
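_reset_worker is submitted above but not shown in this excerpt. A plausible reconstruction, assuming each worker times env.reset() and appends one wall-clock measurement per line to its shard file (the upstream worker may record more detail):

from pathlib import Path
from time import time
from typing import List

import compiler_gym


def _reset_worker(
    num_measurements_per_benchmark: int,
    benchmarks: List[str],
    env_name: str,
    outfile: Path,
):
    # Hypothetical sketch: one environment per worker, one timing per line.
    with compiler_gym.make(env_name) as env, open(outfile, "a") as f:
        for benchmark in benchmarks:
            for _ in range(num_measurements_per_benchmark):
                start = time()
                env.reset(benchmark=benchmark)
                print(time() - start, file=f)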
Code example #4
def _step(
    n: int,
    benchmarks: List[str],
    env_name: str,
    seed: int,
    j: int,
    outdir: Path,
    session: Executor,
):
    """Shard the benchmarks across up to j workers and submit one step-timing job per shard."""
    outdir.mkdir(exist_ok=True, parents=True)
    num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
    for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
        session.submit(
            _step_worker,
            num_measurements_per_benchmark=num_measurements_per_benchmark,
            seed=seed + (i * len(benchmarks_chunk)),  # distinct seed offset per shard
            benchmarks=benchmarks_chunk,
            env_name=env_name,
            step_outfile=outdir / f".op:3:step-shard-{i:02d}.txt",
            batched_outfile=outdir / f".op:3:step-batched-shard-{i:02d}.txt",
        )
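Similarly, _step_worker is only referenced here. A sketch consistent with its two output files, assuming the step file records per-call timings of random actions and the batched file records one aggregate timing per benchmark; this is an assumption, not the upstream code:

from pathlib import Path
from time import time
from typing import List

import compiler_gym


def _step_worker(
    num_measurements_per_benchmark: int,
    seed: int,
    benchmarks: List[str],
    env_name: str,
    step_outfile: Path,
    batched_outfile: Path,
):
    # Hypothetical sketch: time individual env.step() calls, plus one
    # aggregate timing per benchmark covering the whole run of steps.
    with compiler_gym.make(env_name) as env:
        env.action_space.seed(seed)
        with open(step_outfile, "a") as sf, open(batched_outfile, "a") as bf:
            for benchmark in benchmarks:
                env.reset(benchmark=benchmark)
                batch_start = time()
                for _ in range(num_measurements_per_benchmark):
                    start = time()
                    _, _, done, _ = env.step(env.action_space.sample())
                    print(time() - start, file=sf)
                    if done:  # episode ended or service error; start over
                        env.reset(benchmark=benchmark)
                print(time() - batch_start, file=bf)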
Code example #5
File: op_benchmarks.py Project: kokizzu/CompilerGym
def _observations(
    observation_spaces: List[str],
    benchmarks: List[str],
    n: int,
    j: int,
    session: Executor,
    outdir: Path,
    env_name: str,
):
    """Submit one observation-timing job per (shard, observation space) pair."""
    outdir.mkdir(exist_ok=True, parents=True)
    num_measurements_per_benchmark = int(ceil(n / len(benchmarks)))
    for i, benchmarks_chunk in enumerate(chunkify(benchmarks, j), start=1):
        for observation_space in observation_spaces:
            session.submit(
                _observations_worker,
                observation_space=observation_space,
                num_measurements_per_benchmark=num_measurements_per_benchmark,
                benchmarks=benchmarks_chunk,
                env_name=env_name,
                outfile=outdir / f".observation:{observation_space}-shard-{i:02d}.txt",
            )
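_observations_worker follows the same pattern. A sketch assuming it queries the named space through the env.observation view (CompilerGym environments expose observations on demand this way) and logs one timing per line:

from pathlib import Path
from time import time
from typing import List

import compiler_gym


def _observations_worker(
    observation_space: str,
    num_measurements_per_benchmark: int,
    benchmarks: List[str],
    env_name: str,
    outfile: Path,
):
    # Hypothetical sketch: time on-demand observation queries.
    with compiler_gym.make(env_name) as env, open(outfile, "a") as f:
        for benchmark in benchmarks:
            env.reset(benchmark=benchmark)
            for _ in range(num_measurements_per_benchmark):
                start = time()
                _ = env.observation[observation_space]
                print(time() - start, file=f)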
Code example #6
def main(argv):
    del argv  # Unused.

    # Validate the --search values now.
    for search in FLAGS.search:
        if search not in _SEARCH_FUNCTIONS:
            raise app.UsageError(f"Invalid --search value: {search}")

    def get_benchmarks():
        benchmarks = []
        with compiler_gym.make("gcc-v0", gcc_bin=FLAGS.gcc_bin) as env:
            env.reset()
            if FLAGS.gcc_benchmark == ["all"]:
                for dataset in env.datasets:
                    benchmarks += islice(dataset.benchmark_uris(), 50)
            elif FLAGS.gcc_benchmark:
                for uri in FLAGS.gcc_benchmark:
                    benchmarks.append(env.datasets.benchmark(uri).uri)
            else:
                benchmarks = list(
                    env.datasets["benchmark://chstone-v0"].benchmark_uris())
        benchmarks.sort()
        return benchmarks

    logdir = (Path(FLAGS.output_dir)
              if FLAGS.output_dir else create_user_logs_dir("gcc_autotuning"))
    logdir.mkdir(exist_ok=True, parents=True)
    with open(logdir / "results.csv", "w") as f:
        print(
            "search",
            "benchmark",
            "scaled_size",
            "size",
            "baseline_size",
            sep=",",
            file=f,
        )
    print("Logging results to", logdir)

    # Parallel execution environment. Use flag --nproc to control the number of
    # worker processes.
    executor = Executor(type="local",
                        timeout_hours=12,
                        cpus=FLAGS.nproc,
                        block=True)
    with executor.get_executor(logs_dir=logdir) as session:
        jobs = []
        # Submit each search instance as a separate job.
        grid = product(range(FLAGS.gcc_search_repetitions), FLAGS.search,
                       get_benchmarks())
        for _, search, benchmark in grid:
            if not benchmark:
                raise app.UsageError("Empty benchmark name not allowed")

            jobs.append(
                session.submit(
                    run_search,
                    search=search,
                    benchmark=benchmark,
                    seed=FLAGS.seed + len(jobs),
                ))

        for job in jobs:
            result = job.result()
            print(result.benchmark, f"{result.scaled_best:.3f}x", sep="\t")
            with open(logdir / "results.csv", "a") as f:
                print(
                    result.search,
                    result.benchmark,
                    result.scaled_best,
                    result.best_size,
                    result.baseline_size,
                    sep=",",
                    file=f,
                )

    # Print results aggregates.
    info([logdir])
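Judging from the FLAGS references, the driver in example #6 is an absl app. It might be invoked like this; the script name, the search name, and the benchmark URI are illustrative, not taken from the excerpt:

$ python gcc_autotuning.py \
    --gcc_bin=gcc \
    --search=random \
    --gcc_benchmark=benchmark://chstone-v0/adpcm \
    --gcc_search_repetitions=3 \
    --nproc=8 \
    --output_dir=/tmp/gcc_autotuning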