Example #1
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    # Use default logdir of <base>/brute_force/<benchmark> unless told
    # otherwise.
    benchmark = benchmark_from_flags()
    if not benchmark:
        raise app.UsageError("No benchmark specified.")

    env = env_from_flags(benchmark)
    env.reset()
    benchmark = env.benchmark
    sanitized_benchmark_name = "/".join(benchmark.split("/")[-2:])
    env.close()
    logs_dir = Path(
        FLAGS.output_dir
        or create_logging_dir(f"brute_force/{sanitized_benchmark_name}"))

    run_brute_force(
        make_env=lambda: env_from_flags(benchmark_from_flags()),
        action_names=FLAGS.actions,
        episode_length=FLAGS.episode_length,
        outdir=logs_dir,
        nproc=FLAGS.nproc,
    )
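
The "/".join(benchmark.split("/")[-2:]) sanitization keeps just the last two components of the benchmark URI. A quick illustration (the URI here is hypothetical):

uri = "benchmark://cbench-v1/crc32"
assert "/".join(uri.split("/")[-2:]) == "cbench-v1/crc32"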
Example #2
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    # Use default logdir of <base>/brute_force/<benchmark> unless told
    # otherwise.
    benchmark = benchmark_from_flags()
    if not benchmark:
        raise app.UsageError("No benchmark specified.")

    with env_from_flags(benchmark) as env:
        env.reset()
        logs_dir = Path(FLAGS.output_dir or create_logging_dir(
            f'brute_force/{os.path.normpath(f"random/{env.benchmark.uri.scheme}/{env.benchmark.uri.path}")}'
        ))

    run_brute_force(
        make_env=lambda: env_from_flags(benchmark_from_flags()),
        action_names=FLAGS.brute_force_action_list,
        episode_length=FLAGS.episode_length,
        outdir=logs_dir,
        nproc=FLAGS.nproc,
    )
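
The os.path.normpath call presumably guards against malformed separators in the joined path, e.g. a doubled slash when uri.path begins with one. A minimal demonstration of the behavior it relies on:

import os
assert os.path.normpath("random/benchmark//crc32") == "random/benchmark/crc32"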
Example #3
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    # Determine the benchmark that is being analyzed, or use all of them.
    benchmark = benchmark_from_flags()
    if benchmark:
        benchmarks = [benchmark]
    else:
        with env_from_flags() as env:
            benchmarks = islice(env.benchmarks, 100)

    logs_dir = Path(FLAGS.output_dir
                    or create_logging_dir("benchmark_sensitivity_analysis"))
    rewards_path = logs_dir / f"benchmarks_{FLAGS.reward}.csv"
    runtimes_path = logs_dir / f"benchmarks_{FLAGS.reward}_runtimes.csv"

    run_benchmark_sensitivity_analysis(
        rewards_path=rewards_path,
        runtimes_path=runtimes_path,
        benchmarks=benchmarks,
        reward=FLAGS.reward,
        num_trials=FLAGS.num_benchmark_sensitivity_trials,
        min_steps=FLAGS.min_steps,
        max_steps=FLAGS.max_steps,
        nproc=FLAGS.nproc,
        max_attempts_multiplier=FLAGS.max_benchmark_attempts_multiplier,
    )
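
One subtlety in the benchmark-selection branch above: islice returns a lazy iterator, so if env.benchmarks is itself lazy and tied to the live environment, the slice may need to be materialized (list(islice(env.benchmarks, 100))) before the with block closes the environment. Whether that is necessary depends on how env.benchmarks is implemented.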
Example #4
def get_rewards(
    action: int,
    action_name: str,
    reward_space: str,
    num_trials: int,
    max_warmup_steps: int,
    max_attempts_multiplier: int = 5,
) -> SensitivityAnalysisResult:
    """Run random trials to get a list of num_trials immediate rewards."""
    rewards, runtimes = [], []
    benchmark = benchmark_from_flags()
    num_attempts = 0
    while (
        num_attempts < max_attempts_multiplier * num_trials
        and len(rewards) < num_trials
    ):
        num_attempts += 1
        with env_from_flags(benchmark=benchmark) as env:
            env.observation_space = None
            env.reward_space = None
            env.reset(benchmark=benchmark)
            with Timer() as t:
                reward = run_one_trial(env, reward_space, action, max_warmup_steps)
            if reward is not None:
                rewards.append(reward)
                runtimes.append(t.time)

    return SensitivityAnalysisResult(
        name=action_name, runtimes=np.array(runtimes), rewards=np.array(rewards)
    )
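
For a sense of the retry cap: with num_trials=100 and the default max_attempts_multiplier=5, the loop gives up after at most 500 episodes, even if fewer than 100 of them produced a usable reward.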
Example #5
def main(argv):
    """Main entry point."""
    assert len(argv) == 1, f"Unrecognized flags: {argv[1:]}"

    with env_from_flags(benchmark=benchmark_from_flags()) as env:
        step_min = min(FLAGS.step_min, FLAGS.step_max)
        step_max = max(FLAGS.step_min, FLAGS.step_max)
        run_random_walk(env=env, step_count=random.randint(step_min, step_max))
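
Note that random.randint is inclusive on both ends, and the min/max shuffle makes the walk length well defined even when --step_min and --step_max are passed in either order.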
Example #6
def make_env():
    FLAGS.env = "llvm-v0"
    if not FLAGS.reward:
        FLAGS.reward = "IrInstructionCountOz"
    env = env_from_flags(benchmark=benchmark_from_flags())
    env = ConstrainedCommandline(env, flags=FLAGS.flags)
    env = TimeLimit(env, max_episode_steps=FLAGS.episode_len)
    env = HistoryObservation(env)
    return env
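
A quick way to exercise this wrapped environment (a sketch; it assumes the wrappers preserve the standard gym step API used in the other examples here):

env = make_env()
try:
    env.reset()
    done = False
    while not done:
        _, _, done, _ = env.step(env.action_space.sample())
finally:
    env.close()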
Example #7
def run_random_search(num_episodes, num_steps) -> None:
    """The inner loop of a load test benchmark."""
    with env_from_flags(benchmark=benchmark_from_flags()) as env:
        for _ in range(num_episodes):
            env.reset()
            for _ in range(num_steps):
                _, _, done, _ = env.step(env.action_space.sample())
                if done:
                    break
Example #8
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    if FLAGS.ls_benchmark:
        benchmark = benchmark_from_flags()
        env = env_from_flags(benchmark)
        print("\n".join(sorted(env.benchmarks)))
        env.close()
        return

    with Timer("Initialized environment"):
        benchmark = benchmark_from_flags()
        env = env_from_flags(benchmark)

    run_manual_env(env)
Example #9
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    if FLAGS.ls_benchmark:
        benchmark = benchmark_from_flags()
        env = env_from_flags(benchmark)
        print("\n".join(sorted(env.benchmarks)))
        env.close()
        return

    with Timer("Initialized environment"):
        # FIXME Chris, I don't seem to actually get a benchmark
        benchmark = benchmark_from_flags()
        env = env_from_flags(benchmark)

    shell = CompilerGymShell(env)
    shell.cmdloop()
Example #10
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    with Timer("Initialized environment"):
        benchmark = benchmark_from_flags()
        env = env_from_flags(benchmark)

    shell = CompilerGymShell(env)
    shell.cmdloop()
Example #11
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    if FLAGS.ls_reward:
        with env_from_flags() as env:
            print("\n".join(sorted(env.reward.indices.keys())))
        return

    assert FLAGS.patience >= 0, "--patience must be >= 0"

    # Create an environment now to catch a startup time error before we launch
    # a bunch of workers.
    with env_from_flags() as env:
        env.reset(benchmark=benchmark_from_flags())

    env = random_search(
        make_env=lambda: env_from_flags(benchmark=benchmark_from_flags()),
        outdir=Path(FLAGS.output_dir) if FLAGS.output_dir else None,
        patience=FLAGS.patience,
        total_runtime=FLAGS.runtime,
        nproc=FLAGS.nproc,
        skip_done=FLAGS.skip_done,
    )
    try:
        # Exit with error if --fail_threshold was set and the best reward does not
        # meet this value.
        if (
            FLAGS.fail_threshold is not None
            and env.episode_reward < FLAGS.fail_threshold
        ):
            print(
                f"Best reward {env.episode_reward:.3f} below threshold of {FLAGS.fail_threshold}",
                file=sys.stderr,
            )
            sys.exit(1)
    finally:
        env.close()
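
Note that sys.exit raises SystemExit, so the finally clause still runs and the environment is closed before the process terminates.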
Example #12
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    output_dir = Path(FLAGS.output_dir).expanduser().resolve().absolute()
    assert (
        output_dir / logs.METADATA_NAME
    ).is_file(), f"Invalid --output_dir: {output_dir}"

    env = env_from_flags()
    benchmark = benchmark_from_flags()
    replay_actions_from_logs(env, output_dir, benchmark=benchmark)
Example #13
def main(argv):
    # Initialize a Q table.
    q_table: Dict[StateActionTuple, float] = {}
    benchmark = benchmark_from_flags()
    assert benchmark, "You must specify a benchmark using the --benchmark flag"

    with gym.make("llvm-ic-v0", benchmark=benchmark) as env:
        env.observation_space = "Autophase"

        # Train a Q-table.
        with Timer("Constructing Q-table"):
            train(q_table, env)

        # Rollout resulting policy.
        rollout(q_table, env, printout=True)
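
train and rollout are defined elsewhere in this example's module. For context, a minimal greedy lookup against a table of this shape might look like the following (a sketch, not the module's actual implementation; it assumes the StateActionTuple key is simply a (state, action) pair):

def greedy_action(q_table, state, actions):
    # Unseen (state, action) pairs default to a value of 0.
    return max(actions, key=lambda a: q_table.get((state, a), 0.0))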
Example #14
def main(argv):
    """Main entry point."""
    argv = FLAGS(argv)
    if len(argv) != 1:
        raise app.UsageError(f"Unknown command line arguments: {argv[1:]}")

    if FLAGS.ls_benchmark:
        env = env_from_flags()
        print("\n".join(sorted(env.benchmarks)))
        env.close()
        return
    if FLAGS.ls_reward:
        env = env_from_flags()
        print("\n".join(sorted(env.reward.indices.keys())))
        env.close()
        return

    assert FLAGS.patience > 0, "--patience must be > 0"

    make_env = lambda: env_from_flags(benchmark=benchmark_from_flags())

    env = make_env()
    try:
        env.reset()
        if not env.benchmark:
            raise app.UsageError("No benchmark specified.")
    finally:
        env.close()

    best_reward = random_search(
        make_env=make_env,
        outdir=Path(FLAGS.output_dir) if FLAGS.output_dir else None,
        patience=FLAGS.patience,
        total_runtime=FLAGS.runtime,
        nproc=FLAGS.nproc,
        skip_done=FLAGS.skip_done,
    )

    # Exit with error if --fail_threshold was set and the best reward does not
    # meet this value.
    if FLAGS.fail_threshold is not None and best_reward < FLAGS.fail_threshold:
        print(
            f"Best reward {best_reward:.3f} below threshold of {FLAGS.fail_threshold}",
            file=sys.stderr,
        )
        sys.exit(1)
Example #15
    def __init__(self):
        self._env = env_from_flags(benchmark_from_flags())
        try:
            # Project onto the subset of transformations that have
            # been specified to be used.
            if not FLAGS.actions:
                self._action_indices = list(
                    range(len(self._env.action_space.names)))
            else:
                self._action_indices = [
                    self._env.action_space.flags.index(a)
                    for a in FLAGS.actions
                ]
            self._action_names = [
                self._env.action_space.names[a] for a in self._action_indices
            ]

        finally:
            # The program will not terminate until the environment is
            # closed, not even if there is an exception.
            self._env.close()
Example #16
def make_env():
    env = env_from_flags(benchmark=benchmark_from_flags())
    if FLAGS.explore_actions:
        env = ConstrainedCommandline(env, flags=FLAGS.explore_actions)
    return env
Example #17
def make_env():
    return env_from_flags(benchmark=benchmark_from_flags())
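
Factories like this are what the search drivers above expect as their make_env callback. For a standalone smoke test (CompilerGym environments support the context-manager protocol, as the earlier examples show):

with make_env() as env:
    env.reset()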