Example #1
def test_load_test(env, tmpwd):
    del env  # Unused.
    del tmpwd  # Unused.
    set_command_line_flags(
        [
            "arv0",
            "--env=llvm-v0",
            "--benchmark=cbench-v1/crc32",
            "--max_nproc=3",
            "--nproc_increment=1",
            "--num_steps=2",
            "--num_episodes=2",
        ]
    )
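    # The load test prints a progress line for each worker count and writes its
    # timing results to parallelization_load_test.csv in the working directory.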
    with capture_output() as out:
        load_test(["argv0"])

    assert "Run 1 threaded workers in " in out.stdout
    assert "Run 1 process workers in " in out.stdout
    assert "Run 2 threaded workers in " in out.stdout
    assert "Run 2 process workers in " in out.stdout
    assert "Run 3 threaded workers in " in out.stdout
    assert "Run 3 process workers in " in out.stdout

    assert Path("parallelization_load_test.csv").is_file()
Example #2
def test_random_search_smoke_test():
    with tempfile.TemporaryDirectory() as tmp:
        outdir = Path(tmp)
        set_command_line_flags(["argv0"])
        env = random_search(
            make_env=make_env,
            outdir=outdir,
            patience=50,
            total_runtime=3,
            nproc=1,
            skip_done=False,
        )
        env.close()

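        # A completed search leaves its logs and the best-found bitcode in outdir.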
        assert (outdir / "random_search.json").is_file()
        assert (outdir / "random_search_progress.csv").is_file()
        assert (outdir / "random_search_best_actions.txt").is_file()
        assert (outdir / "optimized.bc").is_file()

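        # Replaying the logged best actions writes its own progress and
        # commandline files alongside the search logs.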
        with make_env() as env:
            replay_actions_from_logs(env, Path(outdir))
            assert (outdir /
                    "random_search_best_actions_progress.csv").is_file()
            assert (outdir /
                    "random_search_best_actions_commandline.txt").is_file()
Example #3
def test_no_input(monkeypatch):
    set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(""))

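    # Empty stdin means there is nothing to validate, so main() exits with an error.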
    with capture_output() as out:
        with pytest.raises(SystemExit):
            main(["argv0", "-"])

    assert "No inputs to validate" in out.stderr
Example #4
def test_invalid_csv_format(monkeypatch):
    stdin = "invalid\ncsv\nformat"
    set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(stdin))

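    # A first row that is not the expected 4-column header is rejected outright.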
    with capture_output() as out:
        with pytest.raises(SystemExit):
            main(["argv0", "-"])

    assert "Expected 4 columns in the first row of CSV" in out.stderr
Example #5
def test_validate_cbench_null_options(monkeypatch, benchmarks: List[str]):
    stdin = "\n".join([
        "benchmark,reward,walltime,commandline",
    ] + [f"{b},,0,opt  input.bc -o output.bc" for b in benchmarks])
    set_command_line_flags(["argv0", "--env=llvm-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(stdin))
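    # Rewards are left blank for every row; each benchmark should still validate.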
    with capture_output() as out:
        main(["argv0", "-"])

    assert not out.stderr
    assert out.stdout.count("✅") == len(benchmarks)  # Every benchmark passed.
Example #6
def test_random_search():
    set_command_line_flags([
        "argv0",
        "--n=1",
        "--max_benchmarks=1",
        "--search_time=1",
        "--nproc=1",
        "--patience_ratio=0.1",
        "--novalidate",
    ])
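    # eval_llvm_instcount_policy() terminates via sys.exit(), hence the SystemExit.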
    with pytest.raises(SystemExit):
        eval_llvm_instcount_policy(random_search)
Example #7
def test_okay_llvm_result(monkeypatch):
    stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt  input.bc -o output.bc,0.3
""".strip()
    set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(stdin))

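    # A reward of 0 for the no-op command line is correct, so validation passes.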
    with capture_output() as out:
        main(["argv0", "-"])

    assert "✅  cbench-v1/crc32 " in out.stdout
    assert not out.stderr
Example #8
def test_invalid_reward_llvm_result(monkeypatch):
    stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0.5,opt  input.bc -o output.bc,0.3
""".strip()
    set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(stdin))
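    # The no-op command line produces reward 0, so the claimed 0.5 must fail.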
    with capture_output() as out:
        with pytest.raises(SystemExit):
            main(["argv0", "-"])

    assert ("❌  cbench-v1/crc32  Expected reward 0.5 but received reward 0.0\n"
            in out.stdout)
    assert not out.stderr
Example #9
def test_multiple_valid_inputs(monkeypatch):
    stdin = """
benchmark,reward,walltime,commandline
benchmark://cbench-v1/crc32,,0,opt  input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt  input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt  input.bc -o output.bc
""".strip()
    set_command_line_flags(["argv0", "--env=llvm-v0"])
    monkeypatch.setattr("sys.stdin", StringIO(stdin))

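    # Duplicate rows are fine; each one is validated independently.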
    with capture_output() as out:
        main(["argv0", "-"])

    assert not out.stderr
    assert out.stdout.count("✅") == 3  # Every benchmark passed.
Example #10
def test_okay_llvm_result_file_input():
    with tempfile.TemporaryDirectory() as d:
        path = Path(d) / "test.csv"
        with open(str(path), "w") as f:
            f.write("""
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt  input.bc -o output.bc,0.3
""".strip())
        set_command_line_flags(["argv0", "--env=llvm-ic-v0"])

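        # Same as the stdin-based tests above, but the CSV is read from a file path.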
        with capture_output() as out:
            main(["argv0", str(path)])

    assert "✅  cbench-v1/crc32 " in out.stdout
    assert not out.stderr
Example #11
def test_eval_llvm_instcount_policy_resume(tmpwd):
    # Run eval on a single benchmark.
    set_command_line_flags([
        "argv0",
        "--n=1",
        "--max_benchmarks=1",
        "--novalidate",
        "--resume",
        "--leaderboard_results=test.csv",
    ])
    with pytest.raises(SystemExit):
        eval_llvm_instcount_policy(null_policy)

    # Check that the log has a single entry (and a header row).
    assert Path("test.csv").is_file()
    with open("test.csv") as f:
        log = f.read()
    assert len(log.rstrip().split("\n")) == 2
    init_logfile = log

    # Repeat, but for two benchmarks.
    set_command_line_flags([
        "argv0",
        "--n=1",
        "--max_benchmarks=2",
        "--novalidate",
        "--resume",
        "--leaderboard_results=test.csv",
    ])
    with pytest.raises(SystemExit):
        eval_llvm_instcount_policy(null_policy)

    # Check that the log extends the original.
    assert Path("test.csv").is_file()
    with open("test.csv") as f:
        log = f.read()
    assert log.startswith(init_logfile)
    assert len(log.rstrip().split("\n")) == 3
    init_logfile = log

    # Repeat, but for two runs of each benchmark.
    set_command_line_flags([
        "argv0",
        "--n=2",
        "--max_benchmarks=2",
        "--novalidate",
        "--resume",
        "--leaderboard_results=test.csv",
    ])
    with pytest.raises(SystemExit):
        eval_llvm_instcount_policy(null_policy)

    # Check that the log extends the original.
    assert Path("test.csv").is_file()
    with open("test.csv") as f:
        log = f.read()
    assert log.startswith(init_logfile)
    assert len(log.rstrip().split("\n")) == 5
Example #12
def test_eval_llvm_instcount_policy_invalid_flag():
    set_command_line_flags(["argv0", "--n=-1"])
    with pytest.raises(AssertionError):
        eval_llvm_instcount_policy(null_policy)
Example #13
def test_eval_llvm_instcount_policy():
    set_command_line_flags(
        ["argv0", "--n=1", "--max_benchmarks=1", "--novalidate"])
    with pytest.raises(SystemExit):
        eval_llvm_instcount_policy(null_policy)