def test_make_benchmark_missing_file():
    """make_benchmark() must raise FileNotFoundError for a nonexistent input."""
    with tempfile.TemporaryDirectory() as tmpdir:
        missing = Path(tmpdir) / "a.c"
        # Both Path and str arguments must fail the same way.
        with pytest.raises(FileNotFoundError):
            llvm.make_benchmark(missing)
        with pytest.raises(FileNotFoundError):
            llvm.make_benchmark(str(missing))
def test_make_benchmark_unrecognized_file_type():
    """A file with an unsupported extension is rejected with ValueError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        unknown = Path(tmpdir) / "foo.txt"
        unknown.touch()
        with pytest.raises(ValueError, match=r"Unrecognized file type"):
            llvm.make_benchmark(unknown)
def test_make_benchmark_unrecognized_file_type():
    """A file with an unsupported extension is rejected with ValueError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        unknown = Path(tmpdir) / "foo.txt"
        unknown.touch()
        with pytest.raises(ValueError) as err:
            llvm.make_benchmark(unknown)
        # Inspect the captured exception message rather than using match=.
        assert "Unrecognized file type" in str(err.value)
def test_make_benchmark_single_clang_invocation_multiple_inputs():
    """A single clang invocation with multiple inputs must raise OSError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src_a = Path(tmpdir) / "a.c"
        src_b = Path(tmpdir) / "b.c"
        src_a.write_text("int B() { return A(); }")
        src_b.write_text("int A() { return 0; }")
        # cannot specify -o when generating multiple output files
        with pytest.raises(OSError):
            llvm.make_benchmark(llvm.ClangInvocation([str(src_a), str(src_b)]))
def test_reset_invalid_ir(env: LlvmEnv):
    """Test that setting the $CXX to an invalid binary raises an error."""
    bad_benchmark = llvm.make_benchmark(INVALID_IR_PATH)
    # The failure surfaces during reset(), when the service tries to
    # compute the cost model on the invalid IR.
    with pytest.raises(BenchmarkInitError, match="Failed to compute .text size cost"):
        env.reset(benchmark=bad_benchmark)
def test_make_benchmark_single_bitcode(env: LlvmEnv):
    """A prebuilt bitcode file is referenced via a file:/// URI."""
    benchmark = llvm.make_benchmark(EXAMPLE_BITCODE_FILE)
    expected_uri = f"file:///{EXAMPLE_BITCODE_FILE}"
    assert benchmark.uri == expected_uri
    assert benchmark.program.uri == expected_uri
    env.reset(benchmark=benchmark)
    assert env.benchmark == benchmark.uri
    # The instruction count of the bitcode is a known constant.
    assert (
        env.observation["IrInstructionCount"]
        == EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT
    )
def test_two_custom_benchmarks_reset(env: LlvmEnv):
    """reset() keeps the current benchmark until a new one is assigned.

    Builds two benchmarks from the same source, checks they get distinct
    URIs, and verifies that assigning ``env.benchmark`` only takes effect
    on the next ``reset()``.
    """
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "a.c"
        with open(str(source), "w") as f:
            f.write("int main() { return 0; }")
        benchmark1 = llvm.make_benchmark(source)
        benchmark2 = llvm.make_benchmark(source)

        # Each call to make_benchmark() yields a unique URI, even for
        # identical inputs.
        assert benchmark1.uri != benchmark2.uri

        env.reset(benchmark=benchmark1)
        assert env.benchmark == benchmark1.uri
        # A plain reset() retains the previously selected benchmark.
        env.reset()
        assert env.benchmark == benchmark1.uri

        # Assigning a new benchmark takes effect only after reset().
        env.benchmark = benchmark2
        env.reset()
        assert env.benchmark == benchmark2.uri
def test_make_benchmark_single_clang_job(env: LlvmEnv):
    """Compiling a single C source yields IR that defines the function A()."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = Path(tmpdir) / "input.c"
        src.write_text("int A() { return 0; }")
        benchmark = llvm.make_benchmark(str(src))
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        # Dump the IR for debugging on failure.
        print(env.observation["Ir"])
        assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
def test_make_benchmark_undefined_symbol(env: LlvmEnv):
    """Source calling an undefined function compiles to IR with a declare."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = Path(tmpdir) / "a.c"
        src.write_text("int main() { return A(); }")
        benchmark = llvm.make_benchmark(src)
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        # Dump the IR for debugging on failure.
        print(env.observation["Ir"])
        # A() is undefined, so it appears as an external declaration.
        assert re.search(r"declare (dso_local )?i32 @A\(\.\.\.\)", env.observation["Ir"])
def test_two_custom_benchmarks_reset(env: LlvmEnv):
    """Assigning env.benchmark warns and only takes effect after reset()."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = Path(tmpdir) / "a.c"
        with open(str(src), "w") as fp:
            fp.write("int main() { return 0; }")
        first = llvm.make_benchmark(src)
        second = llvm.make_benchmark(src)
        # Each make_benchmark() call produces a distinct URI.
        assert first.uri != second.uri

        env.reset(benchmark=first)
        assert env.benchmark == first.uri
        # Plain reset() keeps the current benchmark.
        env.reset()
        assert env.benchmark == first.uri

        # Assignment warns that the change is deferred until reset().
        with pytest.warns(
            UserWarning,
            match=r"Changing the benchmark has no effect until reset\(\) is called",
        ):
            env.benchmark = second
        env.reset()
        assert env.benchmark == second.uri
def test_make_benchmark_clang_job_standard_libraries(env: LlvmEnv):
    """C++ source using <stdio.h> compiles; printf stays an external decl."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src = Path(tmpdir) / "input.cc"
        src.write_text('#include <stdio.h>\nint A() { printf(""); return 0; }')
        benchmark = llvm.make_benchmark(str(src))
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        # Dump the IR for debugging on failure.
        print(env.observation["Ir"])
        # A() is name-mangled because the source is C++ (.cc).
        assert re.search(r"define (dso_local )?i32 @_Z1Av\(\)", env.observation["Ir"])
        assert re.search(r"declare (dso_local )?i32 @printf", env.observation["Ir"])
def test_make_benchmark_single_bitcode(env: LlvmEnv):
    """A prebuilt bitcode file becomes a file-v0 benchmark with its bytes."""
    benchmark = llvm.make_benchmark(EXAMPLE_BITCODE_FILE)
    assert benchmark == f"benchmark://file-v0{EXAMPLE_BITCODE_FILE}"
    assert benchmark.uri.scheme == "benchmark"
    assert benchmark.uri.dataset == "file-v0"

    # The benchmark proto embeds the raw bitcode bytes verbatim.
    expected_bytes = Path(EXAMPLE_BITCODE_FILE).read_bytes()
    assert benchmark.proto.program.contents == expected_bytes

    env.reset(benchmark=benchmark)
    assert env.benchmark == benchmark.uri
    assert env.observation["IrInstructionCount"] == EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT
def test_make_benchmark_split_clang_job(env: LlvmEnv):
    """Two C sources given as a list are compiled and linked into one IR."""
    with tempfile.TemporaryDirectory() as tmpdir:
        src_a = Path(tmpdir) / "a.c"
        src_b = Path(tmpdir) / "b.c"
        src_a.write_text("int B() { return A(); }")
        src_b.write_text("int A() { return 0; }")
        benchmark = llvm.make_benchmark([
            str(src_a),
            str(src_b),
        ])
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        # Dump the IR for debugging on failure.
        print(env.observation["Ir"])
        # Both translation units end up defined in the linked module.
        assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
        assert re.search(r"define (dso_local )?i32 @B\(\)", env.observation["Ir"])
def test_custom_benchmark_is_added_on_service_restart(env: LlvmEnv):
    """A custom benchmark is re-sent to the service after a restart.

    When the service is restarted, the environment must send a custom
    benchmark to it again.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        src = Path(tmpdir) / "a.c"
        with open(str(src), "w") as fp:
            fp.write("int main() { return 0; }")
        benchmark = llvm.make_benchmark(src)

        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri

        # Kill the service so that the next call to reset() starts a new one.
        env.service.close()
        env.service = None

        env.reset()
        assert env.benchmark == benchmark.uri
def test_make_benchmark_invalid_clang_job():
    """An invalid clang flag surfaces as an OSError naming the bad flag."""
    with pytest.raises(OSError) as err:
        llvm.make_benchmark(llvm.ClangInvocation(["-invalid-arg"]))
    message = str(err.value)
    assert "Compilation job failed with returncode" in message
    # The offending flag is echoed back in the error for diagnosis.
    assert "-invalid-arg" in message
def test_make_benchmark_single_ll():
    """Test passing a single .ll file into make_benchmark()."""
    benchmark = llvm.make_benchmark(INVALID_IR_PATH)
    uri = benchmark.uri
    # .ll inputs land in the user-v0 dataset.
    assert str(uri).startswith("benchmark://user-v0/")
    assert uri.scheme == "benchmark"
    assert uri.dataset == "user-v0"
def test_make_benchmark_invalid_clang_job():
    """An invalid clang flag surfaces as an OSError from the compile job."""
    with pytest.raises(OSError, match="Compilation job failed with returncode"):
        llvm.make_benchmark(llvm.ClangInvocation(["-invalid-arg"]))
def test_make_benchmark_single_ll():
    """Test passing a single .ll file into make_benchmark()."""
    benchmark = llvm.make_benchmark(INVALID_IR_PATH)
    uri = benchmark.uri
    # .ll inputs land in the user-v0 dataset with a well-formed URI.
    assert uri.startswith("benchmark://user-v0/")
    assert BENCHMARK_URI_RE.match(uri)