def test_timeout_expired(env: LlvmEnv, tmpdir):
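    """Test that compute_observation() raises TimeoutError when the timeout expires."""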
    tmpdir = Path(tmpdir)
    env.reset(benchmark="cbench-v1/jpeg-c")  # larger benchmark
    env.write_bitcode(tmpdir / "ir.bc")
    space = env.observation.spaces["Programl"]

    with pytest.raises(
            TimeoutError,
            match="Failed to compute Programl observation in 0.1 seconds"):
        compute_observation(space, tmpdir / "ir.bc", timeout=0.1)


def test_invalid_observation_space_name(env: LlvmEnv, tmpdir):
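    """Test that an invalid observation space name raises ValueError."""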
    tmpdir = Path(tmpdir)
    env.reset()
    env.write_bitcode(tmpdir / "ir.bc")
    space = env.observation.spaces["Ir"]
    space.id = "NotARealName"

    with pytest.raises(
            ValueError,
            match="Invalid observation space name: NOT_A_REAL_NAME"):
        compute_observation(space, tmpdir / "ir.bc")


def test_observation_equivalence(env: LlvmEnv, tmpdir, observation_space: str):
    """Test that compute_observation() produces the same result as the environment."""
    tmpdir = Path(tmpdir)
    env.reset()
    env.write_bitcode(tmpdir / "ir.bc")

    observation = compute_observation(
        env.observation.spaces[observation_space], tmpdir / "ir.bc")
    assert observation == env.observation[observation_space]


def test_observation_programl_equivalence(env: LlvmEnv, tmpdir):
    """Test that compute_observation() produces the same result as the environment."""
    tmpdir = Path(tmpdir)
    env.reset()
    env.write_bitcode(tmpdir / "ir.bc")

    G = compute_observation(env.observation.spaces["Programl"],
                            tmpdir / "ir.bc")
    assert networkx.algorithms.isomorphism.is_isomorphic(
        G, env.observation.Programl())

Example #5
    def run(self, desired_result, input, limit):
        """Run a single config."""
        del input  # Unused
        del limit  # Unused

        self.run_count += 1

        try:
            # Run opt to produce an optimized bitcode file.
            cmd = [
                self.opt,
                self.unoptimized_path,
                "-o",
                self.tmp_optimized_path,
            ]
            cmd += self.serialize_flags(desired_result.configuration.data)
            subprocess.check_call(cmd,
                                  timeout=300,
                                  stdout=subprocess.DEVNULL,
                                  stderr=subprocess.DEVNULL)
            if not Path(self.tmp_optimized_path).is_file():
                return Result(time=float("inf"))
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            return Result(time=float("inf"))

        # We need to jump through a couple of hoops to optimize for runtime
        # using OpenTuner. Replace the environment benchmark with the current
        # optimized file. Use the same benchmark protocol buffer so that any
        # dynamic configuration is preserved.
        if self.target == OptimizationTarget.RUNTIME:
            try:
                new_benchmark = self.env.benchmark
                new_benchmark.proto.program.uri = f"file:///{self.tmp_optimized_path}"
                self.env.reset(benchmark=new_benchmark)
                return Result(
                    time=float(np.median(self.env.observation.Runtime())))
            except (ServiceError, TimeoutError):
                return Result(time=float("inf"))

        # For non-runtime targets, compute the observation statically from the
        # optimized bitcode file.
        try:
            return Result(time=float(
                compute_observation(self.observation_space,
                                    self.tmp_optimized_path)))
        except (ValueError, TimeoutError):
            return Result(time=float("inf"))


def test_missing_file(env: LlvmEnv, tmpdir):
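    """Test that a missing bitcode file raises FileNotFoundError."""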
    tmpdir = Path(tmpdir)
    env.reset()

    with pytest.raises(FileNotFoundError, match=str(tmpdir / "ir.bc")):
        compute_observation(env.observation.spaces["Ir"], tmpdir / "ir.bc")
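

# Minimal usage sketch (added for illustration, not part of the test suite):
# compute an observation from a saved bitcode file without stepping the
# environment. It assumes compute_observation() is importable from
# compiler_gym.envs.llvm and that the "Autophase" space and the
# "cbench-v1/crc32" benchmark are available; adjust the names to your setup.
def example_compute_observation_from_bitcode(tmpdir):
    import compiler_gym
    from compiler_gym.envs.llvm import compute_observation

    with compiler_gym.make("llvm-v0", benchmark="cbench-v1/crc32") as env:
        env.reset()
        bitcode = Path(tmpdir) / "example.bc"
        env.write_bitcode(bitcode)
        space = env.observation.spaces["Autophase"]
        # Same call shape as in the tests above; timeout is in seconds.
        return compute_observation(space, bitcode, timeout=60)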