Code example #1
    def after_run(self, tool):
        # Elapsed time, in microseconds.
        elapsed = (time.monotonic_ns() - self._start_nanos) // 1000

        bench_file = Path(self._config["output"])
        with flock_append(bench_file) as io:
            bench_record = {"tool": tool.asdict(), "elapsed": elapsed}
            print(json.dumps(bench_record), file=io)
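All of these examples append their record through `flock_append`, which is not reproduced on this page. As a rough sketch of the behavior the call sites rely on (blight's actual helper lives in its `util` module and may differ in detail), a context manager that opens the file for appending under an exclusive `fcntl` lock would look like this:

import contextlib
import fcntl
from pathlib import Path
from typing import IO, Iterator


@contextlib.contextmanager
def flock_append(path: Path) -> Iterator[IO[str]]:
    # Sketch only: open for appending and hold an exclusive advisory lock so
    # that concurrent tool invocations don't interleave their JSON lines.
    with path.open("a") as io:
        fcntl.flock(io, fcntl.LOCK_EX)
        try:
            yield io
        finally:
            fcntl.flock(io, fcntl.LOCK_UN)

Holding the lock for the duration of the `print` call is what keeps records from parallel compiler invocations on separate lines of the same file.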
Code example #2
File: benchmark.py  Project: trailofbits/blight
    def after_run(self, tool: Tool, *, run_skipped: bool = False) -> None:
        elapsed = (time.monotonic_ns() - self._start_nanos) // 1000
        bench = BenchmarkRecord(tool=tool, elapsed=elapsed, run_skipped=run_skipped)

        if tool.is_journaling():
            self._result = bench.dict()
        else:
            bench_file = Path(self._config["output"])
            with flock_append(bench_file) as io:
                print(bench.json(), file=io)
Code example #3
File: find_inputs.py  Project: trailofbits/blight
    def after_run(self, tool: Tool, *, run_skipped: bool = False) -> None:
        inputs = InputsRecord(tool=tool, inputs=self._inputs)

        if tool.is_journaling():
            # NOTE(ms): The `tool` member is excluded to avoid journal bloat.
            self._result = inputs.dict(exclude={"tool"})
        else:
            output_path = Path(self._config["output"])
            with flock_append(output_path) as io:
                print(inputs.json(), file=io)
Code example #4
    def after_run(self, tool: Tool, *, run_skipped: bool = False):
        # TODO(ww): Restructure this dictionary; it should be more like:
        # { run: {...}, tool: {...}}
        tool_record = tool.asdict()
        tool_record["run_skipped"] = run_skipped

        if tool.is_journaling():
            self._result = tool_record
        else:
            record_file = Path(self._config["output"])
            with flock_append(record_file) as io:
                print(json.dumps(tool_record), file=io)
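Each of the variants above emits one JSON object per line, so the configured output file ends up as newline-delimited JSON. A minimal consumer sketch (the `records.jsonl` path is a placeholder for whatever was passed as the action's `output` setting):

import json
from pathlib import Path

# Placeholder path: substitute the file configured as the action's "output".
record_file = Path("records.jsonl")

records = [json.loads(line) for line in record_file.read_text().splitlines() if line]
skipped = sum(1 for record in records if record.get("run_skipped"))
print(f"{len(records)} records, {skipped} skipped runs")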
Code example #5
File: find_outputs.py  Project: thinkmoore/blight
    def before_run(self, tool):
        output_map = defaultdict(list)
        for output in tool.outputs:
            output = Path(output)
            if not output.is_absolute():
                output = tool.cwd / output

            # Special case: a.out is produced by both the linker
            # and compiler tools by default.
            if output.name == "a.out" and tool.__class__ in [CC, CXX, LD]:
                output_map[OutputKind.Executable.value].append(str(output))
            else:
                kind = OUTPUT_SUFFIX_KIND_MAP.get(output.suffix,
                                                  OutputKind.Unknown)
                output_map[kind.value].append(str(output))

        record_file = Path(self._config["output"])
        with flock_append(record_file) as io:
            outputs_record = {"tool": tool.asdict(), "outputs": output_map}
            print(json.dumps(outputs_record), file=io)
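Only `OutputKind.Executable` and `OutputKind.Unknown` appear in this excerpt; the rest of the enum and the `OUTPUT_SUFFIX_KIND_MAP` lookup table come from blight itself. The definitions below are illustrative assumptions about their shape, not a copy of the real table:

import enum


class OutputKind(enum.Enum):
    # Illustrative subset; blight defines more kinds than are shown here.
    Object = "object"
    Executable = "executable"
    SharedLibrary = "shared"
    StaticLibrary = "static"
    Unknown = "unknown"


# Illustrative suffix table, keyed by file suffix as the lookup above expects.
OUTPUT_SUFFIX_KIND_MAP = {
    ".o": OutputKind.Object,
    ".so": OutputKind.SharedLibrary,
    ".a": OutputKind.StaticLibrary,
    ".exe": OutputKind.Executable,
}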
Code example #6
File: find_outputs.py  Project: trailofbits/blight
    def after_run(self, tool: Tool, *, run_skipped: bool = False) -> None:
        store = self._config.get("store")
        if store is not None:
            store_path = Path(store)
            store_path.mkdir(parents=True, exist_ok=True)

            for output in self._outputs:
                # We don't copy output directories into the store, for now.
                if output.path.is_dir():
                    continue

                if not output.path.exists():
                    logger.warning(f"tool={tool}'s output ({output.path}) does not exist")
                    continue

                # Outputs aren't guaranteed to have unique basenames and subsequent
                # steps in the build system could even modify a particular output
                # in-place, so we give each output a `store_path` based on a hash
                # of its content.
                content_hash = hashlib.sha256(output.path.read_bytes()).hexdigest()
                # Append hash to the filename unless `append_hash=false` is specified in the config
                append_hash = self._config.get("append_hash") != "false"
                filename = f"{output.path.name}-{content_hash}" if append_hash else output.path.name
                output_store_path = store_path / filename
                if not output_store_path.exists():
                    shutil.copy(output.path, output_store_path)
                output.store_path = output_store_path
                output.content_hash = content_hash

        outputs = OutputsRecord(tool=tool, outputs=self._outputs)

        if tool.is_journaling():
            # NOTE(ms): The `tool` member is excluded to avoid journal bloat.
            self._result = outputs.dict(exclude={"tool"})
        else:
            output_path = Path(self._config["output"])
            with flock_append(output_path) as io:
                print(outputs.json(), file=io)
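Because each stored copy is named with its SHA-256 content hash, a downstream consumer can re-verify store entries against the serialized record. A small sketch, assuming the record's JSON keeps the `store_path` and `content_hash` fields assigned above and that `outputs.jsonl` stands in for the configured `output` file:

import hashlib
import json
from pathlib import Path

# Placeholder path: substitute the file configured as the action's "output".
for line in Path("outputs.jsonl").read_text().splitlines():
    record = json.loads(line)
    for output in record["outputs"]:
        store_path = output.get("store_path")
        if store_path is None:
            continue
        actual = hashlib.sha256(Path(store_path).read_bytes()).hexdigest()
        assert actual == output["content_hash"], f"store entry mismatch: {store_path}"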
Code example #7
    def before_run(self, tool):
        record_file = Path(self._config["output"])

        with flock_append(record_file) as io:
            print(json.dumps(tool.asdict()), file=io)
Code example #8
    def _commit_journal(self) -> None:
        if self.is_journaling():
            with util.flock_append(self._journal_path) as io:  # type: ignore
                json.dump(self._action_results, io, default=json_helper)
                # NOTE(ww): `json.dump` doesn't do this for us.
                io.write("\n")
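`json.dump` is handed `default=json_helper` for values the standard encoder can't serialize. The helper itself isn't shown in this excerpt; a minimal stand-in, assuming the only awkward values are `pathlib.Path`-like objects, could be:

from pathlib import Path
from typing import Any


def json_helper(value: Any) -> Any:
    # Sketch assumption: stringify paths, and fail loudly on anything else
    # so malformed journal entries are caught immediately.
    if isinstance(value, Path):
        return str(value)
    raise TypeError(f"cannot serialize {type(value).__name__} in a journal entry")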