Example #1
    def run(self, start_time: float) -> ExitCode:
        spec_parser = SpecsParser(get_buildroot())
        specs = [str(spec_parser.parse_spec(spec)) for spec in self.options.specs]
        self.run_tracker.start(run_start_time=start_time, specs=specs)

        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()

            if self.options.help_request:
                return self._print_help(self.options.help_request)

            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                run_tracker=self.run_tracker,
                callbacks=self._get_workunits_callbacks(),
                report_interval_seconds=global_options.streaming_workunits_report_interval,
            )

            goals = tuple(self.options.goals)
            with streaming_reporter.session():
                if not goals:
                    return PANTS_SUCCEEDED_EXIT_CODE
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._perform_run(goals)
                except Exception as e:
                    ExceptionSink.log_exception(e)

                metrics = self.graph_session.scheduler_session.metrics()
                self.run_tracker.set_pantsd_scheduler_metrics(metrics)
                self.run_tracker.end_run(engine_result)

            return engine_result
Example #2
    def run(self, start_time: float) -> ExitCode:
        self._start_run(start_time)

        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()

            if self.options.help_request:
                return self._print_help(self.options.help_request)

            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                run_tracker=self.run_tracker,
                callbacks=self._get_workunits_callbacks(),
                report_interval_seconds=global_options.streaming_workunits_report_interval,
            )

            goals = tuple(self.options.goals)
            with streaming_reporter.session():
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_v2(goals)
                except Exception as e:
                    ExceptionSink.log_exception(e)

                self._finish_run(engine_result)
            return engine_result
Example #3
def test_context_object_on_streaming_workunits(
        rule_runner: RuleRunner, run_tracker: RunTracker) -> None:
    scheduler = rule_runner.scheduler

    def callback(**kwargs) -> None:
        context = kwargs["context"]
        assert isinstance(context, StreamingWorkunitContext)

        completed_workunits = kwargs["completed_workunits"]
        for workunit in completed_workunits:
            if "artifacts" in workunit and "stdout_digest" in workunit[
                    "artifacts"]:
                digest = workunit["artifacts"]["stdout_digest"]
                output = context.single_file_digests_to_bytes([digest])
                assert output == (b"stdout output\n", )

    handler = StreamingWorkunitHandler(
        scheduler,
        run_tracker=run_tracker,
        callbacks=[callback],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
    )

    stdout_process = Process(argv=("/bin/bash", "-c",
                                   "/bin/echo 'stdout output'"),
                             description="Stdout process")

    with handler.session():
        rule_runner.request(ProcessResult, [stdout_process])
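The callbacks in these examples all rely on the keyword arguments that StreamingWorkunitHandler passes on each report tick. A minimal sketch of that signature, inferred from the usages in these examples; the exact set of kwargs is an assumption, not the verbatim API:

def example_callback(**kwargs) -> None:
    # Workunit dicts that started since the previous report tick.
    started = kwargs.get("started_workunits", [])
    # Workunit dicts that completed since the previous report tick.
    completed = kwargs.get("completed_workunits", [])
    # True only on the final invocation, when the handler's session ends.
    finished = kwargs.get("finished", False)
    # StreamingWorkunitContext, used for digest and spec lookups.
    context = kwargs["context"]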
Example #4
def test_more_complicated_engine_aware(rule_runner: RuleRunner,
                                       run_tracker: RunTracker) -> None:
    tracker = WorkunitTracker()
    handler = StreamingWorkunitHandler(
        rule_runner.scheduler,
        run_tracker=run_tracker,
        callbacks=[tracker.add],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.TRACE,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
    )
    with handler.session():
        input_1 = CreateDigest((
            FileContent(path="a.txt", content=b"alpha"),
            FileContent(path="b.txt", content=b"beta"),
        ))
        digest_1 = rule_runner.request(Digest, [input_1])
        snapshot_1 = rule_runner.request(Snapshot, [digest_1])

        input_2 = CreateDigest((FileContent(path="g.txt", content=b"gamma"), ))
        digest_2 = rule_runner.request(Digest, [input_2])
        snapshot_2 = rule_runner.request(Snapshot, [digest_2])

        input = ComplicatedInput(snapshot_1=snapshot_1, snapshot_2=snapshot_2)

        rule_runner.request(Output, [input])

    finished = list(
        itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    workunit = next(
        item for item in finished
        if item["name"] == "pants.engine.internals.engine_test.a_rule")

    streaming_workunit_context = handler._context

    artifacts = workunit["artifacts"]
    output_snapshot_1 = artifacts["snapshot_1"]
    output_snapshot_2 = artifacts["snapshot_2"]

    output_contents_list = streaming_workunit_context.snapshots_to_file_contents(
        [output_snapshot_1, output_snapshot_2])
    assert len(output_contents_list) == 2

    assert isinstance(output_contents_list[0], DigestContents)
    assert isinstance(output_contents_list[1], DigestContents)

    digest_contents_1 = output_contents_list[0]
    digest_contents_2 = output_contents_list[1]

    assert len(tuple(x for x in digest_contents_1
                     if x.content == b"alpha")) == 1
    assert len(tuple(x for x in digest_contents_1
                     if x.content == b"beta")) == 1

    assert len(tuple(x for x in digest_contents_2
                     if x.content == b"gamma")) == 1
Example #5
def test_streaming_workunits_expanded_specs(run_tracker: RunTracker) -> None:
    rule_runner = RuleRunner(
        target_types=[PythonLibrary],
        rules=[
            QueryRule(ProcessResult, (Process, )),
        ],
    )

    rule_runner.set_options(["--backend-packages=pants.backend.python"])

    rule_runner.create_file("src/python/somefiles/BUILD", "python_library()")
    rule_runner.create_file("src/python/somefiles/a.py", "print('')")
    rule_runner.create_file("src/python/somefiles/b.py", "print('')")

    rule_runner.create_file("src/python/others/BUILD", "python_library()")
    rule_runner.create_file("src/python/others/a.py", "print('')")
    rule_runner.create_file("src/python/others/b.py", "print('')")

    specs = SpecsParser(get_buildroot()).parse_specs(
        ["src/python/somefiles::", "src/python/others/b.py"])

    def callback(**kwargs) -> None:
        context = kwargs["context"]
        assert isinstance(context, StreamingWorkunitContext)

        expanded = context.get_expanded_specs()
        targets = expanded.targets

        assert len(targets.keys()) == 2
        assert targets["src/python/others/b.py"] == [
            TargetInfo(filename="src/python/others/b.py")
        ]
        assert set(targets["src/python/somefiles"]) == {
            TargetInfo(filename="src/python/somefiles/a.py"),
            TargetInfo(filename="src/python/somefiles/b.py"),
        }

    handler = StreamingWorkunitHandler(
        scheduler=rule_runner.scheduler,
        run_tracker=run_tracker,
        callbacks=[callback],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
        specs=specs,
        options_bootstrapper=create_options_bootstrapper(
            ["--backend-packages=pants.backend.python"]),
    )

    stdout_process = Process(argv=("/bin/bash", "-c",
                                   "/bin/echo 'stdout output'"),
                             description="Stdout process")

    with handler.session():
        rule_runner.request(ProcessResult, [stdout_process])
Example #6
    def run(self, start_time: float) -> ExitCode:
        with maybe_profiled(self.profile_path):
            spec_parser = SpecsParser(get_buildroot())
            specs = [
                str(spec_parser.parse_spec(spec))
                for spec in self.options.specs
            ]
            self.run_tracker.start(run_start_time=start_time, specs=specs)
            global_options = self.options.for_global_scope()

            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                run_tracker=self.run_tracker,
                specs=self.specs,
                options_bootstrapper=self.options_bootstrapper,
                callbacks=self._get_workunits_callbacks(),
                report_interval_seconds=global_options.streaming_workunits_report_interval,
                pantsd=global_options.pantsd,
            )
            with streaming_reporter:
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_inner()
                finally:
                    metrics = self.graph_session.scheduler_session.metrics()
                    self.run_tracker.set_pantsd_scheduler_metrics(metrics)
                    self.run_tracker.end_run(engine_result)

                return engine_result
Example #7
    def run(self, start_time: float) -> ExitCode:
        with maybe_profiled(self.profile_path):
            spec_parser = SpecsParser()
            specs = []
            for spec_str in self.options.specs:
                spec, is_ignore = spec_parser.parse_spec(spec_str)
                specs.append(f"-{spec}" if is_ignore else str(spec))

            self.run_tracker.start(run_start_time=start_time, specs=specs)
            global_options = self.options.for_global_scope()

            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                run_tracker=self.run_tracker,
                specs=self.specs,
                options_bootstrapper=self.options_bootstrapper,
                callbacks=self._get_workunits_callbacks(),
                report_interval_seconds=global_options.streaming_workunits_report_interval,
                allow_async_completion=(
                    global_options.pantsd
                    and global_options.streaming_workunits_complete_async),
                max_workunit_verbosity=global_options.streaming_workunits_level,
            )
            with streaming_reporter:
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_inner()
                finally:
                    metrics = self.graph_session.scheduler_session.metrics()
                    self.run_tracker.set_pantsd_scheduler_metrics(metrics)
                    self.run_tracker.end_run(engine_result)

                return engine_result
Example #8
def test_streaming_workunits_expanded_specs(run_tracker: RunTracker) -> None:
    rule_runner = RuleRunner(
        target_types=[PythonSourcesGeneratorTarget],
        rules=[
            QueryRule(ProcessResult, (Process, )),
        ],
    )
    rule_runner.set_options(["--backend-packages=pants.backend.python"])
    rule_runner.write_files({
        "src/python/somefiles/BUILD": "python_sources()",
        "src/python/somefiles/a.py": "print('')",
        "src/python/somefiles/b.py": "print('')",
        "src/python/others/BUILD": "python_sources()",
        "src/python/others/a.py": "print('')",
        "src/python/others/b.py": "print('')",
    })
    specs = SpecsParser().parse_specs(
        ["src/python/somefiles::", "src/python/others/b.py"],
        convert_dir_literal_to_address_literal=False,
        description_of_origin="tests",
    )

    class Callback(WorkunitsCallback):
        @property
        def can_finish_async(self) -> bool:
            return False

        def __call__(self, **kwargs) -> None:
            context = kwargs["context"]
            assert isinstance(context, StreamingWorkunitContext)

            expanded = context.get_expanded_specs()
            targets = expanded.targets

            assert len(targets.keys()) == 2
            assert targets["src/python/others/b.py"] == [
                TargetInfo(filename="src/python/others/b.py")
            ]
            assert set(targets["src/python/somefiles"]) == {
                TargetInfo(filename="src/python/somefiles/a.py"),
                TargetInfo(filename="src/python/somefiles/b.py"),
            }

    handler = StreamingWorkunitHandler(
        scheduler=rule_runner.scheduler,
        run_tracker=run_tracker,
        callbacks=[Callback()],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
        specs=specs,
        options_bootstrapper=create_options_bootstrapper(
            ["--backend-packages=pants.backend.python"]),
        allow_async_completion=False,
    )
    stdout_process = Process(argv=("/bin/bash", "-c",
                                   "/bin/echo 'stdout output'"),
                             description="Stdout process")
    with handler:
        rule_runner.request(ProcessResult, [stdout_process])
Example #9
    def _fixture_for_rules(
        self, rules, max_workunit_verbosity: LogLevel = LogLevel.INFO
    ) -> Tuple[SchedulerSession, WorkunitTracker, StreamingWorkunitHandler]:
        scheduler = self.mk_scheduler(rules, include_trace_on_error=False)
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            run_tracker=new_run_tracker(),
            callbacks=[tracker],
            report_interval_seconds=0.01,
            max_workunit_verbosity=max_workunit_verbosity,
            specs=Specs.empty(),
            options_bootstrapper=create_options_bootstrapper([]),
            pantsd=False,
        )
        return scheduler, tracker, handler
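WorkunitTracker is passed directly as a callback here and via tracker.add in the older fixtures. A minimal sketch compatible with both usages; its real definition lives in the test support code, so treat the details as assumptions:

from typing import List

from pants.engine.streaming_workunit_handler import WorkunitsCallback


class WorkunitTracker(WorkunitsCallback):
    """Records each batch of completed workunits and whether the session finished."""

    def __init__(self) -> None:
        self.finished_workunit_chunks: List[List[dict]] = []
        self.finished = False

    @property
    def can_finish_async(self) -> bool:
        return False

    def __call__(self, **kwargs) -> None:
        if kwargs.get("finished"):
            self.finished = True
        completed = kwargs.get("completed_workunits")
        if completed:
            self.finished_workunit_chunks.append(completed)

    # Bound-method form used by the older fixtures: callbacks=[tracker.add].
    add = __call__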
Example #10
    def _fixture_for_rules(
        self,
        rules,
        max_workunit_verbosity: LogLevel = LogLevel.INFO
    ) -> Tuple[SchedulerSession, WorkunitTracker, StreamingWorkunitHandler]:
        scheduler = self.mk_scheduler(rules, include_trace_on_error=False)

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            run_tracker=new_run_tracker(),
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=max_workunit_verbosity,
        )
        return (scheduler, tracker, handler)
Example #11
def test_counters(rule_runner: RuleRunner, run_tracker: RunTracker) -> None:
    scheduler = rule_runner.scheduler

    tracker = WorkunitTracker()
    handler = StreamingWorkunitHandler(
        scheduler,
        run_tracker=run_tracker,
        callbacks=[tracker],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.TRACE,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
        allow_async_completion=False,
    )

    with handler:
        scheduler.record_test_observation(128)
        rule_runner.request(
            ProcessResult,
            [
                Process(
                    ["/bin/sh", "-c", "true"],
                    description="always true",
                    cache_scope=ProcessCacheScope.PER_SESSION,
                )
            ],
        )
        histograms_info = scheduler.get_observation_histograms()

    finished = list(
        itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    workunits_with_counters = [item for item in finished if "counters" in item]
    assert workunits_with_counters[0]["counters"]["local_cache_requests"] == 1
    assert workunits_with_counters[0]["counters"]["local_cache_requests_uncached"] == 1
    assert workunits_with_counters[1]["counters"]["local_execution_requests"] == 1

    assert histograms_info["version"] == 0
    assert "histograms" in histograms_info
    assert "test_observation" in histograms_info["histograms"]
    assert (
        histograms_info["histograms"]["test_observation"] ==
        b"\x1c\x84\x93\x14\x00\x00\x00\x1fx\x9c\x93i\x99,\xcc\xc0\xc0\xc0\xcc\x00\x010\x9a\x11J3\xd9\x7f\x800\xfe32\x01\x00E\x0c\x03\x81"
    )
Example #12
def test_context_object_on_streaming_workunits(
        rule_runner: RuleRunner, run_tracker: RunTracker) -> None:
    scheduler = rule_runner.scheduler

    class Callback(WorkunitsCallback):
        @property
        def can_finish_async(self) -> bool:
            return False

        def __call__(self, **kwargs) -> None:
            context = kwargs["context"]
            assert isinstance(context, StreamingWorkunitContext)

            completed_workunits = kwargs["completed_workunits"]
            for workunit in completed_workunits:
                if "artifacts" in workunit and "stdout_digest" in workunit[
                        "artifacts"]:
                    digest = workunit["artifacts"]["stdout_digest"]
                    output = context.single_file_digests_to_bytes([digest])
                    assert output == [b"stdout output\n"]

    handler = StreamingWorkunitHandler(
        scheduler,
        run_tracker=run_tracker,
        callbacks=[Callback()],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.INFO,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
        allow_async_completion=False,
    )
    stdout_process = Process(argv=("/bin/bash", "-c",
                                   "/bin/echo 'stdout output'"),
                             description="Stdout process")
    with handler:
        rule_runner.request(ProcessResult, [stdout_process])
Example #13
def test_process_digests_on_streaming_workunits(
        rule_runner: RuleRunner, run_tracker: RunTracker) -> None:
    scheduler = rule_runner.scheduler

    tracker = WorkunitTracker()
    handler = StreamingWorkunitHandler(
        scheduler,
        run_tracker=run_tracker,
        callbacks=[tracker],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.DEBUG,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
        allow_async_completion=False,
    )

    stdout_process = Process(argv=("/bin/bash", "-c",
                                   "/bin/echo 'stdout output'"),
                             description="Stdout process")

    with handler:
        result = rule_runner.request(ProcessResult, [stdout_process])

    assert tracker.finished
    finished = list(
        itertools.chain.from_iterable(tracker.finished_workunit_chunks))

    process_workunit = next(item for item in finished
                            if item["name"] == "process")
    assert process_workunit is not None
    stdout_digest = process_workunit["artifacts"]["stdout_digest"]
    stderr_digest = process_workunit["artifacts"]["stderr_digest"]

    assert result.stdout == b"stdout output\n"
    assert stderr_digest == EMPTY_FILE_DIGEST
    assert stdout_digest.serialized_bytes_length == len(result.stdout)

    tracker = WorkunitTracker()
    handler = StreamingWorkunitHandler(
        scheduler,
        run_tracker=run_tracker,
        callbacks=[tracker],
        report_interval_seconds=0.01,
        max_workunit_verbosity=LogLevel.DEBUG,
        specs=Specs.empty(),
        options_bootstrapper=create_options_bootstrapper([]),
        allow_async_completion=False,
    )
    stderr_process = Process(argv=("/bin/bash", "-c",
                                   "1>&2 /bin/echo 'stderr output'"),
                             description="Stderr process")
    with handler:
        result = rule_runner.request(ProcessResult, [stderr_process])

    assert tracker.finished
    finished = list(
        itertools.chain.from_iterable(tracker.finished_workunit_chunks))
    process_workunit = next(item for item in finished
                            if item["name"] == "process")

    assert process_workunit is not None
    stdout_digest = process_workunit["artifacts"]["stdout_digest"]
    stderr_digest = process_workunit["artifacts"]["stderr_digest"]

    assert result.stderr == b"stderr output\n"
    assert stdout_digest == EMPTY_FILE_DIGEST
    assert stderr_digest.serialized_bytes_length == len(result.stderr)

    assert process_workunit["metadata"]["exit_code"] == 0

    try:
        scheduler.ensure_remote_has_recursive([stdout_digest, stderr_digest])
    except Exception as e:
        # This is the exception message we expect ensure_remote_has_recursive()
        # to raise from the Rust side when no remote is configured.
        assert str(e) == "Cannot ensure remote has blobs without a remote"

    byte_outputs = scheduler.single_file_digests_to_bytes(
        [stdout_digest, stderr_digest])
    assert byte_outputs[0] == result.stdout
    assert byte_outputs[1] == result.stderr
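The assertions in these tests repeatedly flatten the per-tick chunks inline. A small hypothetical helper (not in the original tests) makes that pattern explicit:

import itertools
from typing import List


def all_finished_workunits(tracker: WorkunitTracker) -> List[dict]:
    # Each report tick appends one chunk of workunit dicts; flatten them into
    # a single list, as itertools.chain.from_iterable does inline above.
    return list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))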