Example #1
    def run(self):
        global_options = self.options.for_global_scope()

        exiter = LocalExiter.wrap_global_exiter(self._run_tracker, self._repro)
        profiled = maybe_profiled(self.profile_path)

        with exiter, profiled:
            streaming_handlers = global_options.streaming_workunits_handlers
            report_interval = global_options.streaming_workunits_report_interval
            callbacks = Subsystem.get_streaming_workunit_callbacks(
                streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=report_interval,
            )

            if self.options.help_request:
                help_printer = HelpPrinter(
                    options=self.options,
                    union_membership=self.union_membership)
                help_output = help_printer.print_help()
                self._exiter.exit(help_output)

            v1 = global_options.v1
            v2 = global_options.v2
            with streaming_reporter.session():
                try:
                    engine_result = self._maybe_run_v2(v2)
                    goal_runner_result = self._maybe_run_v1(v1)
                finally:
                    run_tracker_result = self._finish_run()
            final_exit_code = self._compute_final_exit_code(
                engine_result, goal_runner_result, run_tracker_result)
            self._exiter.exit(final_exit_code)
Example #2
    def run(self, start_time: float) -> ExitCode:
        run_tracker = RunTracker.global_instance()
        self._start_run(run_tracker, start_time)

        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()

            if self.options.help_request:
                return self._print_help(self.options.help_request)

            streaming_handlers = global_options.streaming_workunits_handlers
            callbacks = Subsystem.get_streaming_workunit_callbacks(
                streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=global_options.streaming_workunits_report_interval,
            )

            goals = tuple(self.options.goals)
            with streaming_reporter.session():
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_v2(goals)
                except Exception as e:
                    ExceptionSink.log_exception(e)

                self._finish_run(run_tracker, engine_result)
            return engine_result
Example #3
  def _run(self):
    engine_result = PANTS_FAILED_EXIT_CODE
    goal_runner_result = PANTS_FAILED_EXIT_CODE
    try:
      self._maybe_handle_help()

      streaming_handlers = self._options.for_global_scope().streaming_workunits_handlers
      callbacks = Subsystem.get_streaming_workunit_callbacks(streaming_handlers)
      streaming_reporter = StreamingWorkunitHandler(self._scheduler_session, callbacks=callbacks)
      with streaming_reporter.session():
        engine_result = self._maybe_run_v2()

      goal_runner_result = self._maybe_run_v1()
    finally:
      try:
        self._update_stats()
        run_tracker_result = self._run_tracker.end()
      except ValueError as e:
        # Calling .end() sometimes writes to a closed file, so we return a dummy result here.
        logger.exception(e)
        run_tracker_result = PANTS_SUCCEEDED_EXIT_CODE

    final_exit_code = self._compute_final_exit_code(
      engine_result,
      goal_runner_result,
      run_tracker_result
    )
    self._exiter.exit(final_exit_code)
Example #4
    def test_engine_aware_none_case(self):
        # If level() returns None, the engine shouldn't try to set
        # a new workunit level.
        @dataclass(frozen=True)
        class ModifiedOutput(EngineAwareReturnType):
            _level: Optional[LogLevel]
            val: int

            def level(self):
                return self._level

        @rule(desc="a_rule")
        def a_rule(n: int) -> ModifiedOutput:
            return ModifiedOutput(val=n, _level=None)

        rules = [a_rule, QueryRule(ModifiedOutput, (int,))]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )
        with handler.session():
            scheduler.product_request(ModifiedOutput, subjects=[0])

        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
        )
        assert workunit["level"] == "TRACE"
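
Note: Examples #4 onward pass a WorkunitTracker helper whose definition never appears in these snippets. Below is a hedged reconstruction, inferred from the inline Tracker in Example #9 and the callback keyword arguments visible in Example #6; the real helper in the pants test suite may differ. (Older snippets such as Examples #7, #9, and #20 instead use a flat `workunits` list.)

    from dataclasses import dataclass, field
    from typing import Any, Dict, List

    @dataclass
    class WorkunitTracker:
        """Hypothetical reconstruction of the test helper used in the examples."""

        finished_workunit_chunks: List[List[Dict[str, Any]]] = field(default_factory=list)
        started_workunit_chunks: List[List[Dict[str, Any]]] = field(default_factory=list)
        finished: bool = False

        def add(self, **kwargs: Any) -> None:
            # The handler calls back with keyword arguments; `completed_workunits`
            # is visible in Example #6, while `started_workunits` and `finished`
            # are inferred from the fields the tests read.
            if kwargs.get("finished") is True:
                self.finished = True
            self.started_workunit_chunks.append(list(kwargs.get("started_workunits", ())))
            self.finished_workunit_chunks.append(list(kwargs.get("completed_workunits", ())))
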
Example #5
    def test_streaming_workunits_reporting(self):
        rules = [fib, QueryRule(Fib, (int,))]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )
        with handler.session():
            scheduler.product_request(Fib, subjects=[0])

        flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        # The execution of the single named @rule "fib" should provide exactly this one workunit.
        self.assertEqual(len(flattened), 1)

        tracker.finished_workunit_chunks = []
        with handler.session():
            scheduler.product_request(Fib, subjects=[10])

        # Requesting a larger Fibonacci number results in more rule executions and thus more
        # reported workunits. In this case, we expect 10 invocations of the `fib` rule.
        flattened = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        assert len(flattened) == 10
        assert tracker.finished
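
Note: the fib/Fib pair used by Examples #5, #9, and #20 is also not shown. A minimal sketch consistent with the assertions: fib(0) yields a single workunit, and a later fib(10) on the same scheduler yields exactly 10 more because the memoized fib(0) result is reused. The Get/MultiGet syntax and import paths vary across pants versions.

    from dataclasses import dataclass

    from pants.engine.internals.selectors import Get, MultiGet  # import path is version-dependent
    from pants.engine.rules import rule

    @dataclass(frozen=True)
    class Fib:
        val: int

    @rule
    async def fib(n: int) -> Fib:
        if n < 2:
            return Fib(n)
        # Each Get runs (or reuses) the fib rule for a smaller n; memoization
        # computes each n at most once across sessions of one scheduler.
        x, y = await MultiGet(Get(Fib, int(n - 2)), Get(Fib, int(n - 1)))
        return Fib(x.val + y.val)
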
Example #6
    def test_context_object(self):
        scheduler = self.scheduler

        def callback(**kwargs) -> None:
            context = kwargs["context"]
            assert isinstance(context, StreamingWorkunitContext)

            completed_workunits = kwargs["completed_workunits"]
            for workunit in completed_workunits:
                if "artifacts" in workunit and "stdout_digest" in workunit["artifacts"]:
                    digest = workunit["artifacts"]["stdout_digest"]
                    output = context.single_file_digests_to_bytes([digest])
                    assert output == (b"stdout output\n",)

        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[callback],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        stdout_process = Process(
            argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
        )

        with handler.session():
            self.request(ProcessResult, [stdout_process])
Example #7
    def test_streaming_workunits_parent_id(self):
        rules = [RootRule(Input), rule_one, rule_two, rule_three, rule_four]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = self.WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
        )

        with handler.session():
            i = Input()
            scheduler.product_request(Beta, subjects=[i])

        assert tracker.finished

        r1 = next(item for item in tracker.workunits if item["name"] == "rule_one")
        r2 = next(item for item in tracker.workunits if item["name"] == "rule_two")
        r3 = next(item for item in tracker.workunits if item["name"] == "rule_three")
        r4 = next(item for item in tracker.workunits if item["name"] == "rule_four")

        assert r1.get("parent_id", None) is None
        assert r2["parent_id"] == r1["span_id"]
        assert r3["parent_id"] == r1["span_id"]
        assert r4["parent_id"] == r2["span_id"]
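
Note: Example #7's assertions fully determine the call graph: rule_one is the root, rule_two and rule_three run beneath it, and rule_four runs beneath rule_two. Below is a hedged sketch of rule definitions that would produce exactly those parent_id/span_id relationships. Every type except Input and Beta is an invented placeholder, and the two-argument Get form (plus its import path) is an assumption about the pants version in use.

    from dataclasses import dataclass

    from pants.engine.internals.selectors import Get  # import path is version-dependent
    from pants.engine.rules import rule

    # Empty marker types; hypothetical stand-ins for the real test module's types.
    @dataclass(frozen=True)
    class Input:
        pass

    @dataclass(frozen=True)
    class Beta:
        pass

    @dataclass(frozen=True)
    class Omega:
        pass

    @dataclass(frozen=True)
    class Epsilon:
        pass

    @dataclass(frozen=True)
    class Alpha:
        pass

    @dataclass(frozen=True)
    class Gamma:
        pass

    @dataclass(frozen=True)
    class Delta:
        pass

    @rule
    async def rule_one(i: Input) -> Beta:
        # rule_two and rule_three execute beneath rule_one, so both report
        # rule_one's span_id as their parent_id.
        await Get(Alpha, Omega())
        await Get(Gamma, Omega())
        return Beta()

    @rule
    async def rule_two(o: Omega) -> Alpha:
        # rule_four executes beneath rule_two.
        await Get(Delta, Epsilon())
        return Alpha()

    @rule
    async def rule_three(o: Omega) -> Gamma:
        return Gamma()

    @rule
    async def rule_four(e: Epsilon) -> Delta:
        return Delta()
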
Example #8
    def _run(self):
        global_options = self._options.for_global_scope()

        streaming_handlers = global_options.streaming_workunits_handlers
        report_interval = global_options.streaming_workunits_report_interval
        callbacks = Subsystem.get_streaming_workunit_callbacks(
            streaming_handlers)
        streaming_reporter = StreamingWorkunitHandler(
            self._scheduler_session,
            callbacks=callbacks,
            report_interval_seconds=report_interval)

        help_output = self._maybe_handle_help()
        if help_output is not None:
            self._exiter.exit(help_output)

        with streaming_reporter.session():
            try:
                engine_result = self._maybe_run_v2()
                goal_runner_result = self._maybe_run_v1()
            finally:
                run_tracker_result = self._finish_run()
        final_exit_code = self._compute_final_exit_code(
            engine_result, goal_runner_result, run_tracker_result)
        self._exiter.exit(final_exit_code)
Example #9
    def test_async_reporting(self):
        rules = [fib, RootRule(int)]
        scheduler = self.mk_scheduler(rules,
                                      include_trace_on_error=False,
                                      should_report_workunits=True)

        @dataclass
        class Tracker:
            workunits: List[dict] = field(default_factory=list)

            def add(self, workunits) -> None:
                self.workunits.extend(workunits)

        tracker = Tracker()
        async_reporter = StreamingWorkunitHandler(scheduler,
                                                  callbacks=[tracker.add],
                                                  report_interval_seconds=0.01)
        with async_reporter.session():
            scheduler.product_request(Fib, subjects=[0])

        # The execution of the single named @rule "fib" should provide exactly this one workunit.
        self.assertEqual(len(tracker.workunits), 1)

        tracker.workunits = []
        with async_reporter.session():
            scheduler.product_request(Fib, subjects=[10])

        # Requesting a larger Fibonacci number results in more rule executions and thus more
        # reported workunits. In this case, we expect 10 invocations of the `fib` rule.
        self.assertEqual(len(tracker.workunits), 10)
Example #10
    def run(self, start_time: float) -> ExitCode:
        self._set_start_time(start_time)

        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()
            streaming_handlers = global_options.streaming_workunits_handlers
            report_interval = global_options.streaming_workunits_report_interval
            callbacks = Subsystem.get_streaming_workunit_callbacks(
                streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=report_interval,
            )

            if self.options.help_request:
                help_printer = HelpPrinter(
                    options=self.options,
                    union_membership=self.union_membership)
                return help_printer.print_help()

            v1 = global_options.v1
            v2 = global_options.v2
            with streaming_reporter.session():
                engine_result, goal_runner_result = PANTS_FAILED_EXIT_CODE, PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._maybe_run_v2(v2)
                    goal_runner_result = self._maybe_run_v1(v1)
                except Exception as e:
                    ExceptionSink.log_exception(e)
                run_tracker_result = self._finish_run(
                    self._merge_exit_codes(engine_result, goal_runner_result))
            return self._merge_exit_codes(engine_result, goal_runner_result,
                                          run_tracker_result)
Example #11
    def test_artifacts_on_engine_aware_type(self) -> None:
        @dataclass(frozen=True)
        class Output(EngineAware):
            val: int

            def artifacts(self):
                return {"some_arbitrary_key": EMPTY_DIGEST}

        @rule(desc="a_rule")
        def a_rule(n: int) -> Output:
            return Output(val=n)

        rules = [a_rule, RootRule(int)]
        scheduler = self.mk_scheduler(rules,
                                      include_trace_on_error=False,
                                      should_report_workunits=True)

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.DEBUG,
        )
        with handler.session():
            scheduler.product_request(Output, subjects=[0])

        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished
            if item["name"] == "pants.engine.internals.engine_test.a_rule")
        artifacts = workunit["artifacts"]
        assert artifacts["some_arbitrary_key"] == EMPTY_DIGEST
Example #12
    def test_engine_aware_rule(self):
        @dataclass(frozen=True)
        class ModifiedOutput(EngineAware):
            _level: LogLevel
            val: int

            def level(self):
                return self._level

        @rule(desc="a_rule")
        def a_rule(n: int) -> ModifiedOutput:
            return ModifiedOutput(val=n, _level=LogLevel.ERROR)

        rules = [a_rule, RootRule(int)]
        scheduler = self.mk_scheduler(rules,
                                      include_trace_on_error=False,
                                      should_report_workunits=True)

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.DEBUG,
        )
        with handler.session():
            scheduler.product_request(ModifiedOutput, subjects=[0])

        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished
            if item["name"] == "pants.engine.internals.engine_test.a_rule")
        assert workunit["level"] == "ERROR"
Example #13
    def test_metadata_on_engine_aware_type(self) -> None:
        @dataclass(frozen=True)
        class Output(EngineAwareReturnType):
            val: int

            def metadata(self):
                return {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}

        @rule(desc="a_rule")
        def a_rule(n: int) -> Output:
            return Output(val=n)

        rules = [a_rule, QueryRule(Output, (int,))]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )
        with handler.session():
            scheduler.product_request(Output, subjects=[0])

        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
        )

        metadata = workunit["metadata"]
        assert metadata == {"k1": 1, "k2": "a string", "k3": [1, 2, 3]}
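
Note: Examples #4, #11, #12, #13, #16, and #18 exercise three hooks on engine-aware return types (spelled EngineAware in the older snippets, EngineAwareReturnType in the newer ones). A hedged sketch of that hook surface as the tests use it; the None-returning defaults and exact signatures are assumptions.

    from typing import Any, Dict, Optional

    from pants.util.logging import LogLevel  # import path assumed

    class EngineAwareReturnType:
        def level(self) -> Optional[LogLevel]:
            # If non-None, overrides the level of the workunit for the rule that
            # returned this value (Examples #4 and #12).
            return None

        def metadata(self) -> Optional[Dict[str, Any]]:
            # String-keyed metadata attached to the workunit (Examples #13 and
            # #18; a non-string key causes the whole dict to be dropped).
            return None

        def artifacts(self) -> Optional[Dict[str, Any]]:
            # Named digests/snapshots attached to the workunit's "artifacts"
            # field (Examples #11 and #16).
            return None
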
Example #14
    def test_streaming_workunit_log_levels(self) -> None:
        rules = [RootRule(Input), rule_one_function, rule_two, rule_three, rule_four]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )

        with handler.session():
            i = Input()
            scheduler.product_request(Beta, subjects=[i])

        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        # With the max_workunit_verbosity set to TRACE, we should see the workunit corresponding to the Select node.
        select = next(
            item
            for item in finished
            if item["name"] not in {"rule_one", "rule_two", "rule_three", "rule_four"}
        )
        assert select["name"] == "select"
        assert select["level"] == "DEBUG"

        r1 = next(item for item in finished if item["name"] == "rule_one")
        assert r1["parent_id"] == select["span_id"]
Example #15
    def test_streaming_workunits_parent_id_and_rule_metadata(self):
        rules = [
            RootRule(Input), rule_one_function, rule_two, rule_three, rule_four
        ]
        scheduler = self.mk_scheduler(rules,
                                      include_trace_on_error=False,
                                      should_report_workunits=True)
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(scheduler,
                                           callbacks=[tracker.add],
                                           report_interval_seconds=0.01)

        with handler.session():
            i = Input()
            scheduler.product_request(Beta, subjects=[i])

        assert tracker.finished

        # rule_one should complete well after the other rules because of the
        # artificial sleep() delay inside it.
        assert {item["name"]
                for item in tracker.finished_workunit_chunks[0]} == {
                    "rule_two",
                    "rule_three",
                    "rule_four",
                }

        # Because of the artificial delay in rule_one, it should have time to be reported as
        # started but not yet finished.
        started = list(itertools.chain.from_iterable(tracker.started_workunit_chunks))
        assert any(item["name"] == "rule_one" for item in started)

        assert {item["name"]
                for item in tracker.finished_workunit_chunks[1]
                } == {"rule_one"}

        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        r1 = next(item for item in finished if item["name"] == "rule_one")
        r2 = next(item for item in finished if item["name"] == "rule_two")
        r3 = next(item for item in finished if item["name"] == "rule_three")
        r4 = next(item for item in finished if item["name"] == "rule_four")

        # rule_one should have no parent_id because its actual parent workunit
        # was filtered out based on level.
        assert r1.get("parent_id", None) is None

        assert r2["parent_id"] == r1["span_id"]
        assert r3["parent_id"] == r1["span_id"]
        assert r4["parent_id"] == r2["span_id"]

        assert r3["description"] == "Rule number 3"
        assert r4["description"] == "Rule number 4"
        assert r4["level"] == "INFO"
Example #16
    def test_more_complicated_engine_aware(self) -> None:
        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self.scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )
        with handler.session():
            input_1 = CreateDigest((
                FileContent(path="a.txt", content=b"alpha"),
                FileContent(path="b.txt", content=b"beta"),
            ))
            digest_1 = self.request(Digest, [input_1])
            snapshot_1 = self.request(Snapshot, [digest_1])

            input_2 = CreateDigest((FileContent(path="g.txt",
                                                content=b"gamma"), ))
            digest_2 = self.request(Digest, [input_2])
            snapshot_2 = self.request(Snapshot, [digest_2])

            input = ComplicatedInput(snapshot_1=snapshot_1,
                                     snapshot_2=snapshot_2)

            self.request(Output, [input])

        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished
            if item["name"] == "pants.engine.internals.engine_test.a_rule")

        streaming_workunit_context = handler._context

        artifacts = workunit["artifacts"]
        output_snapshot_1 = artifacts["snapshot_1"]
        output_snapshot_2 = artifacts["snapshot_2"]

        output_contents_list = streaming_workunit_context.snapshots_to_file_contents(
            [output_snapshot_1, output_snapshot_2])
        assert len(output_contents_list) == 2

        assert isinstance(output_contents_list[0], DigestContents)
        assert isinstance(output_contents_list[1], DigestContents)

        digest_contents_1 = output_contents_list[0]
        digest_contents_2 = output_contents_list[1]

        assert len(tuple(x for x in digest_contents_1
                         if x.content == b"alpha")) == 1
        assert len(tuple(x for x in digest_contents_1
                         if x.content == b"beta")) == 1

        assert len(tuple(x for x in digest_contents_2
                         if x.content == b"gamma")) == 1
Example #17
    def test_streaming_workunit_log_level_parent_rewrite(self) -> None:
        rules = [rule_A, rule_B, rule_C, QueryRule(Alpha, (Input,))]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        info_level_handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        with info_level_handler.session():
            i = Input()
            scheduler.product_request(Alpha, subjects=[i])

        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        assert len(finished) == 2
        r_A = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
        )
        r_C = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
        )
        assert "parent_id" not in r_A
        assert r_C["parent_id"] == r_A["span_id"]

        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )
        tracker = WorkunitTracker()
        trace_level_handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )

        with trace_level_handler.session():
            i = Input()
            scheduler.product_request(Alpha, subjects=[i])

        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        r_A = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_A"
        )
        r_B = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_B"
        )
        r_C = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.rule_C"
        )
        assert r_B["parent_id"] == r_A["span_id"]
        assert r_C["parent_id"] == r_B["span_id"]
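
Note: Example #17's two runs pin down a three-rule chain and its levels: at INFO verbosity only rule_A and rule_C are visible and rule_C is re-parented to rule_A; at TRACE verbosity the intermediate rule_B reappears between them. A hedged sketch follows, reusing the placeholder-type pattern from the sketch after Example #7; the level= keyword on @rule is an assumption about this pants version, as are all type names except Input and Alpha.

    from dataclasses import dataclass

    from pants.engine.internals.selectors import Get  # import path is version-dependent
    from pants.engine.rules import rule
    from pants.util.logging import LogLevel

    @dataclass(frozen=True)
    class Input:
        pass

    @dataclass(frozen=True)
    class Alpha:
        pass

    @dataclass(frozen=True)
    class Bravo:
        pass

    @dataclass(frozen=True)
    class Charlie:
        pass

    @dataclass(frozen=True)
    class Delta:
        pass

    @dataclass(frozen=True)
    class Echo:
        pass

    @rule
    async def rule_A(i: Input) -> Alpha:
        await Get(Bravo, Charlie())  # runs rule_B beneath rule_A
        return Alpha()

    @rule(level=LogLevel.TRACE)  # assumed syntax; filtered out at INFO verbosity
    async def rule_B(c: Charlie) -> Bravo:
        await Get(Delta, Echo())  # runs rule_C beneath rule_B
        return Bravo()

    @rule
    async def rule_C(e: Echo) -> Delta:
        # When rule_B is filtered by max_workunit_verbosity, the handler
        # re-parents this workunit to its nearest visible ancestor, rule_A.
        return Delta()
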
Example #18
    def test_metadata_non_string_key_behavior(self) -> None:
        # If a metadata() method returns a non-string key, the engine should
        # drop the metadata on the workunit (logging a warning) rather than
        # raising an error.

        @dataclass(frozen=True)
        class Output(EngineAwareReturnType):
            val: int

            def metadata(self):
                return {10: "foo", "other_key": "other value"}

        @rule(desc="a_rule")
        def a_rule(n: int) -> Output:
            return Output(val=n)

        rules = [a_rule, QueryRule(Output, (int,))]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.TRACE,
        )
        with handler.session():
            scheduler.product_request(Output, subjects=[0])

        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        workunit = next(
            item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule"
        )

        assert workunit["metadata"] == {}
Example #19
    def run(self, start_time: float) -> ExitCode:
        self._set_start_time(start_time)

        with maybe_profiled(self.profile_path):
            global_options = self.options.for_global_scope()
            streaming_handlers = global_options.streaming_workunits_handlers
            report_interval = global_options.streaming_workunits_report_interval
            callbacks = Subsystem.get_streaming_workunit_callbacks(
                streaming_handlers)
            streaming_reporter = StreamingWorkunitHandler(
                self.graph_session.scheduler_session,
                callbacks=callbacks,
                report_interval_seconds=report_interval,
            )

            if self.options.help_request:
                all_help_info = HelpInfoExtracter.get_all_help_info(
                    self.options,
                    self.union_membership,
                    self.graph_session.goal_consumed_subsystem_scopes,
                )
                help_printer = HelpPrinter(
                    bin_name=global_options.pants_bin_name,
                    help_request=self.options.help_request,
                    all_help_info=all_help_info,
                    use_color=global_options.colors,
                )
                return help_printer.print_help()

            with streaming_reporter.session():
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_v2()
                except Exception as e:
                    ExceptionSink.log_exception(e)
                run_tracker_result = self._finish_run(engine_result)
            return self._merge_exit_codes(engine_result, run_tracker_result)
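
Note: none of the runner snippets show the exit-code helpers they rely on (_compute_final_exit_code in Examples #1, #3, and #8; _merge_exit_codes in Examples #10 and #19). A plausible sketch, under the assumption that any failing component should fail the run; the real methods' precedence between distinct failure codes may differ.

    PANTS_SUCCEEDED_EXIT_CODE = 0  # assumption: the conventional success code

    def merge_exit_codes(code: int, *codes: int) -> int:
        # Return the first non-success code, or success if every component
        # succeeded. Hypothetical free-function sketch of the method used above.
        for c in (code, *codes):
            if c != PANTS_SUCCEEDED_EXIT_CODE:
                return c
        return PANTS_SUCCEEDED_EXIT_CODE
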
Example #20
    def test_streaming_workunits_reporting(self):
        rules = [fib, RootRule(int)]
        scheduler = self.mk_scheduler(
            rules, include_trace_on_error=False, should_report_workunits=True
        )

        tracker = self.WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler, callbacks=[tracker.add], report_interval_seconds=0.01
        )
        with handler.session():
            scheduler.product_request(Fib, subjects=[0])

        # The execution of the single named @rule "fib" should provide exactly this one workunit.
        self.assertEqual(len(tracker.workunits), 1)

        tracker.workunits = []
        with handler.session():
            scheduler.product_request(Fib, subjects=[10])

        # Requesting a larger Fibonacci number results in more rule executions and thus more
        # reported workunits. In this case, we expect 10 invocations of the `fib` rule.
        assert len(tracker.workunits) == 10
        assert tracker.finished
Example #21
    def test_process_digests_on_workunits(self):
        # Call this so that self._scheduler is not None when we pass it to
        # StreamingWorkunitHandler.
        self._init_engine()

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        stdout_process = Process(argv=("/bin/bash", "-c",
                                       "/bin/echo 'stdout output'"),
                                 description="Stdout process")

        with handler.session():
            result = self.request_single_product(ProcessResult, stdout_process)

        assert tracker.finished
        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        process_workunit = next(
            item for item in finished
            if item["name"] == "multi_platform_process-running")
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]

        assert result.stdout == b"stdout output\n"
        assert stderr_digest == EMPTY_DIGEST
        assert stdout_digest.serialized_bytes_length == len(result.stdout)

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            self._scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        stderr_process = Process(argv=("/bin/bash", "-c",
                                       "1>&2 /bin/echo 'stderr output'"),
                                 description="Stderr process")

        with handler.session():
            result = self.request_single_product(ProcessResult, stderr_process)

        assert tracker.finished
        finished = list(
            itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished
            if item["name"] == "multi_platform_process-running")

        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]

        assert result.stderr == b"stderr output\n"
        assert stdout_digest == EMPTY_DIGEST
        assert stderr_digest.serialized_bytes_length == len(result.stderr)
Example #22
    def test_process_digests_on_workunits(self):
        scheduler = self.scheduler

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        stdout_process = Process(
            argv=("/bin/bash", "-c", "/bin/echo 'stdout output'"), description="Stdout process"
        )

        with handler.session():
            result = self.request(ProcessResult, [stdout_process])

        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))

        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )
        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]

        assert result.stdout == b"stdout output\n"
        assert stderr_digest == EMPTY_FILE_DIGEST
        assert stdout_digest.serialized_bytes_length == len(result.stdout)

        tracker = WorkunitTracker()
        handler = StreamingWorkunitHandler(
            scheduler,
            callbacks=[tracker.add],
            report_interval_seconds=0.01,
            max_workunit_verbosity=LogLevel.INFO,
        )

        stderr_process = Process(
            argv=("/bin/bash", "-c", "1>&2 /bin/echo 'stderr output'"), description="Stderr process"
        )

        with handler.session():
            result = self.request(ProcessResult, [stderr_process])

        assert tracker.finished
        finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks))
        process_workunit = next(
            item for item in finished if item["name"] == "multi_platform_process-running"
        )

        assert process_workunit is not None
        stdout_digest = process_workunit["artifacts"]["stdout_digest"]
        stderr_digest = process_workunit["artifacts"]["stderr_digest"]

        assert result.stderr == b"stderr output\n"
        assert stdout_digest == EMPTY_FILE_DIGEST
        assert stderr_digest.serialized_bytes_length == len(result.stderr)

        try:
            scheduler.ensure_remote_has_recursive([stdout_digest, stderr_digest])
        except Exception as e:
            # This is the exception message we should expect from invoking
            # ensure_remote_has_recursive() in Rust when no remote is configured.
            assert str(e) == "Cannot ensure remote has blobs without a remote"

        byte_outputs = scheduler.single_file_digests_to_bytes([stdout_digest, stderr_digest])
        assert byte_outputs[0] == result.stdout
        assert byte_outputs[1] == result.stderr
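
Note: taken together, the examples fix the callback contract StreamingWorkunitHandler expects: each callback is invoked with keyword arguments on every reporting tick. A minimal conforming callback; `completed_workunits` and `context` appear verbatim in Example #6, while `started_workunits` and `finished` are inferred from the tracker fields the tests read.

    def log_completed_workunits(**kwargs) -> None:
        # `finished` flips to True on the session's final report (see the
        # `tracker.finished` assertions throughout); `context` is a
        # StreamingWorkunitContext (Example #6) that can resolve digests and
        # snapshots reported as workunit artifacts.
        if kwargs.get("finished"):
            print("session finished; context was", type(kwargs["context"]).__name__)
        for workunit in kwargs.get("completed_workunits", ()):
            print(workunit["name"], workunit.get("level"), workunit.get("parent_id"))

A callback like this would be wired in via callbacks=[log_completed_workunits] on the handler, exactly as the trackers are in the tests above.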