Example #1
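    # Materializes the mock run request's digest as the process's input files and asserts a zero exit code.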
    def test_materialize_input_files(self) -> None:
        program_text = b'#!/usr/bin/python\nprint("hello")'
        binary = self.create_mock_run_request(program_text)
        interactive_runner = InteractiveRunner(self.scheduler)
        process = InteractiveProcess(
            argv=("./program.py",),
            run_in_workspace=False,
            input_digest=binary.digest,
        )
        result = interactive_runner.run(process)
        assert result.exit_code == 0
Example #2
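# The same check rewritten for the function-style RuleRunner fixture; the InteractiveProcess is run under mock_console.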
def test_materialize_input_files(rule_runner: RuleRunner) -> None:
    program_text = b'#!/usr/bin/python\nprint("hello")'
    binary = create_mock_run_request(rule_runner, program_text)
    with mock_console(rule_runner.options_bootstrapper):
        interactive_runner = InteractiveRunner(rule_runner.scheduler)
        process = InteractiveProcess(
            argv=("./program.py",),
            run_in_workspace=False,
            input_digest=binary.digest,
        )
        result = interactive_runner.run(process)
    assert result.exit_code == 0
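Example #3
# Helper that pins pytest and its plugins via options, requests a TestResult, and replays the
# TestDebugRequest process through InteractiveRunner to check that both paths report the same exit code.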
def run_pytest(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    # pytest-html==1.22.1 has an undeclared dep on setuptools. This, unfortunately,
    # is the most recent version of pytest-html that works with the low version of
    # pytest that we pin to.
    plugins = [
        "zipp==1.0.0", "pytest-cov>=2.8.1,<2.9", "pytest-html==1.22.1",
        "setuptools"
    ]
    plugins_str = "['" + "', '".join(plugins) + "']"
    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        f"--pytest-pytest-plugins={plugins_str}",
        *(extra_args or ()),
    ]
    rule_runner.set_options(args,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    inputs = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, inputs)
    debug_request = rule_runner.request(TestDebugRequest, inputs)
    if debug_request.process is not None:
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Example #4
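    # Runs a @goal_rule end-to-end: parses specs from the bootstrapped options, then executes
    # the goal with a captured Console, a Workspace, and an InteractiveRunner.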
    def run_goal_rule(
        self,
        goal: Type[Goal],
        *,
        global_args: Iterable[str] | None = None,
        args: Iterable[str] | None = None,
        env: Mapping[str, str] | None = None,
        env_inherit: set[str] | None = None,
    ) -> GoalRuleResult:
        merged_args = (*(global_args or []), goal.name, *(args or []))
        self.set_options(merged_args, env=env, env_inherit=env_inherit)

        raw_specs = self.options_bootstrapper.full_options_for_scopes([
            GlobalOptions.get_scope_info(),
            goal.subsystem_cls.get_scope_info()
        ]).specs
        specs = SpecsParser(self.build_root).parse_specs(raw_specs)

        stdout, stderr = StringIO(), StringIO()
        console = Console(stdout=stdout, stderr=stderr)

        exit_code = self.scheduler.run_goal_rule(
            goal,
            Params(
                specs,
                console,
                Workspace(self.scheduler),
                InteractiveRunner(self.scheduler),
            ),
        )

        console.flush()
        return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
Example #5
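    # Older run_goal_rule variant that builds its own OptionsBootstrapper and passes it explicitly
    # as a Param to the goal rule.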
    def run_goal_rule(
        self,
        goal: Type[Goal],
        *,
        global_args: Optional[Iterable[str]] = None,
        args: Optional[Iterable[str]] = None,
        env: Optional[Mapping[str, str]] = None,
    ) -> GoalRuleResult:
        options_bootstrapper = create_options_bootstrapper(
            args=(*(global_args or []), goal.name, *(args or [])),
            env=env,
        )

        raw_specs = options_bootstrapper.get_full_options([
            *GlobalOptions.known_scope_infos(),
            *goal.subsystem_cls.known_scope_infos()
        ]).specs
        specs = SpecsParser(self.build_root).parse_specs(raw_specs)

        stdout, stderr = StringIO(), StringIO()
        console = Console(stdout=stdout, stderr=stderr)

        exit_code = self.scheduler.run_goal_rule(
            goal,
            Params(
                specs,
                console,
                options_bootstrapper,
                Workspace(self.scheduler),
                InteractiveRunner(self.scheduler),
            ),
        )

        console.flush()
        return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
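Example #6
    # Exercises the repl goal rule with --shell=ipython against a Python library target.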
    def test_repl_ipython(self) -> None:
        self.setup_python_library()
        self.execute_rule(
            global_args=[
                "--backend-packages=pants.backend.python",
                "--source-root-patterns=src/python",
            ],
            args=["--shell=ipython", "src/python/lib.py"],
            additional_params=[InteractiveRunner(self.scheduler)],
        )
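Example #7
    # An unregistered REPL name should exit with -1 and print the list of available REPLs.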
    def test_repl_bogus_repl_name(self) -> None:
        self.setup_python_library()
        result = self.execute_rule(
            global_args=[
                "--backend-packages=pants.backend.python",
                "--source-root-patterns=src/python",
            ],
            args=["--shell=bogus-repl", "src/python/lib.py"],
            additional_params=[InteractiveRunner(self.scheduler)],
            exit_code=-1,
        )
        assert "'bogus-repl' is not a registered REPL. Available REPLs" in result.stderr
Example #8
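    # Drives the `run` rule with run_rule_with_mocks, mocking field-set resolution and the
    # RunRequest for a synthetic binary target.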
    def single_target_run(
        self,
        *,
        console: MockConsole,
        program_text: bytes,
        address_spec: str,
    ) -> Run:
        workspace = Workspace(self.scheduler)
        interactive_runner = InteractiveRunner(self.scheduler)

        class TestRunFieldSet(RunFieldSet):
            required_fields = ()

        class TestBinaryTarget(Target):
            alias = "binary"
            core_fields = ()

        address = Address.parse(address_spec)
        target = TestBinaryTarget({}, address=address)
        target_with_origin = TargetWithOrigin(
            target, AddressLiteralSpec(address.spec_path, address.target_name))
        field_set = TestRunFieldSet.create(target)

        res = run_rule_with_mocks(
            run,
            rule_args=[
                create_goal_subsystem(RunSubsystem, args=[]),
                create_subsystem(GlobalOptions,
                                 pants_workdir=self.pants_workdir),
                console,
                interactive_runner,
                workspace,
                BuildRoot(),
            ],
            mock_gets=[
                MockGet(
                    product_type=TargetsToValidFieldSets,
                    subject_type=TargetsToValidFieldSetsRequest,
                    mock=lambda _: TargetsToValidFieldSets(
                        {target_with_origin: [field_set]}),
                ),
                MockGet(
                    product_type=RunRequest,
                    subject_type=TestRunFieldSet,
                    mock=lambda _: self.create_mock_run_request(program_text),
                ),
            ],
        )
        return cast(Run, res)
Example #9
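# The `run` goal rule: writes the RunRequest digest into a temporary chroot under the workdir,
# substitutes {chroot} into args and extra env, and executes the process in the workspace.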
async def run(
    run_subsystem: RunSubsystem,
    global_options: GlobalOptions,
    console: Console,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
    complete_env: CompleteEnvironment,
) -> Run:
    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            RunFieldSet,
            goal_description="the `run` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            expect_single_field_set=True,
        ),
    )
    field_set = targets_to_valid_field_sets.field_sets[0]
    request = await Get(RunRequest, RunFieldSet, field_set)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=True) as tmpdir:
        workspace.write_digest(request.digest,
                               path_prefix=PurePath(tmpdir).relative_to(
                                   build_root.path).as_posix())

        args = (arg.format(chroot=tmpdir) for arg in request.args)
        env = {
            **complete_env,
            **{
                k: v.format(chroot=tmpdir)
                for k, v in request.extra_env.items()
            }
        }
        try:
            result = interactive_runner.run(
                InteractiveProcess(
                    argv=(*args, *run_subsystem.args),
                    env=env,
                    run_in_workspace=True,
                ))
            exit_code = result.exit_code
        except Exception as e:
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
Example #10
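    # Runs the repl rule over targets that include generated protobuf code.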
    def test_repl_with_targets(self) -> None:
        # TODO(#9108): A mock InteractiveRunner that allows us to actually run code in
        #  the repl and verify that, e.g., the generated protobuf code is available.
        #  Right now this test prepares for that by including generated code, but cannot
        #  actually verify it.
        self.setup_sources()
        self.execute_rule(
            global_args=[
                "--backend-packages=pants.backend.python",
                "--backend-packages=pants.backend.codegen.protobuf.python",
                "--source-root-patterns=src/python",
            ],
            args=["src/python/lib.py"],
            additional_params=[InteractiveRunner(self.scheduler)],
        )
Example #11
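# RuleRunner-based version of the mocked single-target run; the rule also receives the
# complete environment via rule_runner.environment.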
def single_target_run(
    rule_runner: RuleRunner,
    address: Address,
    *,
    program_text: bytes,
) -> Run:
    workspace = Workspace(rule_runner.scheduler)
    interactive_runner = InteractiveRunner(rule_runner.scheduler)

    class TestRunFieldSet(RunFieldSet):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    target = TestBinaryTarget({}, address)
    field_set = TestRunFieldSet.create(target)

    with mock_console(rule_runner.options_bootstrapper) as (console, _):
        res = run_rule_with_mocks(
            run,
            rule_args=[
                create_goal_subsystem(RunSubsystem, args=[]),
                create_subsystem(GlobalOptions,
                                 pants_workdir=rule_runner.pants_workdir),
                console,
                interactive_runner,
                workspace,
                BuildRoot(),
                rule_runner.environment,
            ],
            mock_gets=[
                MockGet(
                    output_type=TargetRootsToFieldSets,
                    input_type=TargetRootsToFieldSetsRequest,
                    mock=lambda _: TargetRootsToFieldSets(
                        {target: [field_set]}),
                ),
                MockGet(
                    output_type=RunRequest,
                    input_type=TestRunFieldSet,
                    mock=lambda _: create_mock_run_request(
                        rule_runner, program_text),
                ),
            ],
        )
        return cast(Run, res)
Example #12
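# run_pytest helper with per-feature keyword arguments (passthrough args, JUnit XML, coverage,
# execution slots, config file, forced runs) instead of a single extra_args list.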
def run_pytest(
    rule_runner: RuleRunner,
    test_target: PythonTests,
    *,
    passthrough_args: Optional[str] = None,
    junit_xml_dir: Optional[str] = None,
    use_coverage: bool = False,
    execution_slot_var: Optional[str] = None,
    extra_env_vars: Optional[str] = None,
    env: Optional[Mapping[str, str]] = None,
    config: Optional[str] = None,
    force: bool = False,
) -> TestResult:
    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']",
    ]
    if passthrough_args:
        args.append(f"--pytest-args='{passthrough_args}'")
    if extra_env_vars:
        args.append(f"--test-extra-env-vars={extra_env_vars}")
    if junit_xml_dir:
        args.append(f"--pytest-junit-xml-dir={junit_xml_dir}")
    if use_coverage:
        args.append("--test-use-coverage")
    if execution_slot_var:
        args.append(f"--pytest-execution-slot-var={execution_slot_var}")
    if config:
        rule_runner.create_file(relpath="pytest.ini", contents=config)
        args.append("--pytest-config=pytest.ini")
    if force:
        args.append("--test-force")
    rule_runner.set_options(args,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})

    inputs = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, inputs)
    debug_request = rule_runner.request(TestDebugRequest, inputs)
    if debug_request.process is not None:
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Example #13
    def run_goal_rules(
        self,
        *,
        options_bootstrapper: OptionsBootstrapper,
        union_membership: UnionMembership,
        goals: Iterable[str],
        specs: Specs,
        poll: bool = False,
        poll_delay: Optional[float] = None,
    ) -> int:
        """Runs @goal_rules sequentially and interactively by requesting their implicit Goal
        products.

        For retryable failures, raises scheduler.ExecutionError.

        :returns: An exit code.
        """

        workspace = Workspace(self.scheduler_session)
        interactive_runner = InteractiveRunner(self.scheduler_session)

        for goal in goals:
            goal_product = self.goal_map[goal]
            # NB: We no-op for goals that have no implementation because no relevant backends are
            # registered. We might want to reconsider the behavior to instead warn or error when
            # trying to run something like `./pants run` without any backends registered.
            is_implemented = union_membership.has_members_for_all(
                goal_product.subsystem_cls.required_union_implementations)
            if not is_implemented:
                continue
            # NB: Keep this in sync with the method `goal_consumed_types`.
            params = Params(specs, options_bootstrapper, self.console,
                            workspace, interactive_runner)
            logger.debug(
                f"requesting {goal_product} to satisfy execution of `{goal}` goal"
            )
            try:
                exit_code = self.scheduler_session.run_goal_rule(
                    goal_product, params, poll=poll, poll_delay=poll_delay)
            finally:
                self.console.flush()

            if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
                return exit_code

        return PANTS_SUCCEEDED_EXIT_CODE
Example #14
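    # run_goal_rule variant that creates a fresh scheduler session seeded with SessionValues
    # (OptionsBootstrapper and PantsEnvironment) before running the goal.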
    def run_goal_rule(
        self,
        goal: Type[Goal],
        *,
        global_args: Optional[Iterable[str]] = None,
        args: Optional[Iterable[str]] = None,
        env: Optional[Mapping[str, str]] = None,
    ) -> GoalRuleResult:
        options_bootstrapper = create_options_bootstrapper(
            args=(*(global_args or []), goal.name, *(args or [])),
            env=env,
        )

        raw_specs = options_bootstrapper.get_full_options([
            *GlobalOptions.known_scope_infos(),
            *goal.subsystem_cls.known_scope_infos()
        ]).specs
        specs = SpecsParser(self.build_root).parse_specs(raw_specs)

        stdout, stderr = StringIO(), StringIO()
        console = Console(stdout=stdout, stderr=stderr)

        session = self.scheduler.scheduler.new_session(
            build_id="buildid_for_test",
            should_report_workunits=True,
            session_values=SessionValues({
                OptionsBootstrapper:
                options_bootstrapper,
                PantsEnvironment:
                PantsEnvironment(env)
            }),
        )

        exit_code = session.run_goal_rule(
            goal,
            Params(
                specs,
                console,
                Workspace(self.scheduler),
                InteractiveRunner(self.scheduler),
            ),
        )

        console.flush()
        return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
Example #15
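# The repl goal rule: resolves the ReplImplementation registered for --shell (defaulting to
# python), writes its digest into a temporary chroot, and runs it interactively with a
# non-hermetic environment.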
async def run_repl(
    console: Console,
    workspace: Workspace,
    interactive_runner: InteractiveRunner,
    repl_subsystem: ReplSubsystem,
    all_specified_addresses: Addresses,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_specified_addresses))

    # TODO: When we support multiple languages, detect the default repl to use based
    #  on the targets.  For now we default to the python repl.
    repl_shell_name = repl_subsystem.shell or "python"

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        repl_impl = repl_implementation_cls(targets=Targets(
            transitive_targets.closure),
                                            chroot=tmpdir)
        request = await Get(ReplRequest, ReplImplementation, repl_impl)

        workspace.write_digest(request.digest,
                               path_prefix=PurePath(tmpdir).relative_to(
                                   build_root.path).as_posix())
        result = interactive_runner.run(
            InteractiveProcess(argv=request.args,
                               env=request.extra_env,
                               run_in_workspace=True,
                               hermetic_env=False))
    return Repl(result.exit_code)
Example #16
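# Another revision of run_repl: filters targets with is_valid(), writes the digest under the
# build root, and invokes the request's binary_name from the chroot.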
async def run_repl(
    console: Console,
    workspace: Workspace,
    interactive_runner: InteractiveRunner,
    repl_subsystem: ReplSubsystem,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    repl_shell_name = repl_subsystem.shell or "python"

    implementations: Dict[str, Type[ReplImplementation]] = {
        impl.name: impl
        for impl in union_membership[ReplImplementation]
    }
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}")
        return Repl(-1)

    repl_impl = repl_implementation_cls(targets=Targets(
        tgt for tgt in transitive_targets.closure
        if repl_implementation_cls.is_valid(tgt)))
    request = await Get(ReplRequest, ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=False) as tmpdir:
        tmpdir_relative_path = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        exe_path = PurePath(tmpdir, request.binary_name).as_posix()
        workspace.write_digest(request.digest,
                               path_prefix=tmpdir_relative_path)
        result = interactive_runner.run(
            InteractiveProcess(argv=(exe_path, ),
                               env=request.env,
                               run_in_workspace=True))

    return Repl(result.exit_code)
Example #17
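# A `run` rule variant built on BinaryFieldSet: executes the built binary from a temporary
# chroot with the request's prefix args and environment.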
async def run(
    run_subsystem: RunSubsystem,
    global_options: GlobalOptions,
    console: Console,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
) -> Run:
    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            BinaryFieldSet,
            goal_description="the `run` goal",
            error_if_no_valid_targets=True,
            expect_single_field_set=True,
        ),
    )
    field_set = targets_to_valid_field_sets.field_sets[0]
    request = await Get(RunRequest, BinaryFieldSet, field_set)

    with temporary_dir(root_dir=global_options.options.pants_workdir,
                       cleanup=True) as tmpdir:
        tmpdir_relative_path = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.write_digest(request.digest,
                               path_prefix=tmpdir_relative_path)

        exe_path = PurePath(tmpdir, request.binary_name).as_posix()
        process = InteractiveProcess(
            argv=(exe_path, *request.prefix_args, *run_subsystem.args),
            env=request.env,
            run_in_workspace=True,
        )
        try:
            result = interactive_runner.run(process)
            exit_code = result.exit_code
        except Exception as e:
            console.print_stderr(
                f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
Example #18
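    # Class-based run_pytest helper: requests TestResult and TestDebugRequest as single products
    # and asserts that the interactive debug run's exit code matches the test status.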
    def run_pytest(
        self,
        *,
        passthrough_args: Optional[str] = None,
        junit_xml_dir: Optional[str] = None,
        use_coverage: bool = False,
        execution_slot_var: Optional[str] = None,
    ) -> TestResult:
        args = [
            "--backend-packages=pants.backend.python",
            f"--source-root-patterns={self.source_root}",
            # pin to lower versions so that we can run Python 2 tests
            "--pytest-version=pytest>=4.6.6,<4.7",
            "--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']",
        ]
        if passthrough_args:
            args.append(f"--pytest-args='{passthrough_args}'")
        if junit_xml_dir:
            args.append(f"--pytest-junit-xml-dir={junit_xml_dir}")
        if use_coverage:
            args.append("--test-use-coverage")
        if execution_slot_var:
            args.append(f"--pytest-execution-slot-var={execution_slot_var}")

        params = Params(
            PythonTestFieldSet.create(
                PythonTests({},
                            address=Address(self.package,
                                            target_name="target"))),
            create_options_bootstrapper(args=args),
        )
        test_result = self.request_single_product(TestResult, params)
        debug_request = self.request_single_product(TestDebugRequest, params)
        debug_result = InteractiveRunner(self.scheduler).run(
            debug_request.process)
        if test_result.status == Status.SUCCESS:
            assert debug_result.exit_code == 0
        else:
            assert debug_result.exit_code != 0
        return test_result
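Example #19
# Minimal run_pytest helper: sets just the backend, source roots, and any extra args before
# requesting the test and debug results.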
def run_pytest(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        *(extra_args or ()),
    ]
    rule_runner.set_options(args,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    inputs = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, inputs)
    debug_request = rule_runner.request(TestDebugRequest, inputs)
    if debug_request.process is not None:
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Example #20
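# Runs shunit2 shell tests and replays the debug process through InteractiveRunner, mirroring
# the pytest helper above.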
def run_shunit2(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    rule_runner.set_options(
        [
            "--backend-packages=pants.backend.shell",
            *(extra_args or ()),
        ],
        env=env,
        env_inherit={"PATH"},
    )
    inputs = [Shunit2FieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, inputs)
    debug_request = rule_runner.request(TestDebugRequest, inputs)
    if debug_request.process is not None:
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Example #21
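# Drives the `test` goal rule with run_rule_with_mocks: mocked field-set resolution, a mocked
# debug process, and a mocked coverage report; returns the exit code and captured stderr.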
def run_test_rule(
    rule_runner: RuleRunner,
    *,
    field_set: Type[TestFieldSet],
    targets: List[Target],
    debug: bool = False,
    use_coverage: bool = False,
    output: ShowOutput = ShowOutput.ALL,
    include_sources: bool = True,
    valid_targets: bool = True,
) -> Tuple[int, str]:
    console = MockConsole(use_colors=False)
    test_subsystem = create_goal_subsystem(
        TestSubsystem,
        debug=debug,
        use_coverage=use_coverage,
        output=output,
        extra_env_vars=[],
    )
    interactive_runner = InteractiveRunner(rule_runner.scheduler)
    workspace = Workspace(rule_runner.scheduler)
    union_membership = UnionMembership({
        TestFieldSet: [field_set],
        CoverageDataCollection: [MockCoverageDataCollection]
    })

    def mock_find_valid_field_sets(
        _: TargetRootsToFieldSetsRequest, ) -> TargetRootsToFieldSets:
        if not valid_targets:
            return TargetRootsToFieldSets({})
        return TargetRootsToFieldSets(
            {tgt: [field_set.create(tgt)]
             for tgt in targets})

    def mock_debug_request(_: TestFieldSet) -> TestDebugRequest:
        digest = rule_runner.request(Digest, [
            CreateDigest((FileContent(path="program.py",
                                      content=b"def test(): pass"), ))
        ])
        process = InteractiveProcess(["/usr/bin/python", "program.py"],
                                     input_digest=digest)
        return TestDebugRequest(process)

    def mock_coverage_report_generation(
        coverage_data_collection: MockCoverageDataCollection,
    ) -> CoverageReports:
        addresses = ", ".join(coverage_data.address.spec
                              for coverage_data in coverage_data_collection)
        console_report = ConsoleCoverageReport(f"Ran coverage on {addresses}")
        return CoverageReports(reports=(console_report, ))

    result: Test = run_rule_with_mocks(
        run_tests,
        rule_args=[
            console,
            test_subsystem,
            interactive_runner,
            workspace,
            union_membership,
        ],
        mock_gets=[
            MockGet(
                output_type=TargetRootsToFieldSets,
                input_type=TargetRootsToFieldSetsRequest,
                mock=mock_find_valid_field_sets,
            ),
            MockGet(
                output_type=EnrichedTestResult,
                input_type=TestFieldSet,
                mock=lambda fs: fs.test_result,
            ),
            MockGet(
                output_type=TestDebugRequest,
                input_type=TestFieldSet,
                mock=mock_debug_request,
            ),
            MockGet(
                output_type=FieldSetsWithSources,
                input_type=FieldSetsWithSourcesRequest,
                mock=lambda field_sets: FieldSetsWithSources(
                    field_sets if include_sources else ()),
            ),
            # Merge XML results.
            MockGet(
                output_type=Digest,
                input_type=MergeDigests,
                mock=lambda _: EMPTY_DIGEST,
            ),
            MockGet(
                output_type=CoverageReports,
                input_type=CoverageDataCollection,
                mock=mock_coverage_report_generation,
            ),
            MockGet(
                output_type=OpenFiles,
                input_type=OpenFilesRequest,
                mock=lambda _: OpenFiles(()),
            ),
        ],
        union_membership=union_membership,
    )
    assert not console.stdout.getvalue()
    return result.exit_code, console.stderr.getvalue()
Example #22
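# The `test` goal rule: in --debug mode it runs a single field set's process interactively;
# otherwise it prints per-target output, a summary, merged XML results, and optional coverage reports.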
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetsToValidFieldSets,
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            ),
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get(TestDebugRequest, TestFieldSet, field_set)
        debug_result = interactive_runner.run(request.process)
        return Test(debug_result.exit_code)

    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_valid_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(AddressAndTestResult, WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources)

    # Print details.
    for result in results:
        if test_subsystem.options.output == ShowOutput.NONE or (
                test_subsystem.options.output == ShowOutput.FAILED
                and result.test_result.status == Status.SUCCESS):
            continue
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (console.green("✓") if result.test_result.status
                      == Status.SUCCESS else console.red("𐄂"))
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    console.print_stderr("")
    for result in results:
        color = console.green if result.test_result.status == Status.SUCCESS else console.red
        # The right-align logic sees the color control codes as characters, so we have
        # to account for that. In f-strings the alignment field widths must be literals,
        # so we have to indirect via a call to .format().
        right_align = 19 if console.use_colors else 10
        format_str = f"{{addr:80}}.....{{result:>{right_align}}}"
        console.print_stderr(
            format_str.format(addr=result.address.spec,
                              result=color(result.test_result.status.value)))

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.test_result.xml_results for result in results
                     if result.test_result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]] = {
                collection_cls.element_type: collection_cls
                for collection_cls in union_membership.get(
                    CoverageDataCollection)
            }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    exit_code = (PANTS_FAILED_EXIT_CODE if any(
        res.test_result.status == Status.FAILURE
        for res in results) else PANTS_SUCCEEDED_EXIT_CODE)

    return Test(exit_code)
Example #23
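# Another revision of run_tests: --debug now iterates over every applicable field set's debug
# process, and report opening goes through OpenFilesRequest instead of desktop.ui_open.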
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet, goal_description="`test --debug`", error_if_no_applicable_targets=True
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_applicable_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources, FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.coverage_data for result in results if result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
            )
            for process in open_files.processes:
                interactive_runner.run(process)

    return Test(exit_code)
Example #24
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources, FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets)
    )

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")
        if result.extra_output and result.extra_output.files:
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" / result.address.path_safe_spec),
            )

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, you can't sort by `types`, so we call `str()` on it.
        all_coverage_data = sorted(
            (result.coverage_data for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]
        ] = {
            collection_cls.element_type: collection_cls
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
            )
            for process in open_files.processes:
                interactive_runner.run(process)

    return Test(exit_code)
Example #25
    def run_test_rule(
        self,
        *,
        field_set: Type[TestFieldSet],
        targets: List[TargetWithOrigin],
        debug: bool = False,
        use_coverage: bool = False,
        output: ShowOutput = ShowOutput.ALL,
        include_sources: bool = True,
        valid_targets: bool = True,
    ) -> Tuple[int, str]:
        console = MockConsole(use_colors=False)
        test_subsystem = create_goal_subsystem(
            TestSubsystem,
            debug=debug,
            use_coverage=use_coverage,
            output=output,
        )
        interactive_runner = InteractiveRunner(self.scheduler)
        workspace = Workspace(self.scheduler)
        union_membership = UnionMembership({
            TestFieldSet: [field_set],
            CoverageDataCollection: [MockCoverageDataCollection]
        })

        def mock_find_valid_field_sets(
            _: TargetsToValidFieldSetsRequest, ) -> TargetsToValidFieldSets:
            if not valid_targets:
                return TargetsToValidFieldSets({})
            return TargetsToValidFieldSets({
                tgt_with_origin: [field_set.create(tgt_with_origin.target)]
                for tgt_with_origin in targets
            })

        def mock_coverage_report_generation(
            coverage_data_collection: MockCoverageDataCollection,
        ) -> CoverageReports:
            addresses = ", ".join(
                coverage_data.address.spec
                for coverage_data in coverage_data_collection)
            console_report = ConsoleCoverageReport(
                f"Ran coverage on {addresses}")
            return CoverageReports(reports=(console_report, ))

        result: Test = run_rule_with_mocks(
            run_tests,
            rule_args=[
                console, test_subsystem, interactive_runner, workspace,
                union_membership
            ],
            mock_gets=[
                MockGet(
                    product_type=TargetsToValidFieldSets,
                    subject_type=TargetsToValidFieldSetsRequest,
                    mock=mock_find_valid_field_sets,
                ),
                MockGet(
                    product_type=EnrichedTestResult,
                    subject_type=TestFieldSet,
                    mock=lambda fs: fs.test_result,
                ),
                MockGet(
                    product_type=TestDebugRequest,
                    subject_type=TestFieldSet,
                    mock=lambda _: TestDebugRequest(
                        self.make_interactive_process()),
                ),
                MockGet(
                    product_type=FieldSetsWithSources,
                    subject_type=FieldSetsWithSourcesRequest,
                    mock=lambda field_sets: FieldSetsWithSources(
                        field_sets if include_sources else ()),
                ),
                # Merge XML results.
                MockGet(
                    product_type=Digest,
                    subject_type=MergeDigests,
                    mock=lambda _: EMPTY_DIGEST,
                ),
                MockGet(
                    product_type=CoverageReports,
                    subject_type=CoverageDataCollection,
                    mock=mock_coverage_report_generation,
                ),
                MockGet(
                    product_type=OpenFiles,
                    subject_type=OpenFilesRequest,
                    mock=lambda _: OpenFiles(()),
                ),
            ],
            union_membership=union_membership,
        )
        assert not console.stdout.getvalue()
        return result.exit_code, console.stderr.getvalue()