Exemple #1
0
def test_stdin_input() -> None:
    """Mocked stdin should feed both the `input` builtin and `Console.input`."""
    bootstrapper = create_options_bootstrapper([])
    typed_line = "my_input"
    prompt = "my_output"

    # The builtin `input` reads from the mocked stdin; its prompt is echoed
    # to the mocked stdout.
    with mock_console(bootstrapper, stdin_content=f"{typed_line}\n") as (_, stdio_reader):
        assert typed_line == input(prompt)
        assert prompt == stdio_reader.get_stdout()

    # `console.input` behaves the same way as the builtin.
    with mock_console(bootstrapper, stdin_content=f"{typed_line}\n") as (console, stdio_reader):
        assert typed_line == console.input(prompt)
        assert prompt == stdio_reader.get_stdout()
Exemple #2
0
def run_typecheck_rule(
    *,
    request_types: Sequence[Type[CheckRequest]],
    targets: list[Target],
    only: list[str] | None = None,
) -> Tuple[int, str]:
    """Run the `check` goal rule with mocked CheckRequest types.

    Returns (exit code, stderr). Asserts nothing was written to stdout,
    since `check` reports only via stderr.
    """
    union_membership = UnionMembership({CheckRequest: request_types})
    check_subsystem = create_subsystem(CheckSubsystem, only=only or [])
    with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
        rule_runner = RuleRunner()
        result: Check = run_rule_with_mocks(
            check,
            rule_args=[
                console,
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                Targets(targets),
                DistDir(relpath=Path("dist")),
                union_membership,
                check_subsystem,
            ],
            mock_gets=[
                # Each mocked CheckRequest carries its own canned results.
                MockGet(
                    output_type=CheckResults,
                    input_type=CheckRequest,
                    mock=lambda field_set_collection: field_set_collection.check_results,
                ),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
def run_pytest(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    """Request a pytest run for `test_target` and return its TestResult."""
    # pytest-html==1.22.1 has an undeclared dep on setuptools. This, unfortunately,
    # is the most recent version of pytest-html that works with the low version of
    # pytest that we pin to.
    plugin_reqs = [
        "zipp==1.0.0",
        "pytest-cov>=2.8.1,<2.9",
        "pytest-html==1.22.1",
        "setuptools",
    ]
    options = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        # repr of a list of plain strings yields the same "['a', 'b']" form
        # the option parser expects.
        f"--pytest-pytest-plugins={plugin_reqs!r}",
    ]
    if extra_args:
        options.extend(extra_args)
    rule_runner.set_options(options,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    field_sets = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, field_sets)
    debug_request = rule_runner.request(TestDebugRequest, field_sets)
    if debug_request.process is not None:
        # The interactive (debug) run must exit the same way as the batch run.
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Exemple #4
0
def run_goal(
    targets: Sequence[Target],
    *,
    target_type: list[str] | None = None,
    address_regex: list[str] | None = None,
    tag_regex: list[str] | None = None,
    granularity: TargetGranularity | None = None,
) -> str:
    """Run the `filter` goal over `targets` and return the captured stdout."""
    filter_subsystem = create_goal_subsystem(
        FilterSubsystem,
        sep="\\n",
        output_file=None,
        target_type=target_type or [],
        address_regex=address_regex or [],
        tag_regex=tag_regex or [],
        granularity=granularity or TargetGranularity.all_targets,
        # Deprecated.
        type=[],
        target=[],
        regex=[],
        ancestor=[],
    )
    registered_types = RegisteredTargetTypes.create({type(tgt) for tgt in targets})
    with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
        run_rule_with_mocks(
            filter_targets,
            rule_args=[
                Targets(targets),
                filter_subsystem,
                console,
                registered_types,
            ],
        )
        # The goal writes matches to stdout only; stderr must stay empty.
        assert not stdio_reader.get_stderr()
        return stdio_reader.get_stdout()
Exemple #5
0
def run_typecheck_rule(
    *,
    request_types: List[Type[TypecheckRequest]],
    targets: List[Target],
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Run the `typecheck` goal rule with mocked requests; return (exit code, stderr).

    If `include_sources` is False, the sources-filter mock drops every field
    set. Asserts nothing was written to stdout.
    """
    union_membership = UnionMembership({TypecheckRequest: request_types})
    with mock_console(create_options_bootstrapper()) as (console,
                                                         stdio_reader):
        result: Typecheck = run_rule_with_mocks(
            typecheck,
            rule_args=[console, Targets(targets), union_membership],
            mock_gets=[
                # Each mocked request supplies its own canned results.
                MockGet(
                    output_type=EnrichedTypecheckResults,
                    input_type=TypecheckRequest,
                    mock=lambda field_set_collection: field_set_collection.
                    typecheck_results,
                ),
                # Simulates filtering out field sets without sources.
                MockGet(
                    output_type=FieldSetsWithSources,
                    input_type=FieldSetsWithSourcesRequest,
                    mock=lambda field_sets: FieldSetsWithSources(
                        field_sets if include_sources else ()),
                ),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #6
0
def test_line_oriented_goal() -> None:
    """A LineOriented goal's `output` and `line_oriented` helpers both reach stdout."""
    class OutputtingGoalOptions(LineOriented, GoalSubsystem):
        name = "dummy"

    class OutputtingGoal(Goal):
        subsystem_cls = OutputtingGoalOptions

    @goal_rule
    def output_rule(console: Console,
                    options: OutputtingGoalOptions) -> OutputtingGoal:
        # `output` yields a raw write callable; `line_oriented` yields a
        # print-like callable that appends the configured separator.
        with options.output(console) as write_stdout:
            write_stdout("output...")
        with options.line_oriented(console) as print_stdout:
            print_stdout("line oriented")
        return OutputtingGoal(0)

    with mock_console(create_options_bootstrapper()) as (console,
                                                         stdio_reader):
        result: OutputtingGoal = run_rule_with_mocks(
            output_rule,
            rule_args=[
                console,
                create_goal_subsystem(OutputtingGoalOptions,
                                      sep="\\n",
                                      output_file=None),
            ],
        )
        assert result.exit_code == 0
        assert stdio_reader.get_stdout() == "output...line oriented\n"
Exemple #7
0
def run_lint_rule(
    rule_runner: RuleRunner,
    *,
    lint_request_types: List[Type[LintRequest]],
    targets: List[Target],
    per_file_caching: bool,
) -> Tuple[int, str]:
    """Run the `lint` goal rule with mocked lint requests; return (exit code, stderr).

    Asserts nothing was written to stdout, since `lint` reports via stderr.
    """
    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdio_reader):
        union_membership = UnionMembership({LintRequest: lint_request_types})
        result: Lint = run_rule_with_mocks(
            lint,
            rule_args=[
                console,
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                Targets(targets),
                create_goal_subsystem(LintSubsystem,
                                      per_file_caching=per_file_caching,
                                      per_target_caching=False),
                union_membership,
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                # Each mocked request supplies its own canned results.
                MockGet(
                    output_type=LintResults,
                    input_type=LintRequest,
                    mock=lambda field_set_collection: field_set_collection.
                    lint_results,
                )
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #8
0
def run_goal(targets: list[MockTarget],
             *,
             show_documented: bool = False) -> tuple[str, str]:
    """Run the `list` goal over `targets`; return (stdout, stderr)."""
    list_subsystem = create_goal_subsystem(
        ListSubsystem,
        sep="\\n",
        output_file=None,
        documented=show_documented,
    )
    addresses = Addresses(tgt.address for tgt in targets)
    with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
        run_rule_with_mocks(
            list_targets,
            rule_args=[addresses, list_subsystem, console],
            mock_gets=[
                # The rule only needs the original targets handed back.
                MockGet(
                    output_type=UnexpandedTargets,
                    input_type=Addresses,
                    mock=lambda _: UnexpandedTargets(targets),
                )
            ],
        )
        return stdio_reader.get_stdout(), stdio_reader.get_stderr()
Exemple #9
0
def single_target_run(
    rule_runner: RuleRunner,
    address: Address,
    *,
    program_text: bytes,
) -> Run:
    """Run the `run` goal rule against a single synthetic binary target.

    `program_text` is served by the mocked RunRequest as the executable to
    run. Returns the resulting `Run` goal value.
    """
    workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)

    # Minimal stand-ins: a field set with no required fields and a bare
    # target type, just enough for the rule to select something to run.
    class TestRunFieldSet(RunFieldSet):
        required_fields = ()

    class TestBinaryTarget(Target):
        alias = "binary"
        core_fields = ()

    target = TestBinaryTarget({}, address)
    field_set = TestRunFieldSet.create(target)

    with mock_console(rule_runner.options_bootstrapper) as (console, _):
        res = run_rule_with_mocks(
            run,
            rule_args=[
                create_goal_subsystem(RunSubsystem, args=[], cleanup=True),
                create_subsystem(GlobalOptions,
                                 pants_workdir=rule_runner.pants_workdir,
                                 process_cleanup=True),
                workspace,
                BuildRoot(),
                rule_runner.environment,
            ],
            mock_gets=[
                MockGet(
                    output_type=TargetRootsToFieldSets,
                    input_type=TargetRootsToFieldSetsRequest,
                    mock=lambda _: TargetRootsToFieldSets(
                        {target: [field_set]}),
                ),
                MockGet(
                    output_type=WrappedTarget,
                    input_type=WrappedTargetRequest,
                    mock=lambda _: WrappedTarget(target),
                ),
                # Serves `program_text` as the runnable artifact.
                MockGet(
                    output_type=RunRequest,
                    input_type=TestRunFieldSet,
                    mock=lambda _: create_mock_run_request(
                        rule_runner, program_text),
                ),
                # Actually executes the interactive process side effect.
                MockEffect(
                    output_type=InteractiveProcessResult,
                    input_type=InteractiveProcess,
                    mock=rule_runner.run_interactive_process,
                ),
            ],
        )
        return cast(Run, res)
def test_repl_bogus_repl_name(rule_runner: RuleRunner) -> None:
    """An unknown --shell value must fail with a helpful error message."""
    setup_sources(rule_runner)
    with mock_console(rule_runner.options_bootstrapper):
        goal_result = rule_runner.run_goal_rule(
            Repl,
            global_args=["--backend-packages=pants.backend.python"],
            args=["--shell=bogus-repl", "src/python/lib.py"],
        )
    assert goal_result.exit_code == -1
    assert "'bogus-repl' is not a registered REPL. Available REPLs" in goal_result.stderr
Exemple #11
0
def test_materialize_input_files(rule_runner: RuleRunner) -> None:
    """An interactive process should see its input digest materialized on disk."""
    program_text = b'#!/usr/bin/python\nprint("hello")'
    run_request = create_mock_run_request(rule_runner, program_text)
    process = InteractiveProcess(
        argv=("./program.py", ),
        run_in_workspace=False,
        input_digest=run_request.digest,
    )
    with mock_console(rule_runner.options_bootstrapper):
        result = rule_runner.run_interactive_process(process)
    assert result.exit_code == 0
def test_repl_ipython(rule_runner: RuleRunner) -> None:
    """The ipython shell should start successfully against the sample sources."""
    setup_sources(rule_runner)
    backends = [
        "--backend-packages=pants.backend.python",
        "--backend-packages=pants.backend.codegen.protobuf.python",
    ]
    with mock_console(rule_runner.options_bootstrapper):
        goal_result = rule_runner.run_goal_rule(
            Repl,
            global_args=backends,
            args=["--shell=ipython", "src/python/lib.py"],
        )
    assert goal_result.exit_code == 0
Exemple #13
0
def run_repl(rule_runner: RuleRunner,
             *,
             extra_args: list[str] | None = None) -> GoalRuleResult:
    """Run the repl goal against src/python/lib.py with optional extra global args."""
    # TODO(#9108): Expand `mock_console` to allow for providing input for the repl to verify
    # that, e.g., the generated protobuf code is available. Right now this test prepares for
    # that by including generated code, but cannot actually verify it.
    global_args = extra_args if extra_args else ()
    with mock_console(rule_runner.options_bootstrapper):
        return rule_runner.run_goal_rule(
            Repl,
            global_args=global_args,
            args=["src/python/lib.py"],
            env_inherit={"PATH", "PYENV_ROOT", "HOME"},
        )
Exemple #14
0
def run_lint_rule(
    rule_runner: RuleRunner,
    *,
    lint_request_types: Sequence[Type[LintTargetsRequest]],
    targets: list[Target],
    run_files_linter: bool = False,
    batch_size: int = 128,
    only: list[str] | None = None,
) -> Tuple[int, str]:
    """Run the `lint` goal rule with mocked target and file linters.

    Returns (exit code, stderr). Asserts nothing was written to stdout,
    since `lint` reports via stderr.
    """
    union_membership = UnionMembership({
        LintTargetsRequest:
        lint_request_types,
        LintFilesRequest: [MockFilesRequest] if run_files_linter else [],
    })
    lint_subsystem = create_goal_subsystem(
        LintSubsystem,
        batch_size=batch_size,
        only=only or [],
    )
    # A single empty file gives the file linters something to lint.
    specs_snapshot = SpecsSnapshot(
        rule_runner.make_snapshot_of_empty_files(["f.txt"]))
    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdio_reader):
        result: Lint = run_rule_with_mocks(
            lint,
            rule_args=[
                console,
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                Targets(targets),
                specs_snapshot,
                lint_subsystem,
                union_membership,
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                # Both request kinds carry their own canned results.
                MockGet(
                    output_type=LintResults,
                    input_type=LintTargetsRequest,
                    mock=lambda mock_request: mock_request.lint_results,
                ),
                MockGet(
                    output_type=LintResults,
                    input_type=LintFilesRequest,
                    mock=lambda mock_request: mock_request.lint_results,
                ),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #15
0
def run_fmt_rule(
    rule_runner: RuleRunner,
    *,
    language_target_collection_types: List[Type[LanguageFmtTargets]],
    targets: List[Target],
    result_digest: Digest,
    per_file_caching: bool,
    include_sources: bool = True,
) -> str:
    """Run the `fmt` goal rule with mocked language formatters; return stderr.

    Asserts the goal exits 0 and writes nothing to stdout. If
    `include_sources` is False, the sources-filter mock drops every target.
    """
    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdio_reader):
        union_membership = UnionMembership(
            {LanguageFmtTargets: language_target_collection_types})
        result: Fmt = run_rule_with_mocks(
            fmt,
            rule_args=[
                console,
                Targets(targets),
                create_goal_subsystem(FmtSubsystem,
                                      per_file_caching=per_file_caching,
                                      per_target_caching=False),
                Workspace(rule_runner.scheduler),
                union_membership,
            ],
            mock_gets=[
                # Each language batch reports results against `result_digest`.
                MockGet(
                    output_type=LanguageFmtResults,
                    input_type=LanguageFmtTargets,
                    mock=lambda language_targets_collection:
                    language_targets_collection.language_fmt_results(
                        result_digest),
                ),
                # Simulates filtering out targets without sources.
                MockGet(
                    output_type=TargetsWithSources,
                    input_type=TargetsWithSourcesRequest,
                    mock=lambda tgts: TargetsWithSources(
                        tgts if include_sources else ()),
                ),
                MockGet(
                    output_type=Digest,
                    input_type=MergeDigests,
                    mock=lambda _: result_digest,
                ),
            ],
            union_membership=union_membership,
        )
        assert result.exit_code == 0
        assert not stdio_reader.get_stdout()
        return stdio_reader.get_stderr()
def test_repl_with_targets(rule_runner: RuleRunner) -> None:
    """The default repl should start successfully for the sample sources."""
    # TODO(#9108): Expand `mock_console` to allow for providing input for the repl to verify
    # that, e.g., the generated protobuf code is available. Right now this test prepares for
    # that by including generated code, but cannot actually verify it.
    setup_sources(rule_runner)
    backends = [
        "--backend-packages=pants.backend.python",
        "--backend-packages=pants.backend.codegen.protobuf.python",
    ]
    with mock_console(rule_runner.options_bootstrapper):
        goal_result = rule_runner.run_goal_rule(
            Repl,
            global_args=backends,
            args=["src/python/lib.py"],
        )
    assert goal_result.exit_code == 0
Exemple #17
0
def run_pytest(
    rule_runner: RuleRunner,
    test_target: PythonTests,
    *,
    passthrough_args: Optional[str] = None,
    junit_xml_dir: Optional[str] = None,
    use_coverage: bool = False,
    execution_slot_var: Optional[str] = None,
    extra_env_vars: Optional[str] = None,
    env: Optional[Mapping[str, str]] = None,
    config: Optional[str] = None,
    force: bool = False,
) -> TestResult:
    """Run pytest on `test_target`, translating keyword flags into option args.

    Returns the TestResult; when a debug request is available, also runs the
    interactive path and asserts it exits with the same code.
    """
    args = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
        # pin to lower versions so that we can run Python 2 tests
        "--pytest-version=pytest>=4.6.6,<4.7",
        "--pytest-pytest-plugins=['zipp==1.0.0', 'pytest-cov>=2.8.1,<2.9']",
    ]
    if passthrough_args:
        args.append(f"--pytest-args='{passthrough_args}'")
    if extra_env_vars:
        args.append(f"--test-extra-env-vars={extra_env_vars}")
    if junit_xml_dir:
        args.append(f"--pytest-junit-xml-dir={junit_xml_dir}")
    if use_coverage:
        args.append("--test-use-coverage")
    if execution_slot_var:
        args.append(f"--pytest-execution-slot-var={execution_slot_var}")
    if config:
        # The config file must exist in the build root before pytest runs.
        rule_runner.create_file(relpath="pytest.ini", contents=config)
        args.append("--pytest-config=pytest.ini")
    if force:
        args.append("--test-force")
    rule_runner.set_options(args,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})

    inputs = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, inputs)
    debug_request = rule_runner.request(TestDebugRequest, inputs)
    if debug_request.process is not None:
        # The interactive (debug) run must agree with the batch run.
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = InteractiveRunner(rule_runner.scheduler).run(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Exemple #18
0
def run_lint_rule(
    rule_runner: RuleRunner,
    *,
    lint_request_types: List[Type[LintRequest]],
    targets: List[Target],
    per_file_caching: bool,
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Run the `lint` goal rule with mocked lint requests; return (exit code, stderr).

    If `include_sources` is False, the sources-filter mock drops every field
    set. Asserts nothing was written to stdout.
    """
    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdio_reader):
        workspace = Workspace(rule_runner.scheduler)
        union_membership = UnionMembership({LintRequest: lint_request_types})
        result: Lint = run_rule_with_mocks(
            lint,
            rule_args=[
                console,
                workspace,
                Targets(targets),
                create_goal_subsystem(LintSubsystem,
                                      per_file_caching=per_file_caching,
                                      per_target_caching=False),
                union_membership,
            ],
            mock_gets=[
                # Each mocked request supplies its own canned results.
                MockGet(
                    output_type=EnrichedLintResults,
                    input_type=LintRequest,
                    mock=lambda field_set_collection: field_set_collection.
                    lint_results,
                ),
                # Simulates filtering out field sets without sources.
                MockGet(
                    output_type=FieldSetsWithSources,
                    input_type=FieldSetsWithSourcesRequest,
                    mock=lambda field_sets: FieldSetsWithSources(
                        field_sets if include_sources else ()),
                ),
                MockGet(output_type=Digest,
                        input_type=MergeDigests,
                        mock=lambda _: EMPTY_DIGEST),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #19
0
def run_export_rule(rule_runner: RuleRunner,
                    targets: List[Target]) -> Tuple[int, str]:
    """Run the `export` goal rule with one mocked exportable; return (exit code, stdout)."""
    union_membership = UnionMembership(
        {ExportableDataRequest: [MockExportableDataRequest]})
    # A real file in the build root for the mocked export's symlink to target.
    with open(os.path.join(rule_runner.build_root, "somefile"), "wb") as fp:
        fp.write(b"SOMEFILE")
    with mock_console(create_options_bootstrapper()) as (console,
                                                         stdio_reader):
        digest = rule_runner.request(
            Digest, [CreateDigest([FileContent("foo/bar", b"BAR")])])
        result: Export = run_rule_with_mocks(
            export,
            rule_args=[
                console,
                Targets(targets),
                create_goal_subsystem(ExportSubsystem),
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                union_membership,
                BuildRoot(),
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                # Exportable data includes the digest plus a symlink to the
                # pre-created file above.
                MockGet(
                    output_type=ExportableData,
                    input_type=ExportableDataRequest,
                    mock=lambda edr: mock_export(edr, digest, (Symlink(
                        "somefile", "link_to_somefile"), )),
                ),
                # Digest operations are delegated to the real scheduler.
                MockGet(
                    output_type=Digest,
                    input_type=MergeDigests,
                    mock=lambda md: rule_runner.request(Digest, [md]),
                ),
                MockGet(
                    output_type=Digest,
                    input_type=AddPrefix,
                    mock=lambda ap: rule_runner.request(Digest, [ap]),
                ),
            ],
            union_membership=union_membership,
        )
        return result.exit_code, stdio_reader.get_stdout()
Exemple #20
0
def run_shunit2(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    """Run the shunit2 test runner on `test_target` and return its TestResult."""
    options = ["--backend-packages=pants.backend.shell"]
    if extra_args:
        options.extend(extra_args)
    rule_runner.set_options(
        options,
        env=env,
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    field_sets = [Shunit2FieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, field_sets)
    debug_request = rule_runner.request(TestDebugRequest, field_sets)
    if debug_request.process is not None:
        # The interactive (debug) run must exit the same way as the batch run.
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = rule_runner.run_interactive_process(debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Exemple #21
0
def run_typecheck_rule(
    *,
    request_types: List[Type[CheckRequest]],
    targets: List[Target],
    include_sources: bool = True,
) -> Tuple[int, str]:
    """Run the `check` goal rule with mocked CheckRequest types.

    Returns (exit code, stderr). If `include_sources` is False, the
    sources-filter mock drops every field set. Asserts nothing was written
    to stdout.
    """
    union_membership = UnionMembership({CheckRequest: request_types})
    with mock_console(create_options_bootstrapper()) as (console,
                                                         stdio_reader):
        rule_runner = RuleRunner()
        result: Check = run_rule_with_mocks(
            check,
            rule_args=[
                console,
                Workspace(rule_runner.scheduler),
                Targets(targets),
                DistDir(relpath=Path("dist")),
                union_membership,
            ],
            mock_gets=[
                # Each mocked request supplies its own canned results.
                MockGet(
                    output_type=CheckResults,
                    input_type=CheckRequest,
                    mock=lambda field_set_collection: field_set_collection.
                    check_results,
                ),
                # Simulates filtering out field sets without sources.
                MockGet(
                    output_type=FieldSetsWithSources,
                    input_type=FieldSetsWithSourcesRequest,
                    mock=lambda field_sets: FieldSetsWithSources(
                        field_sets if include_sources else ()),
                ),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #22
0
def run_pytest(
    rule_runner: RuleRunner,
    test_target: Target,
    *,
    extra_args: list[str] | None = None,
    env: dict[str, str] | None = None,
) -> TestResult:
    """Request a pytest run for `test_target` and return its TestResult."""
    options = [
        "--backend-packages=pants.backend.python",
        f"--source-root-patterns={SOURCE_ROOT}",
    ]
    if extra_args:
        options.extend(extra_args)
    rule_runner.set_options(options,
                            env=env,
                            env_inherit={"PATH", "PYENV_ROOT", "HOME"})
    field_sets = [PythonTestFieldSet.create(test_target)]
    test_result = rule_runner.request(TestResult, field_sets)
    debug_request = rule_runner.request(TestDebugRequest, field_sets)
    if debug_request.process is not None:
        # The interactive (debug) run must exit the same way as the batch run.
        with mock_console(rule_runner.options_bootstrapper):
            debug_result = rule_runner.run_interactive_process(
                debug_request.process)
            assert test_result.exit_code == debug_result.exit_code
    return test_result
Exemple #23
0
def run_test_rule(
        rule_runner: RuleRunner,
        *,
        field_set: type[TestFieldSet],
        targets: list[Target],
        debug: bool = False,
        use_coverage: bool = False,
        xml_dir: str | None = None,
        output: ShowOutput = ShowOutput.ALL,
        valid_targets: bool = True,
        run_id: RunId = RunId(999),
) -> tuple[int, str]:
    """Run the `test` goal rule with a fully mocked test pipeline.

    Returns (exit code, stderr). Asserts nothing was written to stdout,
    since `test` reports via stderr.
    """
    test_subsystem = create_goal_subsystem(
        TestSubsystem,
        debug=debug,
        use_coverage=use_coverage,
        xml_dir=xml_dir,
        output=output,
        extra_env_vars=[],
    )
    workspace = Workspace(rule_runner.scheduler, _enforce_effects=False)
    union_membership = UnionMembership({
        TestFieldSet: [field_set],
        CoverageDataCollection: [MockCoverageDataCollection],
    })

    # Maps each target to one field set, or to nothing when simulating
    # "no valid targets".
    def mock_find_valid_field_sets(
        _: TargetRootsToFieldSetsRequest, ) -> TargetRootsToFieldSets:
        if not valid_targets:
            return TargetRootsToFieldSets({})
        return TargetRootsToFieldSets(
            {tgt: [field_set.create(tgt)]
             for tgt in targets})

    # Canned interactive process for the --debug path.
    def mock_debug_request(_: TestFieldSet) -> TestDebugRequest:
        return TestDebugRequest(
            InteractiveProcess(["/bin/example"], input_digest=EMPTY_DIGEST))

    # Produces a console coverage report naming the covered addresses.
    def mock_coverage_report_generation(
        coverage_data_collection: MockCoverageDataCollection,
    ) -> CoverageReports:
        addresses = ", ".join(coverage_data.address.spec
                              for coverage_data in coverage_data_collection)
        console_report = ConsoleCoverageReport(
            coverage_insufficient=False, report=f"Ran coverage on {addresses}")
        return CoverageReports(reports=(console_report, ))

    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdio_reader):
        result: Test = run_rule_with_mocks(
            run_tests,
            rule_args=[
                console,
                test_subsystem,
                workspace,
                union_membership,
                DistDir(relpath=Path("dist")),
                run_id,
            ],
            mock_gets=[
                MockGet(
                    output_type=TargetRootsToFieldSets,
                    input_type=TargetRootsToFieldSetsRequest,
                    mock=mock_find_valid_field_sets,
                ),
                # Each mocked field set carries its own canned result.
                MockGet(
                    output_type=TestResult,
                    input_type=TestFieldSet,
                    mock=lambda fs: fs.test_result,
                ),
                MockGet(
                    output_type=TestDebugRequest,
                    input_type=TestFieldSet,
                    mock=mock_debug_request,
                ),
                # Merge XML results.
                MockGet(
                    output_type=Digest,
                    input_type=MergeDigests,
                    mock=lambda _: EMPTY_DIGEST,
                ),
                MockGet(
                    output_type=CoverageReports,
                    input_type=CoverageDataCollection,
                    mock=mock_coverage_report_generation,
                ),
                MockGet(
                    output_type=OpenFiles,
                    input_type=OpenFilesRequest,
                    mock=lambda _: OpenFiles(()),
                ),
                # The debug path's interactive run always "succeeds".
                MockEffect(
                    output_type=InteractiveProcessResult,
                    input_type=InteractiveProcess,
                    mock=lambda _: InteractiveProcessResult(0),
                ),
            ],
            union_membership=union_membership,
        )
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #24
0
def test_valid_repl(rule_runner: RuleRunner) -> None:
    """A registered REPL name should run successfully."""
    shell_arg = f"--shell={MockRepl.name}"
    with mock_console(rule_runner.options_bootstrapper):
        goal_result = rule_runner.run_goal_rule(Repl, args=[shell_arg])
    assert goal_result.exit_code == 0
Exemple #25
0
def test_tailor_rule(rule_runner: RuleRunner) -> None:
    """Exercise the `tailor` goal rule end-to-end with mocked Gets and check its console output."""
    with mock_console(rule_runner.options_bootstrapper) as (console, stdio_reader):
        workspace = Workspace(rule_runner.scheduler)
        union_membership = UnionMembership(
            {PutativeTargetsRequest: [MockPutativeTargetsRequest]}
        )
        specs = Specs(
            address_specs=AddressSpecs(tuple()),
            filesystem_specs=FilesystemSpecs(tuple()),
        )

        def mock_putative_targets(req: PutativeTargetsRequest) -> PutativeTargets:
            # Three putative targets: a test target, a library, and one whose name
            # ("conflict") will be renamed by the uniqueness step below.
            return PutativeTargets(
                [
                    PutativeTarget.for_target_type(
                        FortranTests, "src/fortran/foo", "tests", ["bar1_test.f90"]
                    ),
                    PutativeTarget.for_target_type(
                        FortranLibrary, "src/fortran/baz", "baz", ["qux1.f90"]
                    ),
                    PutativeTarget.for_target_type(
                        FortranLibrary,
                        "src/fortran/conflict",
                        "conflict",
                        ["conflict1.f90", "conflict2.f90"],
                    ),
                ]
            )

        def mock_rename_conflicting(pts: PutativeTargets) -> UniquelyNamedPutativeTargets:
            # Simulate conflict resolution: "conflict" becomes "conflict0".
            renamed = [pt.rename("conflict0") if pt.name == "conflict" else pt for pt in pts]
            return UniquelyNamedPutativeTargets(PutativeTargets(renamed))

        run_rule_with_mocks(
            tailor.tailor,
            rule_args=[
                create_goal_subsystem(
                    TailorSubsystem,
                    build_file_name="BUILD",
                    build_file_header="",
                    build_file_indent="    ",
                    alias_mapping={"fortran_library": "my_fortran_lib"},
                ),
                console,
                workspace,
                union_membership,
                specs,
            ],
            mock_gets=[
                MockGet(
                    output_type=PutativeTargets,
                    input_type=PutativeTargetsRequest,
                    mock=mock_putative_targets,
                ),
                MockGet(
                    output_type=UniquelyNamedPutativeTargets,
                    input_type=PutativeTargets,
                    mock=mock_rename_conflicting,
                ),
                MockGet(
                    output_type=DisjointSourcePutativeTarget,
                    input_type=PutativeTarget,
                    # This test exists to test the console output, which isn't affected by
                    # whether the sources of a putative target were modified due to conflict,
                    # so we don't bother to inject such modifications. The BUILD file content
                    # generation, which is so affected, is tested separately above.
                    mock=lambda pt: DisjointSourcePutativeTarget(pt),
                ),
                MockGet(
                    output_type=EditedBuildFiles,
                    input_type=EditBuildFilesRequest,
                    mock=lambda _: EditedBuildFiles(
                        # We test that the created digest contains what we expect above, and we
                        # don't need to test here that writing digests to the Workspace works.
                        # So the empty digest is sufficient.
                        digest=EMPTY_DIGEST,
                        created_paths=("src/fortran/baz/BUILD",),
                        updated_paths=(
                            "src/fortran/foo/BUILD",
                            "src/fortran/conflict/BUILD",
                        ),
                    ),
                ),
            ],
            union_membership=union_membership,
        )

        stdout_str = stdio_reader.get_stdout()

    assert "Created src/fortran/baz/BUILD:\n  - Added my_fortran_lib target baz" in stdout_str
    assert "Updated src/fortran/foo/BUILD:\n  - Added fortran_tests target tests" in stdout_str
    assert (
        "Updated src/fortran/conflict/BUILD:\n  - Added my_fortran_lib target conflict0"
    ) in stdout_str
Exemple #26
0
def test_unrecognized_repl(rule_runner: RuleRunner) -> None:
    """Requesting a REPL that was never registered fails and names the problem."""
    with mock_console(rule_runner.options_bootstrapper):
        outcome = rule_runner.run_goal_rule(Repl, args=["--shell=bogus-repl"])
    assert outcome.exit_code == -1
    assert "'bogus-repl' is not a registered REPL. Available REPLs" in outcome.stderr
Exemple #27
0
def run_export_rule(rule_runner: RuleRunner,
                    targets: List[Target]) -> Tuple[int, str]:
    """Drive the `export` goal rule with mocked Gets/Effects.

    Returns a tuple of (exit code, captured stdout).
    """
    union_membership = UnionMembership({ExportRequest: [MockExportRequest]})
    with open(os.path.join(rule_runner.build_root, "somefile"), "wb") as fp:
        fp.write(b"SOMEFILE")
    with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
        digest = rule_runner.request(
            Digest, [CreateDigest([FileContent("foo/bar", b"BAR")])]
        )

        def mock_export_results(req: ExportRequest) -> ExportResults:
            # One export result plus two post-processing commands that copy the
            # exported file to two new names.
            commands = (
                PostProcessingCommand(
                    ["cp", "{digest_root}/foo/bar", "{digest_root}/foo/bar1"]
                ),
                PostProcessingCommand(
                    ["cp", "{digest_root}/foo/bar", "{digest_root}/foo/bar2"]
                ),
            )
            return ExportResults((mock_export(req, digest, commands),))

        result: Export = run_rule_with_mocks(
            export,
            rule_args=[
                console,
                Targets(targets),
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                union_membership,
                BuildRoot(),
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                MockGet(
                    output_type=ExportResults,
                    input_type=ExportRequest,
                    mock=mock_export_results,
                ),
                MockGet(
                    output_type=Digest,
                    input_type=MergeDigests,
                    mock=lambda md: rule_runner.request(Digest, [md]),
                ),
                MockGet(
                    output_type=Digest,
                    input_type=AddPrefix,
                    mock=lambda ap: rule_runner.request(Digest, [ap]),
                ),
                MockGet(
                    output_type=Environment,
                    input_type=EnvironmentRequest,
                    mock=lambda env: rule_runner.request(Environment, [env]),
                ),
                MockEffect(
                    output_type=InteractiveProcessResult,
                    input_type=InteractiveProcess,
                    mock=lambda ip: _mock_run(rule_runner, ip),
                ),
            ],
            union_membership=union_membership,
        )
        return result.exit_code, stdio_reader.get_stdout()
Exemple #28
0
def test_url_assets(asset_type) -> None:
    """`file`/`resource` targets backed by `http_source` are fetched and materialized.

    Builds a tiny app depending on two URL-backed assets — one with the default
    filename and one renamed via `filename=` — runs it, and asserts both files
    appeared in the sandbox.
    """
    rule_runner = RuleRunner(
        rules=[
            *target_type_rules(),
            *pex_from_targets.rules(),
            *package_pex_binary.rules(),
            *run_pex_binary.rules(),
            *python_target_type_rules.rules(),
            *run.rules(),
        ],
        target_types=[
            FileTarget, ResourceTarget, PythonSourceTarget, PexBinary
        ],
        objects={"http_source": HTTPSource},
    )
    # Pinned to a specific upstream commit so the length and sha256 stay stable.
    http_source_info = (
        'url="https://raw.githubusercontent.com/python/cpython/7e46ae33bd522cf8331052c3c8835f9366599d8d/Lib/antigravity.py",'
        "len=500,"
        'sha256="8a5ee63e1b79ba2733e7ff4290b6eefea60e7f3a1ccb6bb519535aaf92b44967"'
    )
    # NOTE: use the bare `dedent` alias consistently (the original mixed it with
    # `textwrap.dedent`); both were already in scope in this file.
    rule_runner.write_files({
        "assets/BUILD":
        dedent(f"""\
                {asset_type}(
                    name='antigravity',
                    source=http_source(
                        {http_source_info},
                    ),
                )
                {asset_type}(
                    name='antigravity_renamed',
                    source=http_source(
                        {http_source_info},
                        filename="antigravity_renamed.py",
                    ),
                )
                """),
        "app/app.py":
        dedent("""\
                import pathlib

                assets_path = pathlib.Path(__file__).parent.parent / "assets"
                for path in assets_path.iterdir():
                    print(path.name)
                    assert "https://xkcd.com/353/" in path.read_text()
                """),
        "app/BUILD":
        dedent("""\
                python_source(
                    source="app.py",
                    dependencies=[
                        "assets:antigravity",
                        "assets:antigravity_renamed",
                    ]
                )
                pex_binary(name="app.py", entry_point='app.py')
                """),
    })
    with mock_console(rule_runner.options_bootstrapper) as (console,
                                                            stdout_reader):
        rule_runner.run_goal_rule(
            run.Run,
            args=["app/app.py"],
            env_inherit={"PATH", "PYENV_ROOT", "HOME"},
        )
        stdout = stdout_reader.get_stdout()
        assert "antigravity.py" in stdout
        assert "antigravity_renamed.py" in stdout
Exemple #29
0
def run_lint_rule(
    rule_runner: RuleRunner,
    *,
    lint_request_types: Sequence[Type[LintTargetsRequest]],
    fmt_request_types: Sequence[Type[FmtRequest]] = (),
    targets: list[Target],
    run_files_linter: bool = False,
    batch_size: int = 128,
    only: list[str] | None = None,
    skip_formatters: bool = False,
) -> Tuple[int, str]:
    """Run the `lint` goal rule with mocked Gets and return (exit code, stderr)."""
    union_membership = UnionMembership(
        {
            LintTargetsRequest: lint_request_types,
            LintFilesRequest: [MockFilesRequest] if run_files_linter else [],
            FmtRequest: fmt_request_types,
        }
    )
    lint_subsystem = create_goal_subsystem(
        LintSubsystem,
        batch_size=batch_size,
        only=only or [],
        skip_formatters=skip_formatters,
    )
    with mock_console(rule_runner.options_bootstrapper) as (console, stdio_reader):
        result: Lint = run_rule_with_mocks(
            lint,
            rule_args=[
                console,
                Workspace(rule_runner.scheduler, _enforce_effects=False),
                Specs.empty(),
                lint_subsystem,
                union_membership,
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                MockGet(
                    output_type=SourceFiles,
                    input_type=SourceFilesRequest,
                    mock=lambda _: SourceFiles(EMPTY_SNAPSHOT, ()),
                ),
                MockGet(
                    output_type=LintResults,
                    input_type=LintTargetsRequest,
                    mock=lambda request: request.lint_results,
                ),
                MockGet(
                    output_type=LintResults,
                    input_type=LintFilesRequest,
                    mock=lambda request: request.lint_results,
                ),
                MockGet(
                    output_type=FmtResult,
                    input_type=FmtRequest,
                    mock=lambda request: request.fmt_result,
                ),
                MockGet(
                    output_type=FilteredTargets,
                    input_type=Specs,
                    mock=lambda _: FilteredTargets(targets),
                ),
                MockGet(
                    output_type=SpecsPaths,
                    input_type=Specs,
                    mock=lambda _: SpecsPaths(("f.txt",), ()),
                ),
            ],
            union_membership=union_membership,
        )
        # The goal writes only to stderr; stdout must stay empty.
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()
Exemple #30
0
def run_test_rule(
    rule_runner: RuleRunner,
    *,
    field_set: Type[TestFieldSet],
    targets: List[Target],
    debug: bool = False,
    use_coverage: bool = False,
    output: ShowOutput = ShowOutput.ALL,
    include_sources: bool = True,
    valid_targets: bool = True,
) -> Tuple[int, str]:
    """Run the `test` goal rule with mocked Gets/Effects and return (exit code, stderr)."""
    test_subsystem = create_goal_subsystem(
        TestSubsystem,
        debug=debug,
        use_coverage=use_coverage,
        output=output,
        extra_env_vars=[],
    )
    interactive_runner = InteractiveRunner(rule_runner.scheduler)
    workspace = Workspace(rule_runner.scheduler)
    union_membership = UnionMembership(
        {
            TestFieldSet: [field_set],
            CoverageDataCollection: [MockCoverageDataCollection],
        }
    )

    def mock_find_valid_field_sets(
        _: TargetRootsToFieldSetsRequest,
    ) -> TargetRootsToFieldSets:
        # When `valid_targets` is False, simulate no matching field sets at all.
        if not valid_targets:
            return TargetRootsToFieldSets({})
        mapping = {}
        for tgt in targets:
            mapping[tgt] = [field_set.create(tgt)]
        return TargetRootsToFieldSets(mapping)

    def mock_debug_request(_: TestFieldSet) -> TestDebugRequest:
        # A trivial interactive process: run a one-test Python file.
        file_content = FileContent(path="program.py", content=b"def test(): pass")
        digest = rule_runner.request(Digest, [CreateDigest((file_content,))])
        process = InteractiveProcess(["/usr/bin/python", "program.py"], input_digest=digest)
        return TestDebugRequest(process)

    def mock_coverage_report_generation(
        coverage_data_collection: MockCoverageDataCollection,
    ) -> CoverageReports:
        # Summarize the addresses coverage ran on, as a console-only report.
        addresses = ", ".join(
            coverage_data.address.spec for coverage_data in coverage_data_collection
        )
        return CoverageReports(reports=(ConsoleCoverageReport(f"Ran coverage on {addresses}"),))

    with mock_console(rule_runner.options_bootstrapper) as (console, stdio_reader):
        result: Test = run_rule_with_mocks(
            run_tests,
            rule_args=[
                console,
                test_subsystem,
                interactive_runner,
                workspace,
                union_membership,
                DistDir(relpath=Path("dist")),
            ],
            mock_gets=[
                MockGet(
                    output_type=TargetRootsToFieldSets,
                    input_type=TargetRootsToFieldSetsRequest,
                    mock=mock_find_valid_field_sets,
                ),
                MockGet(
                    output_type=EnrichedTestResult,
                    input_type=TestFieldSet,
                    mock=lambda fs: fs.test_result,
                ),
                MockGet(
                    output_type=TestDebugRequest,
                    input_type=TestFieldSet,
                    mock=mock_debug_request,
                ),
                MockGet(
                    output_type=FieldSetsWithSources,
                    input_type=FieldSetsWithSourcesRequest,
                    mock=lambda field_sets: FieldSetsWithSources(
                        field_sets if include_sources else ()
                    ),
                ),
                # Merge XML results.
                MockGet(
                    output_type=Digest,
                    input_type=MergeDigests,
                    mock=lambda _: EMPTY_DIGEST,
                ),
                MockGet(
                    output_type=CoverageReports,
                    input_type=CoverageDataCollection,
                    mock=mock_coverage_report_generation,
                ),
                MockGet(
                    output_type=OpenFiles,
                    input_type=OpenFilesRequest,
                    mock=lambda _: OpenFiles(()),
                ),
            ],
            union_membership=union_membership,
        )
        # The goal writes only to stderr; stdout must stay empty.
        assert not stdio_reader.get_stdout()
        return result.exit_code, stdio_reader.get_stderr()