Example #1
0
async def check(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    dist_dir: DistDir,
    union_membership: UnionMembership,
) -> Check:
    """Run every registered checker over the applicable targets and summarize.

    Writes any tool reports under the dist dir, prints one status line per
    checker to stderr, and returns a `Check` whose exit code is non-zero if
    any checker failed.
    """
    typecheck_request_types = cast("Iterable[type[StyleRequest]]",
                                   union_membership[CheckRequest])
    # One request per checker, holding only the field sets for targets that
    # the checker's field set type applies to.
    requests = tuple(
        typecheck_request_type(
            typecheck_request_type.field_set_type.create(target)
            for target in targets
            if typecheck_request_type.field_set_type.is_applicable(target))
        for typecheck_request_type in typecheck_request_types)
    # Presumably drops field sets whose targets have no source files —
    # TODO(review): confirm against FieldSetsWithSourcesRequest's rule.
    field_sets_with_sources = await MultiGet(
        Get(FieldSetsWithSources,
            FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests)
    # NB: re-wrap in each concrete request class (zip keeps the pairing with
    # the filtered field sets) so the union rule can match on the subtype;
    # empty requests are dropped.
    valid_requests = tuple(
        request_cls(request) for request_cls, request in zip(
            typecheck_request_types, field_sets_with_sources) if request)
    # Run all checkers concurrently.
    all_results = await MultiGet(
        Get(CheckResults, CheckRequest, request) for request in valid_requests)

    def get_tool_name(res: CheckResults) -> str:
        # Tells write_reports which tool produced each result set.
        return res.checker_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=CheckSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        # Blank line separates the summary from the checkers' own output.
        console.print_stderr("")
    for results in sorted(all_results,
                          key=lambda results: results.checker_name):
        if results.skipped:
            sigil = console.sigil_skipped()
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            # Any failure fails the goal; with the name-sorted iteration, the
            # alphabetically last failing checker's exit code wins.
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.checker_name} {status}.")

    return Check(exit_code)
Example #2
0
async def check(
    console: Console,
    workspace: Workspace,
    targets: FilteredTargets,
    dist_dir: DistDir,
    union_membership: UnionMembership,
    check_subsystem: CheckSubsystem,
) -> Check:
    """Run the checkers selected via `--check-only` (all by default).

    Writes any tool reports under the dist dir, prints one status line per
    non-skipped checker to stderr, and returns a `Check` whose exit code is
    non-zero if any checker failed.
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[CheckRequest])
    # Resolves (and presumably validates) the `--only` tool names against the
    # registered request types.
    specified_names = determine_specified_tool_names("check", check_subsystem.only, request_types)

    # One request per checker, holding only the field sets for targets that
    # checker applies to — and only for checkers the user selected.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if (
                request_type.name in specified_names
                and request_type.field_set_type.is_applicable(target)
            )
        )
        for request_type in request_types
    )
    # Run all non-empty checker requests concurrently.
    all_results = await MultiGet(
        Get(CheckResults, CheckRequest, request) for request in requests if request.field_sets
    )

    def get_name(res: CheckResults) -> str:
        # Tells write_reports which tool produced each result set.
        return res.checker_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=CheckSubsystem.name,
        get_name=get_name,
    )

    exit_code = 0
    if all_results:
        # Blank line separates the summary from the checkers' own output.
        console.print_stderr("")
    for results in sorted(all_results, key=lambda results: results.checker_name):
        if results.skipped:
            # Skipped checkers are omitted from the summary entirely.
            continue
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            # Any failure fails the goal; the alphabetically last failing
            # checker's exit code wins.
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.checker_name} {status}.")

    return Check(exit_code)
Example #3
0
def _print_results(
    console: Console,
    results: tuple[LintResults, ...],
    formatter_failed: bool,
) -> None:
    if results:
        console.print_stderr("")

    for result in results:
        if result.skipped:
            continue
        elif result.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
        console.print_stderr(f"{sigil} {result.linter_name} {status}.")

    if formatter_failed:
        console.print_stderr("")
        console.print_stderr(f"(One or more formatters failed. Run `{bin_name()} fmt` to fix.)")
Example #4
0
async def check_default_tools(
    console: Console,
    real_opts: _Options,
) -> CheckDefaultTools:
    """Verify that every registered ExternalTool's default known versions are
    downloadable.

    For each (version, platform) entry in each tool's
    `default_known_versions`, perform the actual download. A broken entry
    surfaces as an engine failure (see the TODO below), so returning at all
    means every default checked out.
    """
    # The real options know about all the registered tools.
    for scope, si in real_opts.options.known_scope_to_info.items():
        if si.subsystem_cls and issubclass(si.subsystem_cls, ExternalTool):
            tool_cls = si.subsystem_cls
            console.print_stdout(f"Checking {console.cyan(tool_cls.name)}:")
            for known_version in tool_cls.default_known_versions:
                ver, plat_val, sha256, length = tool_cls.split_known_version_str(
                    known_version)
                # Note that we don't want to use the real option values here - we want to
                # verify that the *defaults* aren't broken. However the get_request_for() method
                # requires an instance (since it can consult option values, including custom
                # options for specific tools, that we don't know about), so we construct a
                # default one, but we force the --version to the one we're checking (which will
                # typically be the same as the default version, but doesn't have to be, if the
                # tool provides default_known_versions for versions other than default_version).
                args = ("./pants", f"--{scope}-version={ver}")
                blank_opts = await Get(
                    _Options,
                    SessionValues({
                        OptionsBootstrapper:
                        OptionsBootstrapper(tuple(), ("./pants", ), args,
                                            _ChainedConfig(tuple()),
                                            CliAlias())
                    }),
                )
                instance = tool_cls(blank_opts.options.for_scope(scope))
                req = instance.get_request_for(plat_val, sha256, length)
                # write_stdout (no newline) so the success sigil lands on the
                # same line after the download completes.
                console.write_stdout(f"  version {ver} for {plat_val}... ")
                # TODO: We'd like to run all the requests concurrently, but since we can't catch
                #  engine exceptions, we wouldn't have an easy way to output which one failed.
                await Get(DownloadedExternalTool, ExternalToolRequest, req)
                console.print_stdout(console.sigil_succeeded())
    return CheckDefaultTools(exit_code=0)
Example #5
0
def _format_test_summary(result: TestResult, run_id: RunId,
                         console: Console) -> str:
    """Build the one-line summary shown on the console for a single test."""
    assert (
        result.result_metadata is not None
    ), "Skipped test results should not be outputted in the test summary"
    succeeded = result.exit_code == 0
    sigil = console.sigil_succeeded() if succeeded else console.sigil_failed()
    status = "succeeded" if succeeded else "failed"

    # Optional "(memoized)"-style origin annotation for this run.
    source = _SOURCE_MAP.get(result.result_metadata.source(run_id))
    source_print = f" ({source})" if source else ""

    # Elapsed time, when the metadata recorded one.
    total_elapsed_ms = result.result_metadata.total_elapsed_ms
    if total_elapsed_ms is None:
        elapsed_print = ""
    else:
        elapsed_print = f"in {total_elapsed_ms / 1000:.2f}s"

    suffix = f" {elapsed_print}{source_print}"
    return f"{sigil} {result.address} {status}{suffix}."
Example #6
0
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    """Run every registered linter over the applicable targets and summarize.

    With `--per-file-caching`, each field set is linted in its own request
    (better cache granularity) and the per-file results are then merged back
    into one `LintResults` per linter. Writes tool reports under the dist
    dir and returns a `Lint` whose exit code is non-zero on any failure.
    """
    request_types = cast("Iterable[type[StyleRequest]]",
                         union_membership[LintRequest])
    # One request per linter, holding only the field sets for targets that
    # linter applies to.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target) for target in targets
            if request_type.field_set_type.is_applicable(target))
        for request_type in request_types)

    if lint_subsystem.per_file_caching:
        # One engine request per (linter, field set) pair.
        all_per_file_results = await MultiGet(
            Get(LintResults, LintRequest, request.__class__([field_set]))
            for request in requests for field_set in request.field_sets
            if request.field_sets)

        def key_fn(results: LintResults):
            return results.linter_name

        # NB: We must pre-sort the data for itertools.groupby() to work properly.
        sorted_all_per_files_results = sorted(all_per_file_results, key=key_fn)
        # We consolidate all results for each linter into a single `LintResults`.
        all_results = tuple(
            LintResults(
                itertools.chain.from_iterable(
                    per_file_results.results
                    for per_file_results in all_linter_results),
                linter_name=linter_name,
            ) for linter_name, all_linter_results in itertools.groupby(
                sorted_all_per_files_results, key=key_fn))
    else:
        # One engine request per linter with a non-empty field set.
        all_results = await MultiGet(
            Get(LintResults, LintRequest, request) for request in requests
            if request.field_sets)

    # Sort once by linter name so reports and the summary are deterministic.
    all_results = tuple(
        sorted(all_results, key=lambda results: results.linter_name))

    def get_tool_name(res: LintResults) -> str:
        # Tells write_reports which tool produced each result set.
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        # Blank line separates the summary from the linters' own output.
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            sigil = console.sigil_skipped()
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            # Any failure fails the goal; the last failing linter's code wins.
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
Example #7
0
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    """Run each language's formatters over its targets and write back edits.

    Targets are grouped by language, formatted (optionally per-target with
    `--per-file-caching`), and all changed digests are merged and written to
    the workspace. Always exits 0: formatter rules are expected to raise on
    failure rather than return a failing result (see the final comment).
    """
    language_target_collection_types = union_membership[LanguageFmtTargets]
    # Partition the targets into one per-language collection each.
    language_target_collections = tuple(
        language_target_collection_type(
            Targets(target for target in targets
                    if language_target_collection_type.belongs_to_language(
                        target))) for language_target_collection_type in
        language_target_collection_types)
    # Presumably drops targets without source files — TODO(review): confirm
    # against TargetsWithSourcesRequest's rule.
    targets_with_sources = await MultiGet(
        Get(
            TargetsWithSources,
            TargetsWithSourcesRequest(language_target_collection.targets),
        ) for language_target_collection in language_target_collections)
    # NB: We must convert back the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_language_target_collections = tuple(
        language_target_collection_cls(
            Targets(target for target in language_target_collection.targets
                    if target in language_targets_with_sources))
        for language_target_collection_cls,
        language_target_collection, language_targets_with_sources in zip(
            language_target_collection_types, language_target_collections,
            targets_with_sources) if language_targets_with_sources)

    if fmt_subsystem.per_file_caching:
        # One request per (language, target) pair for finer caching.
        per_language_results = await MultiGet(
            Get(
                LanguageFmtResults,
                LanguageFmtTargets,
                language_target_collection.__class__(Targets([target])),
            )
            for language_target_collection in valid_language_target_collections
            for target in language_target_collection.targets)
    else:
        per_language_results = await MultiGet(
            Get(LanguageFmtResults, LanguageFmtTargets,
                language_target_collection)
            for language_target_collection in valid_language_target_collections
        )

    individual_results = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(language_result.output
                            for language_result in per_language_results
                            if language_result.did_change)
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen due to us grouping each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest,
                                            MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    if individual_results:
        # Blank line separates the summary from the formatters' own output.
        console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.sigil_succeeded_with_edits()
            status = "made changes"
        elif all(result.skipped for result in results):
            sigil = console.sigil_skipped()
            status = "skipped"
        else:
            sigil = console.sigil_succeeded()
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
Example #8
0
async def run_publish(console: Console, publish: PublishSubsystem) -> Publish:
    """Package all publishable targets, then run their publish processes
    interactively.

    Prints a per-artifact status summary to stderr after all interactive
    output, optionally writes the structured results as JSON to
    `--publish-output`, and returns a `Publish` whose exit code is the last
    failing process's code (0 if everything published or was skipped).
    """
    target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PackageFieldSet,
                goal_description="",
                # Don't warn/error here because it's already covered by `PublishFieldSet`.
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.
                ignore,
            ),
        ),
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PublishFieldSet,
                goal_description="the `publish` goal",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.
                warn,
            ),
        ),
    )

    # Only keep field sets that both package something, and have something to publish.
    targets = set(target_roots_to_package_field_sets.targets).intersection(
        set(target_roots_to_publish_field_sets.targets))

    if not targets:
        return Publish(exit_code=0)

    # Build all packages and request the processes to run for each field set.
    processes = await MultiGet(
        Get(
            PublishProcesses,
            PublishProcessesRequest(
                target_roots_to_package_field_sets.mapping[tgt],
                target_roots_to_publish_field_sets.mapping[tgt],
            ),
        ) for tgt in targets)

    # Run all processes interactively.
    exit_code: int = 0
    outputs: list[PublishOutputData] = []
    results: list[str] = []

    for pub in chain.from_iterable(processes):
        if not pub.process:
            # No process means the plugin opted out of publishing this
            # artifact; report it as skipped rather than running anything.
            sigil = console.sigil_skipped()
            status = "skipped"
            if pub.description:
                status += f" {pub.description}"
            for name in pub.names:
                results.append(f"{sigil} {name} {status}.")
            outputs.append(pub.get_output_data(published=False, status=status))
            continue

        logger.debug(f"Execute {pub.process}")
        res = await Effect(InteractiveProcessResult, InteractiveProcess,
                           pub.process)
        if res.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "published"
            prep = "to"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            prep = "for"
            # Any failure fails the goal; the last failing process's code wins.
            exit_code = res.exit_code

        if pub.description:
            status += f" {prep} {pub.description}"

        for name in pub.names:
            results.append(f"{sigil} {name} {status}.")

        outputs.append(
            pub.get_output_data(
                exit_code=res.exit_code,
                published=res.exit_code == 0,
                status=status,
            ))

    console.print_stderr("")
    if not results:
        sigil = console.sigil_skipped()
        console.print_stderr(f"{sigil} Nothing published.")

    # We collect all results to the end, so all output from the interactive processes are done,
    # before printing the results.
    for line in results:
        console.print_stderr(line)

    # Log structured output
    output_data = json.dumps(outputs,
                             cls=_PublishJsonEncoder,
                             indent=2,
                             sort_keys=True)
    logger.debug(f"Publish result data:\n{output_data}")
    if publish.output:
        with open(publish.output, mode="w") as fd:
            fd.write(output_data)

    return Publish(exit_code)
Example #9
0
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    specs_snapshot: SpecsSnapshot,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    """Run the selected target-based and file-based linters and summarize.

    Target-based requests are split into size-bounded batches (see
    `--lint-batch-size`) for cache granularity; batch results are merged back
    into one `LintResults` per linter. Writes tool reports under the dist dir
    and returns a `Lint` whose exit code is non-zero on any failure.
    """
    target_request_types = cast("Iterable[type[LintTargetsRequest]]",
                                union_membership[LintTargetsRequest])
    file_request_types = union_membership[LintFilesRequest]
    # Resolves `--only` names against both kinds of linter.
    specified_names = determine_specified_tool_names(
        "lint",
        lint_subsystem.only,
        target_request_types,
        extra_valid_names={request.name
                           for request in file_request_types},
    )
    # One request per selected target-based linter, holding only applicable
    # field sets.
    target_requests = tuple(
        request_type(
            request_type.field_set_type.create(target) for target in targets
            if (request_type.name in specified_names
                and request_type.field_set_type.is_applicable(target)))
        for request_type in target_request_types)
    # File-based linters operate on the raw spec'd files, when any exist.
    file_requests = (tuple(
        request_type(specs_snapshot.snapshot.files)
        for request_type in file_request_types
        if request_type.name in specified_names)
                     if specs_snapshot.snapshot.files else ())

    def address_str(fs: FieldSet) -> str:
        # Batch key: partition field sets by their target address spec.
        return fs.address.spec

    all_requests = [
        *(Get(LintResults, LintTargetsRequest,
              request.__class__(field_set_batch))
          for request in target_requests if request.field_sets
          for field_set_batch in partition_sequentially(
              request.field_sets,
              key=address_str,
              size_target=lint_subsystem.batch_size,
              size_max=4 * lint_subsystem.batch_size,
          )),
        *(Get(LintResults, LintFilesRequest, request)
          for request in file_requests),
    ]
    all_batch_results = cast(
        "tuple[LintResults, ...]",
        await MultiGet(all_requests),  # type: ignore[arg-type]
    )

    def key_fn(results: LintResults):
        return results.linter_name

    # NB: We must pre-sort the data for itertools.groupby() to work properly.
    sorted_all_batch_results = sorted(all_batch_results, key=key_fn)
    # We consolidate all results for each linter into a single `LintResults`.
    all_results = tuple(
        sorted(
            (LintResults(
                itertools.chain.from_iterable(
                    batch_results.results
                    for batch_results in all_linter_results),
                linter_name=linter_name,
            ) for linter_name, all_linter_results in itertools.groupby(
                sorted_all_batch_results, key=key_fn)),
            key=key_fn,
        ))

    def get_name(res: LintResults) -> str:
        # Tells write_reports which tool produced each result set.
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_name=get_name,
    )

    exit_code = 0
    if all_results:
        # Blank line separates the summary from the linters' own output.
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            # Skipped linters are omitted from the summary entirely.
            continue
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            # Any failure fails the goal; the last failing linter's code wins.
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
Example #10
0
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Test:
    """Run all applicable tests (or, with `--debug`, run them interactively).

    Prints a per-test summary, materializes extra outputs and merged XML
    results into the workspace, and — with `--use-coverage` — generates and
    materializes coverage reports. Returns a `Test` whose exit code is
    non-zero if any test failed (or 2 on insufficient coverage).
    """
    if test_subsystem.debug:
        # Debug mode: run each test's process interactively in the
        # foreground instead of through the normal batch path, then return.
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.
                error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets)
        exit_code = 0
        for debug_request in debug_requests:
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    # Presumably drops field sets whose targets have no source files —
    # TODO(review): confirm against FieldSetsWithSourcesRequest's rule.
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    # Run all tests concurrently.
    results = await MultiGet(
        Get(TestResult, TestFieldSet, field_set)
        for field_set in field_sets_with_sources)

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            # Any failure fails the goal; the last failing test's code wins.
            exit_code = result.exit_code
        console.print_stderr(f"{sigil} {result.address} {status}.")
        if result.extra_output and result.extra_output.files:
            # Materialize per-test extra outputs under dist/test/<target>.
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" /
                                result.address.path_safe_spec),
            )

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results
                     if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, you can't sort by `types`, so we call `str()` on it.
        all_coverage_data = sorted(
            (result.coverage_data
             for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types = {
            collection_cls.element_type: collection_cls  # type: ignore[misc]
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (e.g., console, xml, html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: list[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            # Best-effort open of the reports in the user's configured viewer.
            open_files = await Get(
                OpenFiles,
                OpenFilesRequest(coverage_report_files,
                                 error_if_open_not_found=False))
            for process in open_files.processes:
                interactive_runner.run(process)

        for coverage_reports in coverage_reports_collections:
            if coverage_reports.coverage_insufficient:
                logger.error("Test goal failed due to insufficient coverage. "
                             "See coverage reports for details.")
                # coverage.py uses 2 to indicate failure due to insufficient coverage.
                # We may as well follow suit in the general case, for all languages.
                exit_code = 2

    return Test(exit_code)
Example #11
0
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    """Run the selected formatters over their applicable targets in batches.

    Targets are grouped by the exact ordered sequence of formatters that
    apply to them (so each batch runs its formatters sequentially over a
    shared digest), then split into size-bounded batches. Changed digests are
    merged and written back to the workspace. Always exits 0: formatter rules
    are expected to raise on failure (see the final comment).
    """
    request_types = union_membership[FmtRequest]
    # Resolves (and presumably validates) the `--only` tool names.
    specified_names = determine_specified_tool_names("fmt", fmt_subsystem.only,
                                                     request_types)

    # Group targets by the sequence of FmtRequests that apply to them.
    targets_by_fmt_request_order = defaultdict(list)
    for target in targets:
        fmt_requests = []
        for fmt_request in request_types:
            valid_name = fmt_request.name in specified_names
            if valid_name and fmt_request.field_set_type.is_applicable(
                    target):  # type: ignore[misc]
                fmt_requests.append(fmt_request)
        if fmt_requests:
            targets_by_fmt_request_order[tuple(fmt_requests)].append(target)

    # Spawn sequential formatting per unique sequence of FmtRequests.
    per_language_results = await MultiGet(
        Get(
            _LanguageFmtResults,
            _LanguageFmtRequest(fmt_requests, Targets(target_batch)),
        ) for fmt_requests, targets in targets_by_fmt_request_order.items()
        for target_batch in partition_sequentially(
            targets,
            key=lambda t: t.address.spec,
            size_target=fmt_subsystem.batch_size,
            size_max=4 * fmt_subsystem.batch_size,
        ))

    individual_results = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(language_result.output
                            for language_result in per_language_results
                            if language_result.did_change)
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen due to us grouping each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest,
                                            MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    if individual_results:
        # Blank line separates the summary from the formatters' own output.
        console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. Multiple results per formatter can occur because targets are
    # processed in batches (see `partition_sequentially` above).
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.sigil_succeeded_with_edits()
            status = "made changes"
        elif all(result.skipped for result in results):
            # Fully skipped formatters are omitted from the summary.
            continue
        else:
            sigil = console.sigil_succeeded()
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)