Example #1
# `lint_subsystem` is a parameter of the enclosing rule; keying on each field
# set's address spec keeps the batch boundaries deterministic.
def batch(field_sets: Iterable[FieldSet]) -> Iterator[list[FieldSet]]:
    yield from partition_sequentially(
        field_sets,
        key=lambda fs: fs.address.spec,
        size_target=lint_subsystem.batch_size,
        size_max=4 * lint_subsystem.batch_size,
    )
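
All of these examples call partition_sequentially with the same shape of arguments: an iterable of items, a string key, a size_target, and usually size_max = 4 * size_target. As a rough mental model, here is a minimal sketch of a compatible function. This is an illustration only, not the real implementation, which additionally picks stable boundary keys so that small input changes do not reshuffle every batch (and thereby invalidate caches):

from __future__ import annotations

from typing import Callable, Iterable, Iterator, TypeVar

_T = TypeVar("_T")


def partition_sequentially_sketch(
    items: Iterable[_T],
    *,
    key: Callable[[_T], str],
    size_target: int,
    size_max: int | None = None,
) -> Iterator[list[_T]]:
    # Sort by key so that batch membership is deterministic, then cut
    # consecutive batches of roughly `size_target` items. The real function
    # uses `size_max` as a hard cap while searching for a stable split point;
    # this sketch simply ignores it.
    batch: list[_T] = []
    for item in sorted(items, key=key):
        batch.append(item)
        if len(batch) >= size_target:
            yield batch
            batch = []
    if batch:
        yield batch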
Example #2
# Excerpt from a `lint` goal rule; the enclosing module's imports and the rule
# decorator are omitted in this example.
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    specs_snapshot: SpecsSnapshot,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    target_request_types = cast("Iterable[type[LintTargetsRequest]]",
                                union_membership[LintTargetsRequest])
    file_request_types = union_membership[LintFilesRequest]
    specified_names = determine_specified_tool_names(
        "lint",
        lint_subsystem.only,
        target_request_types,
        extra_valid_names={request.name for request in file_request_types},
    )
    target_requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.name in specified_names
            and request_type.field_set_type.is_applicable(target)
        )
        for request_type in target_request_types
    )
    file_requests = (
        tuple(
            request_type(specs_snapshot.snapshot.files)
            for request_type in file_request_types
            if request_type.name in specified_names
        )
        if specs_snapshot.snapshot.files
        else ()
    )

    def address_str(fs: FieldSet) -> str:
        return fs.address.spec

    all_requests = [
        *(
            Get(LintResults, LintTargetsRequest, request.__class__(field_set_batch))
            for request in target_requests
            if request.field_sets
            for field_set_batch in partition_sequentially(
                request.field_sets,
                key=address_str,
                size_target=lint_subsystem.batch_size,
                size_max=4 * lint_subsystem.batch_size,
            )
        ),
        *(Get(LintResults, LintFilesRequest, request) for request in file_requests),
    ]
    all_batch_results = cast(
        "tuple[LintResults, ...]",
        await MultiGet(all_requests),  # type: ignore[arg-type]
    )

    def key_fn(results: LintResults) -> str:
        return results.linter_name

    # NB: We must pre-sort the data for itertools.groupby() to work properly.
    sorted_all_batch_results = sorted(all_batch_results, key=key_fn)
    # We consolidate all results for each linter into a single `LintResults`.
    all_results = tuple(
        sorted(
            (
                LintResults(
                    itertools.chain.from_iterable(
                        batch_results.results for batch_results in all_linter_results
                    ),
                    linter_name=linter_name,
                )
                for linter_name, all_linter_results in itertools.groupby(
                    sorted_all_batch_results, key=key_fn
                )
            ),
            key=key_fn,
        )
    )

    def get_name(res: LintResults) -> str:
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_name=get_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            continue
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
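
The "NB" about pre-sorting above is load-bearing: itertools.groupby only merges adjacent runs, so without the sort a linter whose batch results are interleaved with another's would show up more than once in the summary. A tiny self-contained demonstration:

import itertools

names = ["flake8", "black", "flake8"]
# Without sorting, "flake8" yields two groups because its runs are not adjacent.
print([k for k, _ in itertools.groupby(names)])          # ['flake8', 'black', 'flake8']
# After sorting, each name appears exactly once.
print([k for k, _ in itertools.groupby(sorted(names))])  # ['black', 'flake8']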
Example #3
# `size_target` comes from the enclosing test's parametrization. Buckets are
# collected into a set of tuples because the assertions only care about
# membership, not batch order.
def partitioned_buckets(items: list[str]) -> set[tuple[str, ...]]:
    return {
        tuple(p)
        for p in partition_sequentially(items, key=str, size_target=size_target)
    }
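
A test built on this helper can assert order-independent properties of the partitioning. A sketch of one such assertion, with a hypothetical size_target standing in for the parametrized value the real test presumably uses:

size_target = 2  # stand-in for the parametrized fixture value

items = [f"item{i}" for i in range(5)]
buckets = partitioned_buckets(items)
# Every input appears exactly once across the buckets, no matter where the
# boundaries were cut.
assert sorted(x for bucket in buckets for x in bucket) == sorted(items)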
Example #4
# Excerpt from a `fmt` goal rule; as in Example #2, the module's imports and the
# rule decorator are omitted.
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    request_types = union_membership[FmtRequest]
    specified_names = determine_specified_tool_names("fmt", fmt_subsystem.only,
                                                     request_types)

    # Group targets by the sequence of FmtRequests that apply to them.
    targets_by_fmt_request_order = defaultdict(list)
    for target in targets:
        fmt_requests = []
        for fmt_request in request_types:
            valid_name = fmt_request.name in specified_names
            if valid_name and fmt_request.field_set_type.is_applicable(target):  # type: ignore[misc]
                fmt_requests.append(fmt_request)
        if fmt_requests:
            targets_by_fmt_request_order[tuple(fmt_requests)].append(target)

    # Spawn sequential formatting per unique sequence of FmtRequests.
    per_language_results = await MultiGet(
        Get(
            _LanguageFmtResults,
            _LanguageFmtRequest(fmt_requests, Targets(target_batch)),
        )
        for fmt_requests, targets in targets_by_fmt_request_order.items()
        for target_batch in partition_sequentially(
            targets,
            key=lambda t: t.address.spec,
            size_target=fmt_subsystem.batch_size,
            size_max=4 * fmt_subsystem.batch_size,
        )
    )

    individual_results = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(language_result.output
                            for language_result in per_language_results
                            if language_result.did_change)
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen due to us grouping each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest,
                                            MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    # `individual_results` is always non-empty here (we returned early above).
    console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.sigil_succeeded_with_edits()
            status = "made changes"
        elif all(result.skipped for result in results):
            continue
        else:
            sigil = console.sigil_succeeded()
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
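
The grouping step at the top of fmt keys a defaultdict(list) by the tuple of applicable request types, so all targets that share the same formatter pipeline travel through partition_sequentially together. The same pattern in miniature, with hypothetical stand-in data:

from collections import defaultdict

# Hypothetical targets mapped to the (ordered) formatters that apply to them.
applicable = {
    "src/app.py": ("black", "isort"),
    "src/lib.py": ("black", "isort"),
    "docs/conf.py": ("black",),
}

targets_by_order: defaultdict[tuple[str, ...], list[str]] = defaultdict(list)
for target, fmt_order in applicable.items():
    targets_by_order[fmt_order].append(target)

# Both src/ files share one pipeline and will be batched together.
assert targets_by_order[("black", "isort")] == ["src/app.py", "src/lib.py"]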