async def check(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    dist_dir: DistDir,
    union_membership: UnionMembership,
) -> Check:
    """Run every registered checker over the applicable targets and print a summary.

    Returns a `Check` whose exit code is non-zero if any checker failed.
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[CheckRequest])
    # One request per checker, holding the field sets of every applicable target.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.field_set_type.is_applicable(target)
        )
        for request_type in request_types
    )

    # Drop field sets whose targets have no sources to check.
    field_sets_with_sources = await MultiGet(
        Get(FieldSetsWithSources, FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests
    )
    valid_requests = tuple(
        request_cls(request)
        for request_cls, request in zip(request_types, field_sets_with_sources)
        if request
    )

    all_results = await MultiGet(
        Get(CheckResults, CheckRequest, request) for request in valid_requests
    )

    def get_tool_name(res: CheckResults) -> str:
        return res.checker_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=CheckSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    # Summarize one line per checker, sorted for deterministic output.
    for results in sorted(all_results, key=lambda results: results.checker_name):
        if results.skipped:
            sigil, status = console.sigil_skipped(), "skipped"
        elif results.exit_code == 0:
            sigil, status = console.sigil_succeeded(), "succeeded"
        else:
            sigil, status = console.sigil_failed(), "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.checker_name} {status}.")

    return Check(exit_code)
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    """Run every registered linter over the applicable targets and print a summary.

    With `--per-file-caching`, each field set is linted in its own process invocation
    (for finer-grained caching) and the per-file results are re-consolidated into one
    `LintResults` per linter before reporting.
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[LintRequest])
    # One request per linter, holding the field sets of every applicable target.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.field_set_type.is_applicable(target)
        )
        for request_type in request_types
    )

    if lint_subsystem.per_file_caching:
        all_per_file_results = await MultiGet(
            Get(LintResults, LintRequest, request.__class__([field_set]))
            for request in requests
            for field_set in request.field_sets
            if request.field_sets
        )

        def key_fn(results: LintResults):
            return results.linter_name

        # NB: We must pre-sort the data for itertools.groupby() to work properly.
        sorted_all_per_files_results = sorted(all_per_file_results, key=key_fn)
        # We consolidate all results for each linter into a single `LintResults`.
        all_results = tuple(
            LintResults(
                itertools.chain.from_iterable(
                    per_file_results.results for per_file_results in grouped_results
                ),
                linter_name=linter_name,
            )
            for linter_name, grouped_results in itertools.groupby(
                sorted_all_per_files_results, key=key_fn
            )
        )
    else:
        all_results = await MultiGet(
            Get(LintResults, LintRequest, request)
            for request in requests
            if request.field_sets
        )

    # Sort for deterministic report and summary order.
    all_results = tuple(sorted(all_results, key=lambda results: results.linter_name))

    def get_tool_name(res: LintResults) -> str:
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            sigil, status = console.sigil_skipped(), "skipped"
        elif results.exit_code == 0:
            sigil, status = console.sigil_succeeded(), "succeeded"
        else:
            sigil, status = console.sigil_failed(), "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    """Autoformat all applicable targets, write changes to the build root, and summarize.

    Always returns exit code 0: formatter rules are expected to be infallible (see the
    note at the end of this function).
    """
    collection_types = union_membership[LanguageFmtTargets]
    # Partition the targets into one collection per language.
    collections = tuple(
        collection_type(
            Targets(
                target for target in targets if collection_type.belongs_to_language(target)
            )
        )
        for collection_type in collection_types
    )

    targets_with_sources = await MultiGet(
        Get(
            TargetsWithSources,
            TargetsWithSourcesRequest(collection.targets),
        )
        for collection in collections
    )
    # NB: We must convert back the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_collections = tuple(
        collection_cls(
            Targets(
                target for target in collection.targets if target in with_sources
            )
        )
        for collection_cls, collection, with_sources in zip(
            collection_types, collections, targets_with_sources
        )
        if with_sources
    )

    if fmt_subsystem.per_file_caching:
        # One formatting request per individual target, for finer-grained caching.
        per_language_results = await MultiGet(
            Get(
                LanguageFmtResults,
                LanguageFmtTargets,
                collection.__class__(Targets([target])),
            )
            for collection in valid_collections
            for target in collection.targets
        )
    else:
        per_language_results = await MultiGet(
            Get(LanguageFmtResults, LanguageFmtTargets, collection)
            for collection in valid_collections
        )

    individual_results = list(
        itertools.chain.from_iterable(
            language_result.results for language_result in per_language_results
        )
    )
    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(
        language_result.output
        for language_result in per_language_results
        if language_result.did_change
    )
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen due to us grouping each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest, MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    if individual_results:
        console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.sigil_succeeded_with_edits()
            status = "made changes"
        elif all(result.skipped for result in results):
            sigil = console.sigil_skipped()
            status = "skipped"
        else:
            sigil = console.sigil_succeeded()
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
async def run_publish(console: Console, publish: PublishSubsystem) -> Publish:
    """Package all publishable targets, run their publish processes, and report.

    Only targets that are both packageable and publishable are considered. Each publish
    process runs interactively; a non-zero exit code from any of them becomes the goal's
    exit code. Structured result data is logged and, with `--output`, written to a file.
    """
    target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PackageFieldSet,
                goal_description="",
                # Don't warn/error here because it's already covered by `PublishFieldSet`.
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.ignore,
            ),
        ),
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PublishFieldSet,
                goal_description="the `publish` goal",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
            ),
        ),
    )

    # Only keep field sets that both package something, and have something to publish.
    targets = set(target_roots_to_package_field_sets.targets) & set(
        target_roots_to_publish_field_sets.targets
    )
    if not targets:
        return Publish(exit_code=0)

    # Build all packages and request the processes to run for each field set.
    processes = await MultiGet(
        Get(
            PublishProcesses,
            PublishProcessesRequest(
                target_roots_to_package_field_sets.mapping[tgt],
                target_roots_to_publish_field_sets.mapping[tgt],
            ),
        )
        for tgt in targets
    )

    # Run all processes interactively.
    exit_code: int = 0
    outputs: list[PublishOutputData] = []
    results: list[str] = []

    for pub in chain.from_iterable(processes):
        if not pub.process:
            # No process to run: record the entry as skipped.
            sigil = console.sigil_skipped()
            status = "skipped"
            if pub.description:
                status += f" {pub.description}"
            results.extend(f"{sigil} {name} {status}." for name in pub.names)
            outputs.append(pub.get_output_data(published=False, status=status))
            continue

        logger.debug(f"Execute {pub.process}")
        res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)
        if res.exit_code == 0:
            sigil, status, prep = console.sigil_succeeded(), "published", "to"
        else:
            sigil, status, prep = console.sigil_failed(), "failed", "for"
            exit_code = res.exit_code
        if pub.description:
            status += f" {prep} {pub.description}"
        results.extend(f"{sigil} {name} {status}." for name in pub.names)
        outputs.append(
            pub.get_output_data(
                exit_code=res.exit_code,
                published=res.exit_code == 0,
                status=status,
            )
        )

    console.print_stderr("")
    if not results:
        sigil = console.sigil_skipped()
        console.print_stderr(f"{sigil} Nothing published.")
    # We collect all results to the end, so all output from the interactive processes are done,
    # before printing the results.
    for line in results:
        console.print_stderr(line)

    # Log structured output
    output_data = json.dumps(outputs, cls=_PublishJsonEncoder, indent=2, sort_keys=True)
    logger.debug(f"Publish result data:\n{output_data}")
    if publish.output:
        with open(publish.output, mode="w") as fd:
            fd.write(output_data)

    return Publish(exit_code)