async def check(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    dist_dir: DistDir,
    union_membership: UnionMembership,
) -> Check:
    """Run every registered checker over the applicable targets and print per-checker status.

    Returns a `Check` whose exit code is the last non-zero checker exit code, or 0 if all
    checkers succeeded or were skipped.
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[CheckRequest])

    # One request per checker type, holding field sets for every target it applies to.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(tgt)
            for tgt in targets
            if request_type.field_set_type.is_applicable(tgt)
        )
        for request_type in request_types
    )

    # Filter out field sets whose targets have no sources, then rebuild each request
    # from the surviving field sets; empty requests are dropped entirely.
    field_sets_with_sources = await MultiGet(
        Get(FieldSetsWithSources, FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests
    )
    valid_requests = tuple(
        request_cls(filtered)
        for request_cls, filtered in zip(request_types, field_sets_with_sources)
        if filtered
    )

    all_results = await MultiGet(
        Get(CheckResults, CheckRequest, request) for request in valid_requests
    )

    def get_tool_name(res: CheckResults) -> str:
        return res.checker_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=CheckSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    # Report in a stable, alphabetical order by checker name.
    for results in sorted(all_results, key=lambda r: r.checker_name):
        if results.skipped:
            sigil, status = console.sigil_skipped(), "skipped"
        elif results.exit_code == 0:
            sigil, status = console.sigil_succeeded(), "succeeded"
        else:
            sigil, status = console.sigil_failed(), "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.checker_name} {status}.")

    return Check(exit_code)
async def check(
    console: Console,
    workspace: Workspace,
    targets: FilteredTargets,
    dist_dir: DistDir,
    union_membership: UnionMembership,
    check_subsystem: CheckSubsystem,
) -> Check:
    """Run the requested checkers over the applicable targets and print per-checker status.

    Honors `--check-only` via `determine_specified_tool_names`, writes any tool reports under
    the dist dir, and returns a `Check` whose exit code is the last non-zero checker exit code
    (0 when everything succeeded or was skipped).
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[CheckRequest])
    # Validates `--only` values and resolves them to the set of checker names to run.
    specified_names = determine_specified_tool_names("check", check_subsystem.only, request_types)
    # One request per checker type, holding field sets for every applicable target;
    # checkers not selected by `--only` end up with no field sets.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if (
                request_type.name in specified_names
                and request_type.field_set_type.is_applicable(target)
            )
        )
        for request_type in request_types
    )
    # Only dispatch checkers that actually have something to check.
    all_results = await MultiGet(
        Get(CheckResults, CheckRequest, request) for request in requests if request.field_sets
    )

    def get_name(res: CheckResults) -> str:
        return res.checker_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=CheckSubsystem.name,
        get_name=get_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    # Stable, alphabetical summary; skipped checkers are omitted entirely.
    for results in sorted(all_results, key=lambda results: results.checker_name):
        if results.skipped:
            continue
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.checker_name} {status}.")

    return Check(exit_code)
def _print_results(
    console: Console,
    results: tuple[LintResults, ...],
    formatter_failed: bool,
) -> None:
    """Print a one-line status per non-skipped linter, plus a `fmt` hint on formatter failure."""
    if results:
        console.print_stderr("")

    for result in results:
        if result.skipped:
            continue
        succeeded = result.exit_code == 0
        sigil = console.sigil_succeeded() if succeeded else console.sigil_failed()
        status = "succeeded" if succeeded else "failed"
        console.print_stderr(f"{sigil} {result.linter_name} {status}.")

    if formatter_failed:
        console.print_stderr("")
        console.print_stderr(f"(One or more formatters failed. Run `{bin_name()} fmt` to fix.)")
def _format_test_summary(result: TestResult, run_id: RunId, console: Console) -> str:
    """Format the test summary line printed to the console.

    Produces e.g. `✓ path/to:tgt succeeded in 1.23s (memoized).` — the elapsed-time and
    source annotations are each optional and only included when available.
    """
    assert (
        result.result_metadata is not None
    ), "Skipped test results should not be outputted in the test summary"

    if result.exit_code == 0:
        sigil = console.sigil_succeeded()
        status = "succeeded"
    else:
        sigil = console.sigil_failed()
        status = "failed"

    # Human-readable description of where this result came from; may be absent.
    source = _SOURCE_MAP.get(result.result_metadata.source(run_id))
    source_print = f"({source})" if source else ""

    elapsed_print = ""
    total_elapsed_ms = result.result_metadata.total_elapsed_ms
    if total_elapsed_ms is not None:
        elapsed_secs = total_elapsed_ms / 1000
        elapsed_print = f"in {elapsed_secs:.2f}s"

    # Join only the parts that exist, so we never emit a double space (elapsed missing but
    # source present) or a stray space before the period (both missing), as the previous
    # unconditional f" {elapsed_print}{source_print}" did.
    suffix = "".join(f" {part}" for part in (elapsed_print, source_print) if part)
    return f"{sigil} {result.address} {status}{suffix}."
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    """Run every registered linter over the applicable targets and print per-linter status.

    With `--per-file-caching`, each field set is linted in its own cacheable request and the
    per-file results are then consolidated into one `LintResults` per linter. Tool reports are
    written under the dist dir. Returns a `Lint` whose exit code is the last non-zero linter
    exit code (0 when everything succeeded or was skipped).
    """
    request_types = cast("Iterable[type[StyleRequest]]", union_membership[LintRequest])
    # One request per linter type, holding field sets for every target it applies to.
    requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.field_set_type.is_applicable(target)
        )
        for request_type in request_types
    )

    if lint_subsystem.per_file_caching:
        # Fan out one single-field-set request per file so each result caches independently.
        all_per_file_results = await MultiGet(
            Get(LintResults, LintRequest, request.__class__([field_set]))
            for request in requests
            for field_set in request.field_sets
            if request.field_sets
        )

        def key_fn(results: LintResults):
            return results.linter_name

        # NB: We must pre-sort the data for itertools.groupby() to work properly.
        sorted_all_per_files_results = sorted(all_per_file_results, key=key_fn)
        # We consolidate all results for each linter into a single `LintResults`.
        all_results = tuple(
            LintResults(
                itertools.chain.from_iterable(
                    per_file_results.results for per_file_results in all_linter_results
                ),
                linter_name=linter_name,
            )
            for linter_name, all_linter_results in itertools.groupby(
                sorted_all_per_files_results, key=key_fn
            )
        )
    else:
        # One request per linter; skip linters with nothing to lint.
        all_results = await MultiGet(
            Get(LintResults, LintRequest, request) for request in requests if request.field_sets
        )

    # Stable, alphabetical ordering for the summary below.
    all_results = tuple(sorted(all_results, key=lambda results: results.linter_name))

    def get_tool_name(res: LintResults) -> str:
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_tool_name=get_tool_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            sigil = console.sigil_skipped()
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
async def run_publish(console: Console, publish: PublishSubsystem) -> Publish:
    """Package and publish all applicable target roots, running each publish step interactively.

    Prints a per-artifact summary after all interactive processes have finished, logs the
    structured results as JSON, and optionally writes them to `--publish-output`. Returns a
    `Publish` whose exit code is the last non-zero process exit code (0 otherwise).
    """
    target_roots_to_package_field_sets, target_roots_to_publish_field_sets = await MultiGet(
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PackageFieldSet,
                goal_description="",
                # Don't warn/error here because it's already covered by `PublishFieldSet`.
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.ignore,
            ),
        ),
        Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                PublishFieldSet,
                goal_description="the `publish` goal",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
            ),
        ),
    )

    # Only keep field sets that both package something, and have something to publish.
    targets = set(target_roots_to_package_field_sets.targets).intersection(
        set(target_roots_to_publish_field_sets.targets)
    )

    if not targets:
        return Publish(exit_code=0)

    # Build all packages and request the processes to run for each field set.
    processes = await MultiGet(
        Get(
            PublishProcesses,
            PublishProcessesRequest(
                target_roots_to_package_field_sets.mapping[tgt],
                target_roots_to_publish_field_sets.mapping[tgt],
            ),
        )
        for tgt in targets
    )

    # Run all processes interactively.
    exit_code: int = 0
    outputs: list[PublishOutputData] = []
    results: list[str] = []

    for pub in chain.from_iterable(processes):
        # No process attached means this artifact was deliberately skipped (e.g. `--skip`).
        if not pub.process:
            sigil = console.sigil_skipped()
            status = "skipped"
            if pub.description:
                status += f" {pub.description}"
            for name in pub.names:
                results.append(f"{sigil} {name} {status}.")
            outputs.append(pub.get_output_data(published=False, status=status))
            continue

        logger.debug(f"Execute {pub.process}")
        # Run the publish process in the foreground so it can prompt (e.g. for credentials).
        res = await Effect(InteractiveProcessResult, InteractiveProcess, pub.process)
        if res.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "published"
            prep = "to"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            prep = "for"
            exit_code = res.exit_code
        if pub.description:
            status += f" {prep} {pub.description}"

        for name in pub.names:
            results.append(f"{sigil} {name} {status}.")

        outputs.append(
            pub.get_output_data(
                exit_code=res.exit_code,
                published=res.exit_code == 0,
                status=status,
            )
        )

    console.print_stderr("")
    if not results:
        sigil = console.sigil_skipped()
        console.print_stderr(f"{sigil} Nothing published.")

    # We collect all results to the end, so all output from the interactive processes are done,
    # before printing the results.
    for line in results:
        console.print_stderr(line)

    # Log structured output
    output_data = json.dumps(outputs, cls=_PublishJsonEncoder, indent=2, sort_keys=True)
    logger.debug(f"Publish result data:\n{output_data}")
    if publish.output:
        with open(publish.output, mode="w") as fd:
            fd.write(output_data)

    return Publish(exit_code)
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    specs_snapshot: SpecsSnapshot,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Lint:
    """Run the requested linters (target-based and file-based) and print per-linter status.

    Target-based linters receive their field sets split into sequential batches (for cache
    granularity and parallelism); file-based linters receive the snapshot's file paths. Batch
    results are consolidated into one `LintResults` per linter, tool reports are written under
    the dist dir, and the returned `Lint` carries the last non-zero linter exit code (0 when
    everything succeeded or was skipped).
    """
    target_request_types = cast(
        "Iterable[type[LintTargetsRequest]]", union_membership[LintTargetsRequest]
    )
    file_request_types = union_membership[LintFilesRequest]
    # Validates `--only` values against both kinds of linter names.
    specified_names = determine_specified_tool_names(
        "lint",
        lint_subsystem.only,
        target_request_types,
        extra_valid_names={request.name for request in file_request_types},
    )
    # One request per target-based linter; linters not selected by `--only` get no field sets.
    target_requests = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if (
                request_type.name in specified_names
                and request_type.field_set_type.is_applicable(target)
            )
        )
        for request_type in target_request_types
    )
    # File-based linters only run when the specs snapshot matched any files at all.
    file_requests = (
        tuple(
            request_type(specs_snapshot.snapshot.files)
            for request_type in file_request_types
            if request_type.name in specified_names
        )
        if specs_snapshot.snapshot.files
        else ()
    )

    def address_str(fs: FieldSet) -> str:
        return fs.address.spec

    # Split each linter's field sets into address-ordered batches around `--lint-batch-size`,
    # then dispatch all target batches and file requests concurrently.
    all_requests = [
        *(
            Get(LintResults, LintTargetsRequest, request.__class__(field_set_batch))
            for request in target_requests
            if request.field_sets
            for field_set_batch in partition_sequentially(
                request.field_sets,
                key=address_str,
                size_target=lint_subsystem.batch_size,
                size_max=4 * lint_subsystem.batch_size,
            )
        ),
        *(Get(LintResults, LintFilesRequest, request) for request in file_requests),
    ]
    all_batch_results = cast(
        "tuple[LintResults, ...]",
        await MultiGet(all_requests),  # type: ignore[arg-type]
    )

    def key_fn(results: LintResults):
        return results.linter_name

    # NB: We must pre-sort the data for itertools.groupby() to work properly.
    sorted_all_batch_results = sorted(all_batch_results, key=key_fn)
    # We consolidate all results for each linter into a single `LintResults`.
    all_results = tuple(
        sorted(
            (
                LintResults(
                    itertools.chain.from_iterable(
                        batch_results.results for batch_results in all_linter_results
                    ),
                    linter_name=linter_name,
                )
                for linter_name, all_linter_results in itertools.groupby(
                    sorted_all_batch_results, key=key_fn
                )
            ),
            key=key_fn,
        )
    )

    def get_name(res: LintResults) -> str:
        return res.linter_name

    write_reports(
        all_results,
        workspace,
        dist_dir,
        goal_name=LintSubsystem.name,
        get_name=get_name,
    )

    exit_code = 0
    if all_results:
        console.print_stderr("")
    # Summary in linter-name order; skipped linters are omitted entirely.
    for results in all_results:
        if results.skipped:
            continue
        elif results.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Test:
    """Run tests for all applicable targets, printing a per-test summary.

    In `--debug` mode, each test's debug process runs interactively in sequence and the goal
    returns the last non-zero exit code. Otherwise tests run through the engine; extra outputs
    and merged XML results are written to the workspace, and coverage reports are generated
    (and optionally opened) when `--use-coverage` is set. Insufficient coverage forces exit
    code 2.
    """
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        # Run each debug process in the foreground, one at a time; remember the last failure.
        for debug_request in debug_requests:
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    # Filter out field sets whose targets have no sources before running anything.
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets),
    )
    results = await MultiGet(
        Get(TestResult, TestFieldSet, field_set) for field_set in field_sets_with_sources
    )

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = result.exit_code
        console.print_stderr(f"{sigil} {result.address} {status}.")
        # Materialize any extra per-test outputs under dist/test/<address>.
        if result.extra_output and result.extra_output.files:
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" / result.address.path_safe_spec),
            )

    # Merge every test's XML results into one digest and write it to the workspace.
    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, you can't sort by `types`, so we call `str()` on it.
        all_coverage_data = sorted(
            (result.coverage_data for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        # Map each concrete coverage-data type to its registered collection type.
        coverage_types_to_collection_types = {
            collection_cls.element_type: collection_cls  # type: ignore[misc]
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (e.g., console, xml, html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )

        coverage_report_files: list[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles,
                OpenFilesRequest(coverage_report_files, error_if_open_not_found=False),
            )
            for process in open_files.processes:
                interactive_runner.run(process)

        for coverage_reports in coverage_reports_collections:
            if coverage_reports.coverage_insufficient:
                logger.error(
                    "Test goal failed due to insufficient coverage. "
                    "See coverage reports for details."
                )
                # coverage.py uses 2 to indicate failure due to insufficient coverage.
                # We may as well follow suit in the general case, for all languages.
                exit_code = 2

    return Test(exit_code)