async def workspace_goal_rule(
    console: Console, workspace: Workspace, digest_request: DigestRequest
) -> WorkspaceGoal:
    snapshot = await Get(Snapshot, CreateDigest, digest_request.create_digest)
    workspace.write_digest(snapshot.digest)
    console.print_stdout(snapshot.files[0], end="")
    return WorkspaceGoal(exit_code=0)
async def export(
    console: Console,
    targets: Targets,
    export_subsystem: ExportSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast(
        "Iterable[type[ExportableDataRequest]]", union_membership.get(ExportableDataRequest)
    )
    requests = tuple(request_type(targets) for request_type in request_types)
    exportables = await MultiGet(
        Get(ExportableData, ExportableDataRequest, request) for request in requests
    )
    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(exp.digest, exp.reldir)) for exp in exportables
    )
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    for exp in exportables:
        for symlink in exp.symlinks:
            # Note that if symlink.source_path is an abspath, join returns it unchanged.
            source_abspath = os.path.join(build_root.path, symlink.source_path)
            link_abspath = os.path.abspath(
                os.path.join(output_dir, exp.reldir, symlink.link_rel_path)
            )
            absolute_symlink(source_abspath, link_abspath)
        console.print_stdout(
            f"Wrote {exp.description} to {os.path.join(output_dir, exp.reldir)}"
        )
    return Export(exit_code=0)
async def run_tests(
    console: Console,
    options: TestOptions,
    runner: InteractiveRunner,
    addresses: BuildFileAddresses,
) -> Test:
    if options.values.debug:
        address = await Get[BuildFileAddress](BuildFileAddresses, addresses)
        addr_debug_request = await Get[AddressAndDebugRequest](Address, address.to_address())
        result = runner.run_local_interactive_process(addr_debug_request.request.ipr)
        return Test(result.process_exit_code)

    results = await MultiGet(
        Get[AddressAndTestResult](Address, addr.to_address()) for addr in addresses
    )
    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(f"{address.reference()} stdout:\n{test_result.stdout}\n")
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues
            # interleaving the two streams.
            console.write_stdout(f"{address.reference()} stderr:\n{test_result.stderr}\n")

    console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(f'{address.reference():80}.....{test_result.status.value:>10}')

    if did_any_fail:
        console.print_stderr(console.red('\nTests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)
def materialize(self, console: Console, workspace: Workspace) -> None:
    workspace.materialize_directory(
        DirectoryToMaterialize(
            self.result_digest,
            path_prefix=str(self.directory_to_materialize_to),
        )
    )
    console.print_stdout(f"\nWrote coverage report to `{self.directory_to_materialize_to}`")
async def generate_pants_ini(console: Console, workspace: Workspace) -> GeneratePantsIni:
    pants_ini_content = dedent(f"""\
        [GLOBAL]
        pants_version: {pants_version}
        """)

    preexisting_snapshot = await Get[Snapshot](PathGlobs(include=('pants.ini',)))
    if preexisting_snapshot.files:
        console.print_stderr(
            "./pants.ini already exists. This goal is only meant to be run the first time you run Pants "
            "in a project.\n\nTo update config values, please directly modify the file."
        )
        return GeneratePantsIni(exit_code=1)

    console.print_stdout(dedent(f"""\
        Adding sensible defaults to ./pants.ini:
        * Pinning `pants_version` to `{pants_version}`.
        """))

    digest = await Get[Digest](InputFilesContent([
        FileContent(path='pants.ini', content=pants_ini_content.encode())
    ]))
    workspace.materialize_directory(DirectoryToMaterialize(digest))

    console.print_stdout(
        "You may modify these values directly in the file at any time. The ./pants script will detect "
        "any changes the next time you run it.\n\nYou are now ready to use Pants!"
    )
    return GeneratePantsIni(exit_code=0)
async def workspace_console_rule(
    console: Console, workspace: Workspace, msg: MessageToConsoleRule
) -> MockWorkspaceGoal:
    digest = await Get(Digest, InputFilesContent, msg.input_files_content)
    output = workspace.materialize_directory(DirectoryToMaterialize(digest))
    console.print_stdout(output.output_paths[0], end='')
    return MockWorkspaceGoal(exit_code=0)
def fmt(console: Console, targets: HydratedTargets) -> Fmt:
    results = yield [
        Get(FmtResult, FmtTarget, target.adaptor)
        for target in targets
        # @union assumes that all targets passed implement the union, so we manually
        # filter the targets we know do; this should probably no-op or log or something
        # configurable for non-matching targets.
        # We also would want to remove the workaround that filters adaptors which have a
        # `sources` attribute.
        # See https://github.com/pantsbuild/pants/issues/4535
        if isinstance(
            target.adaptor,
            (PythonAppAdaptor, PythonTargetAdaptor, PythonTestsAdaptor, PythonBinaryAdaptor),
        )
        and hasattr(target.adaptor, "sources")
    ]

    for result in results:
        files_content = yield Get(FilesContent, Digest, result.digest)
        # TODO: This is hacky and inefficient, and should be replaced by using the Workspace type
        # once that is available on master.
        # Blocked on: https://github.com/pantsbuild/pants/pull/8329
        for file_content in files_content:
            with Path(get_buildroot(), file_content.path).open('wb') as f:
                f.write(file_content.content)
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since we ran an ExecuteRequest, any failure would already have interrupted our flow
    exit_code = 0
    yield Fmt(exit_code)
def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
    test_results = yield [Get(TestResult, Address, address.to_address()) for address in addresses]
    did_any_fail = False
    for address, test_result in zip(addresses, test_results):
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(
                "{} stdout:\n{}\n".format(
                    address.reference(),
                    console.red(test_result.stdout)
                    if test_result.status == Status.FAILURE
                    else test_result.stdout,
                )
            )
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues
            # interleaving the two streams.
            console.write_stdout(
                "{} stderr:\n{}\n".format(
                    address.reference(),
                    console.red(test_result.stderr)
                    if test_result.status == Status.FAILURE
                    else test_result.stderr,
                )
            )

    console.write_stdout("\n")

    for address, test_result in zip(addresses, test_results):
        console.print_stdout('{0:80}.....{1:>10}'.format(address.reference(), test_result.status.value))

    if did_any_fail:
        console.print_stderr(console.red('Tests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    yield Test(exit_code)
async def tailor(
    tailor_subsystem: TailorSubsystem,
    console: Console,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Tailor:
    putative_target_request_types = union_membership[PutativeTargetsRequest]
    putative_targets_results = await MultiGet(
        Get(PutativeTargets, PutativeTargetsRequest, req_type())
        for req_type in putative_target_request_types
    )
    putative_targets = PutativeTargets.merge(putative_targets_results)
    fixed_names_ptgts = await Get(UniquelyNamedPutativeTargets, PutativeTargets, putative_targets)
    fixed_sources_ptgts = await MultiGet(
        Get(DisjointSourcePutativeTarget, PutativeTarget, ptgt)
        for ptgt in fixed_names_ptgts.putative_targets
    )
    ptgts = [dspt.putative_target for dspt in fixed_sources_ptgts]

    if ptgts:
        edited_build_files = await Get(
            EditedBuildFiles,
            EditBuildFilesRequest(PutativeTargets(ptgts), tailor_subsystem.build_file_indent),
        )
        updated_build_files = set(edited_build_files.updated_paths)
        workspace.write_digest(edited_build_files.digest)
        ptgts_by_build_file = group_by_build_file(ptgts)
        for build_file_path, ptgts in ptgts_by_build_file.items():
            verb = "Updated" if build_file_path in updated_build_files else "Created"
            console.print_stdout(f"{verb} {console.blue(build_file_path)}:")
            for ptgt in ptgts:
                console.print_stdout(
                    f" - Added {console.green(ptgt.type_alias)} target "
                    f"{console.cyan(ptgt.address.spec)}"
                )
    return Tailor(0)
async def fmt(
    console: Console,
    targets: HydratedTargets,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    aggregated_results = await MultiGet(
        Get[AggregatedFmtResults](FormatTarget, target.adaptor)
        for target in targets
        if FormatTarget.is_formattable(target.adaptor, union_membership=union_membership)
    )
    individual_results = [
        result
        for aggregated_result in aggregated_results
        for result in aggregated_result.results
    ]

    if not individual_results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other. In practice, this should never happen due
    # to our use of an aggregator rule for each distinct language.
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(
            tuple(aggregated_result.combined_digest for aggregated_result in aggregated_results)
        )
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
async def count_loc(
    console: Console,
    succinct_code_counter: SuccinctCodeCounter,
    specs_snapshot: SpecsSnapshot,
) -> CountLinesOfCode:
    if not specs_snapshot.snapshot.files:
        return CountLinesOfCode(exit_code=0)

    scc_program = await Get(
        DownloadedExternalTool,
        ExternalToolRequest,
        succinct_code_counter.get_request(Platform.current),
    )
    input_digest = await Get(
        Digest, MergeDigests((scc_program.digest, specs_snapshot.snapshot.digest))
    )
    result = await Get(
        ProcessResult,
        Process(
            argv=(scc_program.exe, *succinct_code_counter.args),
            input_digest=input_digest,
            description=(
                f"Count lines of code for {pluralize(len(specs_snapshot.snapshot.files), 'file')}"
            ),
            level=LogLevel.DEBUG,
        ),
    )
    console.print_stdout(result.stdout.decode())
    return CountLinesOfCode(exit_code=0)
def fmt(console: Console, targets: HydratedTargets, union_membership: UnionMembership) -> Fmt:
    results = yield [
        Get(FmtResult, TargetWithSources, target.adaptor)
        for target in targets
        # TODO: make TargetAdaptor return a 'sources' field with an empty snapshot instead of
        # raising to remove the hasattr() checks here!
        if union_membership.is_member(TargetWithSources, target.adaptor)
        and hasattr(target.adaptor, "sources")
    ]

    for result in results:
        files_content = yield Get(FilesContent, Digest, result.digest)
        # TODO: This is hacky and inefficient, and should be replaced by using the Workspace type
        # once that is available on master.
        # Blocked on: https://github.com/pantsbuild/pants/pull/8329
        for file_content in files_content:
            with Path(get_buildroot(), file_content.path).open('wb') as f:
                f.write(file_content.content)
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since we ran an ExecuteRequest, any failure would already have interrupted our flow
    exit_code = 0
    yield Fmt(exit_code)
async def fmt(
    console: Console,
    targets: HydratedTargets,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    results = await MultiGet(
        Get[FmtResult](TargetWithSources, target.adaptor)
        for target in targets
        if TargetWithSources.is_formattable_and_lintable(
            target.adaptor, union_membership=union_membership
        )
    )

    if not results:
        return Fmt(exit_code=0)

    # NB: this will fail if there are any conflicting changes, which we want to happen rather than
    # silently having one result override the other.
    # TODO(#8722): how should we handle multiple auto-formatters touching the same files?
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(tuple(result.digest for result in results))
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
def workspace_console_rule(
    console: Console, workspace: Workspace, msg: MessageToConsoleRule
) -> MockWorkspaceGoal:
    digest = yield Get(Digest, InputFilesContent, msg.input_files_content)
    output = workspace.materialize_directories(
        (DirectoryToMaterialize(path=msg.tmp_dir, directory_digest=digest),)
    )
    output_path = output.dependencies[0].output_paths[0]
    console.print_stdout(str(Path(msg.tmp_dir, output_path)), end='')
    yield MockWorkspaceGoal(exit_code=0)
def materialize(self, console: Console, workspace: Workspace) -> None:
    if not self.results_file:
        return
    output_path = self.results_file.output_path
    workspace.write_digest(
        self.results_file.digest, path_prefix=output_path.parent.as_posix()
    )
    console.print_stdout(f"Wrote {self.linter_name} report to: {output_path.as_posix()}")
async def lint(
    console: Console,
    targets_with_origins: TargetsWithOrigins,
    options: LintOptions,
    union_membership: UnionMembership,
) -> Lint:
    config_collection_types: Iterable[Type[LinterConfigurations]] = union_membership.union_rules[
        LinterConfigurations
    ]

    config_collections: Iterable[LinterConfigurations] = tuple(
        config_collection_type(
            config_collection_type.config_type.create(target_with_origin)
            for target_with_origin in targets_with_origins
            if config_collection_type.config_type.is_valid(target_with_origin.target)
        )
        for config_collection_type in config_collection_types
    )
    config_collections_with_sources: Iterable[ConfigurationsWithSources] = await MultiGet(
        Get[ConfigurationsWithSources](ConfigurationsWithSourcesRequest(config_collection))
        for config_collection in config_collections
    )
    # NB: We must convert the generic ConfigurationsWithSources objects back into their
    # corresponding LinterConfigurations, e.g. back to IsortConfigurations, in order for the union
    # rule to work.
    valid_config_collections: Iterable[LinterConfigurations] = tuple(
        config_collection_cls(config_collection)
        for config_collection_cls, config_collection in zip(
            config_collection_types, config_collections_with_sources
        )
        if config_collection
    )

    if options.values.per_target_caching:
        results = await MultiGet(
            Get[LintResult](LinterConfigurations, config_collection.__class__([config]))
            for config_collection in valid_config_collections
            for config in config_collection
        )
    else:
        results = await MultiGet(
            Get[LintResult](LinterConfigurations, config_collection)
            for config_collection in valid_config_collections
        )

    if not results:
        return Lint(exit_code=0)

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    return Lint(exit_code)
async def dump_java_source_analysis(targets: Targets, console: Console) -> DumpJavaSourceAnalysis:
    java_source_field_sets = [
        JavaFieldSet.create(tgt) for tgt in targets if JavaFieldSet.is_applicable(tgt)
    ]
    java_source_analysis = await MultiGet(
        Get(JavaSourceDependencyAnalysis, SourceFilesRequest([fs.sources]))
        for fs in java_source_field_sets
    )
    java_source_analysis_json = [
        {"address": str(fs.address), **analysis.to_debug_json_dict()}
        for (fs, analysis) in zip(java_source_field_sets, java_source_analysis)
    ]
    console.print_stdout(json.dumps(java_source_analysis_json))
    return DumpJavaSourceAnalysis(exit_code=0)
async def validate(
    console: Console,
    sources_snapshot: SourcesSnapshot,
    validate_subsystem: ValidateSubsystem,
    source_file_validation: SourceFileValidation,
) -> Validate:
    multi_matcher = source_file_validation.get_multi_matcher()
    digest_contents = await Get(DigestContents, Digest, sources_snapshot.snapshot.digest)
    regex_match_results = RegexMatchResults(
        multi_matcher.check_source_file(file_content.path, file_content.content)
        for file_content in sorted(digest_contents, key=lambda fc: fc.path)
    )

    detail_level = validate_subsystem.detail_level
    num_matched_all = 0
    num_nonmatched_some = 0
    for rmr in regex_match_results:
        if not rmr.matching and not rmr.nonmatching:
            continue
        if detail_level == DetailLevel.names:
            if rmr.nonmatching:
                console.print_stdout(rmr.path)
            continue
        if rmr.nonmatching:
            icon = "X"
            num_nonmatched_some += 1
        else:
            icon = "V"
            num_matched_all += 1
        matched_msg = " Matched: {}".format(",".join(rmr.matching)) if rmr.matching else ""
        nonmatched_msg = (
            " Didn't match: {}".format(",".join(rmr.nonmatching)) if rmr.nonmatching else ""
        )
        if detail_level == DetailLevel.all or (
            detail_level == DetailLevel.nonmatching and nonmatched_msg
        ):
            console.print_stdout("{} {}:{}{}".format(icon, rmr.path, matched_msg, nonmatched_msg))

    if detail_level not in (DetailLevel.none, DetailLevel.names):
        console.print_stdout("\n{} files matched all required patterns.".format(num_matched_all))
        console.print_stdout(
            "{} files failed to match at least one required pattern.".format(num_nonmatched_some)
        )

    if num_nonmatched_some:
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    return Validate(exit_code)
async def lint(
    console: Console,
    targets_with_origins: HydratedTargetsWithOrigins,
    options: LintOptions,
    union_membership: UnionMembership,
) -> Lint:
    adaptors_with_origins = tuple(
        TargetAdaptorWithOrigin.create(target_with_origin.target.adaptor, target_with_origin.origin)
        for target_with_origin in targets_with_origins
        if target_with_origin.target.adaptor.has_sources()
    )

    linters: Iterable[Type[Linter]] = union_membership.union_rules[Linter]
    if options.values.per_target_caching:
        results = await MultiGet(
            Get[LintResult](Linter, linter((adaptor_with_origin,)))
            for adaptor_with_origin in adaptors_with_origins
            for linter in linters
            if linter.is_valid_target(adaptor_with_origin)
        )
    else:
        linters_with_valid_targets = {
            linter: tuple(
                adaptor_with_origin
                for adaptor_with_origin in adaptors_with_origins
                if linter.is_valid_target(adaptor_with_origin)
            )
            for linter in linters
        }
        results = await MultiGet(
            Get[LintResult](Linter, linter(valid_targets))
            for linter, valid_targets in linters_with_valid_targets.items()
            if valid_targets
        )

    if not results:
        return Lint(exit_code=0)

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    return Lint(exit_code)
async def export(
    console: Console,
    targets: Targets,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast("Iterable[type[ExportRequest]]", union_membership.get(ExportRequest))
    requests = tuple(request_type(targets) for request_type in request_types)
    all_results = await MultiGet(Get(ExportResults, ExportRequest, request) for request in requests)
    flattened_results = [res for results in all_results for res in results]

    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(result.digest, result.reldir)) for result in flattened_results
    )
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    environment = await Get(Environment, EnvironmentRequest(["PATH"]))
    for result in flattened_results:
        digest_root = os.path.join(build_root.path, output_dir, result.reldir)
        for cmd in result.post_processing_cmds:
            argv = tuple(arg.format(digest_root=digest_root) for arg in cmd.argv)
            ip = InteractiveProcess(
                argv=argv,
                env={"PATH": environment.get("PATH", ""), **cmd.extra_env},
                run_in_workspace=True,
            )
            await Effect(InteractiveProcessResult, InteractiveProcess, ip)

        console.print_stdout(
            f"Wrote {result.description} to {os.path.join(output_dir, result.reldir)}"
        )
    return Export(exit_code=0)
async def lint(
    console: Console, targets: HydratedTargets, union_membership: UnionMembership
) -> Lint:
    results = await MultiGet(
        Get(LintResult, TargetWithSources, target.adaptor)
        for target in targets
        if TargetWithSources.is_formattable_and_lintable(
            target.adaptor, union_membership=union_membership
        )
    )

    if not results:
        return Lint(exit_code=0)

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    return Lint(exit_code)
async def lint(
    console: Console, targets: HydratedTargets, union_membership: UnionMembership
) -> Lint:
    nested_results = await MultiGet(
        Get[LintResults](LintTarget, target.adaptor)
        for target in targets
        if LintTarget.is_lintable(target.adaptor, union_membership=union_membership)
    )
    results = [result for results in nested_results for result in results]

    if not results:
        return Lint(exit_code=0)

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    return Lint(exit_code)
def lint(console: Console, targets: HydratedTargets, union_membership: UnionMembership) -> Lint:
    results = yield [
        Get(LintResult, TargetWithSources, target.adaptor)
        for target in targets
        # TODO: make TargetAdaptor return a 'sources' field with an empty snapshot instead of
        # raising to remove the hasattr() checks here!
        if union_membership.is_member(TargetWithSources, target.adaptor)
        and hasattr(target.adaptor, "sources")
    ]

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    yield Lint(exit_code)
async def fast_test(console: Console, addresses: BuildFileAddresses) -> Test:
    results = await MultiGet(
        Get(AddressAndTestResult, Address, addr.to_address()) for addr in addresses
    )
    did_any_fail = False
    filtered_results = [(x.address, x.test_result) for x in results if x.test_result is not None]

    for address, test_result in filtered_results:
        if test_result.status == Status.FAILURE:
            did_any_fail = True
        if test_result.stdout:
            console.write_stdout(
                "{} stdout:\n{}\n".format(
                    address.reference(),
                    (
                        console.red(test_result.stdout)
                        if test_result.status == Status.FAILURE
                        else test_result.stdout
                    ),
                )
            )
        if test_result.stderr:
            # NB: we write to stdout, rather than to stderr, to avoid potential issues
            # interleaving the two streams.
            console.write_stdout(
                "{} stderr:\n{}\n".format(
                    address.reference(),
                    (
                        console.red(test_result.stderr)
                        if test_result.status == Status.FAILURE
                        else test_result.stderr
                    ),
                )
            )

    console.write_stdout("\n")

    for address, test_result in filtered_results:
        console.print_stdout(
            '{0:80}.....{1:>10}'.format(address.reference(), test_result.status.value)
        )

    if did_any_fail:
        console.print_stderr(console.red('Tests failed'))
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE

    return Test(exit_code)
async def check_default_tools(
    console: Console,
    real_opts: _Options,
) -> CheckDefaultTools:
    # The real options know about all the registered tools.
    for scope, si in real_opts.options.known_scope_to_info.items():
        if si.subsystem_cls and issubclass(si.subsystem_cls, ExternalTool):
            tool_cls = si.subsystem_cls
            console.print_stdout(f"Checking {console.cyan(tool_cls.name)}:")
            for known_version in tool_cls.default_known_versions:
                ver, plat_val, sha256, length = tool_cls.split_known_version_str(known_version)
                # Note that we don't want to use the real option values here - we want to
                # verify that the *defaults* aren't broken. However the get_request_for() method
                # requires an instance (since it can consult option values, including custom
                # options for specific tools, that we don't know about), so we construct a
                # default one, but we force the --version to the one we're checking (which will
                # typically be the same as the default version, but doesn't have to be, if the
                # tool provides default_known_versions for versions other than default_version).
                args = ("./pants", f"--{scope}-version={ver}")
                blank_opts = await Get(
                    _Options,
                    SessionValues(
                        {
                            OptionsBootstrapper: OptionsBootstrapper(
                                tuple(), ("./pants",), args, _ChainedConfig(tuple()), CliAlias()
                            )
                        }
                    ),
                )
                instance = tool_cls(blank_opts.options.for_scope(scope))
                req = instance.get_request_for(plat_val, sha256, length)
                console.write_stdout(f" version {ver} for {plat_val}... ")
                # TODO: We'd like to run all the requests concurrently, but since we can't catch
                # engine exceptions, we wouldn't have an easy way to output which one failed.
                await Get(DownloadedExternalTool, ExternalToolRequest, req)
                console.print_stdout(console.sigil_succeeded())
    return CheckDefaultTools(exit_code=0)
async def validate(
    console: Console,
    sources_snapshots: SourcesSnapshots,
    validate_options: ValidateOptions,
) -> Validate:
    per_snapshot_rmrs = await MultiGet(
        Get[RegexMatchResults](SourcesSnapshot, source_snapshot)
        for source_snapshot in sources_snapshots
    )
    regex_match_results = list(itertools.chain(*per_snapshot_rmrs))

    detail_level = validate_options.values.detail_level
    regex_match_results = sorted(regex_match_results, key=lambda x: x.path)
    num_matched_all = 0
    num_nonmatched_some = 0
    for rmr in regex_match_results:
        if not rmr.matching and not rmr.nonmatching:
            continue
        if rmr.nonmatching:
            icon = "X"
            num_nonmatched_some += 1
        else:
            icon = "V"
            num_matched_all += 1
        matched_msg = " Matched: {}".format(",".join(rmr.matching)) if rmr.matching else ""
        nonmatched_msg = (
            " Didn't match: {}".format(",".join(rmr.nonmatching)) if rmr.nonmatching else ""
        )
        if detail_level == DetailLevel.all or (
            detail_level == DetailLevel.nonmatching and nonmatched_msg
        ):
            console.print_stdout("{} {}:{}{}".format(icon, rmr.path, matched_msg, nonmatched_msg))

    if detail_level != DetailLevel.none:
        console.print_stdout("\n{} files matched all required patterns.".format(num_matched_all))
        console.print_stdout(
            "{} files failed to match at least one required pattern.".format(num_nonmatched_some)
        )

    if num_nonmatched_some:
        console.print_stderr("Files failed validation.")
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    return Validate(exit_code)
def validate(
    console: Console, hydrated_targets: HydratedTargets, validate_options: Validate.Options
) -> Validate:
    per_tgt_rmrs = yield [Get(RegexMatchResults, HydratedTarget, ht) for ht in hydrated_targets]
    regex_match_results = list(itertools.chain(*per_tgt_rmrs))

    detail_level = validate_options.values.detail_level
    regex_match_results = sorted(regex_match_results, key=lambda x: x.path)
    num_matched_all = 0
    num_nonmatched_some = 0
    for rmr in regex_match_results:
        if not rmr.matching and not rmr.nonmatching:
            continue
        if rmr.nonmatching:
            icon = 'X'
            num_nonmatched_some += 1
        else:
            icon = 'V'
            num_matched_all += 1
        matched_msg = ' Matched: {}'.format(','.join(rmr.matching)) if rmr.matching else ''
        nonmatched_msg = (
            " Didn't match: {}".format(','.join(rmr.nonmatching)) if rmr.nonmatching else ''
        )
        if detail_level == DetailLevel.all or (
            detail_level == DetailLevel.nonmatching and nonmatched_msg
        ):
            console.print_stdout("{} {}:{}{}".format(icon, rmr.path, matched_msg, nonmatched_msg))

    if detail_level != DetailLevel.none:
        console.print_stdout('\n{} files matched all required patterns.'.format(num_matched_all))
        console.print_stdout(
            '{} files failed to match at least one required pattern.'.format(num_nonmatched_some)
        )

    if num_nonmatched_some:
        console.print_stderr('Files failed validation.')
        exit_code = PANTS_FAILED_EXIT_CODE
    else:
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
    yield Validate(exit_code)
def fast_list_and_die_for_testing(console: Console, addresses: Addresses) -> ListAndDieForTesting:
    for address in addresses:
        console.print_stdout(address.spec)
    return ListAndDieForTesting(exit_code=42)
async def update_build_files(
    update_build_files_subsystem: UpdateBuildFilesSubsystem,
    build_file_options: BuildFileOptions,
    console: Console,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> UpdateBuildFilesGoal:
    all_build_files = await Get(
        DigestContents,
        PathGlobs(
            globs=(
                *(os.path.join("**", p) for p in build_file_options.patterns),
                *(f"!{p}" for p in build_file_options.ignores),
            )
        ),
    )

    rewrite_request_classes = []
    for request in union_membership[RewrittenBuildFileRequest]:
        if issubclass(request, (FormatWithBlackRequest, FormatWithYapfRequest)):
            is_chosen_formatter = issubclass(request, FormatWithBlackRequest) ^ (
                update_build_files_subsystem.formatter == Formatter.YAPF
            )
            if update_build_files_subsystem.fmt and is_chosen_formatter:
                rewrite_request_classes.append(request)
            else:
                continue
        if update_build_files_subsystem.fix_safe_deprecations or not issubclass(
            request, DeprecationFixerRequest
        ):
            rewrite_request_classes.append(request)

    build_file_to_lines = {
        build_file.path: tuple(build_file.content.decode("utf-8").splitlines())
        for build_file in all_build_files
    }
    build_file_to_change_descriptions: DefaultDict[str, list[str]] = defaultdict(list)
    for rewrite_request_cls in rewrite_request_classes:
        all_rewritten_files = await MultiGet(
            Get(
                RewrittenBuildFile,
                RewrittenBuildFileRequest,
                rewrite_request_cls(build_file, lines, colors_enabled=console._use_colors),
            )
            for build_file, lines in build_file_to_lines.items()
        )
        for rewritten_file in all_rewritten_files:
            if not rewritten_file.change_descriptions:
                continue
            build_file_to_lines[rewritten_file.path] = rewritten_file.lines
            build_file_to_change_descriptions[rewritten_file.path].extend(
                rewritten_file.change_descriptions
            )

    changed_build_files = sorted(
        build_file
        for build_file, change_descriptions in build_file_to_change_descriptions.items()
        if change_descriptions
    )
    if not changed_build_files:
        msg = "No required changes to BUILD files found."
        if not update_build_files_subsystem.check:
            msg += (
                " However, there may still be deprecations that `update-build-files` doesn't know "
                f"how to fix. See {doc_url('upgrade-tips')} for upgrade tips."
            )
        logger.info(msg)
        return UpdateBuildFilesGoal(exit_code=0)

    if not update_build_files_subsystem.check:
        result = await Get(
            Digest,
            CreateDigest(
                FileContent(
                    build_file,
                    ("\n".join(build_file_to_lines[build_file]) + "\n").encode("utf-8"),
                )
                for build_file in changed_build_files
            ),
        )
        workspace.write_digest(result)

    for build_file in changed_build_files:
        formatted_changes = "\n".join(
            f" - {description}"
            for description in build_file_to_change_descriptions[build_file]
        )
        tense = "Would update" if update_build_files_subsystem.check else "Updated"
        console.print_stdout(f"{tense} {console.blue(build_file)}:\n{formatted_changes}")

    if update_build_files_subsystem.check:
        console.print_stdout(
            f"\nTo fix `update-build-files` failures, run `{bin_name()} update-build-files`."
        )

    return UpdateBuildFilesGoal(exit_code=1 if update_build_files_subsystem.check else 0)
async def a_goal_rule_generator(console: Console) -> Example:
    a = await Get[A](str("a str!"))
    console.print_stdout(str(a))
    return Example(exit_code=0)
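# The functions above are all Pants goal rules: coroutines that receive a Console (plus other
# engine-provided types) and return a Goal subclass carrying an exit code. The sketch below is a
# minimal, hypothetical illustration of how such a rule is typically wired up, assuming the
# modern Pants 2.x plugin API (`GoalSubsystem`, `Goal.subsystem_cls`, `@goal_rule`,
# `collect_rules`); the names `HelloWorldSubsystem`, `HelloWorld`, and `hello_world` are
# illustrative and do not appear in the snippets above.
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.rules import collect_rules, goal_rule


class HelloWorldSubsystem(GoalSubsystem):
    name = "hello-world"
    help = "An example goal that prints a greeting."


class HelloWorld(Goal):
    subsystem_cls = HelloWorldSubsystem


@goal_rule
async def hello_world(console: Console) -> HelloWorld:
    # Goal rules report results via the Console and signal success or failure via exit_code.
    console.print_stdout("Hello, world!")
    return HelloWorld(exit_code=0)


def rules():
    # Returned from the plugin's register.py so the engine can discover the goal rule.
    return collect_rules()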