def fmt(console: Console, targets: HydratedTargets, union_membership: UnionMembership) -> Fmt:
    results = yield [
        Get(FmtResult, TargetWithSources, target.adaptor)
        for target in targets
        # TODO: make TargetAdaptor return a 'sources' field with an empty snapshot instead of
        # raising to remove the hasattr() checks here!
        if union_membership.is_member(TargetWithSources, target.adaptor)
        and hasattr(target.adaptor, "sources")
    ]
    for result in results:
        files_content = yield Get(FilesContent, Digest, result.digest)
        # TODO: This is hacky and inefficient, and should be replaced by using the Workspace type
        # once that is available on master.
        # Blocked on: https://github.com/pantsbuild/pants/pull/8329
        for file_content in files_content:
            with Path(get_buildroot(), file_content.path).open('wb') as f:
                f.write(file_content.content)
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
    # Since we ran an ExecuteRequest, any failure would already have interrupted our flow
    exit_code = 0
    yield Fmt(exit_code)
async def workspace_goal_rule(
    console: Console, workspace: Workspace, digest_request: DigestRequest
) -> WorkspaceGoal:
    snapshot = await Get(Snapshot, CreateDigest, digest_request.create_digest)
    workspace.write_digest(snapshot.digest)
    console.print_stdout(snapshot.files[0], end="")
    return WorkspaceGoal(exit_code=0)
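# A registration sketch, assuming the modern Pants plugin API (which may differ
# from the era of some rules in this collection): goal rules like the one above
# are typically collected into a `rules()` function that a plugin's register.py
# returns.
from pants.engine.rules import collect_rules


def rules():
    return collect_rules()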
def materialize(self, console: Console, workspace: Workspace) -> None:
    workspace.materialize_directory(
        DirectoryToMaterialize(
            self.result_digest,
            path_prefix=str(self.directory_to_materialize_to),
        )
    )
    console.print_stdout(f"\nWrote coverage report to `{self.directory_to_materialize_to}`")
def run_goal_rule(
    self,
    goal: Type[Goal],
    *,
    global_args: Iterable[str] | None = None,
    args: Iterable[str] | None = None,
    env: Mapping[str, str] | None = None,
    env_inherit: set[str] | None = None,
) -> GoalRuleResult:
    merged_args = (*(global_args or []), goal.name, *(args or []))
    self.set_options(merged_args, env=env, env_inherit=env_inherit)
    raw_specs = self.options_bootstrapper.full_options_for_scopes(
        [GlobalOptions.get_scope_info(), goal.subsystem_cls.get_scope_info()]
    ).specs
    specs = SpecsParser(self.build_root).parse_specs(raw_specs)
    stdout, stderr = StringIO(), StringIO()
    console = Console(stdout=stdout, stderr=stderr)
    exit_code = self.scheduler.run_goal_rule(
        goal,
        Params(
            specs,
            console,
            Workspace(self.scheduler),
            InteractiveRunner(self.scheduler),
        ),
    )
    console.flush()
    return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
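# A usage sketch (hypothetical goal class, target address, and test body): how
# a test might drive the helper above and assert on the captured console
# streams. `ListTargets` and the address are assumptions, not defined here.
result = self.run_goal_rule(ListTargets, args=["::"])  # `ListTargets` is an assumption
assert result.exit_code == 0
assert "src/example:example" in result.stdout  # hypothetical target address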
async def fmt(
    console: Console,
    targets: HydratedTargets,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    aggregated_results = await MultiGet(
        Get[AggregatedFmtResults](FormatTarget, target.adaptor)
        for target in targets
        if FormatTarget.is_formattable(target.adaptor, union_membership=union_membership)
    )
    individual_results = [
        result
        for aggregated_result in aggregated_results
        for result in aggregated_result.results
    ]
    if not individual_results:
        return Fmt(exit_code=0)
    # NB: this will fail if there are any conflicting changes, which we want to happen rather
    # than silently having one result override the other. In practice, this should never happen
    # due to our use of an aggregator rule for each distinct language.
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(
            tuple(aggregated_result.combined_digest for aggregated_result in aggregated_results)
        )
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in individual_results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
def execute_rule(self, args=tuple(), env=tuple(), exit_code=0):
    """Executes the @console_rule for this test class.

    :API: public

    Returns the text output of the task.
    """
    # Create an OptionsBootstrapper for these args/env, and a captured Console instance.
    args = self._implicit_args + (self.goal_cls.name,) + tuple(args)
    env = dict(env)
    options_bootstrapper = OptionsBootstrapper.create(args=args, env=env)
    BuildConfigInitializer.get(options_bootstrapper)
    full_options = options_bootstrapper.get_full_options(
        list(self.goal_cls.Options.known_scope_infos())
    )
    stdout, stderr = StringIO(), StringIO()
    console = Console(stdout=stdout, stderr=stderr)

    # Run for the target specs parsed from the args.
    specs = TargetRootsCalculator.parse_specs(full_options.target_specs, self.build_root)
    params = Params(specs, console, options_bootstrapper)
    actual_exit_code = self.scheduler.run_console_rule(self.goal_cls, params)

    # Flush and capture console output.
    console.flush()
    stdout = stdout.getvalue()
    stderr = stderr.getvalue()

    self.assertEqual(
        exit_code,
        actual_exit_code,
        "Exited with {} (expected {}):\nstdout:\n{}\nstderr:\n{}".format(
            actual_exit_code, exit_code, stdout, stderr
        ),
    )
    return stdout
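# A usage sketch (hypothetical flag and expected output): a console-rule test
# built on the helper above. The flag and the asserted line are assumptions.
output = self.execute_rule(args=["--no-colors"], exit_code=0)
self.assertIn("some expected line", output)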
async def count_loc(
    console: Console,
    succinct_code_counter: SuccinctCodeCounter,
    specs_snapshot: SpecsSnapshot,
) -> CountLinesOfCode:
    if not specs_snapshot.snapshot.files:
        return CountLinesOfCode(exit_code=0)
    scc_program = await Get(
        DownloadedExternalTool,
        ExternalToolRequest,
        succinct_code_counter.get_request(Platform.current),
    )
    input_digest = await Get(
        Digest, MergeDigests((scc_program.digest, specs_snapshot.snapshot.digest))
    )
    result = await Get(
        ProcessResult,
        Process(
            argv=(scc_program.exe, *succinct_code_counter.args),
            input_digest=input_digest,
            description=(
                f"Count lines of code for {pluralize(len(specs_snapshot.snapshot.files), 'file')}"
            ),
            level=LogLevel.DEBUG,
        ),
    )
    console.print_stdout(result.stdout.decode())
    return CountLinesOfCode(exit_code=0)
async def create_binary(
    addresses: BuildFileAddresses,
    console: Console,
    workspace: Workspace,
    options: Binary.Options,
    options_bootstrapper: OptionsBootstrapper,
    build_root: BuildRoot,
) -> Binary:
    with Binary.line_oriented(options, console) as print_stdout:
        global_options = options_bootstrapper.bootstrap_options.for_global_scope()
        pants_distdir = Path(global_options.pants_distdir)
        if not is_child_of(pants_distdir, build_root.pathlib_path):
            console.print_stderr(
                f"When set to an absolute path, `--pants-distdir` must still be within the build "
                f"root. You set it to {pants_distdir}. Instead, use a relative path, or an "
                f"absolute path under the build root."
            )
            return Binary(exit_code=1)
        relative_distdir = (
            pants_distdir.relative_to(build_root.pathlib_path)
            if pants_distdir.is_absolute()
            else pants_distdir
        )
        print_stdout(f"Generating binaries in `./{relative_distdir}`")
        binaries = await MultiGet(
            Get[CreatedBinary](Address, address.to_address()) for address in addresses
        )
        merged_digest = await Get[Digest](
            DirectoriesToMerge(tuple(binary.digest for binary in binaries))
        )
        result = workspace.materialize_directory(
            DirectoryToMaterialize(merged_digest, path_prefix=str(relative_distdir))
        )
        for path in result.output_paths:
            print_stdout(f"Wrote {path}")
    return Binary(exit_code=0)
async def run_go_pkg_debug(targets: UnexpandedTargets, console: Console) -> GoPkgDebugGoal:
    first_party_package_targets = [
        tgt for tgt in targets if is_first_party_package_target(tgt)
    ]
    first_party_requests = [
        Get(ResolvedGoPackage, ResolveGoPackageRequest(address=tgt.address))
        for tgt in first_party_package_targets
    ]
    third_party_package_targets = [
        tgt for tgt in targets if is_third_party_package_target(tgt)
    ]
    third_party_requests = [
        Get(ResolvedGoPackage, ResolveExternalGoPackageRequest(address=tgt.address))
        for tgt in third_party_package_targets
    ]
    resolved_packages = await MultiGet([*first_party_requests, *third_party_requests])  # type: ignore
    for package in resolved_packages:
        console.write_stdout(str(package) + "\n")
    return GoPkgDebugGoal(exit_code=0)
def run_goal_rule(
    self,
    goal: Type[Goal],
    *,
    global_args: Optional[Iterable[str]] = None,
    args: Optional[Iterable[str]] = None,
    env: Optional[Mapping[str, str]] = None,
) -> GoalRuleResult:
    options_bootstrapper = create_options_bootstrapper(
        args=(*(global_args or []), goal.name, *(args or [])),
        env=env,
    )
    raw_specs = options_bootstrapper.get_full_options(
        [*GlobalOptions.known_scope_infos(), *goal.subsystem_cls.known_scope_infos()]
    ).specs
    specs = SpecsParser(self.build_root).parse_specs(raw_specs)
    stdout, stderr = StringIO(), StringIO()
    console = Console(stdout=stdout, stderr=stderr)
    exit_code = self.scheduler.run_goal_rule(
        goal,
        Params(
            specs,
            console,
            options_bootstrapper,
            Workspace(self.scheduler),
            InteractiveRunner(self.scheduler),
        ),
    )
    console.flush()
    return GoalRuleResult(exit_code, stdout.getvalue(), stderr.getvalue())
def fmt(console: Console, targets: HydratedTargets) -> Fmt:
    results = yield [
        Get(FmtResult, FmtTarget, target.adaptor)
        for target in targets
        # @union assumes that all targets passed implement the union, so we manually
        # filter the targets we know do; this should probably no-op or log or something
        # configurable for non-matching targets.
        # We also would want to remove the workaround that filters adaptors which have a
        # `sources` attribute.
        # See https://github.com/pantsbuild/pants/issues/4535
        if isinstance(
            target.adaptor,
            (PythonAppAdaptor, PythonTargetAdaptor, PythonTestsAdaptor, PythonBinaryAdaptor),
        )
        and hasattr(target.adaptor, "sources")
    ]
    for result in results:
        files_content = yield Get(FilesContent, Digest, result.digest)
        # TODO: This is hacky and inefficient, and should be replaced by using the Workspace type
        # once that is available on master.
        # Blocked on: https://github.com/pantsbuild/pants/pull/8329
        for file_content in files_content:
            with Path(get_buildroot(), file_content.path).open('wb') as f:
                f.write(file_content.content)
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
    # Since we ran an ExecuteRequest, any failure would already have interrupted our flow
    exit_code = 0
    yield Fmt(exit_code)
def run_console_rules(self, options_bootstrapper, goals, target_roots):
    """Runs @console_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :param list goals: The list of requested goal names as passed on the commandline.
    :param TargetRoots target_roots: The target roots of the request.
    :returns: An exit code.
    """
    subject = target_roots.specs
    console = Console(
        use_colors=options_bootstrapper.bootstrap_options.for_global_scope().colors
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(subject, options_bootstrapper, console, workspace, interactive_runner)
        logger.debug(f'requesting {goal_product} to satisfy execution of `{goal}` goal')
        try:
            exit_code = self.scheduler_session.run_console_rule(goal_product, params)
        finally:
            console.flush()
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
async def tailor(
    tailor_subsystem: TailorSubsystem,
    console: Console,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Tailor:
    putative_target_request_types = union_membership[PutativeTargetsRequest]
    putative_targets_results = await MultiGet(
        Get(PutativeTargets, PutativeTargetsRequest, req_type())
        for req_type in putative_target_request_types
    )
    putative_targets = PutativeTargets.merge(putative_targets_results)
    fixed_names_ptgts = await Get(UniquelyNamedPutativeTargets, PutativeTargets, putative_targets)
    fixed_sources_ptgts = await MultiGet(
        Get(DisjointSourcePutativeTarget, PutativeTarget, ptgt)
        for ptgt in fixed_names_ptgts.putative_targets
    )
    ptgts = [dspt.putative_target for dspt in fixed_sources_ptgts]

    if ptgts:
        edited_build_files = await Get(
            EditedBuildFiles,
            EditBuildFilesRequest(PutativeTargets(ptgts), tailor_subsystem.build_file_indent),
        )
        updated_build_files = set(edited_build_files.updated_paths)
        workspace.write_digest(edited_build_files.digest)
        ptgts_by_build_file = group_by_build_file(ptgts)
        for build_file_path, ptgts in ptgts_by_build_file.items():
            verb = "Updated" if build_file_path in updated_build_files else "Created"
            console.print_stdout(f"{verb} {console.blue(build_file_path)}:")
            for ptgt in ptgts:
                console.print_stdout(
                    f" - Added {console.green(ptgt.type_alias)} target "
                    f"{console.cyan(ptgt.address.spec)}"
                )
    return Tailor(0)
async def coursier_resolve_lockfiles(
    console: Console,
    targets: Targets,
    resolve_subsystem: CoursierResolveSubsystem,
    workspace: Workspace,
) -> CoursierResolve:
    jvm_lockfile_targets = Targets(
        target for target in targets if target.has_field(JvmLockfileSources)
    )
    results = await MultiGet(
        Get(CoursierGenerateLockfileResult, CoursierGenerateLockfileRequest(target=target))
        for target in jvm_lockfile_targets
    )
    # For performance reasons, avoid writing out files to the workspace that haven't changed.
    results_to_write = tuple(result for result in results if result.digest != EMPTY_DIGEST)
    if results_to_write:
        merged_digest = await Get(
            Digest, MergeDigests(result.digest for result in results_to_write)
        )
        workspace.write_digest(merged_digest)
        merged_digest_snapshot = await Get(Snapshot, Digest, merged_digest)
        for path in merged_digest_snapshot.files:
            console.print_stderr(f"Updated lockfile at: {path}")
    return CoursierResolve(exit_code=0)
def run_console_rules(self, options_bootstrapper, goals, target_roots):
    """Runs @console_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :param list goals: The list of requested goal names as passed on the commandline.
    :param TargetRoots target_roots: The target roots of the request.
    :returns: An exit code.
    """
    subject = target_roots.specs
    console = Console(
        use_colors=options_bootstrapper.bootstrap_options.for_global_scope().colors
    )
    for goal in goals:
        goal_product = self.goal_map[goal]
        params = Params(subject, options_bootstrapper, console)
        logger.debug('requesting {} to satisfy execution of `{}` goal'.format(goal_product, goal))
        try:
            exit_code = self.scheduler_session.run_console_rule(goal_product, params)
        finally:
            console.flush()
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
async def export(
    console: Console,
    targets: Targets,
    export_subsystem: ExportSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast(
        "Iterable[type[ExportableDataRequest]]", union_membership.get(ExportableDataRequest)
    )
    requests = tuple(request_type(targets) for request_type in request_types)
    exportables = await MultiGet(
        Get(ExportableData, ExportableDataRequest, request) for request in requests
    )
    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(exp.digest, exp.reldir)) for exp in exportables
    )
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    for exp in exportables:
        for symlink in exp.symlinks:
            # Note that if symlink.source_path is an abspath, join returns it unchanged.
            source_abspath = os.path.join(build_root.path, symlink.source_path)
            link_abspath = os.path.abspath(
                os.path.join(output_dir, exp.reldir, symlink.link_rel_path)
            )
            absolute_symlink(source_abspath, link_abspath)
        console.print_stdout(
            f"Wrote {exp.description} to {os.path.join(output_dir, exp.reldir)}"
        )
    return Export(exit_code=0)
async def fmt(
    console: Console,
    targets: HydratedTargets,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    results = await MultiGet(
        Get[FmtResult](TargetWithSources, target.adaptor)
        for target in targets
        if TargetWithSources.is_formattable_and_lintable(
            target.adaptor, union_membership=union_membership
        )
    )
    if not results:
        return Fmt(exit_code=0)
    # NB: this will fail if there are any conflicting changes, which we want to happen rather
    # than silently having one result override the other.
    # TODO(#8722): how should we handle multiple auto-formatters touching the same files?
    merged_formatted_digest = await Get[Digest](
        DirectoriesToMerge(tuple(result.digest for result in results))
    )
    workspace.materialize_directory(DirectoryToMaterialize(merged_formatted_digest))
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleExecuteProcessRequest, we assume that there were no failures.
    return Fmt(exit_code=0)
async def workspace_console_rule(
    console: Console, workspace: Workspace, msg: MessageToConsoleRule
) -> MockWorkspaceGoal:
    digest = await Get(Digest, InputFilesContent, msg.input_files_content)
    output = workspace.materialize_directory(DirectoryToMaterialize(digest))
    console.print_stdout(output.output_paths[0], end='')
    return MockWorkspaceGoal(exit_code=0)
async def run_setup_pys(
    targets: HydratedTargets,
    options: SetupPyOptions,
    console: Console,
    provenance_map: AddressProvenanceMap,
    distdir: DistDir,
    workspace: Workspace,
) -> SetupPy:
    """Run setup.py commands on all exported targets addressed."""
    args = tuple(options.values.args)
    validate_args(args)

    # Get all exported targets, ignoring any non-exported targets that happened to be
    # globbed over, but erroring on any explicitly-requested non-exported targets.
    exported_targets: List[ExportedTarget] = []
    explicit_nonexported_targets: List[HydratedTarget] = []
    for hydrated_target in targets:
        if _is_exported(hydrated_target):
            exported_targets.append(ExportedTarget(hydrated_target))
        elif provenance_map.is_single_address(hydrated_target.address):
            explicit_nonexported_targets.append(hydrated_target)
    if explicit_nonexported_targets:
        raise TargetNotExported(
            'Cannot run setup.py on these targets, because they have no `provides=` clause: '
            f'{", ".join(so.address.reference() for so in explicit_nonexported_targets)}'
        )

    if options.values.transitive:
        # Expand out to all owners of the entire dep closure.
        tht = await Get[TransitiveHydratedTargets](
            BuildFileAddresses([et.hydrated_target.address for et in exported_targets])
        )
        owners = await MultiGet(
            Get[ExportedTarget](OwnedDependency(ht))
            for ht in tht.closure
            if is_ownable_target(ht)
        )
        exported_targets = list(set(owners))

    chroots = await MultiGet(
        Get[SetupPyChroot](SetupPyChrootRequest(target)) for target in exported_targets
    )

    if args:
        setup_py_results = await MultiGet(
            Get[RunSetupPyResult](RunSetupPyRequest(exported_target, chroot, tuple(args)))
            for exported_target, chroot in zip(exported_targets, chroots)
        )
        for exported_target, setup_py_result in zip(exported_targets, setup_py_results):
            addr = exported_target.hydrated_target.address.reference()
            console.print_stderr(f'Writing contents of dist dir for {addr} to {distdir.relpath}')
            workspace.materialize_directory(
                DirectoryToMaterialize(setup_py_result.output, path_prefix=str(distdir.relpath))
            )
    else:
        # Just dump the chroot.
        for exported_target, chroot in zip(exported_targets, chroots):
            addr = exported_target.hydrated_target.address.reference()
            provides = exported_target.hydrated_target.adaptor.provides
            setup_py_dir = distdir.relpath / f'{provides.name}-{provides.version}'
            console.print_stderr(f'Writing setup.py chroot for {addr} to {setup_py_dir}')
            workspace.materialize_directory(
                DirectoryToMaterialize(chroot.digest, path_prefix=str(setup_py_dir))
            )

    return SetupPy(0)
def materialize(self, console: Console, workspace: Workspace) -> None:
    if not self.results_file:
        return
    output_path = self.results_file.output_path
    workspace.write_digest(
        self.results_file.digest, path_prefix=output_path.parent.as_posix()
    )
    console.print_stdout(f"Wrote {self.linter_name} report to: {output_path.as_posix()}")
def workspace_console_rule(
    console: Console, workspace: Workspace, msg: MessageToConsoleRule
) -> MockWorkspaceGoal:
    digest = yield Get(Digest, InputFilesContent, msg.input_files_content)
    output = workspace.materialize_directories(
        (DirectoryToMaterialize(path=msg.tmp_dir, directory_digest=digest),)
    )
    output_path = output.dependencies[0].output_paths[0]
    console.print_stdout(str(Path(msg.tmp_dir, output_path)), end='')
    yield MockWorkspaceGoal(exit_code=0)
def materialize(self, console: Console, workspace: Workspace) -> Optional[PurePath]:
    workspace.write_digest(
        self.result_snapshot.digest, path_prefix=str(self.directory_to_materialize_to)
    )
    console.print_stderr(
        f"\nWrote {self.report_type} coverage report to `{self.directory_to_materialize_to}`"
    )
    return self.report_file
def format_for_cli(self, console: Console) -> str:
    field_alias = console.magenta(f"{self.alias}")
    indent = " "
    required_or_default = "required" if self.required else f"default: {self.default}"
    type_info = console.cyan(f"{indent}type: {self.type_hint}, {required_or_default}")
    lines = [field_alias, type_info]
    if self.description:
        lines.extend(f"{indent}{line}" for line in textwrap.wrap(self.description, 80))
    return "\n".join(f"{indent}{line}" for line in lines)
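# An output sketch (hypothetical field values): with alias="sources",
# required=False, default=None, and a one-line description, the method above
# renders roughly:
#
#  sources
#   type: Iterable[str] | None, default: None
#   A list of file globs for this target.
#
# The alias line gets one level of `indent` from the final join; the type and
# description lines get two, since they are each indented once before the join.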
def run_goal_rules(
    self,
    *,
    options_bootstrapper: OptionsBootstrapper,
    union_membership: UnionMembership,
    options: Options,
    goals: Iterable[str],
    specs: Specs,
):
    """Runs @goal_rules sequentially and interactively by requesting their implicit Goal
    products.

    For retryable failures, raises scheduler.ExecutionError.

    :returns: An exit code.
    """
    global_options = options.for_global_scope()
    console = Console(
        use_colors=global_options.colors,
        session=self.scheduler_session if global_options.get("v2_ui") else None,
    )
    workspace = Workspace(self.scheduler_session)
    interactive_runner = InteractiveRunner(self.scheduler_session)

    for goal in goals:
        goal_product = self.goal_map[goal]
        # NB: We no-op for goals that have no V2 implementation because no relevant backends
        # are registered. This allows us to safely set `--v1 --v2`, even if no V2 backends are
        # registered. Once V1 is removed, we might want to reconsider the behavior to instead
        # warn or error when trying to run something like `./pants run` without any backends
        # registered.
        is_implemented = union_membership.has_members_for_all(
            goal_product.subsystem_cls.required_union_implementations
        )
        if not is_implemented:
            continue
        params = Params(
            specs.provided_specs,
            options_bootstrapper,
            console,
            workspace,
            interactive_runner,
        )
        logger.debug(f"requesting {goal_product} to satisfy execution of `{goal}` goal")
        try:
            exit_code = self.scheduler_session.run_goal_rule(goal_product, params)
        finally:
            console.flush()
        if exit_code != PANTS_SUCCEEDED_EXIT_CODE:
            return exit_code
    return PANTS_SUCCEEDED_EXIT_CODE
async def list_targets(
    console: Console, list_options: ListOptions, addresses: Addresses
) -> List:
    provides = list_options.values.provides
    provides_columns = list_options.values.provides_columns
    documented = list_options.values.documented
    collection: Union[HydratedTargets, Addresses]
    if provides or documented:
        # To get provides clauses or documentation, we need hydrated targets.
        collection = await Get[HydratedTargets](Addresses, addresses)
        if provides:
            extractors = dict(
                address=lambda adaptor: adaptor.address.spec,
                artifact_id=lambda adaptor: str(adaptor.provides),
                repo_name=lambda adaptor: adaptor.provides.repo.name,
                repo_url=lambda adaptor: adaptor.provides.repo.url,
                push_db_basedir=lambda adaptor: adaptor.provides.repo.push_db_basedir,
            )

            def print_provides(col_extractors, target):
                if getattr(target.adaptor, "provides", None):
                    return " ".join(extractor(target.adaptor) for extractor in col_extractors)

            try:
                column_extractors = [extractors[col] for col in provides_columns.split(",")]
            except KeyError:
                raise Exception(
                    "Invalid columns specified: {0}. Valid columns are: address, artifact_id, "
                    "repo_name, repo_url, push_db_basedir.".format(provides_columns)
                )

            print_fn = lambda target: print_provides(column_extractors, target)
        else:

            def print_documented(target):
                description = getattr(target.adaptor, "description", None)
                if description:
                    return "{0}\n {1}".format(
                        target.adaptor.address.spec, "\n ".join(description.strip().split("\n"))
                    )

            print_fn = print_documented
    else:
        # Otherwise, we can use only addresses.
        collection = addresses
        print_fn = lambda address: address.spec

    with list_options.line_oriented(console) as print_stdout:
        if not collection:
            console.print_stderr("WARNING: No targets were matched in goal `{}`.".format("list"))
        for item in collection:
            result = print_fn(item)
            if result:
                print_stdout(result)

    return List(exit_code=0)
async def lint(
    console: Console,
    targets_with_origins: TargetsWithOrigins,
    options: LintOptions,
    union_membership: UnionMembership,
) -> Lint:
    config_collection_types: Iterable[Type[LinterConfigurations]] = union_membership.union_rules[
        LinterConfigurations
    ]
    config_collections: Iterable[LinterConfigurations] = tuple(
        config_collection_type(
            config_collection_type.config_type.create(target_with_origin)
            for target_with_origin in targets_with_origins
            if config_collection_type.config_type.is_valid(target_with_origin.target)
        )
        for config_collection_type in config_collection_types
    )
    config_collections_with_sources: Iterable[ConfigurationsWithSources] = await MultiGet(
        Get[ConfigurationsWithSources](ConfigurationsWithSourcesRequest(config_collection))
        for config_collection in config_collections
    )
    # NB: We must convert the generic ConfigurationsWithSources objects back into their
    # corresponding LinterConfigurations, e.g. back to IsortConfigurations, in order for the
    # union rule to work.
    valid_config_collections: Iterable[LinterConfigurations] = tuple(
        config_collection_cls(config_collection)
        for config_collection_cls, config_collection in zip(
            config_collection_types, config_collections_with_sources
        )
        if config_collection
    )
    if options.values.per_target_caching:
        results = await MultiGet(
            Get[LintResult](LinterConfigurations, config_collection.__class__([config]))
            for config_collection in valid_config_collections
            for config in config_collection
        )
    else:
        results = await MultiGet(
            Get[LintResult](LinterConfigurations, config_collection)
            for config_collection in valid_config_collections
        )
    if not results:
        return Lint(exit_code=0)

    exit_code = 0
    for result in results:
        if result.stdout:
            console.print_stdout(result.stdout)
        if result.stderr:
            console.print_stderr(result.stderr)
        if result.exit_code != 0:
            exit_code = result.exit_code
    return Lint(exit_code)
async def run_repl(
    console: Console,
    workspace: Workspace,
    repl_subsystem: ReplSubsystem,
    all_specified_addresses: Addresses,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
    complete_env: CompleteEnvironment,
) -> Repl:
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_specified_addresses)
    )

    # TODO: When we support multiple languages, detect the default repl to use based
    # on the targets. For now we default to the python repl.
    repl_shell_name = repl_subsystem.shell or "python"
    implementations = {impl.name: impl for impl in union_membership[ReplImplementation]}
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(implementations.keys())
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}"
        )
        return Repl(-1)

    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:
        repl_impl = repl_implementation_cls(
            targets=Targets(transitive_targets.closure), chroot=tmpdir
        )
        request = await Get(ReplRequest, ReplImplementation, repl_impl)
        workspace.write_digest(
            request.digest,
            path_prefix=PurePath(tmpdir).relative_to(build_root.path).as_posix(),
            # We don't want to influence whether the InteractiveProcess is able to restart.
            # Because we're writing into a temp directory, we can safely mark this
            # side_effecting=False.
            side_effecting=False,
        )
        env = {**complete_env, **request.extra_env}
        result = await Effect(
            InteractiveProcessResult,
            InteractiveProcess(
                argv=request.args,
                env=env,
                run_in_workspace=True,
                restartable=repl_subsystem.restartable,
            ),
        )
    return Repl(result.exit_code)
async def run_repl(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    options: ReplOptions,
    transitive_targets: TransitiveTargets,
    build_root: BuildRoot,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Repl:
    # We can guarantee that we will only ever enter this `goal_rule` if there exists an
    # implementer of the `ReplImplementation` union because `LegacyGraphSession.run_goal_rules()`
    # will not execute this rule's body if there are no implementations registered.
    membership: Iterable[Type[ReplImplementation]] = union_membership.union_rules[
        ReplImplementation
    ]
    implementations = {impl.name: impl for impl in membership}

    default_repl = "python"
    repl_shell_name = cast(str, options.values.shell or default_repl)
    repl_implementation_cls = implementations.get(repl_shell_name)
    if repl_implementation_cls is None:
        available = sorted(set(implementations.keys()))
        console.print_stderr(
            f"{repr(repl_shell_name)} is not a registered REPL. Available REPLs (which may "
            f"be specified through the option `--repl-shell`): {available}"
        )
        return Repl(-1)

    repl_impl = repl_implementation_cls(
        targets=Targets(
            tgt for tgt in transitive_targets.closure if repl_implementation_cls.is_valid(tgt)
        )
    )
    repl_binary = await Get[ReplBinary](ReplImplementation, repl_impl)

    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=False) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(repl_binary.digest, path_prefix=path_relative_to_build_root)
        )
        full_path = PurePath(tmpdir, repl_binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path,),
            run_in_workspace=True,
        )

    result = runner.run_local_interactive_process(run_request)
    return Repl(result.process_exit_code)
def run(
    console: Console, workspace: Workspace, runner: InteractiveRunner, bfa: BuildFileAddress
) -> Run:
    target = bfa.to_address()
    binary = yield Get(CreatedBinary, Address, target)
    with temporary_dir(cleanup=True) as tmpdir:
        dirs_to_materialize = (
            DirectoryToMaterialize(path=str(tmpdir), directory_digest=binary.digest),
        )
        workspace.materialize_directories(dirs_to_materialize)

        console.write_stdout(f"Running target: {target}\n")
        full_path = str(Path(tmpdir, binary.binary_name))
        run_request = InteractiveProcessRequest(
            argv=[full_path],
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{target} ran successfully.\n")
            else:
                console.write_stderr(f"{target} failed with code {result.process_exit_code}!\n")
        except Exception as e:
            console.write_stderr(f"Exception when attempting to run {target}: {e}\n")
            exit_code = -1

    yield Run(exit_code)
async def run(
    run_subsystem: RunSubsystem,
    global_options: GlobalOptions,
    console: Console,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    build_root: BuildRoot,
    complete_env: CompleteEnvironment,
) -> Run:
    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            RunFieldSet,
            goal_description="the `run` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            expect_single_field_set=True,
        ),
    )
    field_set = targets_to_valid_field_sets.field_sets[0]
    request = await Get(RunRequest, RunFieldSet, field_set)

    with temporary_dir(root_dir=global_options.options.pants_workdir, cleanup=True) as tmpdir:
        workspace.write_digest(
            request.digest,
            path_prefix=PurePath(tmpdir).relative_to(build_root.path).as_posix(),
        )
        args = (arg.format(chroot=tmpdir) for arg in request.args)
        env = {
            **complete_env,
            **{k: v.format(chroot=tmpdir) for k, v in request.extra_env.items()},
        }
        try:
            result = interactive_runner.run(
                InteractiveProcess(
                    argv=(*args, *run_subsystem.args),
                    env=env,
                    run_in_workspace=True,
                )
            )
            exit_code = result.exit_code
        except Exception as e:
            console.print_stderr(f"Exception when attempting to run {field_set.address}: {e!r}")
            exit_code = -1

    return Run(exit_code)
async def dump_java_source_analysis(targets: Targets, console: Console) -> DumpJavaSourceAnalysis:
    java_source_field_sets = [
        JavaFieldSet.create(tgt) for tgt in targets if JavaFieldSet.is_applicable(tgt)
    ]
    java_source_analysis = await MultiGet(
        Get(JavaSourceDependencyAnalysis, SourceFilesRequest([fs.sources]))
        for fs in java_source_field_sets
    )
    java_source_analysis_json = [
        {"address": str(fs.address), **analysis.to_debug_json_dict()}
        for (fs, analysis) in zip(java_source_field_sets, java_source_analysis)
    ]
    console.print_stdout(json.dumps(java_source_analysis_json))
    return DumpJavaSourceAnalysis(exit_code=0)
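# A consumption sketch (hypothetical goal name; the goal registration is not
# shown above): since the rule prints a single JSON document to stdout, an
# external script can parse the output directly.
import json
import subprocess

raw = subprocess.run(
    ["./pants", "java-dump-source-analysis", "::"],  # goal name is an assumption
    capture_output=True,
    text=True,
    check=True,
).stdout
for entry in json.loads(raw):
    print(entry["address"])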