async def determine_shunit2_shell(request: Shunit2RunnerRequest, shell_setup: ShellSetup) -> Shunit2Runner:
    """Resolve which shell binary will run shunit2 for a test target.

    The shell is taken from the target's explicit `shell` field when set; otherwise it is
    inferred from the test file's shebang line via `Shunit2Shell.parse_shebang`.

    :raises ShellNotConfigured: if neither the field nor a parseable shebang is present.
    :raises BinaryNotFoundError: if the resolved shell cannot be found on the search path.
    """
    if request.shell_field.value is not None:
        tgt_shell = Shunit2Shell(request.shell_field.value)
    else:
        parse_result = Shunit2Shell.parse_shebang(request.test_file_content.content)
        if parse_result is None:
            # Fixed: a shebang is `#!...`, not `!#...`, and a space was missing before "(run".
            raise ShellNotConfigured(
                f"Could not determine which shell to use to run shunit2 on {request.address}.\n\n"
                f"Please either specify the `{Shunit2ShellField.alias}` field or add a "
                f"shebang to {request.test_file_content.path} with one of the supported shells in "
                f"the format `#!/path/to/shell` or `#!/path/to/env shell` "
                f"(run `./pants help {Shunit2Tests.alias}` for valid shells)."
            )
        tgt_shell = parse_result

    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    path_request = BinaryPathRequest(
        binary_name=tgt_shell.name,
        search_path=shell_setup.executable_search_path(env),
        test=tgt_shell.binary_path_test,
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, path_request)
    first_path = paths.first_path
    if not first_path:
        # Use `from_request` for the detailed not-found message, consistent with the other
        # rules in this file (e.g. find_tar, find_docker).
        raise BinaryNotFoundError.from_request(
            path_request, rationale=f"run shunit2 on {request.address}"
        )
    return Shunit2Runner(tgt_shell, first_path)
async def find_zip() -> ZipBinary:
    """Locate a `zip` binary on the standard search path.

    :raises BinaryNotFoundError: if no `zip` executable can be found.
    """
    request = BinaryPathRequest(
        binary_name="zip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, request)
    first_path = paths.first_path
    if not first_path:
        # Consistency fix: use the `from_request` classmethod like the sibling rules
        # (find_tar, find_docker) rather than passing the request to the constructor,
        # which elsewhere in this file takes a plain message (see find_python).
        raise BinaryNotFoundError.from_request(request, rationale="create `.zip` archives")
    return ZipBinary(first_path.path, first_path.fingerprint)
async def find_unzip() -> UnzipBinary:
    """Locate an `unzip` binary on the standard search path.

    :raises BinaryNotFoundError: if no `unzip` executable can be found.
    """
    request = BinaryPathRequest(
        binary_name="unzip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, request)
    first_path = paths.first_path
    if not first_path:
        # Consistency fix: use the `from_request` classmethod like the sibling rules
        # (find_tar, find_docker) rather than passing the request to the constructor,
        # which elsewhere in this file takes a plain message (see find_python).
        raise BinaryNotFoundError.from_request(
            request, rationale="download the tools Pants needs to run"
        )
    return UnzipBinary(first_path.path, first_path.fingerprint)
async def find_tar() -> TarBinary:
    """Locate a `tar` binary on the standard search path.

    :raises BinaryNotFoundError: if no `tar` executable can be found.
    """
    path_request = BinaryPathRequest(
        binary_name="tar",
        search_path=SEARCH_PATHS,
        test=BinaryPathTest(args=["--version"]),
    )
    discovered = await Get(BinaryPaths, BinaryPathRequest, path_request)
    tar = discovered.first_path
    if not tar:
        raise BinaryNotFoundError.from_request(
            path_request, rationale="download the tools Pants needs to run"
        )
    return TarBinary(tar.path, tar.fingerprint)
async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:
    """Locate a `docker` client binary using the request's search path.

    :raises BinaryNotFoundError: if no `docker` executable can be found.
    """
    path_request = BinaryPathRequest(
        binary_name="docker",
        search_path=docker_request.search_path,
        test=BinaryPathTest(args=["-v"]),
    )
    discovered = await Get(BinaryPaths, BinaryPathRequest, path_request)
    docker = discovered.first_path
    if not docker:
        raise BinaryNotFoundError.from_request(
            path_request, rationale="interact with the docker daemon"
        )
    return DockerBinary(docker.path, docker.fingerprint)
async def find_python(python_bootstrap: PythonBootstrap) -> PythonBinary:
    """Locate a Python 3.6+ interpreter for executing rule code.

    Each candidate interpreter name from the bootstrap options is probed with a small
    script that both enforces the minimum version and emits a fingerprint.

    :raises BinaryNotFoundError: if no suitable interpreter is found on the search path.
    """
    # PEX files are compatible with bootstrapping via Python 2.7 or Python 3.5+, but we select 3.6+
    # for maximum compatibility with internal scripts.
    interpreter_search_paths = python_bootstrap.interpreter_search_paths()

    # The probe below must remain compatible with Python 3.6+. It hashes the underlying
    # interpreter executable so that changes to the real interpreter are detected even when it
    # is reached through a Pyenv shim script on the search path: printing version_info alone
    # would miss supported-abi changes (e.g. a pyenv switch from a py27mu interpreter to a
    # py27m interpreter). 8192 is a common OS buffer size and an even multiple of the hash
    # block size, which keeps reads and fingerprint updates efficient.
    fingerprint_script = dedent(
        """\
        import sys

        major, minor = sys.version_info[:2]
        if not (major == 3 and minor >= 6):
            sys.exit(1)

        import hashlib
        hasher = hashlib.sha256()
        with open(sys.executable, "rb") as fp:
            for chunk in iter(lambda: fp.read(8192), b""):
                hasher.update(chunk)
        sys.stdout.write(hasher.hexdigest())
        """
    )

    all_python_binary_paths = await MultiGet(
        Get(
            BinaryPaths,
            BinaryPathRequest(
                search_path=interpreter_search_paths,
                binary_name=interpreter_name,
                check_file_entries=True,
                test=BinaryPathTest(
                    args=["-c", fingerprint_script],
                    # The probe already writes a usable fingerprint to stdout.
                    fingerprint_stdout=False,
                ),
            ),
        )
        for interpreter_name in python_bootstrap.interpreter_names
    )

    # Return the first interpreter that passed the probe, in the configured name order.
    for candidate in all_python_binary_paths:
        found = candidate.first_path
        if found:
            return PythonBinary(path=found.path, fingerprint=found.fingerprint)

    raise BinaryNotFoundError(
        "Was not able to locate a Python interpreter to execute rule code.\n"
        "Please ensure that Python is available in one of the locations identified by "
        "`[python-bootstrap] search_path`, which currently expands to:\n"
        f" {interpreter_search_paths}"
    )
async def run_shell_command(
    request: GenerateFilesFromShellCommandRequest,
    shell_setup: ShellSetup,
    bash: BashBinary,
) -> GeneratedSources:
    """Execute a `shell_command` target's command in a sandbox and capture its outputs.

    The command runs under bash in the target's spec_path directory, with the declared
    `tools` (plus `mkdir` and `ln`) resolved to absolute paths and symlinked into a
    `.bin` directory that becomes the process's PATH. Files/directories named in
    `outputs` are captured and returned as generated sources, re-prefixed with the
    working directory.

    :raises ValueError: if the target is missing `command` or `tools`.
    :raises BinaryNotFoundError: if any requested tool cannot be found on the search path.
    """
    shell_command = request.protocol_target
    working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    tools = shell_command[ShellCommandToolsField].value
    outputs = shell_command[ShellCommandOutputsField].value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `shell_command` target {shell_command.address}."
        )

    if not tools:
        raise ValueError(
            f"Must provide any `tools` used by the `shell_command` {shell_command.address}."
        )

    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_path = shell_setup.executable_search_path(env)
    # `mkdir` and `ln` are always added because the symlink-setup script below uses them.
    # Bash builtins need no binary on disk, so they are excluded from the lookup.
    tool_requests = [
        BinaryPathRequest(
            binary_name=tool,
            search_path=search_path,
        )
        for tool in {*tools, *["mkdir", "ln"]}
        if tool not in BASH_BUILTIN_COMMANDS
    ]
    # NOTE(review): the generator variable `request` shadows the rule's `request` parameter,
    # which is not used again afterwards — confusing but harmless here.
    tool_paths = await MultiGet(
        Get(BinaryPaths, BinaryPathRequest, request) for request in tool_requests
    )

    # TOOLS lists the tool names; the setup script expands `${!tool}` (indirect expansion)
    # to look up each tool's absolute path from the per-tool env vars assigned below.
    command_env = {
        "TOOLS": " ".join(shlex.quote(tool.binary_name) for tool in tool_requests),
    }

    for binary, tool_request in zip(tool_paths, tool_requests):
        if binary.first_path:
            # NOTE(review): the raw binary name is used as the env var key; names that are
            # not valid shell identifiers (e.g. containing `-`) would break the indirect
            # expansion in the setup script — confirm tools are restricted accordingly.
            command_env[tool_request.binary_name] = binary.first_path.path
        else:
            raise BinaryNotFoundError.from_request(
                tool_request,
                rationale=f"execute experimental_shell_command {shell_command.address}",
            )

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    # Gather the (codegen-enabled) sources of all transitive dependencies as the sandbox input.
    sources = await Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[tgt.get(Sources) for tgt in transitive_targets.dependencies],
            for_sources_types=(
                Sources,
                FilesSources,
            ),
            enable_codegen=True,
        ),
    )

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    # Ensure the working directory exists in the input digest, creating it when no
    # dependency source lives there.
    if working_directory in sources.snapshot.dirs:
        input_digest = sources.snapshot.digest
    else:
        work_dir = await Get(Digest, CreateDigest([Directory(working_directory)]))
        input_digest = await Get(Digest, MergeDigests([sources.snapshot.digest, work_dir]))

    # Setup bin_relpath dir with symlinks to all requested tools, so that we can use PATH.
    # PATH is *replaced* with the symlink dir, so only the declared tools (and bash
    # builtins) are resolvable by the command.
    bin_relpath = ".bin"
    setup_tool_symlinks_script = ";".join(
        dedent(
            f"""\
            $mkdir -p {bin_relpath}
            for tool in $TOOLS; do $ln -s ${{!tool}} {bin_relpath}/; done
            export PATH="$PWD/{bin_relpath}"
            """
        ).split("\n")
    )

    result = await Get(
        ProcessResult,
        Process(
            argv=(bash.path, "-c", setup_tool_symlinks_script + command),
            description=f"Running experimental_shell_command {shell_command.address}",
            env=command_env,
            input_digest=input_digest,
            output_directories=output_directories,
            output_files=output_files,
            working_directory=working_directory,
        ),
    )

    # Optionally surface the command's stdout/stderr in the Pants log.
    if shell_command[ShellCommandLogOutputField].value:
        if result.stdout:
            logger.info(result.stdout.decode())
        if result.stderr:
            logger.warning(result.stderr.decode())

    # Captured outputs are relative to the working directory; re-prefix them so the
    # generated sources land next to the target.
    output = await Get(Snapshot, AddPrefix(result.output_digest, working_directory))
    return GeneratedSources(output)
async def setup_goroot(golang_subsystem: GolangSubsystem) -> GoRoot:
    """Discover a `go` binary matching `[golang].expected_version` and return its GOROOT.

    All `go` binaries on `[golang].go_search_paths` are probed in parallel with
    `go version`; the first one whose major.minor version matches the expected version
    has its GOROOT queried via `go env GOROOT`.

    :raises BinaryNotFoundError: if no `go` binary is found, or none has the expected version.
    :raises AssertionError: if `go version` output cannot be parsed (treated as a Pants bug).
    """
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_paths = golang_subsystem.go_search_paths(env)
    all_go_binary_paths = await Get(
        BinaryPaths,
        BinaryPathRequest(
            search_path=search_paths,
            binary_name="go",
            test=BinaryPathTest(["version"]),
        ),
    )
    if not all_go_binary_paths.paths:
        raise BinaryNotFoundError(
            "Cannot find any `go` binaries using the option "
            f"`[golang].go_search_paths`: {list(search_paths)}\n\n"
            "To fix, please install Go (https://golang.org/doc/install) with the version "
            f"{golang_subsystem.expected_version} (set by `[golang].expected_version`) and ensure "
            "that it is discoverable via `[golang].go_search_paths`."
        )

    # `go env GOVERSION` does not work in earlier Go versions (like 1.15), so we must run
    # `go version` and `go env GOROOT` to calculate both the version and GOROOT.
    version_results = await MultiGet(
        Get(
            ProcessResult,
            Process(
                (binary_path.path, "version"),
                description=f"Determine Go version for {binary_path.path}",
                level=LogLevel.DEBUG,
                # Re-check per Pants-daemon restart: the installed Go may change between runs.
                cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
            ),
        )
        for binary_path in all_go_binary_paths.paths
    )

    invalid_versions = []
    for binary_path, version_result in zip(all_go_binary_paths.paths, version_results):
        try:
            # `go version` output looks like "go version go1.17.1 linux/amd64"; the third
            # word carries the version, prefixed with "go".
            _raw_version = version_result.stdout.decode("utf-8").split()[2]  # e.g. go1.17 or go1.17.1
            _version_components = _raw_version[2:].split(".")  # e.g. [1, 17] or [1, 17, 1]
            version = f"{_version_components[0]}.{_version_components[1]}"
        except IndexError:
            raise AssertionError(
                f"Failed to parse `go version` output for {binary_path}. Please open a bug at "
                f"https://github.com/pantsbuild/pants/issues/new/choose with the below data."
                f"\n\n"
                f"{version_result}"
            )

        if version == golang_subsystem.expected_version:
            # Matching version found: ask this binary for its GOROOT and return immediately.
            env_result = await Get(
                ProcessResult,
                Process(
                    (binary_path.path, "env", "GOROOT"),
                    description=f"Determine Go version and GOROOT for {binary_path.path}",
                    level=LogLevel.DEBUG,
                    cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
                    # GOPATH is irrelevant to the GOROOT query but must be set to something.
                    env={"GOPATH": "/does/not/matter"},
                ),
            )
            goroot = env_result.stdout.decode("utf-8").strip()
            return GoRoot(goroot)

        logger.debug(
            f"Go binary at {binary_path.path} has version {version}, but this "
            f"project is using {golang_subsystem.expected_version} "
            "(set by `[golang].expected_version`). Ignoring."
        )
        invalid_versions.append((binary_path.path, version))

    # No binary matched: report every candidate and its (wrong) version.
    invalid_versions_str = bullet_list(
        f"{path}: {version}" for path, version in sorted(invalid_versions)
    )
    raise BinaryNotFoundError(
        "Cannot find a `go` binary with the expected version of "
        f"{golang_subsystem.expected_version} (set by `[golang].expected_version`).\n\n"
        f"Found these `go` binaries, but they had different versions:\n\n"
        f"{invalid_versions_str}\n\n"
        "To fix, please install the expected version (https://golang.org/doc/install) and ensure "
        "that it is discoverable via the option `[golang].go_search_paths`, or change "
        "`[golang].expected_version`."
    )
async def prepare_shell_command_process(
    request: ShellCommandProcessRequest, shell_setup: ShellSetup, bash: BashBinary
) -> Process:
    """Build the `Process` that runs a shell-command target's `command` under bash.

    Two modes, distinguished by whether the target has `ShellCommandRunWorkdirField`:

    * interactive (run workdir field present): the command runs from the configured
      workdir with a `CHROOT` placeholder env var; no tool lookup is performed.
    * sandboxed: the declared `tools` (plus `mkdir`/`ln`) are resolved to absolute
      paths and a boot script symlinks them into a `.bin` dir that becomes PATH.

    The input digest merges dependency sources, the (possibly created) working
    directory, and all packages built from dependencies.

    :raises ValueError: if `command` is missing, or `tools` is missing in sandboxed mode.
    :raises BinaryNotFoundError: if a requested tool cannot be found on the search path.
    """
    shell_command = request.target
    interactive = shell_command.has_field(ShellCommandRunWorkdirField)
    if interactive:
        working_directory = shell_command[ShellCommandRunWorkdirField].value or ""
    else:
        working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    timeout = shell_command.get(ShellCommandTimeoutField).value
    tools = shell_command.get(ShellCommandToolsField, default_raw_value=()).value
    outputs = shell_command.get(ShellCommandOutputsField).value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `{shell_command.alias}` target {shell_command.address}."
        )

    if interactive:
        # Interactive runs only expose the sandbox root via the {chroot} placeholder.
        command_env = {
            "CHROOT": "{chroot}",
        }
    else:
        if not tools:
            raise ValueError(
                f"Must provide any `tools` used by the `{shell_command.alias}` {shell_command.address}."
            )

        env = await Get(Environment, EnvironmentRequest(["PATH"]))
        search_path = shell_setup.executable_search_path(env)
        # `mkdir` and `ln` are always needed by the boot script below; bash builtins need
        # no binary on disk, so they are excluded from the lookup.
        tool_requests = [
            BinaryPathRequest(
                binary_name=tool,
                search_path=search_path,
            )
            for tool in {*tools, *["mkdir", "ln"]}
            if tool not in BASH_BUILTIN_COMMANDS
        ]
        # NOTE(review): the generator variable `request` shadows the rule's `request`
        # parameter, which is not used again afterwards — confusing but harmless here.
        tool_paths = await MultiGet(
            Get(BinaryPaths, BinaryPathRequest, request) for request in tool_requests
        )

        # TOOLS lists sanitized env-var names; the boot script expands `${!tool}`
        # (indirect expansion) to recover each tool's absolute path assigned below.
        command_env = {
            "TOOLS": " ".join(
                _shell_tool_safe_env_name(tool.binary_name) for tool in tool_requests
            ),
        }

        for binary, tool_request in zip(tool_paths, tool_requests):
            if binary.first_path:
                command_env[
                    _shell_tool_safe_env_name(tool_request.binary_name)
                ] = binary.first_path.path
            else:
                raise BinaryNotFoundError.from_request(
                    tool_request,
                    rationale=f"execute `{shell_command.alias}` {shell_command.address}",
                )

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    # Fetch dependency sources and, in parallel, the package field sets of dependencies.
    sources, pkgs_per_target = await MultiGet(
        Get(
            SourceFiles,
            SourceFilesRequest(
                sources_fields=[tgt.get(SourcesField) for tgt in transitive_targets.dependencies],
                for_sources_types=(SourcesField, FileSourceField),
                enable_codegen=True,
            ),
        ),
        Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet, transitive_targets.dependencies),
        ),
    )

    # Build every packageable dependency so its artifacts are available in the sandbox.
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set) for field_set in pkgs_per_target.field_sets
    )

    # Only create the working directory when it is needed and not already present in
    # the dependency sources.
    if interactive or not working_directory or working_directory in sources.snapshot.dirs:
        work_dir = EMPTY_DIGEST
    else:
        work_dir = await Get(Digest, CreateDigest([Directory(working_directory)]))

    input_digest = await Get(
        Digest,
        MergeDigests([sources.snapshot.digest, work_dir, *(pkg.digest for pkg in packages)]),
    )

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    if interactive:
        # cd into the requested workdir (relative to the sandbox root) unless it is ".".
        relpath = os.path.relpath(
            working_directory or ".", start="/" if os.path.isabs(working_directory) else "."
        )
        boot_script = f"cd {shlex.quote(relpath)}; " if relpath != "." else ""
    else:
        # Setup bin_relpath dir with symlinks to all requested tools, so that we can use PATH, force
        # symlinks to avoid issues with repeat runs using the __run.sh script in the sandbox.
        bin_relpath = ".bin"
        boot_script = ";".join(
            dedent(
                f"""\
                $mkdir -p {bin_relpath}
                for tool in $TOOLS; do $ln -sf ${{!tool}} {bin_relpath}; done
                export PATH="$PWD/{bin_relpath}"
                """
            ).split("\n")
        )

    return Process(
        argv=(bash.path, "-c", boot_script + command),
        description=f"Running {shell_command.alias} {shell_command.address}",
        env=command_env,
        input_digest=input_digest,
        output_directories=output_directories,
        output_files=output_files,
        timeout_seconds=timeout,
        working_directory=working_directory,
    )