Example #1
async def get_subprocess_environment(
    subproc_env: SubprocessEnvironment,
) -> SubprocessEnvironmentVars:
    return SubprocessEnvironmentVars(await Get(
        Environment,
        EnvironmentRequest(subproc_env.env_vars_to_pass_to_subprocesses,
                           allowed=SETTABLE_ENV_VARS),
    ))
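Every example on this page follows the same core pattern: an async rule awaits Get(Environment, EnvironmentRequest([...])) to capture a named subset of the local environment. The sketch below shows that pattern as a minimal standalone rule, using the Pants 2.x module paths these examples assume; ExamplePathEnv and example_path_lookup are hypothetical names (later Pants releases renamed these types to EnvironmentVars/EnvironmentVarsRequest):

from dataclasses import dataclass

from pants.engine.environment import Environment, EnvironmentRequest
from pants.engine.rules import Get, collect_rules, rule


@dataclass(frozen=True)
class ExamplePathEnv:
    """Hypothetical output type for this sketch."""

    path: str


@rule
async def example_path_lookup() -> ExamplePathEnv:
    # Request only the variables we need; capturing a named subset (rather
    # than the whole ambient environment) keeps the rule cacheable.
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    return ExamplePathEnv(env.get("PATH", ""))


def rules():
    return collect_rules()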
Example #2
async def push_docker_images(
    request: PublishDockerImageRequest, docker: DockerBinary, options: DockerOptions
) -> PublishProcesses:
    tags = tuple(
        chain.from_iterable(
            cast(BuiltDockerImage, image).tags
            for pkg in request.packages
            for image in pkg.artifacts
        )
    )

    if request.field_set.skip_push.value:
        return PublishProcesses(
            [
                PublishPackages(
                    names=tags,
                    description=f"(by `{request.field_set.skip_push.alias}` on {request.field_set.address})",
                ),
            ]
        )

    env = await Get(Environment, EnvironmentRequest(options.env_vars))
    processes = zip(tags, docker.push_image(tags, env))
    return PublishProcesses(
        [
            PublishPackages(
                names=(tag,),
                process=InteractiveProcess.from_process(process),
            )
            for tag, process in processes
        ]
    )
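As a worked illustration of the tag flattening at the top of this rule (plain Python, with made-up image names), every built image contributes its tags to one flat tuple:

from itertools import chain

# Hypothetical data: each inner tuple holds one built image's tags.
pkg_artifact_tags = [
    ("registry/app:latest", "registry/app:1.0"),
    ("registry/worker:1.0",),
]
tags = tuple(chain.from_iterable(pkg_artifact_tags))
assert tags == ("registry/app:latest", "registry/app:1.0", "registry/worker:1.0")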
Example #3
async def setup_go_sdk_process(
    request: GoSdkProcess,
    go_sdk_run: GoSdkRunSetup,
    bash: BashBinary,
    golang_subsystem: GolangSubsystem,
    goroot: GoRoot,
) -> Process:
    input_digest, env_vars = await MultiGet(
        Get(Digest, MergeDigests([go_sdk_run.digest, request.input_digest])),
        Get(
            Environment,
            EnvironmentRequest(
                golang_subsystem.env_vars_to_pass_to_subprocesses)),
    )
    return Process(
        argv=[bash.path, go_sdk_run.script.path, *request.command],
        env={
            **env_vars,
            **request.env,
            GoSdkRunSetup.CHDIR_ENV: request.working_dir or "",
            # TODO: Maybe could just use MAJOR.MINOR for version part here?
            "__PANTS_GO_SDK_CACHE_KEY": f"{goroot.version}/{goroot.goos}/{goroot.goarch}",
        },
        input_digest=input_digest,
        description=request.description,
        output_files=request.output_files,
        output_directories=request.output_directories,
        level=LogLevel.DEBUG,
        platform=request.platform,
    )
Example #4
async def determine_shunit2_shell(request: Shunit2RunnerRequest,
                                  shell_setup: ShellSetup) -> Shunit2Runner:
    if request.shell_field.value is not None:
        tgt_shell = Shunit2Shell(request.shell_field.value)
    else:
        parse_result = Shunit2Shell.parse_shebang(
            request.test_file_content.content)
        if parse_result is None:
            raise ShellNotConfigured(
                f"Could not determine which shell to use to run shunit2 on {request.address}.\n\n"
                f"Please either specify the `{Shunit2ShellField.alias}` field or add a "
                f"shebang to {request.test_file_content.path} with one of the supported shells in "
                f"the format `#!/path/to/shell` or `#!/path/to/env shell` "
                f"(run `./pants help {Shunit2Tests.alias}` for valid shells).")
        tgt_shell = parse_result

    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    path_request = BinaryPathRequest(
        binary_name=tgt_shell.name,
        search_path=shell_setup.executable_search_path(env),
        test=tgt_shell.binary_path_test,
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, path_request)
    first_path = paths.first_path
    if not first_path:
        raise BinaryNotFoundError(
            path_request, rationale=f"run shunit2 on {request.address}")
    return Shunit2Runner(tgt_shell, first_path)
Example #5
async def python_bootstrap(
        python_bootstrap_subsystem: PythonBootstrapSubsystem
) -> PythonBootstrap:
    environment = await Get(
        Environment,
        EnvironmentRequest(
            ["PATH", "HOME", "PYENV_ROOT", "ASDF_DIR", "ASDF_DATA_DIR"]))
    return PythonBootstrap(environment, python_bootstrap_subsystem.options)
Example #6
def twine_env_request(repo: str) -> EnvironmentRequest:
    suffix = twine_env_suffix(repo)
    req = EnvironmentRequest([
        f"{var}{suffix}" for var in [
            "TWINE_USERNAME",
            "TWINE_PASSWORD",
            "TWINE_REPOSITORY_URL",
        ]
    ])
    return req
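Unlike the rules above, this helper only constructs the request object; a caller is expected to resolve it, for instance with the long-form Get that passes the input type and instance separately. A hypothetical call site:

async def publish_with_twine_env(repo: str) -> None:  # hypothetical caller
    env = await Get(Environment, EnvironmentRequest, twine_env_request(repo))
    ...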
Example #7
async def docker_image_run_request(
    field_set: DockerFieldSet, docker: DockerBinary, options: DockerOptions
) -> RunRequest:
    env, image = await MultiGet(
        Get(Environment, EnvironmentRequest(options.env_vars)),
        Get(BuiltPackage, PackageFieldSet, field_set),
    )
    tag = cast(BuiltDockerImage, image.artifacts[0]).tags[0]
    run = docker.run_image(tag, docker_run_args=options.run_args, env=env)

    return RunRequest(args=run.argv, digest=image.digest, extra_env=run.env)
Example #8
def twine_env_request(repo: str) -> EnvironmentRequest:
    suffix = twine_env_suffix(repo)
    req = EnvironmentRequest([
        f"{var}{suffix}" for var in [
            "TWINE_USERNAME",
            "TWINE_PASSWORD",
            "TWINE_REPOSITORY_URL",
            # "TWINE_CERT",  # Does the --cert arg to pex take care of this for us?
        ]
    ])
    return req
Example #9
async def docker_build_environment_vars(
        request: DockerBuildEnvironmentRequest,
        docker_options: DockerOptions) -> DockerBuildEnvironment:
    build_args = await Get(DockerBuildArgs,
                           DockerBuildArgsRequest(request.target))
    env_vars = KeyValueSequenceUtil.from_strings(
        *{build_arg
          for build_arg in build_args if "=" not in build_arg},
        *docker_options.env_vars,
    )
    env = await Get(Environment, EnvironmentRequest(tuple(env_vars)))
    return DockerBuildEnvironment(env)
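A worked illustration of the filtering above, with made-up values: build args declared as bare names are looked up in the environment, while NAME=value pairs already carry a value and are excluded:

build_args = ["REGISTRY=registry.example.com", "BUILD_NUMBER", "GIT_COMMIT"]
to_fetch = {arg for arg in build_args if "=" not in arg}
assert to_fetch == {"BUILD_NUMBER", "GIT_COMMIT"}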
Example #10
async def push_docker_images(request: PublishDockerImageRequest,
                             docker: DockerBinary,
                             options: DockerOptions) -> PublishProcesses:
    tags = tuple(
        chain.from_iterable(
            cast(BuiltDockerImage, image).tags for pkg in request.packages
            for image in pkg.artifacts))

    if request.field_set.skip_push.value:
        return PublishProcesses([
            PublishPackages(
                names=tags,
                description=f"(by `{request.field_set.skip_push.alias}` on {request.field_set.address})",
            ),
        ])

    env = await Get(Environment, EnvironmentRequest(options.env_vars))
    skip_push = defaultdict(set)
    jobs: list[PublishPackages] = []
    refs: list[str] = []
    processes: list[Get] = []

    for tag in tags:
        for registry in options.registries().registries.values():
            if tag.startswith(registry.address) and registry.skip_push:
                skip_push[registry.alias].add(tag)
                break
        else:
            refs.append(tag)
            processes.append(
                Get(InteractiveProcess,
                    InteractiveProcessRequest(docker.push_image(tag, env))))

    interactive_processes = await MultiGet(processes)
    for ref, process in zip(refs, interactive_processes):
        jobs.append(PublishPackages(
            names=(ref, ),
            process=process,
        ))

    if skip_push:
        for name, skip_tags in skip_push.items():
            jobs.append(
                PublishPackages(
                    names=tuple(skip_tags),
                    description=f"(by `skip_push` on registry @{name})",
                ), )

    return PublishProcesses(jobs)
Example #11
async def export(
    console: Console,
    targets: Targets,
    workspace: Workspace,
    union_membership: UnionMembership,
    build_root: BuildRoot,
    dist_dir: DistDir,
) -> Export:
    request_types = cast("Iterable[type[ExportRequest]]",
                         union_membership.get(ExportRequest))
    requests = tuple(request_type(targets) for request_type in request_types)
    all_results = await MultiGet(
        Get(ExportResults, ExportRequest, request) for request in requests)
    flattened_results = [res for results in all_results for res in results]

    prefixed_digests = await MultiGet(
        Get(Digest, AddPrefix(result.digest, result.reldir))
        for result in flattened_results)
    output_dir = os.path.join(str(dist_dir.relpath), "export")
    merged_digest = await Get(Digest, MergeDigests(prefixed_digests))
    dist_digest = await Get(Digest, AddPrefix(merged_digest, output_dir))
    workspace.write_digest(dist_digest)
    environment = await Get(Environment, EnvironmentRequest(["PATH"]))
    for result in flattened_results:
        digest_root = os.path.join(build_root.path, output_dir, result.reldir)
        for cmd in result.post_processing_cmds:
            argv = tuple(
                arg.format(digest_root=digest_root) for arg in cmd.argv)
            ip = InteractiveProcess(
                argv=argv,
                env={
                    "PATH": environment.get("PATH", ""),
                    **cmd.extra_env
                },
                run_in_workspace=True,
            )
            await Effect(InteractiveProcessResult, InteractiveProcess, ip)

        console.print_stdout(
            f"Wrote {result.description} to {os.path.join(output_dir, result.reldir)}"
        )
    return Export(exit_code=0)
Example #12
async def setup_helm(helm_subsystem: HelmSubsystem) -> HelmBinary:
    cache_dir = "__cache"
    config_dir = "__config"
    data_dir = "__data"

    downloaded_binary, cache_digest, config_digest, data_digest = await MultiGet(
        Get(DownloadedExternalTool, ExternalToolRequest,
            helm_subsystem.get_request(Platform.current)),
        Get(Digest, CreateDigest([Directory(cache_dir)])),
        Get(Digest, CreateDigest([Directory(config_dir)])),
        Get(Digest, CreateDigest([Directory(data_dir)])),
    )

    tool_relpath = "__helm"
    immutable_input_digests = {tool_relpath: downloaded_binary.digest}
    helm_path = os.path.join(tool_relpath, downloaded_binary.exe)
    helm_env = _build_helm_env(cache_dir, config_dir, data_dir)

    # TODO Install Global Helm plugins
    # TODO Configure Helm classic repositories

    cache_subset_digest, config_subset_digest, data_subset_digest = await MultiGet(
        Get(Digest, RemovePrefix(cache_digest, cache_dir)),
        Get(Digest, RemovePrefix(config_digest, config_dir)),
        Get(Digest, RemovePrefix(data_digest, data_dir)),
    )

    setup_immutable_digests = {
        **immutable_input_digests,
        cache_dir: cache_subset_digest,
        config_dir: config_subset_digest,
        data_dir: data_subset_digest,
    }

    local_env = await Get(Environment, EnvironmentRequest(["HOME", "PATH"]))
    return HelmBinary(
        path=helm_path,
        helm_env=helm_env,
        local_env=local_env,
        immutable_input_digests=setup_immutable_digests,
    )
Example #13
async def find_docker(docker_request: DockerBinaryRequest,
                      docker_options: DockerOptions) -> DockerBinary:
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_path = docker_options.executable_search_path(env)
    request = BinaryPathRequest(
        binary_name="docker",
        search_path=search_path,
        test=BinaryPathTest(args=["-v"]),
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, request)
    first_path = paths.first_path_or_raise(
        request, rationale="interact with the docker daemon")

    if not docker_options.tools:
        return DockerBinary(first_path.path, first_path.fingerprint)

    tools = await Get(
        BinaryShims,
        BinaryShimsRequest,
        BinaryShimsRequest.for_binaries(
            *docker_options.tools,
            rationale="use docker",
            output_directory="bin",
            search_path=search_path,
        ),
    )
    tools_path = ".shims"
    extra_env = {
        "PATH": os.path.join("{chroot}", tools_path, tools.bin_directory)
    }
    extra_input_digests = {tools_path: tools.digest}

    return DockerBinary(
        first_path.path,
        first_path.fingerprint,
        extra_env=extra_env,
        extra_input_digests=extra_input_digests,
    )
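The PATH entry built above is anchored at the sandbox root via the literal "{chroot}" marker, which Pants substitutes with the actual sandbox path when the process runs. A quick sketch of the resulting value (the bin_directory value is an assumption for this sketch):

import os

tools_path = ".shims"
bin_directory = "bin"  # assumed value of tools.bin_directory
extra_env = {"PATH": os.path.join("{chroot}", tools_path, bin_directory)}
assert extra_env["PATH"] == "{chroot}/.shims/bin"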
Example #14
async def prepare_shell_command_process(request: ShellCommandProcessRequest,
                                        shell_setup: ShellSetup,
                                        bash: BashBinary) -> Process:
    shell_command = request.target
    interactive = shell_command.has_field(ShellCommandRunWorkdirField)
    if interactive:
        working_directory = shell_command[
            ShellCommandRunWorkdirField].value or ""
    else:
        working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    timeout = shell_command.get(ShellCommandTimeoutField).value
    tools = shell_command.get(ShellCommandToolsField,
                              default_raw_value=()).value
    outputs = shell_command.get(ShellCommandOutputsField).value or ()
    extra_env_vars = shell_command.get(
        ShellCommandExtraEnvVarsField).value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `{shell_command.alias}` target {shell_command.address}."
        )

    if interactive:
        command_env = {
            "CHROOT": "{chroot}",
        }
    else:
        if not tools:
            raise ValueError(
                f"Must provide any `tools` used by the `{shell_command.alias}` {shell_command.address}."
            )

        env = await Get(Environment, EnvironmentRequest(["PATH"]))
        search_path = shell_setup.executable_search_path(env)
        tool_requests = [
            BinaryPathRequest(
                binary_name=tool,
                search_path=search_path,
            ) for tool in {*tools, *["mkdir", "ln"]}
            if tool not in BASH_BUILTIN_COMMANDS
        ]
        tool_paths = await MultiGet(
            Get(BinaryPaths, BinaryPathRequest, request)
            for request in tool_requests)

        command_env = {
            "TOOLS": " ".join(
                _shell_tool_safe_env_name(tool.binary_name)
                for tool in tool_requests),
        }

        for binary, tool_request in zip(tool_paths, tool_requests):
            if binary.first_path:
                command_env[_shell_tool_safe_env_name(
                    tool_request.binary_name)] = binary.first_path.path
            else:
                raise BinaryNotFoundError.from_request(
                    tool_request,
                    rationale=f"execute `{shell_command.alias}` {shell_command.address}",
                )

    extra_env = await Get(Environment, EnvironmentRequest(extra_env_vars))
    command_env.update(extra_env)

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    sources, pkgs_per_target = await MultiGet(
        Get(
            SourceFiles,
            SourceFilesRequest(
                sources_fields=[
                    tgt.get(SourcesField)
                    for tgt in transitive_targets.dependencies
                ],
                for_sources_types=(SourcesField, FileSourceField),
                enable_codegen=True,
            ),
        ),
        Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      transitive_targets.dependencies),
        ),
    )

    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in pkgs_per_target.field_sets)

    if interactive or not working_directory or working_directory in sources.snapshot.dirs:
        work_dir = EMPTY_DIGEST
    else:
        work_dir = await Get(Digest,
                             CreateDigest([Directory(working_directory)]))

    input_digest = await Get(
        Digest,
        MergeDigests([
            sources.snapshot.digest, work_dir,
            *(pkg.digest for pkg in packages)
        ]))

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    if interactive:
        relpath = os.path.relpath(
            working_directory or ".",
            start="/" if os.path.isabs(working_directory) else ".")
        boot_script = f"cd {shlex.quote(relpath)}; " if relpath != "." else ""
    else:
        # Set up the bin_relpath dir with symlinks to all requested tools, so that we can
        # use PATH; force the symlinks (ln -sf) to avoid issues with repeat runs reusing
        # the __run.sh script in the sandbox.
        bin_relpath = ".bin"
        boot_script = ";".join(
            dedent(f"""\
                $mkdir -p {bin_relpath}
                for tool in $TOOLS; do $ln -sf ${{!tool}} {bin_relpath}; done
                export PATH="$PWD/{bin_relpath}"
                """).split("\n"))

    return Process(
        argv=(bash.path, "-c", boot_script + command),
        description=f"Running {shell_command.alias} {shell_command.address}",
        env=command_env,
        input_digest=input_digest,
        output_directories=output_directories,
        output_files=output_files,
        timeout_seconds=timeout,
        working_directory=working_directory,
    )
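For reference, in the non-interactive branch command_env ends up shaped roughly like the hypothetical dict below: $TOOLS lists the sanitized tool names, and each name is itself an environment variable holding that tool's absolute path, which the boot script dereferences via bash's ${!tool} indirection:

# Hypothetical values; the real paths come from the BinaryPaths lookups above.
command_env = {
    "TOOLS": "mkdir ln sed",
    "mkdir": "/usr/bin/mkdir",
    "ln": "/usr/bin/ln",
    "sed": "/usr/bin/sed",
}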
Example #15
async def setup_helm(helm_subsystem: HelmSubsystem,
                     global_plugins: HelmPlugins) -> HelmBinary:
    downloaded_binary, empty_dirs_digest = await MultiGet(
        Get(DownloadedExternalTool, ExternalToolRequest,
            helm_subsystem.get_request(Platform.current)),
        Get(
            Digest,
            CreateDigest([
                Directory(_HELM_CONFIG_DIR),
                Directory(_HELM_DATA_DIR),
            ]),
        ),
    )

    tool_relpath = "__helm"
    immutable_input_digests = {tool_relpath: downloaded_binary.digest}

    helm_path = os.path.join(tool_relpath, downloaded_binary.exe)
    helm_env = {
        "HELM_CACHE_HOME": _HELM_CACHE_DIR,
        "HELM_CONFIG_HOME": _HELM_CONFIG_DIR,
        "HELM_DATA_HOME": _HELM_DATA_DIR,
    }

    # Create a digest that will get mutated during the setup process
    mutable_input_digest = empty_dirs_digest

    # Install all global Helm plugins
    if global_plugins:
        prefixed_plugins_digests = await MultiGet(
            Get(
                Digest,
                AddPrefix(plugin.digest,
                          os.path.join(_HELM_DATA_DIR, "plugins",
                                       plugin.name)),
            ) for plugin in global_plugins)
        mutable_input_digest = await Get(
            Digest,
            MergeDigests([mutable_input_digest, *prefixed_plugins_digests]))

    updated_config_digest, updated_data_digest = await MultiGet(
        Get(
            Digest,
            DigestSubset(mutable_input_digest,
                         PathGlobs([os.path.join(_HELM_CONFIG_DIR, "**")])),
        ),
        Get(
            Digest,
            DigestSubset(mutable_input_digest,
                         PathGlobs([os.path.join(_HELM_DATA_DIR, "**")])),
        ),
    )
    config_subset_digest, data_subset_digest = await MultiGet(
        Get(Digest, RemovePrefix(updated_config_digest, _HELM_CONFIG_DIR)),
        Get(Digest, RemovePrefix(updated_data_digest, _HELM_DATA_DIR)),
    )

    setup_immutable_digests = {
        **immutable_input_digests,
        _HELM_CONFIG_DIR: config_subset_digest,
        _HELM_DATA_DIR: data_subset_digest,
    }

    local_env = await Get(Environment, EnvironmentRequest(["HOME", "PATH"]))
    return HelmBinary(
        path=helm_path,
        helm_env=helm_env,
        local_env=local_env,
        immutable_input_digests=setup_immutable_digests,
    )
Example #16
async def setup_shunit2_for_target(
    request: TestSetupRequest,
    shell_setup: ShellSetup,
    test_subsystem: TestSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    shunit2_download_file = DownloadFile(
        "https://raw.githubusercontent.com/kward/shunit2/b9102bb763cc603b3115ed30a5648bf950548097/shunit2",
        FileDigest(
            "1f11477b7948150d1ca50cdd41d89be4ed2acd137e26d2e0fe23966d0e272cc5",
            40987),
    )
    shunit2_script, transitive_targets, built_package_dependencies, env = await MultiGet(
        Get(Digest, DownloadFile, shunit2_download_file),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
        Get(
            BuiltPackageDependencies,
            BuildPackageDependenciesRequest(
                request.field_set.runtime_package_dependencies),
        ),
        Get(Environment, EnvironmentRequest(["PATH"])),
    )

    dependencies_source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for tgt in transitive_targets.dependencies),
            for_sources_types=(ShellSources, FilesSources, ResourcesSources),
            enable_codegen=True,
        ),
    )
    dependencies_source_files, field_set_sources = await MultiGet(
        dependencies_source_files_request,
        Get(SourceFiles, SourceFilesRequest([request.field_set.sources])),
    )

    field_set_digest_content = await Get(DigestContents, Digest,
                                         field_set_sources.snapshot.digest)
    # Because a FieldSet corresponds to a file address, there should be exactly 1 file in the
    # sources. This assumption allows us to simplify determining which shell to use via inspecting
    # the shebang.
    if len(field_set_digest_content) != 1:
        raise AssertionError(
            f"The file address {request.field_set.address} had sources != 1, which is unexpected: "
            f"{field_set_sources.snapshot.files}. Please file a bug at "
            "https://github.com/pantsbuild/pants/issues/new with this error message copied."
        )
    original_test_file_content = field_set_digest_content[0]
    updated_test_file_content = add_source_shunit2(original_test_file_content)

    updated_test_digest, runner = await MultiGet(
        Get(Digest, CreateDigest([updated_test_file_content])),
        Get(
            Shunit2Runner,
            Shunit2RunnerRequest(request.field_set.address,
                                 original_test_file_content,
                                 request.field_set.shell),
        ),
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            shunit2_script,
            updated_test_digest,
            dependencies_source_files.snapshot.digest,
            *(pkg.digest for pkg in built_package_dependencies),
        )),
    )

    env_dict = {
        "PATH": create_path_env_var(shell_setup.executable_search_path(env)),
        "SHUNIT_COLOR": "always" if global_options.options.colors else "none",
        **test_extra_env.env,
    }
    argv = (
        # Zsh requires extra args. See https://github.com/kward/shunit2/#-zsh.
        [
            runner.binary_path.path, "-o", "shwordsplit", "--",
            *field_set_sources.snapshot.files
        ] if runner.shell == Shunit2Shell.zsh else
        [runner.binary_path.path, *field_set_sources.snapshot.files])
    cache_scope = ProcessCacheScope.NEVER if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    process = Process(
        argv=argv,
        input_digest=input_digest,
        description=f"Run shunit2 for {request.field_set.address}.",
        level=LogLevel.DEBUG,
        env=env_dict,
        timeout_seconds=request.field_set.timeout.value,
        cache_scope=cache_scope,
    )
    return TestSetup(process)
Example #17
async def find_pex_python(
    python_setup: PythonSetup,
    pex_runtime_env: PexRuntimeEnvironment,
    subprocess_env_vars: SubprocessEnvironmentVars,
    global_options: GlobalOptions,
) -> PexEnvironment:
    pex_relevant_environment = await Get(
        Environment, EnvironmentRequest(["PATH", "HOME", "PYENV_ROOT"]))
    # PEX files are compatible with bootstrapping via Python 2.7 or Python 3.5+. The bootstrap
    # code will then re-exec itself if the underlying PEX user code needs a more specific python
    # interpreter. As such, we look for many Pythons usable by the PEX bootstrap code here for
    # maximum flexibility.
    all_python_binary_paths = await MultiGet(
        Get(
            BinaryPaths,
            BinaryPathRequest(
                search_path=python_setup.interpreter_search_paths(
                    pex_relevant_environment),
                binary_name=binary_name,
                test=BinaryPathTest(
                    args=[
                        "-c",
                        # N.B.: The following code snippet must be compatible with Python 2.7 and
                        # Python 3.5+.
                        #
                        # We hash the underlying Python interpreter executable to ensure we detect
                        # changes in the real interpreter that might otherwise be masked by Pyenv
                        # shim scripts found on the search path. Naively, just printing out the full
                        # version_info would be enough, but that does not account for supported abi
                        # changes (e.g.: a pyenv switch from a py27mu interpreter to a py27m
                        # interpreter.)
                        #
                        # When hashing, we pick 8192 for efficiency of reads and fingerprint updates
                        # (writes) since it's a common OS buffer size and an even multiple of the
                        # hash block size.
                        dedent("""\
                            import sys

                            major, minor = sys.version_info[:2]
                            if (major, minor) != (2, 7) and not (major == 3 and minor >= 5):
                                sys.exit(1)

                            import hashlib
                            hasher = hashlib.sha256()
                            with open(sys.executable, "rb") as fp:
                                for chunk in iter(lambda: fp.read(8192), b""):
                                    hasher.update(chunk)
                            sys.stdout.write(hasher.hexdigest())
                            """),
                    ],
                    # We already emit a usable fingerprint to stdout.
                    fingerprint_stdout=False,
                ),
            ),
        ) for binary_name in pex_runtime_env.bootstrap_interpreter_names)

    def first_python_binary() -> Optional[PythonExecutable]:
        for binary_paths in all_python_binary_paths:
            if binary_paths.first_path:
                return PythonExecutable(
                    path=binary_paths.first_path.path,
                    fingerprint=binary_paths.first_path.fingerprint,
                )
        return None

    return PexEnvironment(
        path=pex_runtime_env.path(pex_relevant_environment),
        interpreter_search_paths=tuple(
            python_setup.interpreter_search_paths(pex_relevant_environment)),
        subprocess_environment_dict=subprocess_env_vars.vars,
        # TODO: This path normalization is duplicated with `engine_initializer.py`. How can we do
        #  the normalization only once, via the options system?
        named_caches_dir=Path(
            global_options.options.named_caches_dir).resolve().as_posix(),
        bootstrap_python=first_python_binary(),
    )
Example #18
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    transitive_targets, plugin_setups = await MultiGet(
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
        Get(AllPytestPluginSetups,
            AllPytestPluginSetupsRequest(request.field_set.address)),
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = InterpreterConstraints.create_from_targets(
        all_targets, python_setup)

    requirements_pex_get = Get(
        Pex,
        RequirementsPexRequest([request.field_set.address],
                               internal_only=True))
    pytest_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=pytest.pex_requirements(),
            interpreter_constraints=interpreter_constraints,
            internal_only=True,
        ),
    )

    # Ensure that the empty extra output dir exists.
    extra_output_directory_digest_get = Get(
        Digest, CreateDigest([Directory(_EXTRA_OUTPUT_DIR)]))

    prepared_sources_get = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_get = Get(
        SourceFiles, SourceFilesRequest([request.field_set.source]))

    field_set_extra_env_get = Get(
        Environment,
        EnvironmentRequest(request.field_set.extra_env_vars.value or ()))

    (
        pytest_pex,
        requirements_pex,
        prepared_sources,
        field_set_source_files,
        field_set_extra_env,
        extra_output_directory_digest,
    ) = await MultiGet(
        pytest_pex_get,
        requirements_pex_get,
        prepared_sources_get,
        field_set_source_files_get,
        field_set_extra_env_get,
        extra_output_directory_digest_get,
    )

    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            [request.field_set.address],
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
            sources=prepared_sources,
        ),
    )

    pytest_runner_pex_get = Get(
        VenvPex,
        PexRequest(
            output_filename="pytest_runner.pex",
            interpreter_constraints=interpreter_constraints,
            main=pytest.main,
            internal_only=True,
            pex_path=[pytest_pex, requirements_pex, local_dists.pex],
        ),
    )
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        pytest.config_request(field_set_source_files.snapshot.dirs),
    )
    pytest_runner_pex, config_files = await MultiGet(pytest_runner_pex_get,
                                                     config_files_get)

    # The coverage and pytest config may live in the same config file (e.g., setup.cfg, tox.ini
    # or pyproject.toml), and we may have rewritten those files to augment the coverage config,
    # in which case we must ensure that the original and rewritten files don't collide.
    pytest_config_digest = config_files.snapshot.digest
    if coverage_config.path in config_files.snapshot.files:
        subset_paths = list(config_files.snapshot.files)
        # Remove the original file, and rely on the rewritten file, which contains all the
        # pytest-related config unchanged.
        subset_paths.remove(coverage_config.path)
        pytest_config_digest = await Get(
            Digest, DigestSubset(pytest_config_digest,
                                 PathGlobs(subset_paths)))

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            local_dists.remaining_sources.source_files.snapshot.digest,
            pytest_config_digest,
            extra_output_directory_digest,
            *(plugin_setup.digest for plugin_setup in plugin_setups),
        )),
    )

    add_opts = [f"--color={'yes' if global_options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        pytest.validate_pytest_cov_included()
        output_files.append(".coverage")

        if coverage_subsystem.filter:
            cov_args = [f"--cov={morf}" for morf in coverage_subsystem.filter]
        else:
            # N.B.: Passing `--cov=` or `--cov=.` to communicate "record coverage for all sources"
            # fails in certain contexts as detailed in:
            #   https://github.com/pantsbuild/pants/issues/12390
            # Instead we focus coverage on just the directories containing python source files
            # materialized to the Process chroot.
            cov_args = [
                f"--cov={source_root}"
                for source_root in prepared_sources.source_roots
            ]

        coverage_args = [
            "--cov-report=",  # Turn off output.
            f"--cov-config={coverage_config.path}",
            *cov_args,
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
        **test_extra_env.env,
        # NOTE: field_set_extra_env intentionally after `test_extra_env` to allow overriding within
        # `python_tests`.
        **field_set_extra_env,
    }

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (ProcessCacheScope.PER_SESSION
                   if test_subsystem.force else ProcessCacheScope.SUCCESSFUL)
    process = await Get(
        Process,
        VenvPexProcess(
            pytest_runner_pex,
            argv=(*pytest.args, *coverage_args, *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_directories=(_EXTRA_OUTPUT_DIR, ),
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
Example #19
async def setup_shunit2_for_target(
    request: TestSetupRequest,
    shell_setup: ShellSetup,
    test_subsystem: TestSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    shunit2_download_file = DownloadFile(
        "https://raw.githubusercontent.com/kward/shunit2/b9102bb763cc603b3115ed30a5648bf950548097/shunit2",
        FileDigest(
            "1f11477b7948150d1ca50cdd41d89be4ed2acd137e26d2e0fe23966d0e272cc5",
            40987),
    )
    shunit2_script, transitive_targets, built_package_dependencies, env = await MultiGet(
        Get(Digest, DownloadFile, shunit2_download_file),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
        Get(
            BuiltPackageDependencies,
            BuildPackageDependenciesRequest(
                request.field_set.runtime_package_dependencies),
        ),
        Get(Environment, EnvironmentRequest(["PATH"])),
    )

    dependencies_source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(SourcesField) for tgt in transitive_targets.dependencies),
            for_sources_types=(ShellSourceField, FileSourceField,
                               ResourceSourceField),
            enable_codegen=True,
        ),
    )
    dependencies_source_files, field_set_sources = await MultiGet(
        dependencies_source_files_request,
        Get(SourceFiles, SourceFilesRequest([request.field_set.sources])),
    )

    field_set_digest_content = await Get(DigestContents, Digest,
                                         field_set_sources.snapshot.digest)
    # `ShellTestSourceField` validates that there's exactly one file.
    test_file_content = field_set_digest_content[0]
    updated_test_file_content = add_source_shunit2(test_file_content)

    updated_test_digest, runner = await MultiGet(
        Get(Digest, CreateDigest([updated_test_file_content])),
        Get(
            Shunit2Runner,
            Shunit2RunnerRequest(request.field_set.address, test_file_content,
                                 request.field_set.shell),
        ),
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            shunit2_script,
            updated_test_digest,
            dependencies_source_files.snapshot.digest,
            *(pkg.digest for pkg in built_package_dependencies),
        )),
    )

    env_dict = {
        "PATH": create_path_env_var(shell_setup.executable_search_path(env)),
        "SHUNIT_COLOR": "always" if global_options.colors else "none",
        **test_extra_env.env,
    }
    argv = (
        # Zsh requires extra args. See https://github.com/kward/shunit2/#-zsh.
        [
            runner.binary_path.path, "-o", "shwordsplit", "--",
            *field_set_sources.snapshot.files
        ] if runner.shell == Shunit2Shell.zsh else
        [runner.binary_path.path, *field_set_sources.snapshot.files])
    cache_scope = (ProcessCacheScope.PER_SESSION
                   if test_subsystem.force else ProcessCacheScope.SUCCESSFUL)
    process = Process(
        argv=argv,
        input_digest=input_digest,
        description=f"Run shunit2 for {request.field_set.address}.",
        level=LogLevel.DEBUG,
        env=env_dict,
        timeout_seconds=request.field_set.timeout.value,
        cache_scope=cache_scope,
    )
    return TestSetup(process)
Example #20
async def get_subprocess_environment(
    subproc_env: SubprocessEnvironment,
) -> SubprocessEnvironmentVars:
    return SubprocessEnvironmentVars(
        await Get(Environment, EnvironmentRequest(subproc_env.env_vars_to_pass_to_subprocesses))
    )
Example #21
async def get_filtered_environment(
        test_subsystem: TestSubsystem) -> TestExtraEnv:
    return TestExtraEnv(
        await Get(Environment, EnvironmentRequest(test_subsystem.extra_env_vars))
    )
Example #22
async def setup_goroot(golang_subsystem: GolangSubsystem) -> GoRoot:
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_paths = golang_subsystem.go_search_paths(env)
    all_go_binary_paths = await Get(
        BinaryPaths,
        BinaryPathRequest(
            search_path=search_paths,
            binary_name="go",
            test=BinaryPathTest(["version"]),
        ),
    )
    if not all_go_binary_paths.paths:
        raise BinaryNotFoundError(
            "Cannot find any `go` binaries using the option "
            f"`[golang].go_search_paths`: {list(search_paths)}\n\n"
            "To fix, please install Go (https://golang.org/doc/install) with the version "
            f"{golang_subsystem.expected_version} (set by `[golang].expected_version`) and ensure "
            "that it is discoverable via `[golang].go_search_paths`.")

    # `go env GOVERSION` does not work in earlier Go versions (like 1.15), so we must run
    # `go version` and `go env GOROOT` to calculate both the version and GOROOT.
    version_results = await MultiGet(
        Get(
            ProcessResult,
            Process(
                (binary_path.path, "version"),
                description=f"Determine Go version for {binary_path.path}",
                level=LogLevel.DEBUG,
                cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
            ),
        ) for binary_path in all_go_binary_paths.paths)

    invalid_versions = []
    for binary_path, version_result in zip(all_go_binary_paths.paths,
                                           version_results):
        try:
            _raw_version = version_result.stdout.decode("utf-8").split()[2]  # e.g. go1.17 or go1.17.1
            _version_components = _raw_version[2:].split(".")  # e.g. [1, 17] or [1, 17, 1]
            version = f"{_version_components[0]}.{_version_components[1]}"
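            # Worked illustration of the two lines above, assuming a typical
            # `go version` line "go version go1.17.1 linux/amd64":
            #   split()[2]       -> "go1.17.1"
            #   [2:].split(".")  -> ["1", "17", "1"]
            #   version          -> "1.17"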
        except IndexError:
            raise AssertionError(
                f"Failed to parse `go version` output for {binary_path}. Please open a bug at "
                f"https://github.com/pantsbuild/pants/issues/new/choose with the below data."
                f"\n\n"
                f"{version_result}")

        if version == golang_subsystem.expected_version:
            env_result = await Get(
                ProcessResult,
                Process(
                    (binary_path.path, "env", "-json"),
                    description=f"Determine Go SDK metadata for {binary_path.path}",
                    level=LogLevel.DEBUG,
                    cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
                    env={"GOPATH": "/does/not/matter"},
                ),
            )
            sdk_metadata = json.loads(env_result.stdout.decode())
            return GoRoot(path=sdk_metadata["GOROOT"],
                          version=version,
                          _raw_metadata=FrozenDict(sdk_metadata))

        logger.debug(
            f"Go binary at {binary_path.path} has version {version}, but this "
            f"project is using {golang_subsystem.expected_version} "
            "(set by `[golang].expected_version`). Ignoring.")
        invalid_versions.append((binary_path.path, version))

    invalid_versions_str = bullet_list(
        f"{path}: {version}" for path, version in sorted(invalid_versions))
    raise BinaryNotFoundError(
        "Cannot find a `go` binary with the expected version of "
        f"{golang_subsystem.expected_version} (set by `[golang].expected_version`).\n\n"
        f"Found these `go` binaries, but they had different versions:\n\n"
        f"{invalid_versions_str}\n\n"
        "To fix, please install the expected version (https://golang.org/doc/install) and ensure "
        "that it is discoverable via the option `[golang].go_search_paths`, or change "
        "`[golang].expected_version`.")
Example #23
async def run_shell_command(
    request: GenerateFilesFromShellCommandRequest,
    shell_setup: ShellSetup,
    bash: BashBinary,
) -> GeneratedSources:
    shell_command = request.protocol_target
    working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    tools = shell_command[ShellCommandToolsField].value
    outputs = shell_command[ShellCommandOutputsField].value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `shell_command` target {shell_command.address}."
        )

    if not tools:
        raise ValueError(
            f"Must provide any `tools` used by the `shell_command` {shell_command.address}."
        )

    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_path = shell_setup.executable_search_path(env)
    tool_requests = [
        BinaryPathRequest(
            binary_name=tool,
            search_path=search_path,
        ) for tool in {*tools, *["mkdir", "ln"]}
        if tool not in BASH_BUILTIN_COMMANDS
    ]
    tool_paths = await MultiGet(
        Get(BinaryPaths, BinaryPathRequest, request)
        for request in tool_requests)

    command_env = {
        "TOOLS": " ".join(shlex.quote(tool.binary_name) for tool in tool_requests),
    }

    for binary, tool_request in zip(tool_paths, tool_requests):
        if binary.first_path:
            command_env[tool_request.binary_name] = binary.first_path.path
        else:
            raise BinaryNotFoundError.from_request(
                tool_request,
                rationale=f"execute experimental_shell_command {shell_command.address}",
            )

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    sources = await Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[
                tgt.get(Sources) for tgt in transitive_targets.dependencies
            ],
            for_sources_types=(
                Sources,
                FilesSources,
            ),
            enable_codegen=True,
        ),
    )

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    if working_directory in sources.snapshot.dirs:
        input_digest = sources.snapshot.digest
    else:
        work_dir = await Get(Digest,
                             CreateDigest([Directory(working_directory)]))
        input_digest = await Get(
            Digest, MergeDigests([sources.snapshot.digest, work_dir]))

    # Setup bin_relpath dir with symlinks to all requested tools, so that we can use PATH.
    bin_relpath = ".bin"
    setup_tool_symlinks_script = ";".join(
        dedent(f"""\
            $mkdir -p {bin_relpath}
            for tool in $TOOLS; do $ln -s ${{!tool}} {bin_relpath}/; done
            export PATH="$PWD/{bin_relpath}"
            """).split("\n"))

    result = await Get(
        ProcessResult,
        Process(
            argv=(bash.path, "-c", setup_tool_symlinks_script + command),
            description=f"Running experimental_shell_command {shell_command.address}",
            env=command_env,
            input_digest=input_digest,
            output_directories=output_directories,
            output_files=output_files,
            working_directory=working_directory,
        ),
    )

    if shell_command[ShellCommandLogOutputField].value:
        if result.stdout:
            logger.info(result.stdout.decode())
        if result.stderr:
            logger.warning(result.stderr.decode())

    output = await Get(Snapshot,
                       AddPrefix(result.output_digest, working_directory))
    return GeneratedSources(output)
Example #24
async def generate_scala_from_protobuf(
    request: GenerateScalaFromProtobufRequest,
    protoc: Protoc,
    scalapb: ScalaPBSubsystem,
    shim_classfiles: ScalaPBShimCompiledClassfiles,
    jdk: InternalJdk,
) -> GeneratedSources:
    output_dir = "_generated_files"
    toolcp_relpath = "__toolcp"
    shimcp_relpath = "__shimcp"
    plugins_relpath = "__plugins"
    protoc_relpath = "__protoc"

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 ScalapbcToolLockfileSentinel())
    (
        downloaded_protoc_binary,
        tool_classpath,
        empty_output_dir,
        transitive_targets,
        inherit_env,
    ) = await MultiGet(
        Get(DownloadedExternalTool, ExternalToolRequest,
            protoc.get_request(Platform.current)),
        Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
        Get(Digest, CreateDigest([Directory(output_dir)])),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.protocol_target.address])),
        # Need PATH so that ScalaPB can invoke `mkfifo`.
        Get(Environment, EnvironmentRequest(requested=["PATH"])),
    )

    # NB: By stripping the source roots, we avoid having to set the value `--proto_path`
    # for Protobuf imports to be discoverable.
    all_sources_stripped, target_sources_stripped = await MultiGet(
        Get(
            StrippedSourceFiles,
            SourceFilesRequest(tgt[ProtobufSourceField]
                               for tgt in transitive_targets.closure
                               if tgt.has_field(ProtobufSourceField)),
        ),
        Get(StrippedSourceFiles,
            SourceFilesRequest([request.protocol_target[ProtobufSourceField]
                                ])),
    )

    merged_jvm_plugins_digest = EMPTY_DIGEST
    maybe_jvm_plugins_setup_args: tuple[str, ...] = ()
    maybe_jvm_plugins_output_args: tuple[str, ...] = ()
    jvm_plugins = scalapb.jvm_plugins
    if jvm_plugins:
        materialized_jvm_plugins = await Get(
            MaterializedJvmPlugins, MaterializeJvmPluginsRequest(jvm_plugins))
        merged_jvm_plugins_digest = materialized_jvm_plugins.digest
        maybe_jvm_plugins_setup_args = materialized_jvm_plugins.setup_args(
            plugins_relpath)
        maybe_jvm_plugins_output_args = tuple(
            f"--{plugin.name}_out={output_dir}"
            for plugin in materialized_jvm_plugins.plugins)

    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
        shimcp_relpath: shim_classfiles.digest,
        plugins_relpath: merged_jvm_plugins_digest,
        protoc_relpath: downloaded_protoc_binary.digest,
    }

    input_digest = await Get(
        Digest,
        MergeDigests([all_sources_stripped.snapshot.digest, empty_output_dir]))

    result = await Get(
        ProcessResult,
        JvmProcess(
            jdk=jdk,
            classpath_entries=[
                *tool_classpath.classpath_entries(toolcp_relpath),
                shimcp_relpath
            ],
            argv=[
                "org.pantsbuild.backend.scala.scalapb.ScalaPBShim",
                f"--protoc={os.path.join(protoc_relpath, downloaded_protoc_binary.exe)}",
                *maybe_jvm_plugins_setup_args,
                f"--scala_out={output_dir}",
                *maybe_jvm_plugins_output_args,
                *target_sources_stripped.snapshot.files,
            ],
            input_digest=input_digest,
            extra_immutable_input_digests=extra_immutable_input_digests,
            extra_nailgun_keys=extra_immutable_input_digests,
            description=f"Generating Scala sources from {request.protocol_target.address}.",
            level=LogLevel.DEBUG,
            output_directories=(output_dir, ),
            extra_env=inherit_env,
        ),
    )

    normalized_digest, source_root = await MultiGet(
        Get(Digest, RemovePrefix(result.output_digest, output_dir)),
        Get(SourceRoot, SourceRootRequest,
            SourceRootRequest.for_target(request.protocol_target)),
    )

    source_root_restored = (
        await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
        if source_root.path != "."
        else await Get(Snapshot, Digest, normalized_digest)
    )
    return GeneratedSources(source_root_restored)
Example #25
async def package_into_image(
    field_set: DockerPackageFieldSet,
    union_membership: UnionMembership,
) -> BuiltPackage:
    """Build a docker image from a 'docker' build target.

    Creates a build context & dockerfile from the build target & its
    dependencies. Then builds & tags that image. (see the module
    docstring for more information)
    """
    target_name = field_set.address.target_name
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest([field_set.address])
    )
    component_list = []
    logger.debug("Building Target %s", target_name)
    for field_set_type in union_membership[DockerComponentFieldSet]:
        for target in transitive_targets.dependencies:
            if field_set_type.is_applicable(target):
                logger.debug(
                    "Dependent Target %s applies to as component %s",
                    target.address,
                    field_set_type.__name__,
                )
                component_list.append(field_set_type.create(target))

    components = await MultiGet(
        Get(DockerComponent, DockerComponentFieldSet, fs) for fs in component_list
    )

    source_digests = []
    run_commands = []
    components = sorted(components, key=lambda c: c.order)
    for component in components:
        if component.sources:
            source_digests.append(component.sources)
        run_commands.extend(component.commands)
    source_digest = await Get(Digest, MergeDigests(source_digests))
    application_snapshot = await Get(Snapshot, AddPrefix(source_digest, "application"))

    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Files to be copied into the docker container")
        for file in application_snapshot.files:
            logger.debug("* %s", file)

    dockerfile_contents = _create_dockerfile(
        field_set.base_image.value,
        field_set.workdir.value,
        field_set.image_setup.value,
        run_commands,
        field_set.command.value,
    )
    logger.debug(dockerfile_contents)
    dockerfile = await Get(
        Digest,
        CreateDigest([FileContent("Dockerfile", dockerfile_contents.encode("utf-8"))]),
    )
    # Create the docker build context from all merged files, fetch the docker
    # connection environment variables, and find the location of the docker
    # binary.
    search_path = ["/bin", "/usr/bin", "/usr/local/bin", "$HOME/"]
    docker_context, docker_env, docker_paths = await MultiGet(
        Get(Digest, MergeDigests([dockerfile, application_snapshot.digest])),
        Get(Environment, EnvironmentRequest(utils.DOCKER_ENV_VARS)),
        Get(
            BinaryPaths,
            BinaryPathRequest(
                binary_name="docker",
                search_path=search_path,
            ),
        ),
    )
    if not docker_paths.first_path:
        raise ValueError(f"Unable to locate Docker binary on paths: {search_path}")
    process_path = docker_paths.first_path.path
    # build a list of arguments of the form ["-t",
    # "registry/name:tag"] to pass to the docker executable
    tag_arguments = _build_tag_argument_list(
        target_name, field_set.tags.value or [], field_set.registry.value
    )
    # create the image
    process_args = [process_path, "build"]
    if not logger.isEnabledFor(logging.DEBUG):
        process_args.append("-q")  # only output the hash of the image
    process_args.extend(tag_arguments)
    process_args.append(".")  # use current (sealed) directory as build context
    process_result = await Get(
        ProcessResult,
        Process(
            env=docker_env,
            argv=process_args,
            input_digest=docker_context,
            description=f"Creating Docker Image from {target_name}",
        ),
    )
    logger.info(process_result.stdout.decode())
    o = await Get(Snapshot, Digest, process_result.output_digest)
    return BuiltPackage(
        digest=process_result.output_digest,
        artifacts=([BuiltPackageArtifact(f, ()) for f in o.files]),
    )
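_build_tag_argument_list is not shown in this example; going only by the comment in the code above, a hedged sketch of what such a helper could look like (hypothetical implementation, illustrative only):

def _build_tag_argument_list(name, tags, registry=None):
    """Build ["-t", "registry/name:tag", ...] pairs for `docker build`."""
    refs = (f"{registry}/{name}:{tag}" if registry else f"{name}:{tag}"
            for tag in tags)
    return [arg for ref in refs for arg in ("-t", ref)]


assert _build_tag_argument_list("app", ["1.0"], "registry.example.com") == [
    "-t", "registry.example.com/app:1.0"
]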
Example #26
async def package_python_dist(
    field_set: PythonDistributionFieldSet,
    python_setup: PythonSetup,
    union_membership: UnionMembership,
) -> BuiltPackage:
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address]))
    exported_target = ExportedTarget(transitive_targets.roots[0])

    dist_tgt = exported_target.target
    wheel = dist_tgt.get(WheelField).value
    sdist = dist_tgt.get(SDistField).value
    if not wheel and not sdist:
        raise NoDistTypeSelected(
            softwrap(
                f"""
                In order to package {dist_tgt.address.spec} at least one of {WheelField.alias!r} or
                {SDistField.alias!r} must be `True`.
                """
            )
        )

    wheel_config_settings = dist_tgt.get(WheelConfigSettingsField).value or FrozenDict()
    sdist_config_settings = dist_tgt.get(SDistConfigSettingsField).value or FrozenDict()
    backend_env_vars = dist_tgt.get(BuildBackendEnvVarsField).value
    if backend_env_vars:
        extra_build_time_env = await Get(Environment, EnvironmentRequest(sorted(backend_env_vars)))
    else:
        extra_build_time_env = Environment()

    interpreter_constraints = InterpreterConstraints.create_from_targets(
        transitive_targets.closure, python_setup
    ) or InterpreterConstraints(python_setup.interpreter_constraints)
    chroot = await Get(
        DistBuildChroot,
        DistBuildChrootRequest(
            exported_target,
            interpreter_constraints=interpreter_constraints,
        ),
    )

    # Find the source roots for the build-time 1stparty deps (e.g., deps of setup.py).
    source_roots_result = await Get(
        SourceRootsResult,
        SourceRootsRequest(
            files=[], dirs={PurePath(tgt.address.spec_path) for tgt in transitive_targets.closure}
        ),
    )
    source_roots = tuple(sorted({sr.path for sr in source_roots_result.path_to_root.values()}))

    # Get any extra build-time environment (e.g., native extension requirements).
    build_env_requests = []
    build_env_request_types = union_membership.get(DistBuildEnvironmentRequest)
    for build_env_request_type in build_env_request_types:
        if build_env_request_type.is_applicable(dist_tgt):
            build_env_requests.append(
                build_env_request_type(
                    tuple(tt.address for tt in transitive_targets.closure), interpreter_constraints
                )
            )

    build_envs = await MultiGet(
        [
            Get(DistBuildEnvironment, DistBuildEnvironmentRequest, build_env_request)
            for build_env_request in build_env_requests
        ]
    )
    extra_build_time_requirements = tuple(
        itertools.chain.from_iterable(
            build_env.extra_build_time_requirements for build_env in build_envs
        )
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            [chroot.digest, *(build_env.extra_build_time_inputs for build_env in build_envs)]
        ),
    )

    # We prefix the entire chroot, and run with this prefix as the cwd, so that we can capture
    # any changes setup made within it without also capturing other artifacts of the pex
    # process invocation.
    chroot_prefix = "chroot"
    working_directory = os.path.join(chroot_prefix, chroot.working_directory)
    prefixed_input = await Get(Digest, AddPrefix(input_digest, chroot_prefix))
    build_system = await Get(BuildSystem, BuildSystemRequest(prefixed_input, working_directory))

    setup_py_result = await Get(
        DistBuildResult,
        DistBuildRequest(
            build_system=build_system,
            interpreter_constraints=interpreter_constraints,
            build_wheel=wheel,
            build_sdist=sdist,
            input=prefixed_input,
            working_directory=working_directory,
            build_time_source_roots=source_roots,
            target_address_spec=exported_target.target.address.spec,
            wheel_config_settings=wheel_config_settings,
            sdist_config_settings=sdist_config_settings,
            extra_build_time_requirements=extra_build_time_requirements,
            extra_build_time_env=extra_build_time_env,
        ),
    )
    dist_snapshot = await Get(Snapshot, Digest, setup_py_result.output)
    return BuiltPackage(
        setup_py_result.output,
        tuple(BuiltPackageArtifact(path) for path in dist_snapshot.files),
    )
Example #27
async def setup_thrift_tool(
        apache_thrift: ApacheThriftSubsystem) -> ApacheThriftSetup:
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_paths = apache_thrift.thrift_search_paths(env)
    all_thrift_binary_paths = await Get(
        BinaryPaths,
        BinaryPathRequest(
            search_path=search_paths,
            binary_name="thrift",
            test=BinaryPathTest(["-version"]),
        ),
    )
    if not all_thrift_binary_paths.paths:
        raise BinaryNotFoundError(
            softwrap(f"""
                Cannot find any `thrift` binaries using the option
                `[apache-thrift].thrift_search_paths`: {list(search_paths)}

                To fix, please install Apache Thrift (https://thrift.apache.org/) with the version
                {apache_thrift.expected_version} (set by `[apache-thrift].expected_version`) and ensure
                that it is discoverable via `[apache-thrift].thrift_search_paths`.
                """))

    version_results = await MultiGet(
        Get(
            ProcessResult,
            Process(
                (binary_path.path, "-version"),
                description=f"Determine Apache Thrift version for {binary_path.path}",
                level=LogLevel.DEBUG,
                cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
            ),
        ) for binary_path in all_thrift_binary_paths.paths)

    invalid_versions = []
    for binary_path, version_result in zip(all_thrift_binary_paths.paths,
                                           version_results):
        try:
            _raw_version = version_result.stdout.decode("utf-8").split()[2]
            _version_components = _raw_version.split(".")  # e.g. [0, 15, 0]
            version = f"{_version_components[0]}.{_version_components[1]}"
        except IndexError:
            raise AssertionError(
                softwrap(f"""
                    Failed to parse `thrift -version` output for {binary_path}. Please open a bug at
                    https://github.com/pantsbuild/pants/issues/new/choose with the below data:

                    {version_result}
                    """))

        if version == apache_thrift.expected_version:
            return ApacheThriftSetup(binary_path.path)

        logger.debug(
            softwrap(f"""
                The Thrift binary at {binary_path.path} has version {version}, but this
                project is using {apache_thrift.expected_version}
                (set by `[apache-thrift].expected_version`). Ignoring.
                """))
        invalid_versions.append((binary_path.path, version))

    invalid_versions_str = bullet_list(
        f"{path}: {version}" for path, version in sorted(invalid_versions))
    raise BinaryNotFoundError(
        softwrap(f"""
            Cannot find a `thrift` binary with the expected version of
            {apache_thrift.expected_version} (set by `[apache-thrift].expected_version`).

            Found these `thrift` binaries, but they had different versions:

            {invalid_versions_str}

            To fix, please install the expected version (https://thrift.apache.org/) and ensure
            that it is discoverable via the option `[apache-thrift].thrift_search_paths`, or change
            `[apache-thrift].expected_version`.
            """))