Example #1
async def coursier_resolve_lockfile(
    bash: BashBinary,
    coursier: Coursier,
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolvedLockfile:
    """Run `coursier fetch ...` against a list of Maven coordinates and capture the result.

    This rule does two things in a single Process invocation:

        * Runs `coursier fetch` to let Coursier do the heavy lifting of resolving
          dependencies and downloading resolved artifacts (jars, etc).
        * Copies the resolved artifacts into the Process output directory, capturing
          the artifacts as content-addressed `Digest`s.

    It's important that both steps happen in the same process, since the process isn't
    guaranteed to run on the same machine as the rule, nor is a subsequent process
    invocation. Doing both at once guarantees that whatever Coursier resolved was
    fully captured into Pants' content-addressed artifact storage.

    Note, however, that we still get the benefit of Coursier's "global" cache if it
    has already been warmed on the machine where `coursier fetch` runs, so rerunning
    `coursier fetch` tends to be fast in practice.

    Finally, this rule bundles up the result into a `CoursierResolvedLockfile`.  This
    data structure encapsulates everything necessary to either materialize the
    resolved dependencies to a classpath for Java invocations, or to write the
    lockfile out to the workspace to hermetically freeze the result of the resolve.
    """

    if len(artifact_requirements) == 0:
        return CoursierResolvedLockfile(entries=())

    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        Process(
            argv=coursier.args(
                [
                    coursier_report_file_name,
                    *(req.to_coord_str() for req in artifact_requirements),
                    # TODO(#13496): Disable --strict-include to work around Coursier issue
                    # https://github.com/coursier/coursier/issues/1364 which erroneously rejects underscores in
                    # artifact rules as malformed.
                    # *(
                    #     f"--strict-include={req.to_coord_str(versioned=False)}"
                    #     for req in artifact_requirements
                    #     if req.strict
                    # ),
                ],
                wrapper=[bash.path, coursier.wrapper_script],
            ),
            input_digest=coursier.digest,
            output_directories=("classpath",),
            output_files=(coursier_report_file_name,),
            append_only_caches=coursier.append_only_caches,
            env=coursier.env,
            description=(
                "Running `coursier fetch` against "
                f"{pluralize(len(artifact_requirements), 'requirement')}: "
                f"{', '.join(req.to_coord_str() for req in artifact_requirements)}"
            ),
            level=LogLevel.DEBUG,
        ),
    )
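    # Coursier writes a JSON report describing the resolved graph; pull it out of the
    # process output and parse it to learn each artifact's coordinates and file name.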
    report_digest = await Get(
        Digest,
        DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name])),
    )
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)

    artifact_file_names = tuple(PurePath(dep["file"]).name for dep in report["dependencies"])
    artifact_output_paths = tuple(f"classpath/{file_name}" for file_name in artifact_file_names)
    artifact_digests = await MultiGet(
        Get(Digest, DigestSubset(process_result.output_digest, PathGlobs([output_path])))
        for output_path in artifact_output_paths
    )
    stripped_artifact_digests = await MultiGet(
        Get(Digest, RemovePrefix(artifact_digest, "classpath"))
        for artifact_digest in artifact_digests
    )
    artifact_file_digests = await MultiGet(
        Get(FileDigest, ExtractFileDigest(stripped_artifact_digest, file_name))
        for stripped_artifact_digest, file_name in zip(
            stripped_artifact_digests, artifact_file_names
        )
    )
    return CoursierResolvedLockfile(
        entries=tuple(
            CoursierLockfileEntry(
                coord=Coordinate.from_coord_str(dep["coord"]),
                direct_dependencies=Coordinates(
                    Coordinate.from_coord_str(dd) for dd in dep["directDependencies"]
                ),
                dependencies=Coordinates(
                    Coordinate.from_coord_str(d) for d in dep["dependencies"]
                ),
                file_name=file_name,
                file_digest=artifact_file_digest,
            )
            for dep, file_name, artifact_file_digest in zip(
                report["dependencies"], artifact_file_names, artifact_file_digests
            )
        )
    )
Example #2
async def setup_goroot(golang_subsystem: GolangSubsystem) -> GoRoot:
    env = await Get(Environment, EnvironmentRequest(["PATH"]))
    search_paths = golang_subsystem.go_search_paths(env)
    all_go_binary_paths = await Get(
        BinaryPaths,
        BinaryPathRequest(
            search_path=search_paths,
            binary_name="go",
            test=BinaryPathTest(["version"]),
        ),
    )
    if not all_go_binary_paths.paths:
        raise BinaryNotFoundError(
            "Cannot find any `go` binaries using the option "
            f"`[golang].go_search_paths`: {list(search_paths)}\n\n"
            "To fix, please install Go (https://golang.org/doc/install) with the version "
            f"{golang_subsystem.expected_version} (set by `[golang].expected_version`) and ensure "
            "that it is discoverable via `[golang].go_search_paths`.")

    # `go env GOVERSION` does not work in earlier Go versions (like 1.15), so we must run
    # `go version` and `go env GOROOT` to calculate both the version and GOROOT.
    version_results = await MultiGet(
        Get(
            ProcessResult,
            Process(
                (binary_path.path, "version"),
                description=f"Determine Go version for {binary_path.path}",
                level=LogLevel.DEBUG,
                cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
            ),
        )
        for binary_path in all_go_binary_paths.paths
    )

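    # Check each candidate binary in search-path order: return a GoRoot for the first
    # whose version matches `[golang].expected_version`, recording mismatches otherwise.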
    invalid_versions = []
    for binary_path, version_result in zip(all_go_binary_paths.paths, version_results):
        try:
            # e.g. `go1.17` or `go1.17.1`.
            _raw_version = version_result.stdout.decode("utf-8").split()[2]
            # e.g. `[1, 17]` or `[1, 17, 1]`.
            _version_components = _raw_version[2:].split(".")
            version = f"{_version_components[0]}.{_version_components[1]}"
        except IndexError:
            raise AssertionError(
                f"Failed to parse `go version` output for {binary_path}. Please open a bug at "
                "https://github.com/pantsbuild/pants/issues/new/choose with the data below."
                f"\n\n{version_result}"
            )

        if version == golang_subsystem.expected_version:
            env_result = await Get(
                ProcessResult,
                Process(
                    (binary_path.path, "env", "GOROOT"),
                    description=f"Determine Go GOROOT for {binary_path.path}",
                    level=LogLevel.DEBUG,
                    cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
                    env={"GOPATH": "/does/not/matter"},
                ),
            )
            goroot = env_result.stdout.decode("utf-8").strip()
            return GoRoot(goroot)

        logger.debug(
            f"Go binary at {binary_path.path} has version {version}, but this "
            f"project is using {golang_subsystem.expected_version} "
            "(set by `[golang].expected_version`). Ignoring.")
        invalid_versions.append((binary_path.path, version))

    invalid_versions_str = bullet_list(
        f"{path}: {version}" for path, version in sorted(invalid_versions)
    )
    raise BinaryNotFoundError(
        "Cannot find a `go` binary with the expected version of "
        f"{golang_subsystem.expected_version} (set by `[golang].expected_version`).\n\n"
        f"Found these `go` binaries, but they had different versions:\n\n"
        f"{invalid_versions_str}\n\n"
        "To fix, please install the expected version (https://golang.org/doc/install) and ensure "
        "that it is discoverable via the option `[golang].go_search_paths`, or change "
        "`[golang].expected_version`.")
Example #3
async def flake8_lint_partition(
    partition: Flake8Partition, flake8: Flake8, lint_subsystem: LintSubsystem
) -> LintResult:
    requirements_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="flake8.pex",
            internal_only=True,
            requirements=PexRequirements(flake8.all_requirements),
            interpreter_constraints=(
                partition.interpreter_constraints
                or PexInterpreterConstraints(flake8.interpreter_constraints)
            ),
            entry_point=flake8.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[flake8.config] if flake8.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--flake8-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets)
    )

    requirements_pex, config_digest, source_files = await MultiGet(
        requirements_pex_request, config_digest_request, source_files_request
    )

    input_digest = await Get(
        Digest,
        MergeDigests((source_files.snapshot.digest, requirements_pex.digest, config_digest)),
    )

    report_file_name = "flake8_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            requirements_pex,
            argv=generate_args(
                source_files=source_files, flake8=flake8, report_file_name=report_file_name,
            ),
            input_digest=input_digest,
            output_files=(report_file_name,) if report_file_name else None,
            description=f"Run Flake8 on {pluralize(len(partition.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )

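    # If a report was requested, extract it from the process output. Flake8 may not
    # have written one (e.g., if it crashed), hence the warn-level glob matching below.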
    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Flake8 report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result, partition_description=str(sorted(partition.interpreter_constraints)), report=report
    )
Example #4
async def run_shellcheck(request: ShellcheckRequest, shellcheck: Shellcheck) -> LintResults:
    if shellcheck.skip:
        return LintResults([], linter_name="Shellcheck")

    # Shellcheck looks at direct dependencies to make sure that every symbol is defined, so we must
    # include those in the run.
    all_dependencies = await MultiGet(
        Get(Targets, DependenciesRequest(field_set.dependencies))
        for field_set in request.field_sets
    )
    direct_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(
            (field_set.sources for field_set in request.field_sets),
            for_sources_types=(ShellSources,),
            enable_codegen=True,
        ),
    )
    dependency_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for dependencies in all_dependencies for tgt in dependencies),
            for_sources_types=(ShellSources,),
            enable_codegen=True,
        ),
    )

    download_shellcheck_get = Get(
        DownloadedExternalTool, ExternalToolRequest, shellcheck.get_request(Platform.current)
    )

    direct_sources, dependency_sources, downloaded_shellcheck = await MultiGet(
        direct_sources_get, dependency_sources_get, download_shellcheck_get
    )

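    # Discover any Shellcheck config files (e.g. `.shellcheckrc`) that apply to the
    # directories containing the sources, so they ride along into the sandbox.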
    config_files = await Get(
        ConfigFiles, ConfigFilesRequest, shellcheck.config_request(direct_sources.snapshot.dirs)
    )

    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                direct_sources.snapshot.digest,
                dependency_sources.snapshot.digest,
                downloaded_shellcheck.digest,
                config_files.snapshot.digest,
            )
        ),
    )

    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[downloaded_shellcheck.exe, *shellcheck.args, *direct_sources.snapshot.files],
            input_digest=input_digest,
            description=f"Run Shellcheck on {pluralize(len(request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    result = LintResult.from_fallible_process_result(process_result)
    return LintResults([result], linter_name="Shellcheck")
Example #5
async def mypy_typecheck_partition(
    partition: MyPyPartition,
    config_file: MyPyConfigFile,
    first_party_plugins: MyPyFirstPartyPlugins,
    mypy: MyPy,
    python_setup: PythonSetup,
) -> CheckResult:
    # MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
    # and 3.7. However, typed-ast does not understand 3.8+, so instead we must run MyPy with
    # Python 3.8+ when relevant. We only do this if <3.8 can't be used, as we don't want a
    # loose requirement like `>=3.6` to result in requiring Python 3.8+, which would error if
    # 3.8+ is not installed on the machine.
    tool_interpreter_constraints = (
        partition.interpreter_constraints
        if (
            mypy.options.is_default("interpreter_constraints")
            and partition.interpreter_constraints.requires_python38_or_newer(
                python_setup.interpreter_universe
            )
        )
        else mypy.interpreter_constraints
    )

    closure_sources_get = Get(PythonSourceFiles, PythonSourceFilesRequest(partition.closure))
    roots_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(tgt.get(PythonSourceField) for tgt in partition.root_targets),
    )

    # See `requirements_venv_pex` for how this will get wrapped in a `VenvPex`.
    requirements_pex_get = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            (tgt.address for tgt in partition.root_targets),
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
            internal_only=True,
        ),
    )
    extra_type_stubs_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="extra_type_stubs.pex",
            internal_only=True,
            requirements=PexRequirements(mypy.extra_type_stubs),
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    mypy_pex_get = Get(
        VenvPex,
        PexRequest(
            output_filename="mypy.pex",
            internal_only=True,
            main=mypy.main,
            requirements=mypy.pex_requirements(
                extra_requirements=first_party_plugins.requirement_strings,
            ),
            interpreter_constraints=tool_interpreter_constraints,
        ),
    )

    (
        closure_sources,
        roots_sources,
        mypy_pex,
        extra_type_stubs_pex,
        requirements_pex,
    ) = await MultiGet(
        closure_sources_get,
        roots_sources_get,
        mypy_pex_get,
        extra_type_stubs_pex_get,
        requirements_pex_get,
    )

    python_files = determine_python_files(roots_sources.snapshot.files)
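    # Pass the files to MyPy via an args file rather than on the command line, which
    # keeps the argv short no matter how many files are in the partition.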
    file_list_path = "__files.txt"
    file_list_digest_request = Get(
        Digest,
        CreateDigest([FileContent(file_list_path, "\n".join(python_files).encode())]),
    )

    # This creates a venv with all the 3rd-party requirements used by the code. We tell MyPy to
    # use this venv by setting `--python-executable`. Note that this Python interpreter is
    # different than what we run MyPy with.
    #
    # We could have directly asked the `PexFromTargetsRequest` to return a `VenvPex`, rather than
    # `Pex`, but that would mean missing out on sharing a cache with other goals like `test` and
    # `run`.
    requirements_venv_pex_request = Get(
        VenvPex,
        PexRequest(
            output_filename="requirements_venv.pex",
            internal_only=True,
            pex_path=[requirements_pex, extra_type_stubs_pex],
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    requirements_venv_pex, file_list_digest = await MultiGet(
        requirements_venv_pex_request, file_list_digest_request
    )

    merged_input_files = await Get(
        Digest,
        MergeDigests(
            [
                file_list_digest,
                first_party_plugins.sources_digest,
                closure_sources.source_files.snapshot.digest,
                requirements_venv_pex.digest,
                config_file.digest,
            ]
        ),
    )

    all_used_source_roots = sorted(
        set(itertools.chain(first_party_plugins.source_roots, closure_sources.source_roots))
    )
    env = {
        "PEX_EXTRA_SYS_PATH": ":".join(all_used_source_roots),
        "MYPYPATH": ":".join(all_used_source_roots),
    }

    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            mypy_pex,
            argv=generate_argv(
                mypy,
                venv_python=requirements_venv_pex.python.argv0,
                file_list_path=file_list_path,
                python_version=config_file.python_version_to_autoset(
                    partition.interpreter_constraints, python_setup.interpreter_universe
                ),
            ),
            input_digest=merged_input_files,
            extra_env=env,
            output_directories=(REPORT_DIR,),
            description=f"Run MyPy on {pluralize(len(python_files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
    return CheckResult.from_fallible_process_result(
        result,
        partition_description=str(sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
Example #6
async def setup_assembly_pre_compilation(
    request: AssemblyPreCompilationRequest,
    goroot: GoRoot,
) -> FallibleAssemblyPreCompilation:
    # From Go tooling comments:
    #
    #   Supply an empty go_asm.h as if the compiler had been run. -symabis parsing is lax enough
    #   that we don't need the actual definitions that would appear in go_asm.h.
    #
    # See https://go-review.googlesource.com/c/go/+/146999/8/src/cmd/go/internal/work/gc.go
    go_asm_h_digest = await Get(Digest, CreateDigest([FileContent("go_asm.h", b"")]))
    symabis_input_digest = await Get(
        Digest, MergeDigests([request.compilation_input, go_asm_h_digest])
    )
    symabis_result = await Get(
        FallibleProcessResult,
        GoSdkProcess(
            input_digest=symabis_input_digest,
            command=(
                "tool",
                "asm",
                "-I",
                os.path.join(goroot.path, "pkg", "include"),
                "-gensymabis",
                "-o",
                "symabis",
                "--",
                *(f"./{request.source_files_subpath}/{name}"
                  for name in request.s_files),
            ),
            description=
            (f"Generate symabis metadata for assembly files for {request.source_files_subpath}"
             ),
            output_files=("symabis", ),
        ),
    )
    if symabis_result.exit_code != 0:
        return FallibleAssemblyPreCompilation(
            None, symabis_result.exit_code, symabis_result.stderr.decode("utf-8")
        )

    merged = await Get(
        Digest,
        MergeDigests([request.compilation_input, symabis_result.output_digest]),
    )

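    # Assemble each .s file in parallel; each invocation emits a single .o object file.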
    assembly_results = await MultiGet(
        Get(
            FallibleProcessResult,
            GoSdkProcess(
                input_digest=request.compilation_input,
                command=(
                    "tool",
                    "asm",
                    "-I",
                    os.path.join(goroot.path, "pkg", "include"),
                    "-o",
                    f"./{request.source_files_subpath}/{PurePath(s_file).with_suffix('.o')}",
                    f"./{request.source_files_subpath}/{s_file}",
                ),
                description=f"Assemble {s_file} with Go",
                output_files=(
                    f"./{request.source_files_subpath}/{PurePath(s_file).with_suffix('.o')}",
                ),
            ),
        )
        for s_file in request.s_files
    )
    exit_code = max(result.exit_code for result in assembly_results)
    if exit_code != 0:
        stdout = "\n\n".join(
            result.stdout.decode("utf-8") for result in assembly_results
            if result.stdout)
        stderr = "\n\n".join(
            result.stderr.decode("utf-8") for result in assembly_results
            if result.stderr)
        return FallibleAssemblyPreCompilation(None, exit_code, stdout, stderr)

    return FallibleAssemblyPreCompilation(
        AssemblyPreCompilation(
            merged, tuple(result.output_digest for result in assembly_results)
        )
    )
Example #7
async def resolve_dependencies(
    request: DependenciesRequest,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Addresses:
    wrapped_tgt, explicitly_provided = await MultiGet(
        Get(WrappedTarget, Address, request.field.address),
        Get(ExplicitlyProvidedDependencies, DependenciesRequest, request),
    )
    tgt = wrapped_tgt.target

    # Inject any dependencies (based on `Dependencies` field rather than `Sources` field).
    inject_request_types = union_membership.get(InjectDependenciesRequest)
    injected = await MultiGet(
        Get(InjectedDependencies, InjectDependenciesRequest, inject_request_type(request.field))
        for inject_request_type in inject_request_types
        if isinstance(request.field, inject_request_type.inject_for)
    )

    # Infer any dependencies (based on `Sources` field).
    inference_request_types = union_membership.get(InferDependenciesRequest)
    inferred: Tuple[InferredDependencies, ...] = ()
    if inference_request_types:
        sources_field = tgt.get(Sources)
        relevant_inference_request_types = [
            inference_request_type
            for inference_request_type in inference_request_types
            if isinstance(sources_field, inference_request_type.infer_from)
        ]
        inferred = await MultiGet(
            Get(
                InferredDependencies,
                InferDependenciesRequest,
                inference_request_type(sources_field),
            )
            for inference_request_type in relevant_inference_request_types
        )

    # If it's a target generator, inject dependencies on all of its generated targets.
    generated_addresses: tuple[Address, ...] = ()
    if target_types_to_generate_requests.is_generator(tgt) and not tgt.address.is_generated_target:
        generate_request = target_types_to_generate_requests[type(tgt)]
        generated_targets = await Get(
            GeneratedTargets, GenerateTargetsRequest, generate_request(tgt)
        )
        generated_addresses = tuple(generated_targets.keys())

    # If the target has `SpecialCasedDependencies`, such as the `archive` target having
    # `files` and `packages` fields, then we possibly include those too. We don't want to always
    # include those dependencies because they should often be excluded from the result due to
    # being handled elsewhere in the calling code.
    special_cased: Tuple[Address, ...] = ()
    if request.include_special_cased_deps:
        # Unlike normal, we don't use `tgt.get()` because there may be >1 subclass of
        # SpecialCasedDependencies.
        special_cased_fields = tuple(
            field
            for field in tgt.field_values.values()
            if isinstance(field, SpecialCasedDependencies)
        )
        # We can't use the normal `Get(Addresses, UnparsedAddressInputs)` due to a graph cycle.
        special_cased = await MultiGet(
            Get(
                Address,
                AddressInput,
                AddressInput.parse(
                    addr,
                    relative_to=tgt.address.spec_path,
                    subproject_roots=global_options.options.subproject_roots,
                ),
            )
            for special_cased_field in special_cased_fields
            for addr in special_cased_field.to_unparsed_address_inputs().values
        )

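    # Union every dependency source, then drop anything the BUILD file explicitly
    # ignored with a `!` prefix.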
    result = {
        addr
        for addr in (
            *generated_addresses,
            *explicitly_provided.includes,
            *itertools.chain.from_iterable(injected),
            *itertools.chain.from_iterable(inferred),
            *special_cased,
        )
        if addr not in explicitly_provided.ignores
    }
    return Addresses(sorted(result))
Example #8
async def compile_java_source(
    bash: BashBinary,
    coursier: Coursier,
    jdk_setup: JdkSetup,
    request: CompileJavaSourceRequest,
) -> FallibleCompiledClassfiles:
    component_members_with_sources = tuple(
        t for t in request.component.members if t.has_field(Sources)
    )
    component_members_and_source_files = zip(
        component_members_with_sources,
        await MultiGet(
            Get(
                SourceFiles,
                SourceFilesRequest(
                    (t.get(Sources),),
                    for_sources_types=(JavaSourceField,),
                    enable_codegen=True,
                ),
            )
            for t in component_members_with_sources
        ),
    )

    component_members_and_java_source_files = [
        (target, sources)
        for target, sources in component_members_and_source_files
        if sources.snapshot.digest != EMPTY_DIGEST
    ]

    if not component_members_and_java_source_files:
        return FallibleCompiledClassfiles(
            description=str(request.component),
            result=CompileResult.SUCCEEDED,
            output=CompiledClassfiles(digest=EMPTY_DIGEST),
            exit_code=0,
        )

    # Target coarsening currently doesn't perform dep expansion, which matters for targets
    # with multiple sources that expand to individual source subtargets.
    # We expand the dependencies explicitly here before coarsening, but ideally this could
    # be done somehow during coarsening.
    # TODO: Should component dependencies be filtered out here if they were only brought in by
    #   component members that were filtered out above (due to having no JavaSources to
    #   contribute)? If so, that will likely require extending the CoarsenedTargets API to
    #   include more complete dependency information, or to support such filtering directly.
    expanded_direct_deps = await Get(Targets, Addresses(request.component.dependencies))
    coarsened_direct_deps = await Get(
        CoarsenedTargets, Addresses(t.address for t in expanded_direct_deps)
    )

    lockfile = await Get(
        CoursierResolvedLockfile,
        CoursierLockfileForTargetRequest(Targets(request.component.members)),
    )
    direct_dependency_classfiles_fallible = await MultiGet(
        Get(FallibleCompiledClassfiles, CompileJavaSourceRequest(component=coarsened_dep))
        for coarsened_dep in coarsened_direct_deps
    )
    direct_dependency_classfiles = [
        fcc.output for fcc in direct_dependency_classfiles_fallible if fcc.output
    ]
    if len(direct_dependency_classfiles) != len(direct_dependency_classfiles_fallible):
        return FallibleCompiledClassfiles(
            description=str(request.component),
            result=CompileResult.DEPENDENCY_FAILED,
            output=None,
            exit_code=1,
        )

    dest_dir = "classfiles"
    (
        materialized_classpath,
        merged_direct_dependency_classfiles_digest,
        dest_dir_digest,
    ) = await MultiGet(
        Get(
            MaterializedClasspath,
            MaterializedClasspathRequest(
                prefix="__thirdpartycp",
                lockfiles=(lockfile,),
            ),
        ),
        Get(
            Digest,
            MergeDigests(classfiles.digest for classfiles in direct_dependency_classfiles),
        ),
        Get(
            Digest,
            CreateDigest([Directory(dest_dir)]),
        ),
    )

    usercp_relpath = "__usercp"
    prefixed_direct_dependency_classfiles_digest = await Get(
        Digest, AddPrefix(merged_direct_dependency_classfiles_digest, usercp_relpath)
    )

    classpath_arg = usercp_relpath
    third_party_classpath_arg = materialized_classpath.classpath_arg()
    if third_party_classpath_arg:
        classpath_arg = ":".join([classpath_arg, third_party_classpath_arg])

    merged_digest = await Get(
        Digest,
        MergeDigests(
            (
                prefixed_direct_dependency_classfiles_digest,
                materialized_classpath.digest,
                dest_dir_digest,
                jdk_setup.digest,
                *(
                    sources.snapshot.digest
                    for _, sources in component_members_and_java_source_files
                ),
            )
        ),
    )

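    # Run javac straight out of the JDK's tools.jar (under Nailgun, to reuse a warm JVM),
    # compiling into `classfiles/` so the output directory can be captured as a digest.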
    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                *jdk_setup.args(bash, [f"{jdk_setup.java_home}/lib/tools.jar"]),
                "com.sun.tools.javac.Main",
                "-cp",
                classpath_arg,
                "-d",
                dest_dir,
                *sorted(
                    chain.from_iterable(
                        sources.snapshot.files
                        for _, sources in component_members_and_java_source_files
                    )
                ),
            ],
            input_digest=merged_digest,
            use_nailgun=jdk_setup.digest,
            output_directories=(dest_dir,),
            description=f"Compile {request.component.members} with javac",
            level=LogLevel.DEBUG,
        ),
    )
    output: CompiledClassfiles | None = None
    if process_result.exit_code == 0:
        stripped_classfiles_digest = await Get(
            Digest, RemovePrefix(process_result.output_digest, dest_dir)
        )
        output = CompiledClassfiles(stripped_classfiles_digest)

    return FallibleCompiledClassfiles.from_fallible_process_result(
        str(request.component),
        process_result,
        output,
    )
Example #9
async def assemble_resources_jar(
    zip: ZipBinary,
    request: JvmResourcesRequest,
) -> FallibleClasspathEntry:
    # Request the component's direct dependency classpath, and additionally any prerequisite.
    # Filter out any dependencies that are generated by our current target so that each resource
    # only appears in a single input JAR.
    # NOTE: Generated dependencies will have the same dependencies as the current target, so we
    # don't need to inspect those dependencies.
    optional_prereq_request = [*((request.prerequisite,) if request.prerequisite else ())]
    fallibles = await MultiGet(
        Get(FallibleClasspathEntries, ClasspathEntryRequests(optional_prereq_request)),
        Get(FallibleClasspathEntries, ClasspathDependenciesRequest(request, ignore_generated=True)),
    )
    direct_dependency_classpath_entries = FallibleClasspathEntries(
        itertools.chain(*fallibles)
    ).if_all_succeeded()

    if direct_dependency_classpath_entries is None:
        return FallibleClasspathEntry(
            description=str(request.component),
            result=CompileResult.DEPENDENCY_FAILED,
            output=None,
            exit_code=1,
        )

    source_files = await Get(
        StrippedSourceFiles,
        SourceFilesRequest([tgt.get(SourcesField) for tgt in request.component.members]),
    )

    output_filename = f"{request.component.representative.address.path_safe_spec}.resources.jar"
    output_files = [output_filename]

    resources_jar_input_digest = source_files.snapshot.digest
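    # A JAR is just a zip archive, so `zip` the resource files directly into the output JAR.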
    resources_jar_result = await Get(
        ProcessResult,
        Process(
            argv=[
                zip.path,
                output_filename,
                *source_files.snapshot.files,
            ],
            description="Build resources JAR for {request.component}",
            input_digest=resources_jar_input_digest,
            output_files=output_files,
            level=LogLevel.DEBUG,
        ),
    )

    cpe = ClasspathEntry(resources_jar_result.output_digest, output_files, [])

    merged_cpe_digest = await Get(
        Digest,
        MergeDigests(chain((cpe.digest,), (i.digest for i in direct_dependency_classpath_entries))),
    )

    merged_cpe = ClasspathEntry.merge(
        digest=merged_cpe_digest, entries=[cpe, *direct_dependency_classpath_entries]
    )

    return FallibleClasspathEntry(output_filename, CompileResult.SUCCEEDED, merged_cpe, 0)
Example #10
async def lint(
    console: Console,
    workspace: Workspace,
    targets: Targets,
    lint_subsystem: LintSubsystem,
    union_membership: UnionMembership,
) -> Lint:
    request_types = union_membership[LintRequest]
    requests: Iterable[StyleRequest] = tuple(
        request_type(
            request_type.field_set_type.create(target)
            for target in targets
            if request_type.field_set_type.is_valid(target)
        )
        for request_type in request_types
    )
    field_sets_with_sources: Iterable[FieldSetsWithSources] = await MultiGet(
        Get(FieldSetsWithSources, FieldSetsWithSourcesRequest(request.field_sets))
        for request in requests
    )
    valid_requests: Iterable[StyleRequest] = tuple(
        request_cls(request)
        for request_cls, request in zip(request_types, field_sets_with_sources)
        if request
    )

    if lint_subsystem.per_file_caching:
        all_per_file_results = await MultiGet(
            Get(LintResults, LintRequest, request.__class__([field_set]))
            for request in valid_requests
            for field_set in request.field_sets
        )
        # We consolidate all results for each linter into a single `LintResults`.
        all_results = tuple(
            LintResults(
                itertools.chain.from_iterable(
                    per_file_results.results for per_file_results in all_linter_results
                ),
                linter_name=linter_name,
            )
            for linter_name, all_linter_results in itertools.groupby(
                all_per_file_results, key=lambda results: results.linter_name
            )
        )
    else:
        all_results = await MultiGet(
            Get(LintResults, LintRequest, lint_request) for lint_request in valid_requests
        )

    all_results = tuple(sorted(all_results, key=lambda results: results.linter_name))

    reports = list(itertools.chain.from_iterable(results.reports for results in all_results))
    if reports:
        # TODO(#10532): Tolerate when a linter has multiple reports.
        linters_with_multiple_reports = [
            results.linter_name for results in all_results if len(results.reports) > 1
        ]
        if linters_with_multiple_reports:
            if lint_subsystem.per_file_caching:
                suggestion = "Try running without `--lint-per-file-caching` set."
            else:
                suggestion = (
                    "The linters likely partitioned the input targets, such as grouping by Python "
                    "interpreter compatibility. Try running on fewer targets or unset "
                    "`--lint-reports-dir`.")
            raise InvalidLinterReportsError(
                "Multiple reports would have been written for these linters: "
                f"{linters_with_multiple_reports}. The option `--lint-reports-dir` only works if "
                f"each linter has a single result. {suggestion}")
        merged_reports = await Get(Digest, MergeDigests(report.digest for report in reports))
        workspace.write_digest(merged_reports)
        logger.info(f"Wrote lint result files to {lint_subsystem.reports_dir}.")

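    # Print a per-linter status line to stderr; any failing linter's exit code becomes
    # the goal's exit code.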
    exit_code = 0
    if all_results:
        console.print_stderr("")
    for results in all_results:
        if results.skipped:
            sigil = console.yellow("-")
            status = "skipped"
        elif results.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = results.exit_code
        console.print_stderr(f"{sigil} {results.linter_name} {status}.")

    return Lint(exit_code)
Example #11
async def resolve_dependencies(
    request: DependenciesRequest,
    union_membership: UnionMembership,
    global_options: GlobalOptions,
) -> Addresses:
    provided = parse_dependencies_field(
        request.field.sanitized_raw_value or (),
        spec_path=request.field.address.spec_path,
        subproject_roots=global_options.options.subproject_roots,
    )

    # If this is a base target, inject dependencies on its subtargets.
    subtarget_addresses: Tuple[Address, ...] = ()
    if request.field.address.is_base_target:
        subtargets = await Get(Subtargets, Address, request.field.address)
        subtarget_addresses = tuple(t.address for t in subtargets.subtargets)

    # Inject any dependencies. This is determined by the `request.field` class. For example, if
    # there is a rule to inject for FortranDependencies, then FortranDependencies and any subclass
    # of FortranDependencies will use that rule.
    inject_request_types = union_membership.get(InjectDependenciesRequest)
    injected = await MultiGet(
        Get(InjectedDependencies, InjectDependenciesRequest, inject_request_type(request.field))
        for inject_request_type in inject_request_types
        if isinstance(request.field, inject_request_type.inject_for)
    )

    inference_request_types = union_membership.get(InferDependenciesRequest)
    inferred: Tuple[InferredDependencies, ...] = ()
    if inference_request_types:
        # Dependency inference is solely determined by the `Sources` field for a Target, so we
        # re-resolve the original target to inspect its `Sources` field, if any.
        wrapped_tgt = await Get(WrappedTarget, Address, request.field.address)
        sources_field = wrapped_tgt.target.get(Sources)
        relevant_inference_request_types = [
            inference_request_type
            for inference_request_type in inference_request_types
            if isinstance(sources_field, inference_request_type.infer_from)
        ]
        inferred = await MultiGet(
            Get(
                InferredDependencies,
                InferDependenciesRequest,
                inference_request_type(sources_field),
            )
            for inference_request_type in relevant_inference_request_types
        )

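    # Resolve the addresses listed literally in the `dependencies` field, tracking the
    # `!`-ignored entries separately so we can detect ignores that had no effect.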
    literal_addresses = await MultiGet(
        Get(Address, AddressInput, ai) for ai in provided.addresses
    )
    literal_ignored_addresses = set(
        await MultiGet(Get(Address, AddressInput, ai) for ai in provided.ignored_addresses)
    )

    addresses: Set[Address] = set()
    used_ignored_addresses: Set[Address] = set()
    for addr in [
        *subtarget_addresses,
        *literal_addresses,
        *itertools.chain.from_iterable(injected),
        *itertools.chain.from_iterable(inferred),
    ]:
        if addr in literal_ignored_addresses:
            used_ignored_addresses.add(addr)
        else:
            addresses.add(addr)
    result = sorted(addresses)

    unused_ignores = literal_ignored_addresses - used_ignored_addresses
    # If there are unused ignores and this is not a generated subtarget, we eagerly error so that
    # the user isn't falsely led to believe the ignore is working. We do not do this for generated
    # subtargets because we cannot guarantee that the ignore specified in the original owning
    # target would be used for all generated subtargets.
    if unused_ignores and request.field.address.is_base_target:
        raise UnusedDependencyIgnoresException(
            request.field.address, unused_ignores=unused_ignores, result=result
        )

    return Addresses(result)
Example #12
async def py_constraints(
    addresses: Addresses,
    console: Console,
    py_constraints_subsystem: PyConstraintsSubsystem,
    python_setup: PythonSetup,
    registered_target_types: RegisteredTargetTypes,
    union_membership: UnionMembership,
) -> PyConstraintsGoal:
    if py_constraints_subsystem.summary:
        if addresses:
            console.print_stderr(
                "The `py-constraints --summary` goal does not take file/target arguments. Run "
                "`help py-constraints` for more details."
            )
            return PyConstraintsGoal(exit_code=1)

        all_targets = await Get(AllTargets, AllTargetsRequest())
        all_python_targets = tuple(
            t for t in all_targets if t.has_field(InterpreterConstraintsField)
        )

        constraints_per_tgt = [
            InterpreterConstraints.create_from_targets([tgt], python_setup)
            for tgt in all_python_targets
        ]

        transitive_targets_per_tgt = await MultiGet(
            Get(TransitiveTargets, TransitiveTargetsRequest([tgt.address]))
            for tgt in all_python_targets
        )
        transitive_constraints_per_tgt = [
            InterpreterConstraints.create_from_targets(transitive_targets.closure, python_setup)
            for transitive_targets in transitive_targets_per_tgt
        ]

        dependees_per_root = await MultiGet(
            Get(Dependees, DependeesRequest([tgt.address], transitive=True, include_roots=False))
            for tgt in all_python_targets
        )

        data = [
            {
                "Target": tgt.address.spec,
                "Constraints": str(constraints),
                "Transitive Constraints": str(transitive_constraints),
                "# Dependencies": len(transitive_targets.dependencies),
                "# Dependees": len(dependees),
            }
            for tgt, constraints, transitive_constraints, transitive_targets, dependees in zip(
                all_python_targets,
                constraints_per_tgt,
                transitive_constraints_per_tgt,
                transitive_targets_per_tgt,
                dependees_per_root,
            )
        ]

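        # Emit the summary as CSV so it can be redirected into spreadsheet tooling.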
        with py_constraints_subsystem.output_sink(console) as stdout:
            writer = csv.DictWriter(
                stdout,
                fieldnames=[
                    "Target",
                    "Constraints",
                    "Transitive Constraints",
                    "# Dependencies",
                    "# Dependees",
                ],
            )
            writer.writeheader()
            for entry in data:
                writer.writerow(entry)

        return PyConstraintsGoal(exit_code=0)

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(addresses))
    final_constraints = InterpreterConstraints.create_from_targets(
        transitive_targets.closure, python_setup
    )

    if not final_constraints:
        target_types_with_constraints = sorted(
            tgt_type.alias
            for tgt_type in registered_target_types.types
            if tgt_type.class_has_field(InterpreterConstraintsField, union_membership)
        )
        logger.warning(
            "No Python files/targets matched for the `py-constraints` goal. All target types with "
            f"Python interpreter constraints: {', '.join(target_types_with_constraints)}"
        )
        return PyConstraintsGoal(exit_code=0)

    constraints_to_addresses = defaultdict(set)
    for tgt in transitive_targets.closure:
        constraints = InterpreterConstraints.create_from_targets([tgt], python_setup)
        if not constraints:
            continue
        constraints_to_addresses[constraints].add(tgt.address)

    with py_constraints_subsystem.output(console) as output_stdout:
        output_stdout(f"Final merged constraints: {final_constraints}\n")
        if len(addresses) > 1:
            merged_constraints_warning = (
                "(These are the constraints used if you were to depend on all of the input "
                "files/targets together, even though they may end up never being used together in "
                "the real world. Consider using a more precise query or running "
                f"`{bin_name()} py-constraints --summary`.)\n")
            output_stdout(indent(fill(merged_constraints_warning, 80), "  "))

        for constraint, addrs in sorted(constraints_to_addresses.items()):
            output_stdout(f"\n{constraint}\n")
            for addr in sorted(addrs):
                output_stdout(f"  {addr}\n")

    return PyConstraintsGoal(exit_code=0)
Example #13
async def pex_from_targets(request: PexFromTargetsRequest, python_setup: PythonSetup) -> PexRequest:
    if request.direct_deps_only:
        targets = await Get(Targets, Addresses(request.addresses))
        direct_deps = await MultiGet(
            Get(Targets, DependenciesRequest(tgt.get(Dependencies))) for tgt in targets
        )
        all_targets = FrozenOrderedSet(itertools.chain(*direct_deps, targets))
    else:
        transitive_targets = await Get(
            TransitiveTargets, TransitiveTargetsRequest(request.addresses)
        )
        all_targets = transitive_targets.closure

    if request.hardcoded_interpreter_constraints:
        interpreter_constraints = request.hardcoded_interpreter_constraints
    else:
        calculated_constraints = InterpreterConstraints.create_from_targets(
            all_targets, python_setup
        )
        # If there are no targets, we fall back to the global constraints. This is relevant,
        # for example, when running `./pants repl` with no specs.
        interpreter_constraints = calculated_constraints or InterpreterConstraints(
            python_setup.interpreter_constraints
        )

    sources_digests = []
    if request.additional_sources:
        sources_digests.append(request.additional_sources)
    if request.include_source_files:
        sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(all_targets))
    else:
        sources = PythonSourceFiles.empty()

    additional_inputs_digests = []
    if request.additional_inputs:
        additional_inputs_digests.append(request.additional_inputs)
    additional_args = request.additional_args
    if request.include_local_dists:
        # Note that LocalDistsPexRequest has no `direct_deps_only` mode, so we will build all
        # local dists in the transitive closure even if the request was for direct_deps_only.
        # Since we currently use `direct_deps_only` in one case (building a requirements pex
        # when running pylint) and in that case include_local_dists=False, this seems harmless.
        local_dists = await Get(
            LocalDistsPex,
            LocalDistsPexRequest(
                request.addresses,
                internal_only=request.internal_only,
                interpreter_constraints=interpreter_constraints,
                sources=sources,
            ),
        )
        remaining_sources = local_dists.remaining_sources
        additional_inputs_digests.append(local_dists.pex.digest)
        additional_args += ("--requirements-pex", local_dists.pex.name)
    else:
        remaining_sources = sources

    remaining_sources_stripped = await Get(
        StrippedPythonSourceFiles, PythonSourceFiles, remaining_sources
    )
    sources_digests.append(remaining_sources_stripped.stripped_source_files.snapshot.digest)

    merged_sources_digest, additional_inputs = await MultiGet(
        Get(Digest, MergeDigests(sources_digests)),
        Get(Digest, MergeDigests(additional_inputs_digests)),
    )

    requirements = PexRequirements.create_from_requirement_fields(
        (
            tgt[PythonRequirementsField]
            for tgt in all_targets
            if tgt.has_field(PythonRequirementsField)
        ),
        additional_requirements=request.additional_requirements,
        apply_constraints=True,
    )

    description = request.description

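    # When constraints or a lockfile are configured, first build a "repository" PEX holding
    # the entire resolve; the final PEX then pulls its subset of wheels from that repository
    # rather than resolving against PyPI from scratch.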
    if requirements:
        repository_pex: Pex | None = None
        if python_setup.requirement_constraints:
            maybe_constraints_repository_pex = await Get(
                _ConstraintsRepositoryPex,
                _ConstraintsRepositoryPexRequest(
                    requirements,
                    request.platforms,
                    interpreter_constraints,
                    request.internal_only,
                    request.additional_lockfile_args,
                ),
            )
            if maybe_constraints_repository_pex.maybe_pex:
                repository_pex = maybe_constraints_repository_pex.maybe_pex
        elif (
            python_setup.resolve_all_constraints
            and python_setup.resolve_all_constraints_was_set_explicitly()
        ):
            raise ValueError(
                "`[python].resolve_all_constraints` is enabled, so "
                "`[python].requirement_constraints` must also be set."
            )
        elif request.resolve_and_lockfile:
            resolve, lockfile = request.resolve_and_lockfile
            repository_pex = await Get(
                Pex,
                PexRequest(
                    description=f"Installing {lockfile} for the resolve `{resolve}`",
                    output_filename=f"{path_safe(resolve)}_lockfile.pex",
                    internal_only=request.internal_only,
                    requirements=Lockfile(
                        file_path=lockfile,
                        file_path_description_of_origin=(
                            f"the resolve `{resolve}` (from "
                            "`[python].experimental_resolves_to_lockfiles`)"
                        ),
                        # TODO(#12314): Hook up lockfile staleness check.
                        lockfile_hex_digest=None,
                        req_strings=None,
                    ),
                    interpreter_constraints=interpreter_constraints,
                    platforms=request.platforms,
                    additional_args=request.additional_lockfile_args,
                ),
            )
        elif python_setup.lockfile:
            repository_pex = await Get(
                Pex,
                PexRequest(
                    description=f"Installing {python_setup.lockfile}",
                    output_filename="lockfile.pex",
                    internal_only=request.internal_only,
                    requirements=Lockfile(
                        file_path=python_setup.lockfile,
                        file_path_description_of_origin=(
                            "the option `[python].experimental_lockfile`"
                        ),
                        # TODO(#12314): Hook up lockfile staleness check once multiple lockfiles
                        # are supported.
                        lockfile_hex_digest=None,
                        req_strings=None,
                    ),
                    interpreter_constraints=interpreter_constraints,
                    platforms=request.platforms,
                    additional_args=request.additional_lockfile_args,
                ),
            )
        requirements = dataclasses.replace(requirements, repository_pex=repository_pex)

    return PexRequest(
        output_filename=request.output_filename,
        internal_only=request.internal_only,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        main=request.main,
        sources=merged_sources_digest,
        additional_inputs=additional_inputs,
        additional_args=additional_args,
        description=description,
    )
Example #14
async def fib(n: int) -> Fib:
    if n < 2:
        return Fib(n)
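    # Request both subproblems concurrently; the engine memoizes each Get, so the
    # exponential call tree collapses to linear work.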
    x, y = tuple(await MultiGet([Get(Fib, int(n - 2)), Get(Fib, int(n - 1))]))
    return Fib(x.val + y.val)
Example #15
async def package_python_google_cloud_function(
    field_set: PythonGoogleCloudFunctionFieldSet,
    lambdex: Lambdex,
    union_membership: UnionMembership,
) -> BuiltPackage:
    output_filename = field_set.output_path.value_or_default(
        # Cloud Functions typically use the .zip suffix, so we use that instead of .pex.
        file_ending="zip",
    )

    # We hardcode the platform value to the appropriate one for each Google Cloud Function runtime.
    # (Running the "hello world" cloud function in the example code will report the platform, and can be
    # used to verify correctness of these platform strings.)
    py_major, py_minor = field_set.runtime.to_interpreter_version()
    platform = f"linux_x86_64-cp-{py_major}{py_minor}-cp{py_major}{py_minor}"
    # Set the pymalloc ABI flag; it was removed in Python 3.8 (https://bugs.python.org/issue36707).
    if py_major <= 3 and py_minor < 8:
        platform += "m"

    additional_pex_args = (
        # Ensure we can resolve manylinux wheels in addition to any AMI-specific wheels.
        "--manylinux=manylinux2014",
        # When we're executing Pex on Linux, allow a local interpreter to be resolved if
        # available and matching the AMI platform.
        "--resolve-local-platforms",
    )
    pex_request = PexFromTargetsRequest(
        addresses=[field_set.address],
        internal_only=False,
        output_filename=output_filename,
        platforms=PexPlatforms([platform]),
        additional_args=additional_pex_args,
        additional_lockfile_args=additional_pex_args,
    )

    lambdex_request = PexRequest(
        output_filename="lambdex.pex",
        internal_only=True,
        requirements=lambdex.pex_requirements(),
        interpreter_constraints=lambdex.interpreter_constraints,
        main=lambdex.main,
    )

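    # Build the Lambdex tool PEX and the function's PEX, and resolve the handler and
    # transitive targets, all concurrently.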
    lambdex_pex, pex_result, handler, transitive_targets = await MultiGet(
        Get(VenvPex, PexRequest, lambdex_request),
        Get(Pex, PexFromTargetsRequest, pex_request),
        Get(ResolvedPythonGoogleHandler, ResolvePythonGoogleHandlerRequest(field_set.handler)),
        Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),
    )

    # Warn if users depend on `files` targets, which won't be included in the PEX and is a common
    # gotcha.
    file_tgts = targets_with_sources_types(
        [FileSourceField], transitive_targets.dependencies, union_membership
    )
    if file_tgts:
        files_addresses = sorted(tgt.address.spec for tgt in file_tgts)
        logger.warning(
            f"The `python_google_cloud_function` target {field_set.address} transitively depends "
            "on the below `files` targets, but Pants will not include them in the built Cloud "
            "Function. Filesystem APIs like `open()` are not able to load files within the binary "
            "itself; instead, they read from the current working directory."
            f"\n\nInstead, use `resources` targets. See {doc_url('resources')}."
            f"\n\nFiles targets dependencies: {files_addresses}"
        )

    # NB: Lambdex modifies its input pex in-place, so the input file is also the output file.
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            lambdex_pex,
            argv=("build", "-M", "main.py", "-e", handler.val, output_filename),
            input_digest=pex_result.digest,
            output_files=(output_filename,),
            description=f"Setting up handler in {output_filename}",
        ),
    )
    artifact = BuiltPackageArtifact(
        output_filename,
        extra_log_lines=(
            f"    Runtime: {field_set.runtime.value}",
            # The GCP-facing handler function is always main.handler, which is the
            # wrapper injected by lambdex that manages invocation of the actual handler.
            "    Handler: main.handler",
        ),
    )
    return BuiltPackage(digest=result.output_digest, artifacts=(artifact,))
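
Note: for a runtime such as python37, the code above yields the platform string
linux_x86_64-cp-37-cp37m; the trailing "m" (the pymalloc ABI tag) only exists before
Python 3.8. A self-contained sketch of just that mapping; gcf_platform is a name
invented here for illustration:

def gcf_platform(py_major: int, py_minor: int) -> str:
    """Map an interpreter version to a PEX platform string (illustrative sketch)."""
    platform = f"linux_x86_64-cp-{py_major}{py_minor}-cp{py_major}{py_minor}"
    # The pymalloc "m" ABI suffix was removed in Python 3.8.
    if (py_major, py_minor) < (3, 8):
        platform += "m"
    return platform

assert gcf_platform(3, 7) == "linux_x86_64-cp-37-cp37m"
assert gcf_platform(3, 8) == "linux_x86_64-cp-38-cp38"
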
Example #16
async def merge_coverage_data(
    data_collection: PytestCoverageDataCollection,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage: CoverageSubsystem,
    source_roots: AllSourceRoots,
) -> MergedCoverageData:
    if len(data_collection) == 1 and not coverage.global_report:
        coverage_data = data_collection[0]
        return MergedCoverageData(coverage_data.digest, (coverage_data.address,))

    coverage_digest_gets = []
    coverage_data_file_paths = []
    addresses = []
    for data in data_collection:
        # We prefix each .coverage file with its corresponding address to avoid collisions.
        coverage_digest_gets.append(
            Get(Digest,
                AddPrefix(data.digest, prefix=data.address.path_safe_spec)))
        coverage_data_file_paths.append(
            f"{data.address.path_safe_spec}/.coverage")
        addresses.append(data.address)

    if coverage.global_report:
        # It's important to set the `branch` value in the empty base report to the value it will
        # have when running on real inputs, so that the reports are of the same type, and can be
        # merged successfully. Otherwise we may get "Can't combine arc data with line data" errors.
        # See https://github.com/pantsbuild/pants/issues/14542 .
        config_contents = await Get(DigestContents, Digest, coverage_config.digest)
        branch = get_branch_value_from_config(config_contents[0]) if config_contents else False
        global_coverage_base_dir = PurePath("__global_coverage__")
        global_coverage_config_path = global_coverage_base_dir / "pyproject.toml"
        global_coverage_config_content = toml.dumps(
            {
                "tool": {
                    "coverage": {
                        "run": {
                            "relative_files": True,
                            "source": [source_root.path for source_root in source_roots],
                            "branch": branch,
                        }
                    }
                }
            }
        ).encode()

        no_op_exe_py_path = global_coverage_base_dir / "no-op-exe.py"

        all_sources_digest, no_op_exe_py_digest, global_coverage_config_digest = await MultiGet(
            Get(
                Digest,
                PathGlobs(globs=[
                    f"{source_root.path}/**/*.py"
                    for source_root in source_roots
                ]),
            ),
            Get(
                Digest,
                CreateDigest(
                    [FileContent(path=str(no_op_exe_py_path), content=b"")])),
            Get(
                Digest,
                CreateDigest([
                    FileContent(
                        path=str(global_coverage_config_path),
                        content=global_coverage_config_content,
                    ),
                ]),
            ),
        )
        extra_sources_digest = await Get(
            Digest, MergeDigests((all_sources_digest, no_op_exe_py_digest)))
        input_digest = await Get(
            Digest,
            MergeDigests(
                (extra_sources_digest, global_coverage_config_digest)))
        result = await Get(
            ProcessResult,
            VenvPexProcess(
                coverage_setup.pex,
                argv=("run", "--rcfile", str(global_coverage_config_path),
                      str(no_op_exe_py_path)),
                input_digest=input_digest,
                output_files=(".coverage", ),
                description="Create base global Pytest coverage report.",
                level=LogLevel.DEBUG,
            ),
        )
        coverage_digest_gets.append(
            Get(
                Digest,
                AddPrefix(digest=result.output_digest,
                          prefix=str(global_coverage_base_dir))))
        coverage_data_file_paths.append(
            str(global_coverage_base_dir / ".coverage"))
    else:
        extra_sources_digest = EMPTY_DIGEST

    input_digest = await Get(
        Digest, MergeDigests(await MultiGet(coverage_digest_gets)))
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            coverage_setup.pex,
            # We tell combine to keep the original input files, to aid debugging in the sandbox.
            argv=("combine", "--keep", *sorted(coverage_data_file_paths)),
            input_digest=input_digest,
            output_files=(".coverage", ),
            description=f"Merge {len(coverage_data_file_paths)} Pytest coverage reports.",
            level=LogLevel.DEBUG,
        ),
    )
    return MergedCoverageData(
        await Get(Digest,
                  MergeDigests((result.output_digest, extra_sources_digest))),
        tuple(addresses),
    )
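
Note: the global-report configuration written above is plain TOML; a minimal
standalone sketch of the same generation using the toml library, with hard-coded
stand-ins for the real source roots and branch setting:

import toml

source_roots = ["src/python", "tests/python"]  # assumed stand-ins for AllSourceRoots
config = {
    "tool": {
        "coverage": {
            "run": {
                "relative_files": True,
                "source": source_roots,
                # Must match the branch setting of the real runs, or combining fails
                # with "Can't combine arc data with line data".
                "branch": True,
            }
        }
    }
}
# This is the content written to __global_coverage__/pyproject.toml above.
print(toml.dumps(config))
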
Example #17
async def build_go_package(request: BuildGoPackageRequest) -> FallibleBuiltGoPackage:
    maybe_built_deps = await MultiGet(
        Get(FallibleBuiltGoPackage, BuildGoPackageRequest, build_request)
        for build_request in request.direct_dependencies
    )

    import_paths_to_pkg_a_files: dict[str, str] = {}
    dep_digests = []
    for maybe_dep in maybe_built_deps:
        if maybe_dep.output is None:
            return dataclasses.replace(
                maybe_dep, import_path=request.import_path, dependency_failed=True
            )
        dep = maybe_dep.output
        import_paths_to_pkg_a_files.update(dep.import_paths_to_pkg_a_files)
        dep_digests.append(dep.digest)

    merged_deps_digest, import_config, embedcfg = await MultiGet(
        Get(Digest, MergeDigests(dep_digests)),
        Get(ImportConfig, ImportConfigRequest(FrozenDict(import_paths_to_pkg_a_files))),
        Get(RenderedEmbedConfig, RenderEmbedConfigRequest(request.embed_config)),
    )

    input_digest = await Get(
        Digest,
        MergeDigests([merged_deps_digest, import_config.digest, embedcfg.digest, request.digest]),
    )

    assembly_digests = None
    symabis_path = None
    if request.s_file_names:
        assembly_setup = await Get(
            FallibleAssemblyPreCompilation,
            AssemblyPreCompilationRequest(input_digest, request.s_file_names, request.subpath),
        )
        if assembly_setup.result is None:
            return FallibleBuiltGoPackage(
                None,
                request.import_path,
                assembly_setup.exit_code,
                stdout=assembly_setup.stdout,
                stderr=assembly_setup.stderr,
            )
        input_digest = assembly_setup.result.merged_compilation_input_digest
        assembly_digests = assembly_setup.result.assembly_digests
        symabis_path = "./symabis"

    compile_args = [
        "tool",
        "compile",
        "-o",
        "__pkg__.a",
        "-pack",
        "-p",
        request.import_path,
        "-importcfg",
        import_config.CONFIG_PATH,
        # See https://github.com/golang/go/blob/f229e7031a6efb2f23241b5da000c3b3203081d6/src/cmd/go/internal/work/gc.go#L79-L100
        # for why Go sets the default to 1.16.
        f"-lang=go{request.minimum_go_version or '1.16'}",
    ]

    if symabis_path:
        compile_args.extend(["-symabis", symabis_path])

    if embedcfg.digest != EMPTY_DIGEST:
        compile_args.extend(["-embedcfg", RenderedEmbedConfig.PATH])

    if not request.s_file_names:
        # If there are no non-Go sources, pass the -complete flag, which tells the compiler
        # that the provided Go files constitute the entire package.
        compile_args.append("-complete")

    relativized_sources = (
        f"./{request.subpath}/{name}" if request.subpath else f"./{name}"
        for name in request.go_file_names
    )
    compile_args.extend(["--", *relativized_sources])
    compile_result = await Get(
        FallibleProcessResult,
        GoSdkProcess(
            input_digest=input_digest,
            command=tuple(compile_args),
            description=f"Compile Go package: {request.import_path}",
            output_files=("__pkg__.a",),
        ),
    )
    if compile_result.exit_code != 0:
        return FallibleBuiltGoPackage(
            None,
            request.import_path,
            compile_result.exit_code,
            stdout=compile_result.stdout.decode("utf-8"),
            stderr=compile_result.stderr.decode("utf-8"),
        )

    compilation_digest = compile_result.output_digest
    if assembly_digests:
        assembly_result = await Get(
            AssemblyPostCompilation,
            AssemblyPostCompilationRequest(
                compilation_digest,
                assembly_digests,
                request.s_file_names,
                request.subpath,
            ),
        )
        if assembly_result.result.exit_code != 0:
            return FallibleBuiltGoPackage(
                None,
                request.import_path,
                assembly_result.result.exit_code,
                stdout=assembly_result.result.stdout.decode("utf-8"),
                stderr=assembly_result.result.stderr.decode("utf-8"),
            )
        assert assembly_result.merged_output_digest
        compilation_digest = assembly_result.merged_output_digest

    path_prefix = os.path.join("__pkgs__", path_safe(request.import_path))
    import_paths_to_pkg_a_files[request.import_path] = os.path.join(path_prefix, "__pkg__.a")
    output_digest = await Get(Digest, AddPrefix(compilation_digest, path_prefix))
    merged_result_digest = await Get(Digest, MergeDigests([*dep_digests, output_digest]))

    output = BuiltGoPackage(merged_result_digest, FrozenDict(import_paths_to_pkg_a_files))
    return FallibleBuiltGoPackage(output, request.import_path)
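
Note: stripped of the engine plumbing, the compile step above is a plain
`go tool compile` invocation. A sketch reproducing it with subprocess, assuming a Go
toolchain on PATH and a single hello.go with no imports; the paths and import path
are illustrative:

import subprocess

compile_args = [
    "go", "tool", "compile",
    "-o", "__pkg__.a",
    "-pack",
    "-p", "example.com/hello",  # assumed import path
    "-lang=go1.16",
    "-complete",  # only valid when there are no assembly sources
    "--", "./hello.go",
]
result = subprocess.run(compile_args, capture_output=True, text=True)
if result.returncode != 0:
    print(result.stderr)
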
Example #18
async def merge_coverage_data(
    data_collection: PytestCoverageDataCollection,
    coverage_setup: CoverageSetup,
    coverage: CoverageSubsystem,
    source_roots: AllSourceRoots,
) -> MergedCoverageData:
    if len(data_collection) == 1 and not coverage.global_report:
        coverage_data = data_collection[0]
        return MergedCoverageData(coverage_data.digest, (coverage_data.address,))

    coverage_digest_gets = []
    coverage_data_file_paths = []
    addresses = []
    for data in data_collection:
        # We prefix each .coverage file with its corresponding address to avoid collisions.
        coverage_digest_gets.append(
            Get(Digest, AddPrefix(data.digest, prefix=data.address.path_safe_spec))
        )
        coverage_data_file_paths.append(f"{data.address.path_safe_spec}/.coverage")
        addresses.append(data.address)

    if coverage.global_report:
        global_coverage_base_dir = PurePath("__global_coverage__")

        global_coverage_config_path = global_coverage_base_dir / "pyproject.toml"
        global_coverage_config_content = toml.dumps(
            {
                "tool": {
                    "coverage": {
                        "run": {
                            "relative_files": True,
                            "source": list(source_root.path for source_root in source_roots),
                        }
                    }
                }
            }
        ).encode()

        no_op_exe_py_path = global_coverage_base_dir / "no-op-exe.py"

        all_sources_digest, no_op_exe_py_digest, global_coverage_config_digest = await MultiGet(
            Get(
                Digest,
                PathGlobs(globs=[f"{source_root.path}/**/*.py" for source_root in source_roots]),
            ),
            Get(Digest, CreateDigest([FileContent(path=str(no_op_exe_py_path), content=b"")])),
            Get(
                Digest,
                CreateDigest(
                    [
                        FileContent(
                            path=str(global_coverage_config_path),
                            content=global_coverage_config_content,
                        ),
                    ]
                ),
            ),
        )
        extra_sources_digest = await Get(
            Digest, MergeDigests((all_sources_digest, no_op_exe_py_digest))
        )
        input_digest = await Get(
            Digest, MergeDigests((extra_sources_digest, global_coverage_config_digest))
        )
        result = await Get(
            ProcessResult,
            VenvPexProcess(
                coverage_setup.pex,
                argv=("run", "--rcfile", str(global_coverage_config_path), str(no_op_exe_py_path)),
                input_digest=input_digest,
                output_files=(".coverage",),
                description="Create base global Pytest coverage report.",
                level=LogLevel.DEBUG,
            ),
        )
        coverage_digest_gets.append(
            Get(
                Digest, AddPrefix(digest=result.output_digest, prefix=str(global_coverage_base_dir))
            )
        )
        coverage_data_file_paths.append(str(global_coverage_base_dir / ".coverage"))
    else:
        extra_sources_digest = EMPTY_DIGEST

    input_digest = await Get(Digest, MergeDigests(await MultiGet(coverage_digest_gets)))
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            coverage_setup.pex,
            argv=("combine", *sorted(coverage_data_file_paths)),
            input_digest=input_digest,
            output_files=(".coverage",),
            description=f"Merge {len(coverage_data_file_paths)} Pytest coverage reports.",
            level=LogLevel.DEBUG,
        ),
    )
    return MergedCoverageData(
        await Get(Digest, MergeDigests((result.output_digest, extra_sources_digest))),
        tuple(addresses),
    )
Example #19
async def find_owners(owners_request: OwnersRequest) -> Owners:
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))

    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources
                                     if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)

    # Walk up the buildroot looking for targets that would conceivably claim changed sources.
    # For live files, we use ExpandedTargets, which causes more precise, often file-level, targets
    # to be created. For deleted files we use UnexpandedTargets, which have the original declared
    # glob.
    live_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in live_dirs)
    deleted_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in deleted_dirs)
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(
        Get(Targets, AddressSpecs(live_candidate_specs)),
        Get(UnexpandedTargets, AddressSpecs(deleted_candidate_specs)),
    )

    matching_addresses: OrderedSet[Address] = OrderedSet()
    unmatched_sources = set(owners_request.sources)
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files

        build_file_addresses = await MultiGet(
            Get(BuildFileAddress, Address, tgt.address)
            for tgt in candidate_tgts)

        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            matching_files = set(
                matches_filespec(candidate_tgt.get(Sources).filespec,
                                 paths=sources_set))
            # Also consider secondary ownership, meaning it's not a `Sources` field with primary
            # ownership, but the target still should match the file. We can't use `tgt.get()`
            # because this is a mixin, and there technically may be >1 field.
            secondary_owner_fields = tuple(
                field  # type: ignore[misc]
                for field in candidate_tgt.field_values.values()
                if isinstance(field, SecondaryOwnerMixin))
            for secondary_owner_field in secondary_owner_fields:
                matching_files.update(
                    matches_filespec(secondary_owner_field.filespec,
                                     paths=sources_set))
            if not matching_files and bfa.rel_path not in sources_set:
                continue

            unmatched_sources -= matching_files
            matching_addresses.add(candidate_tgt.address)

    if (unmatched_sources and owners_request.owners_not_found_behavior !=
            OwnersNotFoundBehavior.ignore):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources],
            owners_request.owners_not_found_behavior)

    return Owners(matching_addresses)
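
Note: the ownership check above ultimately matches each candidate target's source
globs against the changed paths. A toy sketch of that matching with fnmatch; the
real matches_filespec is more featureful (it honors exclude patterns, for example):

from fnmatch import fnmatch

def matching_files(globs: list[str], paths: set[str]) -> set[str]:
    """Return the subset of paths claimed by any of the globs."""
    return {path for path in paths if any(fnmatch(path, glob) for glob in globs)}

changed = {"src/app/main.py", "src/app/util.py", "docs/readme.md"}
print(matching_files(["src/app/*.py"], changed))
# -> {'src/app/main.py', 'src/app/util.py'}
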
Example #20
async def generate_coverage_reports(
    merged_coverage_data: MergedCoverageData,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
) -> CoverageReports:
    """Takes all Python test results and generates a single coverage report."""
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(merged_coverage_data.addresses)
    )
    sources = await Get(
        PythonSourceFiles,
        # Coverage sometimes includes non-Python files in its `.coverage` data. We need to
        # ensure that they're present when generating the report. We include all the files included
        # by `pytest_runner.py`.
        PythonSourceFilesRequest(
            transitive_targets.closure, include_files=True, include_resources=True
        ),
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                merged_coverage_data.coverage_data,
                coverage_config.digest,
                sources.source_files.snapshot.digest,
            )
        ),
    )

    pex_processes = []
    report_types = []
    result_snapshot = await Get(Snapshot, Digest, merged_coverage_data.coverage_data)
    coverage_reports: List[CoverageReport] = []
    for report_type in coverage_subsystem.reports:
        if report_type == CoverageReportType.RAW:
            coverage_reports.append(
                FilesystemCoverageReport(
                    # We don't know yet if the coverage is sufficient, so we let some other report
                    # trigger the failure if necessary.
                    coverage_insufficient=False,
                    report_type=CoverageReportType.RAW.value,
                    result_snapshot=result_snapshot,
                    directory_to_materialize_to=coverage_subsystem.output_dir,
                    report_file=coverage_subsystem.output_dir / ".coverage",
                )
            )
            continue

        report_types.append(report_type)
        output_file = (
            f"coverage.{report_type.value}"
            if report_type in {CoverageReportType.XML, CoverageReportType.JSON}
            else None
        )
        args = [report_type.report_name, f"--rcfile={coverage_config.path}"]
        if coverage_subsystem.fail_under is not None:
            args.append(f"--fail-under={coverage_subsystem.fail_under}")
        pex_processes.append(
            VenvPexProcess(
                coverage_setup.pex,
                argv=tuple(args),
                input_digest=input_digest,
                output_directories=("htmlcov",) if report_type == CoverageReportType.HTML else None,
                output_files=(output_file,) if output_file else None,
                description=f"Generate Pytest {report_type.report_name} coverage report.",
                level=LogLevel.DEBUG,
            )
        )
    results = await MultiGet(
        Get(FallibleProcessResult, VenvPexProcess, process) for process in pex_processes
    )
    for proc, res in zip(pex_processes, results):
        if res.exit_code not in {0, 2}:
            # coverage.py uses exit code 2 if --fail-under triggers, in which case the
            # reports are still generated.
            raise ProcessExecutionFailure(
                res.exit_code,
                res.stdout,
                res.stderr,
                proc.description,
            )

    # In practice if one result triggers --fail-under, they all will, but no need to rely on that.
    result_exit_codes = tuple(res.exit_code for res in results)
    result_stdouts = tuple(res.stdout for res in results)
    result_snapshots = await MultiGet(Get(Snapshot, Digest, res.output_digest) for res in results)

    coverage_reports.extend(
        _get_coverage_report(
            coverage_subsystem.output_dir, report_type, exit_code != 0, stdout, snapshot
        )
        for (report_type, exit_code, stdout, snapshot) in zip(
            report_types, result_exit_codes, result_stdouts, result_snapshots
        )
    )

    return CoverageReports(tuple(coverage_reports))
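
Note: the {0, 2} exit-code tolerance above mirrors coverage.py itself, which exits
with 2 when --fail-under trips but still writes the report. A direct subprocess
sketch of the same convention, assuming a .coverage file already exists in the
working directory:

import subprocess

result = subprocess.run(
    ["coverage", "report", "--fail-under=80"], capture_output=True, text=True
)
# 0: success; 2: the report was generated but coverage is below the threshold.
if result.returncode not in {0, 2}:
    raise RuntimeError(f"coverage failed: {result.stderr}")
coverage_insufficient = result.returncode == 2
print(result.stdout)
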
Example #21
async def resolve_unexpanded_targets(
        addresses: Addresses) -> UnexpandedTargets:
    wrapped_targets = await MultiGet(
        Get(WrappedTarget, Address, a) for a in addresses)
    return UnexpandedTargets(wrapped_target.target
                             for wrapped_target in wrapped_targets)
Example #22
async def analyze_java_source_dependencies(
    processor_classfiles: JavaParserCompiledClassfiles,
    jdk: InternalJdk,
    request: JavaSourceDependencyAnalysisRequest,
) -> FallibleJavaSourceDependencyAnalysisResult:
    source_files = request.source_files
    if len(source_files.files) > 1:
        raise ValueError(
            f"parse_java_package expects sources with exactly 1 source file, but found {len(source_files.files)}."
        )
    elif len(source_files.files) == 0:
        raise ValueError(
            "parse_java_package expects sources with exactly 1 source file, but found none."
        )
    source_prefix = "__source_to_analyze"
    source_path = os.path.join(source_prefix, source_files.files[0])
    processorcp_relpath = "__processorcp"
    toolcp_relpath = "__toolcp"

    (
        tool_classpath,
        prefixed_source_files_digest,
    ) = await MultiGet(
        Get(
            ToolClasspath,
            ToolClasspathRequest(
                artifact_requirements=java_parser_artifact_requirements()),
        ),
        Get(Digest, AddPrefix(source_files.snapshot.digest, source_prefix)),
    )

    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
        processorcp_relpath: processor_classfiles.digest,
    }

    analysis_output_path = "__source_analysis.json"

    process_result = await Get(
        FallibleProcessResult,
        JvmProcess(
            jdk=jdk,
            classpath_entries=[
                *tool_classpath.classpath_entries(toolcp_relpath),
                processorcp_relpath,
            ],
            argv=[
                "org.pantsbuild.javaparser.PantsJavaParserLauncher",
                analysis_output_path,
                source_path,
            ],
            input_digest=prefixed_source_files_digest,
            extra_immutable_input_digests=extra_immutable_input_digests,
            output_files=(analysis_output_path, ),
            extra_nailgun_keys=extra_immutable_input_digests,
            description=f"Analyzing {source_files.files[0]}",
            level=LogLevel.DEBUG,
        ),
    )

    return FallibleJavaSourceDependencyAnalysisResult(
        process_result=process_result)
Example #23
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets)
        exit_code = 0
        for debug_request in debug_requests:
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(TestResult, TestFieldSet, field_set)
        for field_set in field_sets_with_sources)

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.exit_code == 0:
            sigil = console.sigil_succeeded()
            status = "succeeded"
        else:
            sigil = console.sigil_failed()
            status = "failed"
            exit_code = result.exit_code
        console.print_stderr(f"{sigil} {result.address} {status}.")
        if result.extra_output and result.extra_output.files:
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" /
                                result.address.path_safe_spec),
            )

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results
                     if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, `type` objects aren't orderable, so we sort on `str()`
        # of the type instead.
        all_coverage_data = sorted(
            (result.coverage_data
             for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types = {
            collection_cls.element_type: collection_cls  # type: ignore[misc]
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (e.g., console, xml, html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: list[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles,
                OpenFilesRequest(coverage_report_files,
                                 error_if_open_not_found=False))
            for process in open_files.processes:
                interactive_runner.run(process)

        for coverage_reports in coverage_reports_collections:
            if coverage_reports.coverage_insufficient:
                logger.error("Test goal failed due to insufficient coverage. "
                             "See coverage reports for details.")
                # coverage.py uses 2 to indicate failure due to insufficient coverage.
                # We may as well follow suit in the general case, for all languages.
                exit_code = 2

    return Test(exit_code)
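
Note: the pre-sorting caveat above is easy to demonstrate: itertools.groupby only
groups adjacent items, so the data must first be sorted with a compatible key. A
self-contained sketch of the sort-then-group idiom used here:

import itertools

data = [1, "a", 2, "b", 3]
# `type` objects aren't orderable, so sort on str(type(...)) and group on type(...).
data_sorted = sorted(data, key=lambda x: str(type(x)))
for cls, group in itertools.groupby(data_sorted, key=lambda x: type(x)):
    print(cls.__name__, list(group))
# int [1, 2, 3]
# str ['a', 'b']
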
Example #24
async def create_ipython_repl_request(
        request: IPythonRepl, ipython: IPython, pex_env: PexEnvironment,
        python_setup: PythonSetup) -> ReplRequest:
    validate_compatible_resolve(request.targets, python_setup)

    interpreter_constraints, transitive_targets = await MultiGet(
        Get(InterpreterConstraints,
            InterpreterConstraintsRequest(request.addresses)),
        Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses)),
    )

    requirements_request = Get(Pex, RequirementsPexRequest(request.addresses))
    sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(transitive_targets.closure,
                                 include_files=True))

    ipython_request = Get(
        Pex, PexRequest,
        ipython.to_pex_request(
            interpreter_constraints=interpreter_constraints))

    requirements_pex, sources, ipython_pex = await MultiGet(
        requirements_request, sources_request, ipython_request)

    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            request.addresses,
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
            sources=sources,
        ),
    )

    merged_digest = await Get(
        Digest,
        MergeDigests((
            requirements_pex.digest,
            local_dists.pex.digest,
            local_dists.remaining_sources.source_files.snapshot.digest,
            ipython_pex.digest,
        )),
    )

    complete_pex_env = pex_env.in_workspace()
    args = list(
        complete_pex_env.create_argv(request.in_chroot(ipython_pex.name),
                                     python=ipython_pex.python))
    if ipython.ignore_cwd:
        args.append("--ignore-cwd")

    chrooted_source_roots = [
        request.in_chroot(sr) for sr in sources.source_roots
    ]
    extra_env = {
        **complete_pex_env.environment_dict(python_configured=ipython_pex.python is not None),
        "PEX_PATH": os.pathsep.join(
            [
                request.in_chroot(requirements_pex.name),
                request.in_chroot(local_dists.pex.name),
            ]
        ),
        "PEX_EXTRA_SYS_PATH": os.pathsep.join(chrooted_source_roots),
    }

    return ReplRequest(digest=merged_digest, args=args, extra_env=extra_env)
Example #25
async def generate_python_from_protobuf(
        request: GeneratePythonFromProtobufRequest,
        protoc: Protoc) -> GeneratedSources:
    download_protoc_request = Get(DownloadedExternalTool, ExternalToolRequest,
                                  protoc.get_request(Platform.current))

    output_dir = "_generated_files"
    # TODO(#9650): replace this with a proper intrinsic to create empty directories.
    create_output_dir_request = Get(
        ProcessResult,
        Process(
            ("/bin/mkdir", output_dir),
            description=f"Create the directory {output_dir}",
            level=LogLevel.TRACE,
            output_directories=(output_dir, ),
        ),
    )

    # Protoc needs all transitive dependencies on `protobuf_libraries` to work properly. It won't
    # actually generate those dependencies; it only needs to look at their .proto files to work
    # with imports.
    transitive_targets = await Get(
        TransitiveTargets, Addresses([request.protocol_target.address]))
    # NB: By stripping the source roots, we avoid having to set the value `--proto_path`
    # for Protobuf imports to be discoverable.
    all_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for tgt in transitive_targets.closure),
            for_sources_types=(ProtobufSources, ),
        ),
    )
    target_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest([request.protocol_target[ProtobufSources]]))

    (
        downloaded_protoc_binary,
        create_output_dir_result,
        all_sources_stripped,
        target_sources_stripped,
    ) = await MultiGet(
        download_protoc_request,
        create_output_dir_request,
        all_stripped_sources_request,
        target_stripped_sources_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            all_sources_stripped.snapshot.digest,
            downloaded_protoc_binary.digest,
            create_output_dir_result.output_digest,
        )),
    )

    result = await Get(
        ProcessResult,
        Process(
            (
                downloaded_protoc_binary.exe,
                "--python_out",
                output_dir,
                *target_sources_stripped.snapshot.files,
            ),
            input_digest=input_digest,
            description=f"Generating Python sources from {request.protocol_target.address}.",
            level=LogLevel.DEBUG,
            output_directories=(output_dir, ),
        ),
    )

    # We must do some path manipulation on the output digest for it to look like normal sources,
    # including adding back a source root.
    py_source_root = request.protocol_target.get(PythonSourceRootField).value
    if py_source_root:
        # Verify that the python source root specified by the target is in fact a source root.
        source_root_request = SourceRootRequest(PurePath(py_source_root))
    else:
        # The target didn't specify a python source root, so use the protobuf_library's source root.
        source_root_request = SourceRootRequest.for_target(
            request.protocol_target)

    normalized_digest, source_root = await MultiGet(
        Get(Digest, RemovePrefix(result.output_digest, output_dir)),
        Get(SourceRoot, SourceRootRequest, source_root_request),
    )

    if source_root.path != ".":
        source_root_restored = await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
    else:
        source_root_restored = await Get(Snapshot, Digest, normalized_digest)
    return GeneratedSources(source_root_restored)
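
Note: at its core this rule runs a single protoc command. A sketch of the equivalent
invocation outside the engine, assuming protoc is on PATH and an illustrative
example/foo.proto relative to the working directory:

import subprocess
from pathlib import Path

output_dir = "_generated_files"
Path(output_dir).mkdir(exist_ok=True)
subprocess.run(
    [
        "protoc",
        # With sources stripped of their source roots (as above), imports resolve
        # relative to the working directory, so no extra --proto_path is needed.
        "--python_out", output_dir,
        "example/foo.proto",  # assumed example path
    ],
    check=True,
)
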
Example #26
async def setup_pex_cli_process(
    request: PexCliProcess,
    pex_binary: PexPEX,
    pex_env: PexEnvironment,
    python_native_code: PythonNativeCode,
    global_options: GlobalOptions,
    pex_runtime_env: PexRuntimeEnvironment,
) -> Process:
    tmpdir = ".tmp"
    gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
    cert_args = []

    # The certs file will typically not be in the repo, so we can't digest it via a PathGlobs.
    # Instead we manually create a FileContent for it.
    if global_options.options.ca_certs_path:
        ca_certs_content = Path(global_options.options.ca_certs_path).read_bytes()
        chrooted_ca_certs_path = os.path.basename(global_options.options.ca_certs_path)

        gets.append(
            Get(
                Digest,
                CreateDigest((FileContent(chrooted_ca_certs_path, ca_certs_content),)),
            )
        )
        cert_args = ["--cert", chrooted_ca_certs_path]

    digests_to_merge = [pex_binary.digest]
    digests_to_merge.extend(await MultiGet(gets))
    if request.additional_input_digest:
        digests_to_merge.append(request.additional_input_digest)
    input_digest = await Get(Digest, MergeDigests(digests_to_merge))

    argv = [
        pex_binary.exe,
        *cert_args,
        "--python-path",
        create_path_env_var(pex_env.interpreter_search_paths),
        # Ensure Pex and its subprocesses create temporary files in the process execution
        # sandbox. It may make sense to do this generally for Processes, but in the short term we
        # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
        # perform. Making the TMPDIR local to the sandbox allows control via
        # --local-execution-root-dir for the local case and should work well with remote cases where
        # a remoting implementation has to allow for processes producing large binaries in a
        # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
        # environment variable allows Pex to absolutize the path ensuring subprocesses that change
        # CWD can find the TMPDIR.
        "--tmpdir",
        tmpdir,
    ]
    if pex_runtime_env.verbosity > 0:
        argv.append(f"-{'v' * pex_runtime_env.verbosity}")

    # NB: This comes at the end of the argv because the request may use `--` passthrough args,
    # which must come at the end.
    argv.extend(request.argv)
    normalized_argv = pex_env.create_argv(*argv, python=request.python)
    env = {
        **pex_env.environment_dict(python_configured=request.python is not None),
        **python_native_code.environment_dict,
        **(request.extra_env or {}),
    }

    return Process(
        normalized_argv,
        description=request.description,
        input_digest=input_digest,
        env=env,
        output_files=request.output_files,
        output_directories=request.output_directories,
        append_only_caches=pex_env.append_only_caches(),
        level=request.level,
        cache_scope=request.cache_scope,
    )
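
Note: the --tmpdir plumbing described in the comments can be exercised against the
Pex CLI directly; a sketch assuming a pex binary on PATH, with an illustrative
requirement and output name:

import os
import subprocess

tmpdir = ".tmp"
os.makedirs(tmpdir, exist_ok=True)
# Keep Pex's scratch space inside the working directory rather than /tmp,
# mirroring the sandbox-local TMPDIR reasoning above.
subprocess.run(
    ["pex", "--tmpdir", tmpdir, "cowsay==5.0", "-c", "cowsay", "-o", "cowsay.pex"],
    check=True,
)
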
Example #27
async def bandit_lint_partition(partition: BanditPartition, bandit: Bandit,
                                lint_subsystem: LintSubsystem) -> LintResult:
    bandit_pex_request = Get(
        VenvPex,
        PexRequest(
            output_filename="bandit.pex",
            internal_only=True,
            requirements=PexRequirements(bandit.all_requirements),
            interpreter_constraints=partition.interpreter_constraints,
            main=bandit.main,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[bandit.config] if bandit.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--bandit-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    bandit_pex, config_digest, source_files = await MultiGet(
        bandit_pex_request, config_digest_request, source_files_request)

    input_digest = await Get(
        Digest, MergeDigests((source_files.snapshot.digest, config_digest)))

    report_file_name = "bandit_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            bandit_pex,
            argv=generate_args(source_files=source_files,
                               bandit=bandit,
                               report_file_name=report_file_name),
            input_digest=input_digest,
            description=f"Run Bandit on {pluralize(len(partition.field_sets), 'file')}.",
            output_files=(report_file_name, ) if report_file_name else None,
            level=LogLevel.DEBUG,
        ),
    )

    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Bandit report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
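
Note: the underlying tool here is just Bandit's CLI; a sketch of an equivalent
direct run, assuming bandit is installed, with illustrative source and report paths:

import subprocess

result = subprocess.run(
    ["bandit", "--recursive", "src", "--output", "bandit_report.txt"],
    capture_output=True,
    text=True,
)
# Bandit exits non-zero when it finds issues, so don't use check=True here.
print(result.returncode, result.stdout)
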
Example #28
async def setup_junit_for_target(
    request: TestSetupRequest,
    jvm: JvmSubsystem,
    junit: JUnit,
    test_subsystem: TestSubsystem,
) -> TestSetup:

    jdk, transitive_tgts = await MultiGet(
        Get(JdkEnvironment, JdkRequest,
            JdkRequest.from_field(request.field_set.jdk_version)),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
    )

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 JunitToolLockfileSentinel())
    classpath, junit_classpath, files = await MultiGet(
        Get(Classpath, Addresses([request.field_set.address])),
        Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
        Get(
            SourceFiles,
            SourceFilesRequest(
                (dep.get(SourcesField)
                 for dep in transitive_tgts.dependencies),
                for_sources_types=(FileSourceField, ),
                enable_codegen=True,
            ),
        ),
    )

    input_digest = await Get(
        Digest, MergeDigests((*classpath.digests(), files.snapshot.digest)))

    toolcp_relpath = "__toolcp"
    extra_immutable_input_digests = {
        toolcp_relpath: junit_classpath.digest,
    }

    reports_dir_prefix = "__reports_dir"
    reports_dir = f"{reports_dir_prefix}/{request.field_set.address.path_safe_spec}"

    # Classfiles produced by the root `junit_test` targets are the only ones which should run.
    user_classpath_arg = ":".join(classpath.root_args())

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = (
        ProcessCacheScope.PER_SESSION if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    )

    extra_jvm_args: list[str] = []
    if request.is_debug:
        extra_jvm_args.extend(jvm.debug_args)

    process = JvmProcess(
        jdk=jdk,
        classpath_entries=[
            *classpath.args(),
            *junit_classpath.classpath_entries(toolcp_relpath),
        ],
        argv=[
            *extra_jvm_args,
            "org.junit.platform.console.ConsoleLauncher",
            *(("--classpath", user_classpath_arg) if user_classpath_arg else
              ()),
            *(("--scan-class-path",
               user_classpath_arg) if user_classpath_arg else ()),
            "--reports-dir",
            reports_dir,
            *junit.args,
        ],
        input_digest=input_digest,
        extra_jvm_options=junit.jvm_options,
        extra_immutable_input_digests=extra_immutable_input_digests,
        output_directories=(reports_dir, ),
        description=f"Run JUnit 5 ConsoleLauncher against {request.field_set.address}",
        level=LogLevel.DEBUG,
        cache_scope=cache_scope,
        use_nailgun=False,
    )
    return TestSetup(process=process, reports_dir_prefix=reports_dir_prefix)
Example #29
async def compile_wsdl_source(
    request: CompileWsdlSourceRequest,
    jdk: InternalJdk,
    jaxws: JaxWsTools,
) -> CompiledWsdlSource:
    output_dir = "_generated_files"
    toolcp_relpath = "__toolcp"

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 JaxWsToolsLockfileSentinel())
    tool_classpath, subsetted_input_digest, empty_output_dir = await MultiGet(
        Get(
            ToolClasspath,
            ToolClasspathRequest(lockfile=lockfile_request),
        ),
        Get(
            Digest,
            DigestSubset(
                request.digest,
                PathGlobs(
                    [request.path],
                    glob_match_error_behavior=GlobMatchErrorBehavior.error,
                    conjunction=GlobExpansionConjunction.all_match,
                    description_of_origin="the WSDL file name",
                ),
            ),
        ),
        Get(Digest, CreateDigest([Directory(output_dir)])),
    )

    input_digest = await Get(
        Digest, MergeDigests([subsetted_input_digest, empty_output_dir]))

    immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
    }

    jaxws_args = [
        "-d",
        output_dir,
        "-encoding",
        "utf8",
        "-keep",
        "-Xnocompile",
        "-B-XautoNameResolution",
    ]
    if request.module:
        jaxws_args.extend(["-m", request.module])
    if request.package:
        jaxws_args.extend(["-p", request.package])

    jaxws_process = JvmProcess(
        jdk=jdk,
        argv=[
            "com.sun.tools.ws.WsImport",
            *jaxws_args,
            request.path,
        ],
        classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
        input_digest=input_digest,
        extra_immutable_input_digests=immutable_input_digests,
        extra_nailgun_keys=immutable_input_digests,
        description="Generating Java sources from WSDL source",
        level=LogLevel.DEBUG,
        output_directories=(output_dir, ),
    )
    jaxws_result = await Get(ProcessResult, JvmProcess, jaxws_process)

    normalized_digest = await Get(
        Digest, RemovePrefix(jaxws_result.output_digest, output_dir))
    return CompiledWsdlSource(normalized_digest)
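
Note: the class invoked above, com.sun.tools.ws.WsImport, is what the standalone
wsimport tool wraps, so the step can be reproduced directly where wsimport is
available on PATH; service.wsdl is an illustrative input:

import os
import subprocess

os.makedirs("_generated_files", exist_ok=True)
subprocess.run(
    [
        "wsimport",
        "-d", "_generated_files",  # write generated .java sources here
        "-encoding", "utf8",
        "-keep",        # keep the generated sources
        "-Xnocompile",  # skip javac; only the sources are wanted
        "-B-XautoNameResolution",
        "service.wsdl",
    ],
    check=True,
)
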
Example #30
async def build_local_dists(
    request: LocalDistsPexRequest,
) -> LocalDistsPex:

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    applicable_targets = [
        tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
    ]

    python_dist_field_sets = [
        PythonDistributionFieldSet.create(target) for target in applicable_targets
    ]

    dists = await MultiGet(
        [Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
    )

    # The primary use-case of the "local dists" feature is to support consuming native extensions
    # as wheels without having to publish them first.
    # It doesn't seem very useful to consume locally-built sdists, and it makes it hard to
    # reason about possible sys.path collisions between the in-repo sources and whatever the
    # sdist will place on the sys.path when it's installed.
    # So for now we simply ignore sdists, with a warning if necessary.
    provided_files = set()
    wheels = []

    all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
    for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
        artifacts = {(a.relpath or "") for a in dist.artifacts}
        # A given local dist might build a wheel and an sdist (and maybe other artifacts -
        # we don't know what setup command was run...)
        # As long as there is a wheel, we can ignore the other artifacts.
        wheel = next((art for art in artifacts if art.endswith(".whl")), None)
        if wheel:
            wheel_content = next(content for content in contents if content.path == wheel)
            wheels.append(wheel)
            buf = BytesIO()
            buf.write(wheel_content.content)
            buf.seek(0)
            with zipfile.ZipFile(buf) as zf:
                provided_files.update(zf.namelist())
        else:
            logger.warning(
                f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
                "this target does not produce a Python wheel artifact. Therefore this target's "
                "code will be used directly from sources, without a distribution being built, "
                "and therefore any native extensions in it will not be built.\n\n"
                f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
                "target to produce a wheel."
            )

    dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
    wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))

    dists_pex = await Get(
        Pex,
        PexRequest(
            output_filename="local_dists.pex",
            requirements=PexRequirements(wheels),
            interpreter_constraints=request.interpreter_constraints,
            additional_inputs=wheels_digest,
            internal_only=request.internal_only,
        ),
    )

    # We check source roots in reverse lexicographic order,
    # so we'll find the innermost root that matches.
    source_roots = list(reversed(sorted(request.sources.source_roots)))
    remaining_sources = set(request.sources.source_files.files)
    unrooted_files_set = set(request.sources.source_files.unrooted_files)
    for source in request.sources.source_files.files:
        if source not in unrooted_files_set:
            for source_root in source_roots:
                source_relpath = fast_relpath_optional(source, source_root)
                if source_relpath is not None and source_relpath in provided_files:
                    remaining_sources.remove(source)
    remaining_sources_snapshot = await Get(
        Snapshot,
        DigestSubset(
            request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
        ),
    )
    subtracted_sources = PythonSourceFiles(
        SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
        request.sources.source_roots,
    )

    return LocalDistsPex(dists_pex, subtracted_sources)
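
Note: the source-root matching at the end relies on reverse lexicographic order so
that a nested root like src/python/foo is tried before src/python, letting the
innermost root win. A standalone sketch; innermost_relpath is a name invented here,
where Pants uses fast_relpath_optional:

import os
from typing import Optional

def innermost_relpath(source: str, source_roots: list[str]) -> Optional[str]:
    """Relativize source against the innermost root that contains it."""
    for root in sorted(source_roots, reverse=True):
        if source == root or source.startswith(root + os.sep):
            return os.path.relpath(source, root)
    return None

roots = ["src/python", "src/python/foo"]
# -> "bar.py" (relative to src/python/foo), not "foo/bar.py"
print(innermost_relpath("src/python/foo/bar.py", roots))
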