Example #1
async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:
    """Inspects COPY instructions in the Dockerfile for references to known packageable targets."""
    dockerfile_info = await Get(
        DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)
    )

    # Parse all putative target addresses.
    putative_addresses = await Get(
        Addresses,
        UnparsedAddressInputs(
            dockerfile_info.putative_target_addresses,
            owning_address=dockerfile_info.address,
        ),
    )

    # Get the target for those addresses that are known.
    directories = {address.spec_path for address in putative_addresses}
    all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))
    targets = await Get(
        Targets, Addresses((address for address in putative_addresses if address in all_addresses))
    )

    # Only keep those targets that we can "package".
    package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))
    referenced_targets = (
        field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
    )
    return InjectedDependencies(Addresses(referenced_targets))
Example #2
async def find_valid_field_sets_for_target_roots(
    request: TargetRootsToFieldSetsRequest,
    targets_with_origins: TargetsWithOrigins,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> TargetRootsToFieldSets:
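    # Compute the applicable field sets for each target root.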
    field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(
            request.field_set_superclass, (two.target for two in targets_with_origins)
        ),
    )
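    # Keep only the targets that have at least one applicable field set.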
    targets_to_valid_field_sets = {}
    for tgt_with_origin, field_sets in zip(targets_with_origins, field_sets_per_target.collection):
        if field_sets:
            targets_to_valid_field_sets[tgt_with_origin] = field_sets
    if request.error_if_no_applicable_targets and not targets_to_valid_field_sets:
        raise NoApplicableTargetsException.create_from_field_sets(
            TargetsWithOrigins(targets_with_origins),
            field_set_types=union_membership.union_rules[request.field_set_superclass],
            goal_description=request.goal_description,
            union_membership=union_membership,
            registered_target_types=registered_target_types,
        )
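    # Optionally enforce that exactly one applicable target and field set were found.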
    result = TargetRootsToFieldSets(targets_to_valid_field_sets)
    if not request.expect_single_field_set:
        return result
    if len(result.targets) > 1:
        raise TooManyTargetsException(result.targets, goal_description=request.goal_description)
    if len(result.field_sets) > 1:
        raise AmbiguousImplementationsException(
            result.targets[0], result.field_sets, goal_description=request.goal_description
        )
    return result
Example #3
async def find_all_packageable_targets(
        all_targets: AllTargets) -> AllPackageableTargets:
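    # A target is packageable if at least one `PackageFieldSet` applies to it.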
    fs_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet, all_targets))
    return AllPackageableTargets(
        target
        for target, field_sets in zip(all_targets, fs_per_target.collection)
        if len(field_sets) > 0)
Example #4
async def build_runtime_package_dependencies(
    request: BuildPackageDependenciesRequest, ) -> BuiltPackageDependencies:
    unparsed_addresses = request.field.to_unparsed_address_inputs()
    if not unparsed_addresses:
        return BuiltPackageDependencies()
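    # Resolve the referenced addresses to targets, then build each applicable package field set.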
    tgts = await Get(Targets, UnparsedAddressInputs, unparsed_addresses)
    field_sets_per_tgt = await Get(
        FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, tgts))
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in field_sets_per_tgt.field_sets)
    return BuiltPackageDependencies(packages)
Example #5
async def package_archive_target(
        field_set: ArchiveFieldSet,
        global_options: GlobalOptions) -> BuiltPackage:
    package_targets, files_targets = await MultiGet(
        Get(
            Targets,
            UnparsedAddressInputs(field_set.packages.value or (),
                                  owning_address=field_set.address),
        ),
        Get(
            Targets,
            UnparsedAddressInputs(field_set.files.value or (),
                                  owning_address=field_set.address),
        ),
    )

    package_field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet, package_targets))
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in package_field_sets_per_target.field_sets)

    files_sources = await MultiGet(
        Get(
            HydratedSources,
            HydrateSourcesRequest(tgt.get(Sources),
                                  for_sources_types=(FilesSources, ),
                                  enable_codegen=True),
        ) for tgt in files_targets)

    input_snapshot = await Get(
        Snapshot,
        MergeDigests((
            *(package.digest for package in packages),
            *(sources.snapshot.digest for sources in files_sources),
        )),
    )

    output_filename = field_set.output_path.value_or_default(
        field_set.address,
        file_ending=field_set.format_field.value,
        use_legacy_format=global_options.options.pants_distdir_legacy_paths,
    )
    archive = await Get(
        Digest,
        CreateArchive(
            input_snapshot,
            output_filename=output_filename,
            format=ArchiveFormat(field_set.format_field.value),
        ),
    )
    return BuiltPackage(archive, relpath=output_filename)
Example #6
async def package_archive_target(field_set: ArchiveFieldSet) -> BuiltPackage:
    # TODO(#13086): Because we're using `Targets` instead of `UnexpandedTargets`, the
    #  `files` target generator gets replaced by its generated `file` targets. That replacement is
    #  necessary because we only hydrate sources for `FileSourceField`, which is only for the
    #  `file` target. That's really subtle!
    package_targets, file_targets = await MultiGet(
        Get(Targets, UnparsedAddressInputs,
            field_set.packages.to_unparsed_address_inputs()),
        Get(Targets, UnparsedAddressInputs,
            field_set.files.to_unparsed_address_inputs()),
    )

    package_field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet, package_targets))
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in package_field_sets_per_target.field_sets)

    file_sources = await MultiGet(
        Get(
            HydratedSources,
            HydrateSourcesRequest(
                tgt.get(SourcesField),
                for_sources_types=(FileSourceField, ),
                enable_codegen=True,
            ),
        ) for tgt in file_targets)

    input_snapshot = await Get(
        Snapshot,
        MergeDigests((
            *(package.digest for package in packages),
            *(sources.snapshot.digest for sources in file_sources),
        )),
    )

    output_filename = field_set.output_path.value_or_default(
        file_ending=field_set.format_field.value)
    archive = await Get(
        Digest,
        CreateArchive(
            input_snapshot,
            output_filename=output_filename,
            format=ArchiveFormat(field_set.format_field.value),
        ),
    )
    return BuiltPackage(archive, (BuiltPackageArtifact(output_filename), ))
Example #7
async def package_archive_target(field_set: ArchiveFieldSet) -> BuiltPackage:
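    # Resolve the addresses in the `packages` and `files` fields to targets.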
    package_targets, files_targets = await MultiGet(
        Get(Targets, UnparsedAddressInputs, field_set.packages.to_unparsed_address_inputs()),
        Get(Targets, UnparsedAddressInputs, field_set.files.to_unparsed_address_inputs()),
    )

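    # Build every packageable dependency referenced by `packages`.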
    package_field_sets_per_target = await Get(
        FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, package_targets)
    )
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in package_field_sets_per_target.field_sets
    )

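    # Hydrate the sources of the referenced `files` targets, allowing codegen.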
    files_sources = await MultiGet(
        Get(
            HydratedSources,
            HydrateSourcesRequest(
                tgt.get(Sources), for_sources_types=(FilesSources,), enable_codegen=True
            ),
        )
        for tgt in files_targets
    )

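    # Merge the built packages and hydrated file sources into one snapshot.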
    input_snapshot = await Get(
        Snapshot,
        MergeDigests(
            (
                *(package.digest for package in packages),
                *(sources.snapshot.digest for sources in files_sources),
            )
        ),
    )

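    # Derive the output filename and create the archive from the merged snapshot.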
    output_filename = field_set.output_path.value_or_default(
        field_set.address, file_ending=field_set.format_field.value
    )
    archive = await Get(
        Digest,
        CreateArchive(
            input_snapshot,
            output_filename=output_filename,
            format=ArchiveFormat(field_set.format_field.value),
        ),
    )
    return BuiltPackage(archive, (BuiltPackageArtifact(output_filename),))
Example #8
async def find_valid_field_sets_for_target_roots(
    request: TargetRootsToFieldSetsRequest,
    specs: Specs,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> TargetRootsToFieldSets:
    # NB: This must be in an `await Get`, rather than the rule signature, to avoid a rule graph
    # issue.
    targets = await Get(Targets, Specs, specs)
    field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(request.field_set_superclass, targets))
    targets_to_applicable_field_sets = {}
    for tgt, field_sets in zip(targets, field_sets_per_target.collection):
        if field_sets:
            targets_to_applicable_field_sets[tgt] = field_sets

    # Possibly warn or error if no targets were applicable.
    if not targets_to_applicable_field_sets:
        no_applicable_exception = NoApplicableTargetsException.create_from_field_sets(
            targets,
            specs,
            union_membership,
            registered_target_types,
            field_set_types=union_membership.union_rules[
                request.field_set_superclass],
            goal_description=request.goal_description,
        )
        if request.no_applicable_targets_behavior == NoApplicableTargetsBehavior.error:
            raise no_applicable_exception
        if request.no_applicable_targets_behavior == NoApplicableTargetsBehavior.warn:
            logger.warning(str(no_applicable_exception))

    result = TargetRootsToFieldSets(targets_to_applicable_field_sets)
    if not request.expect_single_field_set:
        return result
    if len(result.targets) > 1:
        raise TooManyTargetsException(
            result.targets, goal_description=request.goal_description)
    if len(result.field_sets) > 1:
        raise AmbiguousImplementationsException(
            result.targets[0],
            result.field_sets,
            goal_description=request.goal_description)
    return result
Example #9
async def prepare_shell_command_process(request: ShellCommandProcessRequest,
                                        shell_setup: ShellSetup,
                                        bash: BashBinary) -> Process:
    shell_command = request.target
    interactive = shell_command.has_field(ShellCommandRunWorkdirField)
    if interactive:
        working_directory = shell_command[
            ShellCommandRunWorkdirField].value or ""
    else:
        working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    timeout = shell_command.get(ShellCommandTimeoutField).value
    tools = shell_command.get(ShellCommandToolsField,
                              default_raw_value=()).value
    outputs = shell_command.get(ShellCommandOutputsField).value or ()
    extra_env_vars = shell_command.get(
        ShellCommandExtraEnvVarsField).value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `{shell_command.alias}` target {shell_command.address}."
        )

    if interactive:
        command_env = {
            "CHROOT": "{chroot}",
        }
    else:
        if not tools:
            raise ValueError(
                f"Must specify the `tools` used by the `{shell_command.alias}` target {shell_command.address}."
            )

        env = await Get(Environment, EnvironmentRequest(["PATH"]))
        search_path = shell_setup.executable_search_path(env)
        tool_requests = [
            BinaryPathRequest(
                binary_name=tool,
                search_path=search_path,
            ) for tool in {*tools, *["mkdir", "ln"]}
            if tool not in BASH_BUILTIN_COMMANDS
        ]
        tool_paths = await MultiGet(
            Get(BinaryPaths, BinaryPathRequest, request)
            for request in tool_requests)

        command_env = {
            "TOOLS":
            " ".join(
                _shell_tool_safe_env_name(tool.binary_name)
                for tool in tool_requests),
        }

        for binary, tool_request in zip(tool_paths, tool_requests):
            if binary.first_path:
                command_env[_shell_tool_safe_env_name(
                    tool_request.binary_name)] = binary.first_path.path
            else:
                raise BinaryNotFoundError.from_request(
                    tool_request,
                    rationale=f"execute `{shell_command.alias}` {shell_command.address}",
                )

    extra_env = await Get(Environment, EnvironmentRequest(extra_env_vars))
    command_env.update(extra_env)

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    sources, pkgs_per_target = await MultiGet(
        Get(
            SourceFiles,
            SourceFilesRequest(
                sources_fields=[
                    tgt.get(SourcesField)
                    for tgt in transitive_targets.dependencies
                ],
                for_sources_types=(SourcesField, FileSourceField),
                enable_codegen=True,
            ),
        ),
        Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      transitive_targets.dependencies),
        ),
    )

    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in pkgs_per_target.field_sets)

    if interactive or not working_directory or working_directory in sources.snapshot.dirs:
        work_dir = EMPTY_DIGEST
    else:
        work_dir = await Get(Digest,
                             CreateDigest([Directory(working_directory)]))

    input_digest = await Get(
        Digest,
        MergeDigests([
            sources.snapshot.digest, work_dir,
            *(pkg.digest for pkg in packages)
        ]))

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    if interactive:
        relpath = os.path.relpath(
            working_directory or ".",
            start="/" if os.path.isabs(working_directory) else ".")
        boot_script = f"cd {shlex.quote(relpath)}; " if relpath != "." else ""
    else:
        # Set up a bin_relpath dir with symlinks to all requested tools so that we can use PATH.
        # Force the symlinks to avoid issues with repeat runs using the __run.sh script in the sandbox.
        bin_relpath = ".bin"
        boot_script = ";".join(
            dedent(f"""\
                $mkdir -p {bin_relpath}
                for tool in $TOOLS; do $ln -sf ${{!tool}} {bin_relpath}; done
                export PATH="$PWD/{bin_relpath}"
                """).split("\n"))

    return Process(
        argv=(bash.path, "-c", boot_script + command),
        description=f"Running {shell_command.alias} {shell_command.address}",
        env=command_env,
        input_digest=input_digest,
        output_directories=output_directories,
        output_files=output_files,
        timeout_seconds=timeout,
        working_directory=working_directory,
    )
Example #10
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest([request.field_set.address])
    )
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_targets(
        all_targets, python_setup
    )

    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements([request.field_set.address], internal_only=True),
    )

    pytest_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            interpreter_constraints=interpreter_constraints,
            internal_only=True,
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(all_targets, include_files=True)
    )

    # Create any assets that the test depends on through the `runtime_package_dependencies` field.
    assets: Tuple[BuiltPackage, ...] = ()
    unparsed_runtime_packages = (
        request.field_set.runtime_package_dependencies.to_unparsed_address_inputs()
    )
    if unparsed_runtime_packages.values:
        runtime_package_targets = await Get(
            Targets, UnparsedAddressInputs, unparsed_runtime_packages
        )
        field_sets_per_target = await Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet, runtime_package_targets),
        )
        assets = await MultiGet(
            Get(BuiltPackage, PackageFieldSet, field_set)
            for field_set in field_sets_per_target.field_sets
        )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources])
    )

    pytest_pex, requirements_pex, prepared_sources, field_set_source_files = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    pytest_runner_pex = await Get(
        VenvPex,
        PexRequest(
            output_filename="pytest_runner.pex",
            interpreter_constraints=interpreter_constraints,
            # TODO(John Sirois): Switch to ConsoleScript once Pex supports discovering console
            #  scripts via the PEX_PATH: https://github.com/pantsbuild/pex/issues/1257
            main=EntryPoint("pytest"),
            internal_only=True,
            pex_path=[pytest_pex, requirements_pex],
        ),
    )

    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                coverage_config.digest,
                prepared_sources.source_files.snapshot.digest,
                *(binary.digest for binary in assets),
            )
        ),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend(
            (f"--junitxml={results_file_name}", "-o", f"junit_family={pytest.options.junit_family}")
        )
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        output_files.append(".coverage")
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path] for cov_path in cov_paths),
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
    }

    extra_env.update(test_extra_env.env)

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = ProcessCacheScope.NEVER if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    process = await Get(
        Process,
        VenvPexProcess(
            pytest_runner_pex,
            argv=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
Example #11
async def package_pyoxidizer_binary(
    pyoxidizer: PyOxidizer,
    field_set: PyOxidizerFieldSet,
    runner_script: PyoxidizerRunnerScript,
    bash: BashBinary,
) -> BuiltPackage:
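    # Package the direct dependencies; PyOxidizer consumes the wheels they produce.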
    direct_deps = await Get(Targets,
                            DependenciesRequest(field_set.dependencies))
    deps_field_sets = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet, direct_deps))
    built_packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in deps_field_sets.field_sets)
    wheel_paths = [
        artifact.relpath for built_pkg in built_packages
        for artifact in built_pkg.artifacts
        if artifact.relpath is not None and artifact.relpath.endswith(".whl")
    ]
    if not wheel_paths:
        raise InvalidTargetException(
            softwrap(f"""
                The `{PyOxidizerTarget.alias}` target {field_set.address} must include
                in its `dependencies` field at least one `python_distribution` target that produces a
                `.whl` file. For example, if using `{GenerateSetupField.alias}=True`, then make sure
                `{WheelField.alias}=True`. See {doc_url('python-distributions')}.
                """))

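    # Read the optional PyOxidizer configuration template from its hydrated sources.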
    config_template = None
    if field_set.template.value is not None:
        config_template_source = await Get(
            HydratedSources, HydrateSourcesRequest(field_set.template))
        digest_contents = await Get(DigestContents, Digest,
                                    config_template_source.snapshot.digest)
        config_template = digest_contents[0].content.decode("utf-8")

    config = PyOxidizerConfig(
        executable_name=field_set.address.target_name,
        entry_point=field_set.entry_point.value,
        wheels=wheel_paths,
        template=config_template,
        unclassified_resources=(None if
                                not field_set.unclassified_resources.value else
                                list(field_set.unclassified_resources.value)),
    )
    rendered_config = config.render()
    logger.debug(
        f"Configuration used for {field_set.address}: {rendered_config}")

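    # Fetch the PyOxidizer PEX and write the rendered configuration to a digest.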
    pyoxidizer_pex, config_digest = await MultiGet(
        Get(Pex, PexRequest, pyoxidizer.to_pex_request()),
        Get(
            Digest,
            CreateDigest([
                FileContent("pyoxidizer.bzl", rendered_config.encode("utf-8"))
            ])),
    )
    input_digest = await Get(
        Digest,
        MergeDigests((
            config_digest,
            runner_script.digest,
            *(built_package.digest for built_package in built_packages),
        )),
    )
    pex_process = await Get(
        Process,
        PexProcess(
            pyoxidizer_pex,
            argv=("build", *pyoxidizer.args),
            description=f"Building {field_set.address} with PyOxidizer",
            input_digest=input_digest,
            level=LogLevel.INFO,
            output_directories=("build", ),
        ),
    )
    process_with_caching = dataclasses.replace(
        pex_process,
        argv=(bash.path, runner_script.path, *pex_process.argv),
        append_only_caches={
            **pex_process.append_only_caches,
            "pyoxidizer":
            runner_script.CACHE_PATH,
        },
    )

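    # Run PyOxidizer (wrapped by the runner script for caching) and collect the `build` output.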
    result = await Get(ProcessResult, Process, process_with_caching)

    stripped_digest = await Get(Digest,
                                RemovePrefix(result.output_digest, "build"))
    final_snapshot = await Get(
        Snapshot,
        AddPrefix(stripped_digest,
                  field_set.output_path.value_or_default(file_ending=None)),
    )
    return BuiltPackage(
        final_snapshot.digest,
        artifacts=tuple(
            BuiltPackageArtifact(file) for file in final_snapshot.files),
    )
Example #12
async def create_docker_build_context(
        request: DockerBuildContextRequest,
        docker_options: DockerOptions) -> DockerBuildContext:
    # Get all targets to include in context.
    transitive_targets = await Get(TransitiveTargets,
                                   TransitiveTargetsRequest([request.address]))
    docker_image = transitive_targets.roots[0]

    # Get all dependencies for the root target.
    root_dependencies = await Get(
        Targets, DependenciesRequest(docker_image.get(Dependencies)))

    # Get all file sources from the root dependencies. That includes any non-file sources that can
    # be "codegen"ed into a file source.
    sources_request = Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[
                tgt.get(SourcesField) for tgt in root_dependencies
            ],
            for_sources_types=(
                DockerContextFilesSourcesField,
                FileSourceField,
            ),
            enable_codegen=True,
        ),
    )

    embedded_pkgs_per_target_request = Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet,
                                  transitive_targets.dependencies),
    )

    sources, embedded_pkgs_per_target, dockerfile_info = await MultiGet(
        sources_request,
        embedded_pkgs_per_target_request,
        Get(DockerfileInfo, DockerfileInfoRequest(docker_image.address)),
    )

    # Package binary dependencies for build context.
    embedded_pkgs = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in embedded_pkgs_per_target.field_sets
        # Exclude docker images, unless build_upstream_images is true.
        if request.build_upstream_images or not isinstance(
            getattr(field_set, "source", None), DockerImageSourceField))

    if request.build_upstream_images:
        images_str = ", ".join(a.tags[0] for p in embedded_pkgs
                               for a in p.artifacts
                               if isinstance(a, BuiltDockerImage))
        if images_str:
            logger.debug(f"Built upstream Docker images: {images_str}")
        else:
            logger.debug("Did not build any upstream Docker images")

    packages_str = ", ".join(a.relpath for p in embedded_pkgs
                             for a in p.artifacts if a.relpath)
    if packages_str:
        logger.debug(f"Built packages for Docker image: {packages_str}")
    else:
        logger.debug("Did not build any packages for Docker image")

    embedded_pkgs_digest = [
        built_package.digest for built_package in embedded_pkgs
    ]
    all_digests = (dockerfile_info.digest, sources.snapshot.digest,
                   *embedded_pkgs_digest)

    # Merge all digests to get the final docker build context digest.
    context_request = Get(Snapshot, MergeDigests(d for d in all_digests if d))

    # Requests for build args and env
    build_args_request = Get(DockerBuildArgs,
                             DockerBuildArgsRequest(docker_image))
    build_env_request = Get(DockerBuildEnvironment,
                            DockerBuildEnvironmentRequest(docker_image))
    context, build_args, build_env = await MultiGet(context_request,
                                                    build_args_request,
                                                    build_env_request)

    if request.build_upstream_images:
        # Update build arg values for FROM image build args.

        # Get the FROM image build args with defined values in the Dockerfile.
        dockerfile_build_args = {
            arg_name: arg_value
            for arg_name, arg_value in
            dockerfile_info.build_args.to_dict().items() if arg_value
            and arg_name in dockerfile_info.from_image_build_arg_names
        }
        # Parse the build args values into Address instances.
        from_image_addresses = await Get(
            Addresses,
            UnparsedAddressInputs(
                dockerfile_build_args.values(),
                owning_address=dockerfile_info.address,
            ),
        )
        # Map those addresses to the corresponding built image ref (tag).
        address_to_built_image_tag = {
            field_set.address: image.tags[0]
            for field_set, built in zip(embedded_pkgs_per_target.field_sets,
                                        embedded_pkgs)
            for image in built.artifacts if isinstance(image, BuiltDockerImage)
        }
        # Create the FROM image build args.
        from_image_build_args = [
            f"{arg_name}={address_to_built_image_tag[addr]}" for arg_name, addr
            in zip(dockerfile_build_args.keys(), from_image_addresses)
        ]
        # Merge all build args.
        build_args = DockerBuildArgs.from_strings(*build_args,
                                                  *from_image_build_args)

    return DockerBuildContext.create(
        build_args=build_args,
        snapshot=context,
        dockerfile_info=dockerfile_info,
        build_env=build_env,
    )
Example #13
async def find_valid_field_sets_for_target_roots(
    request: TargetRootsToFieldSetsRequest,
    specs: Specs,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
) -> TargetRootsToFieldSets:
    # NB: This must be in an `await Get`, rather than the rule signature, to avoid a rule graph
    # issue.
    targets = await Get(FilteredTargets, Specs, specs)
    field_sets_per_target = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(request.field_set_superclass, targets))
    targets_to_applicable_field_sets = {}
    for tgt, field_sets in zip(targets, field_sets_per_target.collection):
        if field_sets:
            targets_to_applicable_field_sets[tgt] = field_sets

    # Possibly warn or error if no targets were applicable.
    if not targets_to_applicable_field_sets:
        no_applicable_exception = NoApplicableTargetsException.create_from_field_sets(
            targets,
            specs,
            union_membership,
            registered_target_types,
            field_set_types=union_membership[request.field_set_superclass],
            goal_description=request.goal_description,
        )
        if request.no_applicable_targets_behavior == NoApplicableTargetsBehavior.error:
            raise no_applicable_exception

        # We squelch the warning if the specs came from change detection or only from globs,
        # since in that case we interpret the user's intent as "if there are relevant matching
        # targets, act on them". But we still want to warn if the specs were literal, or empty.
        #
        # No need to check `specs.ignores` here, as change detection will not set that. Likewise,
        # we don't want an ignore spec to trigger this warning, even if it was a literal.
        empty_ok = specs.includes.from_change_detection or (
            specs.includes and not specs.includes.address_literals
            and not specs.includes.file_literals)
        if (request.no_applicable_targets_behavior
                == NoApplicableTargetsBehavior.warn and not empty_ok):
            logger.warning(str(no_applicable_exception))

    if request.num_shards > 0:
        sharded_targets_to_applicable_field_sets = {
            tgt: value
            for tgt, value in targets_to_applicable_field_sets.items()
            if request.is_in_shard(tgt.address.spec)
        }
        result = TargetRootsToFieldSets(
            sharded_targets_to_applicable_field_sets)
    else:
        result = TargetRootsToFieldSets(targets_to_applicable_field_sets)

    if not request.expect_single_field_set:
        return result
    if len(result.targets) > 1:
        raise TooManyTargetsException(
            result.targets, goal_description=request.goal_description)
    if len(result.field_sets) > 1:
        raise AmbiguousImplementationsException(
            result.targets[0],
            result.field_sets,
            goal_description=request.goal_description)
    return result
Example #14
async def create_docker_build_context(
        request: DockerBuildContextRequest,
        docker_options: DockerOptions) -> DockerBuildContext:
    # Get all targets to include in context.
    transitive_targets = await Get(TransitiveTargets,
                                   TransitiveTargetsRequest([request.address]))

    docker_image = transitive_targets.roots[0]

    # Get the Dockerfile from the root target.
    dockerfile_request = Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[
                t.get(SourcesField) for t in transitive_targets.roots
            ],
            for_sources_types=(DockerImageSourceField, ),
        ),
    )

    # Get all dependencies for the root target.
    root_dependencies = await MultiGet(
        Get(Targets, DependenciesRequest(target.get(Dependencies)))
        for target in transitive_targets.roots)

    # Get all sources from the root dependencies (i.e. files).
    sources_request = Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[
                t.get(SourcesField) for t in chain(*root_dependencies)
            ],
            for_sources_types=(FileSourceField, ),
        ),
    )

    embedded_pkgs_per_target_request = Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet,
                                  transitive_targets.dependencies),
    )

    dockerfile, sources, embedded_pkgs_per_target, dockerfile_info = await MultiGet(
        dockerfile_request,
        sources_request,
        embedded_pkgs_per_target_request,
        Get(
            DockerfileInfo,
            DockerImageSourceField,
            transitive_targets.roots[0][DockerImageSourceField],
        ),
    )

    # Package binary dependencies for build context.
    embedded_pkgs = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in embedded_pkgs_per_target.field_sets
        # Exclude docker images, unless build_upstream_images is true.
        if request.build_upstream_images or not isinstance(
            getattr(field_set, "sources", None), DockerImageSourceField))

    packages_str = ", ".join(a.relpath for p in embedded_pkgs
                             for a in p.artifacts if a.relpath)
    logger.debug(f"Packages for Docker image: {packages_str}")

    embedded_pkgs_digest = [
        built_package.digest for built_package in embedded_pkgs
    ]
    all_digests = (dockerfile.snapshot.digest, sources.snapshot.digest,
                   *embedded_pkgs_digest)

    # Merge all digests to get the final docker build context digest.
    context_request = Get(Digest, MergeDigests(d for d in all_digests if d))

    # Requests for build args and env
    build_args_request = Get(DockerBuildArgs,
                             DockerBuildArgsRequest(docker_image))
    env_request = Get(DockerBuildEnvironment,
                      DockerBuildEnvironmentRequest(docker_image))
    context, build_args, env = await MultiGet(context_request,
                                              build_args_request, env_request)

    return DockerBuildContext.create(
        build_args=build_args,
        digest=context,
        dockerfile_info=dockerfile_info,
        env=env,
    )
Example #15
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([request.field_set.address]))
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_targets(
        all_targets, python_setup)

    # Defaults to zip_safe=False.
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements([request.field_set.address],
                                               internal_only=True),
    )

    pytest_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            interpreter_constraints=interpreter_constraints,
            entry_point="pytest:main",
            internal_only=True,
            additional_args=(
                # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
                # `importlib_metadata` and thus `zipp`, does not play nicely when doing import
                # magic directly from zip files. `zipp` has pathologically bad behavior with large
                # zipfiles.
                # TODO: this does have a performance cost as the pex must now be expanded to disk.
                # Long term, it would be better to fix Zipp (whose fix would then need to be used
                # by importlib_metadata and then by Pytest). See
                # https://github.com/jaraco/zipp/pull/26.
                "--not-zip-safe",
                # TODO(John Sirois): Support shading python binaries:
                #   https://github.com/pantsbuild/pants/issues/9206
                "--pex-path",
                requirements_pex_request.input.output_filename,
            ),
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Create any assets that the test depends on through the `runtime_package_dependencies` field.
    assets: Tuple[BuiltPackage, ...] = ()
    unparsed_runtime_packages = (request.field_set.runtime_package_dependencies
                                 .to_unparsed_address_inputs())
    if unparsed_runtime_packages.values:
        runtime_package_targets = await Get(Targets, UnparsedAddressInputs,
                                            unparsed_runtime_packages)
        field_sets_per_target = await Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      runtime_package_targets),
        )
        assets = await MultiGet(
            Get(BuiltPackage, PackageFieldSet, field_set)
            for field_set in field_sets_per_target.field_sets)

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))

    pytest_pex, requirements_pex, prepared_sources, field_set_source_files = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            prepared_sources.source_files.snapshot.digest,
            requirements_pex.digest,
            pytest_pex.digest,
            *(binary.digest for binary in assets),
        )),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.options.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        output_files.append(".coverage")
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (
            ".", )
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path]
                                           for cov_path in cov_paths),
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
    }

    extra_env.update(test_extra_env.env)

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = ProcessCacheScope.NEVER if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    process = await Get(
        Process,
        PexProcess(
            pytest_pex,
            argv=(*pytest.options.args, *coverage_args,
                  *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)