async def merge_coverage_data(
        data_collection: PytestCoverageDataCollection,
        coverage_setup: CoverageSetup) -> MergedCoverageData:
    if len(data_collection) == 1:
        return MergedCoverageData(data_collection[0].digest)
    # We prefix each .coverage file with its corresponding address to avoid collisions.
    coverage_digests = await MultiGet(
        Get(Digest, AddPrefix(data.digest, prefix=data.address.path_safe_spec))
        for data in data_collection)
    input_digest = await Get(
        Digest, MergeDigests((*coverage_digests, coverage_setup.pex.digest)))
    prefixes = sorted(f"{data.address.path_safe_spec}/.coverage"
                      for data in data_collection)
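    # For illustration (hypothetical address): a test target such as `src/python/app:tests`
    # would contribute a prefixed file like "src.python.app.tests/.coverage" here.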
    result = await Get(
        ProcessResult,
        PexProcess(
            coverage_setup.pex,
            argv=("combine", *prefixes),
            input_digest=input_digest,
            output_files=(".coverage", ),
            description=f"Merge {len(prefixes)} Pytest coverage reports.",
            level=LogLevel.DEBUG,
        ),
    )
    return MergedCoverageData(result.output_digest)
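
The examples in this listing all share one core pattern: merge the tool PEX's digest with whatever input files it needs, then request a ProcessResult from a PexProcess. A minimal sketch of that pattern as it would appear inside a rule body (tool_pex and files_digest are placeholder names, not part of the example above):

    input_digest = await Get(Digest, MergeDigests((files_digest, tool_pex.digest)))
    result = await Get(
        ProcessResult,
        PexProcess(
            tool_pex,
            argv=("--version",),
            input_digest=input_digest,
            description="Run the tool inside its PEX.",
        ),
    )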
Example #2
def get_all_data(rule_runner: RuleRunner, pex: Pex | VenvPex) -> PexData:
    if isinstance(pex, VenvPex):
        digest = pex.digest
        sandbox_path = pex.pex_filename
        process = rule_runner.request(
            Process,
            [
                VenvPexProcess(
                    pex,
                    argv=["info"],
                    extra_env=dict(PEX_TOOLS="1"),
                    description="Extract PEX-INFO.",
                ),
            ],
        )
    else:
        digest = pex.digest
        sandbox_path = pex.name
        pex_pex = rule_runner.request(PexPEX, [])
        process = rule_runner.request(
            Process,
            [
                PexProcess(
                    Pex(digest=pex_pex.digest,
                        name=pex_pex.exe,
                        python=pex.python),
                    argv=["-m", "pex.tools", pex.name, "info"],
                    input_digest=pex.digest,
                    extra_env=dict(PEX_INTERPRETER="1"),
                    description="Extract PEX-INFO.",
                )
            ],
        )

    rule_runner.scheduler.write_digest(digest)
    local_path = PurePath(rule_runner.build_root) / "test.pex"
    result = rule_runner.request(ProcessResult, [process])
    pex_info_content = result.stdout.decode()

    is_zipapp = zipfile.is_zipfile(local_path)
    if is_zipapp:
        with zipfile.ZipFile(local_path, "r") as zipfp:
            files = tuple(zipfp.namelist())
    else:
        files = tuple(
            os.path.normpath(
                os.path.relpath(os.path.join(root, path), local_path))
            for root, dirs, files in os.walk(local_path)
            for path in dirs + files)

    return PexData(
        pex=pex,
        is_zipapp=is_zipapp,
        sandbox_path=PurePath(sandbox_path),
        local_path=local_path,
        info=json.loads(pex_info_content),
        files=files,
    )
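
A hedged usage sketch for the helper above (pex_request is a placeholder PexRequest; the assertion assumes the built zipapp carries its PEX-INFO metadata at the archive root):

    pex = rule_runner.request(Pex, [pex_request])
    data = get_all_data(rule_runner, pex)
    # data.info is the parsed PEX-INFO JSON; data.files lists the archive/sandbox contents.
    assert "PEX-INFO" in data.files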
Example #3
async def setup_isort(setup_request: SetupRequest, isort: Isort) -> Setup:
    isort_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="isort.pex",
            internal_only=True,
            requirements=PexRequirements(isort.all_requirements),
            interpreter_constraints=PexInterpreterConstraints(
                isort.interpreter_constraints),
            entry_point=isort.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=isort.config,
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            conjunction=GlobExpansionConjunction.all_match,
            description_of_origin="the option `--isort-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in setup_request.request.field_sets),
    )

    source_files, isort_pex, config_digest = await MultiGet(
        source_files_request, isort_pex_request, config_digest_request)
    source_files_snapshot = (
        source_files.snapshot
        if setup_request.request.prior_formatter_result is None else
        setup_request.request.prior_formatter_result)

    input_digest = await Get(
        Digest,
        MergeDigests(
            (source_files_snapshot.digest, isort_pex.digest, config_digest)),
    )

    process = await Get(
        Process,
        PexProcess(
            isort_pex,
            argv=generate_args(source_files=source_files,
                               isort=isort,
                               check_only=setup_request.check_only),
            input_digest=input_digest,
            output_files=source_files_snapshot.files,
            description=
            f"Run isort on {pluralize(len(setup_request.request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return Setup(process, original_digest=source_files_snapshot.digest)
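
The returned Setup is not executed here; a downstream fmt/lint rule typically does that. A hedged sketch of that follow-up step, assuming a `setup` value like the one returned above:

    result = await Get(ProcessResult, Process, setup.process)
    output_snapshot = await Get(Snapshot, Digest, result.output_digest)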
Example #4
async def clangformat_fmt(request: ClangFormatRequest,
                          clangformat: ClangFormat) -> FmtResult:
    if clangformat.skip:
        return FmtResult.skip(formatter_name=request.name)

    # Look for any/all of the clang-format configuration files (recurse sub-dirs)
    config_files_get = Get(
        ConfigFiles,
        ConfigFilesRequest,
        clangformat.config_request(request.snapshot.dirs),
    )

    clangformat_pex, config_files = await MultiGet(
        Get(Pex, PexRequest, clangformat.to_pex_request()), config_files_get)

    # Merge the source files, config files, and the clang-format pex into a single input digest.
    input_digest = await Get(
        Digest,
        MergeDigests([
            request.snapshot.digest,
            config_files.snapshot.digest,
            clangformat_pex.digest,
        ]),
    )

    result = await Get(
        ProcessResult,
        PexProcess(
            clangformat_pex,
            argv=(
                "--style=file",  # Look for .clang-format files
                "--fallback-style=webkit",  # Use WebKit if there is no config file
                "-i",  # In-place edits
                "--Werror",  # Formatting warnings as errors
                *clangformat.args,  # User-added arguments
                *request.snapshot.files,
            ),
            input_digest=input_digest,
            output_files=request.snapshot.files,
            description=
            f"Run clang-format on {pluralize(len(request.snapshot.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    output_snapshot = await Get(Snapshot, Digest, result.output_digest)
    return FmtResult.create(request,
                            result,
                            output_snapshot,
                            strip_chroot_path=True)
Example #5
def test_pex_environment(rule_runner: RuleRunner) -> None:
    sources = rule_runner.request(
        Digest,
        [
            CreateDigest(
                (
                    FileContent(
                        path="main.py",
                        content=textwrap.dedent(
                            """
                        from os import environ
                        print(f"LANG={environ.get('LANG')}")
                        print(f"ftp_proxy={environ.get('ftp_proxy')}")
                        """
                        ).encode(),
                    ),
                )
            ),
        ],
    )
    pex_output = create_pex_and_get_all_data(
        rule_runner,
        entry_point="main",
        sources=sources,
        additional_pants_args=(
            "--subprocess-environment-env-vars=LANG",  # Value should come from environment.
            "--subprocess-environment-env-vars=ftp_proxy=dummyproxy",
        ),
        env={"LANG": "es_PY.UTF-8"},
    )

    process = rule_runner.request(
        Process,
        [
            PexProcess(
                pex_output["pex"],
                argv=["python", "test.pex"],
                input_digest=pex_output["pex"].digest,
                description="Run the pex and check its reported environment",
            ),
        ],
    )

    result = rule_runner.request(ProcessResult, [process])
    assert b"LANG=es_PY.UTF-8" in result.stdout
    assert b"ftp_proxy=dummyproxy" in result.stdout
Example #6
def _run_setup_py(
    rule_runner: RuleRunner,
    plugin: str,
    interpreter_constraints: InterpreterConstraints,
    version: str | None,
    install_requires: Sequence[str] | None,
    setup_py_args: Sequence[str],
    install_dir: str,
) -> None:
    pex_obj = _create_pex(rule_runner, interpreter_constraints)
    install_requires_str = f", install_requires={install_requires!r}" if install_requires else ""
    setup_py_file = FileContent(
        "setup.py",
        dedent(f"""
                from setuptools import setup

                setup(name="{plugin}", version="{version or DEFAULT_VERSION}"{install_requires_str})
            """).encode(),
    )
    source_digest = rule_runner.request(
        Digest,
        [CreateDigest([setup_py_file])],
    )
    merged_digest = rule_runner.request(
        Digest, [MergeDigests([pex_obj.digest, source_digest])])

    process = PexProcess(
        pex=pex_obj,
        argv=("setup.py", *setup_py_args),
        input_digest=merged_digest,
        description="Run setup.py",
        output_directories=("dist/", ),
    )
    result = rule_runner.request(ProcessResult, [process])
    result_snapshot = rule_runner.request(Snapshot, [result.output_digest])
    rule_runner.scheduler.write_digest(result.output_digest,
                                       path_prefix="output")
    safe_mkdir(install_dir)
    for path in result_snapshot.files:
        shutil.copy(PurePath(rule_runner.build_root, "output", path),
                    install_dir)
Example #7
async def run_setup_py(req: RunSetupPyRequest,
                       setuptools: Setuptools) -> RunSetupPyResult:
    """Run a setup.py command on a single exported target."""
    # Note that this pex has no entrypoint. We use it to run our generated setup.py, which
    # in turn imports from and invokes setuptools.
    setuptools_pex = await Get(
        Pex,
        PexRequest(
            output_filename="setuptools.pex",
            internal_only=True,
            requirements=PexRequirements(setuptools.all_requirements),
            interpreter_constraints=(
                req.interpreter_constraints if
                setuptools.options.is_default("interpreter_constraints") else
                PexInterpreterConstraints(setuptools.interpreter_constraints)),
        ),
    )
    input_digest = await Get(
        Digest, MergeDigests((req.chroot.digest, setuptools_pex.digest)))
    # The setuptools dist dir, created by it under the chroot (not to be confused with
    # pants's own dist dir, at the buildroot).
    dist_dir = "dist/"
    result = await Get(
        ProcessResult,
        PexProcess(
            setuptools_pex,
            argv=("setup.py", *req.args),
            input_digest=input_digest,
            # setuptools commands that create dists write them to the distdir.
            # TODO: Could there be other useful files to capture?
            output_directories=(dist_dir, ),
            description=
            f"Run setuptools for {req.exported_target.target.address}",
            level=LogLevel.DEBUG,
        ),
    )
    output_digest = await Get(Digest,
                              RemovePrefix(result.output_digest, dist_dir))
    return RunSetupPyResult(output_digest)
Example #8
async def run_setup_py(
    req: RunSetupPyRequest, setuptools_setup: SetuptoolsSetup
) -> RunSetupPyResult:
    """Run a setup.py command on a single exported target."""
    input_digest = await Get(
        Digest, MergeDigests((req.chroot.digest, setuptools_setup.requirements_pex.digest))
    )
    # The setuptools dist dir, created by it under the chroot (not to be confused with
    # pants's own dist dir, at the buildroot).
    dist_dir = "dist/"
    result = await Get(
        ProcessResult,
        PexProcess(
            setuptools_setup.requirements_pex,
            argv=("setup.py", *req.args),
            input_digest=input_digest,
            # setuptools commands that create dists write them to the distdir.
            # TODO: Could there be other useful files to capture?
            output_directories=(dist_dir,),
            description=f"Run setuptools for {req.exported_target.target.address}",
        ),
    )
    output_digest = await Get(Digest, RemovePrefix(result.output_digest, dist_dir))
    return RunSetupPyResult(output_digest)
Example #9
async def pylint_lint_partition(partition: PylintPartition, pylint: Pylint) -> LintResult:
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            (field_set.address for field_set in partition.field_sets),
            # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
            # a different version for the requirements than the other two PEXes, which can result
            # in a PEX runtime error about missing dependencies.
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
            internal_only=True,
            direct_deps_only=True,
        ),
    )

    plugin_requirements = PexRequirements.create_from_requirement_fields(
        plugin_tgt[PythonRequirementsField]
        for plugin_tgt in partition.plugin_targets
        if plugin_tgt.has_field(PythonRequirementsField)
    )
    # Right now any Pylint transitive requirements will shadow corresponding user
    # requirements, which could lead to problems.
    pylint_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pylint.pex",
            internal_only=True,
            requirements=PexRequirements([*pylint.all_requirements, *plugin_requirements]),
            entry_point=pylint.entry_point,
            interpreter_constraints=partition.interpreter_constraints,
            # TODO(John Sirois): Support shading python binaries:
            #   https://github.com/pantsbuild/pants/issues/9206
            additional_args=("--pex-path", requirements_pex_request.input.output_filename),
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[pylint.config] if pylint.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--pylint-config`",
        ),
    )

    prepare_plugin_sources_request = Get(
        StrippedPythonSourceFiles, PythonSourceFilesRequest(partition.plugin_targets)
    )
    prepare_python_sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(partition.targets_with_dependencies)
    )
    field_set_sources_request = Get(
        SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets)
    )

    (
        pylint_pex,
        requirements_pex,
        config_digest,
        prepared_plugin_sources,
        prepared_python_sources,
        field_set_sources,
    ) = await MultiGet(
        pylint_pex_request,
        requirements_pex_request,
        config_digest_request,
        prepare_plugin_sources_request,
        prepare_python_sources_request,
        field_set_sources_request,
    )

    prefixed_plugin_sources = (
        await Get(
            Digest,
            AddPrefix(prepared_plugin_sources.stripped_source_files.snapshot.digest, "__plugins"),
        )
        if pylint.source_plugins
        else EMPTY_DIGEST
    )

    pythonpath = list(prepared_python_sources.source_roots)
    if pylint.source_plugins:
        # NB: Pylint source plugins must be explicitly loaded via PEX_EXTRA_SYS_PATH. The value must
        # point to the plugin's directory, rather than to a parent's directory, because
        # `load-plugins` takes a module name rather than a path to the module; i.e. `plugin`, but
        # not `path.to.plugin`. (This means users must have specified the parent directory as a
        # source root.)
        pythonpath.append("__plugins")
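        # For illustration (hypothetical layout): a plugin at `pants-plugins/custom_checker.py`
        # with `pants-plugins` as a source root lands at `__plugins/custom_checker.py` in the
        # sandbox, and the Pylint config would reference it as `load-plugins=custom_checker`.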

    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                pylint_pex.digest,
                requirements_pex.digest,
                config_digest,
                prefixed_plugin_sources,
                prepared_python_sources.source_files.snapshot.digest,
            )
        ),
    )

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            pylint_pex,
            argv=generate_args(source_files=field_set_sources, pylint=pylint),
            input_digest=input_digest,
            extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
            description=f"Run Pylint on {pluralize(len(partition.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.from_fallible_process_result(
        result, partition_description=str(sorted(str(c) for c in partition.interpreter_constraints))
    )
Example #10
def create_pex_and_get_all_data(
    rule_runner: RuleRunner,
    *,
    pex_type: type[Pex | VenvPex] = Pex,
    requirements: PexRequirements | Lockfile | LockfileContent = PexRequirements(),
    main: MainSpecification | None = None,
    interpreter_constraints: InterpreterConstraints = InterpreterConstraints(),
    platforms: PexPlatforms = PexPlatforms(),
    sources: Digest | None = None,
    additional_inputs: Digest | None = None,
    additional_pants_args: tuple[str, ...] = (),
    additional_pex_args: tuple[str, ...] = (),
    env: Mapping[str, str] | None = None,
    internal_only: bool = True,
) -> PexData:
    request = PexRequest(
        output_filename="test.pex",
        internal_only=internal_only,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=platforms,
        main=main,
        sources=sources,
        additional_inputs=additional_inputs,
        additional_args=additional_pex_args,
    )
    rule_runner.set_options(
        ["--backend-packages=pants.backend.python", *additional_pants_args],
        env=env,
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )

    pex: Pex | VenvPex
    if pex_type == Pex:
        pex = rule_runner.request(Pex, [request])
        digest = pex.digest
        sandbox_path = pex.name
        pex_pex = rule_runner.request(PexPEX, [])
        process = rule_runner.request(
            Process,
            [
                PexProcess(
                    Pex(digest=pex_pex.digest,
                        name=pex_pex.exe,
                        python=pex.python),
                    argv=["-m", "pex.tools", pex.name, "info"],
                    input_digest=pex.digest,
                    extra_env=dict(PEX_INTERPRETER="1"),
                    description="Extract PEX-INFO.",
                )
            ],
        )
    else:
        pex = rule_runner.request(VenvPex, [request])
        digest = pex.digest
        sandbox_path = pex.pex_filename
        process = rule_runner.request(
            Process,
            [
                VenvPexProcess(
                    pex,
                    argv=["info"],
                    extra_env=dict(PEX_TOOLS="1"),
                    description="Extract PEX-INFO.",
                ),
            ],
        )

    rule_runner.scheduler.write_digest(digest)
    local_path = PurePath(rule_runner.build_root) / "test.pex"
    result = rule_runner.request(ProcessResult, [process])
    pex_info_content = result.stdout.decode()

    is_zipapp = zipfile.is_zipfile(local_path)
    if is_zipapp:
        with zipfile.ZipFile(local_path, "r") as zipfp:
            files = tuple(zipfp.namelist())
    else:
        files = tuple(
            os.path.normpath(
                os.path.relpath(os.path.join(root, path), local_path))
            for root, dirs, files in os.walk(local_path)
            for path in dirs + files)

    return PexData(
        pex=pex,
        is_zipapp=is_zipapp,
        sandbox_path=PurePath(sandbox_path),
        local_path=local_path,
        info=json.loads(pex_info_content),
        files=files,
    )
Example #11
def create_pex_and_get_all_data(
    rule_runner: RuleRunner,
    *,
    pex_type: type[Pex | VenvPex] = Pex,
    requirements: PexRequirements = PexRequirements(),
    main: MainSpecification | None = None,
    interpreter_constraints: PexInterpreterConstraints = PexInterpreterConstraints(),
    platforms: PexPlatforms = PexPlatforms(),
    sources: Digest | None = None,
    additional_inputs: Digest | None = None,
    additional_pants_args: Tuple[str, ...] = (),
    additional_pex_args: Tuple[str, ...] = (),
    env: Mapping[str, str] | None = None,
    internal_only: bool = True,
) -> Dict:
    request = PexRequest(
        output_filename="test.pex",
        internal_only=internal_only,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=platforms,
        main=main,
        sources=sources,
        additional_inputs=additional_inputs,
        additional_args=additional_pex_args,
    )
    rule_runner.set_options(
        ["--backend-packages=pants.backend.python", *additional_pants_args],
        env=env,
        env_inherit={"PATH", "PYENV_ROOT", "HOME"},
    )
    pex = rule_runner.request(pex_type, [request])
    if isinstance(pex, Pex):
        digest = pex.digest
        pex_pex = rule_runner.request(PexPEX, [])
        process = rule_runner.request(
            Process,
            [
                PexProcess(
                    Pex(digest=pex_pex.digest, name=pex_pex.exe, python=pex.python),
                    argv=["-m", "pex.tools", pex.name, "info"],
                    input_digest=pex.digest,
                    extra_env=dict(PEX_INTERPRETER="1"),
                    description="Extract PEX-INFO.",
                )
            ],
        )
    elif isinstance(pex, VenvPex):
        digest = pex.digest
        process = rule_runner.request(
            Process,
            [
                VenvPexProcess(
                    pex,
                    argv=["info"],
                    extra_env=dict(PEX_TOOLS="1"),
                    description="Extract PEX-INFO.",
                ),
            ],
        )
    else:
        raise AssertionError(f"Expected a Pex or a VenvPex but got a {type(pex)}.")

    rule_runner.scheduler.write_digest(digest)
    pex_path = os.path.join(rule_runner.build_root, "test.pex")
    result = rule_runner.request(ProcessResult, [process])
    pex_info_content = result.stdout.decode()

    with zipfile.ZipFile(pex_path, "r") as zipfp:
        pex_list = zipfp.namelist()

    return {
        "pex": pex,
        "local_path": pex_path,
        "info": json.loads(pex_info_content),
        "files": pex_list,
    }
Example #12
async def bandit_lint_partition(partition: BanditPartition, bandit: Bandit,
                                lint_subsystem: LintSubsystem) -> LintResult:
    bandit_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="bandit.pex",
            internal_only=True,
            requirements=PexRequirements(bandit.all_requirements),
            interpreter_constraints=partition.interpreter_constraints,
            entry_point=bandit.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[bandit.config] if bandit.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--bandit-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    bandit_pex, config_digest, source_files = await MultiGet(
        bandit_pex_request, config_digest_request, source_files_request)

    input_digest = await Get(
        Digest,
        MergeDigests(
            (source_files.snapshot.digest, bandit_pex.digest, config_digest)))

    report_file_name = "bandit_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            bandit_pex,
            argv=generate_args(source_files=source_files,
                               bandit=bandit,
                               report_file_name=report_file_name),
            input_digest=input_digest,
            description=
            f"Run Bandit on {pluralize(len(partition.field_sets), 'file')}.",
            output_files=(report_file_name, ) if report_file_name else None,
            level=LogLevel.DEBUG,
        ),
    )

    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Bandit report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
Example #13
async def create_python_awslambda(
    field_set: PythonAwsLambdaFieldSet, lambdex_setup: LambdexSetup, global_options: GlobalOptions
) -> CreatedAWSLambda:
    output_filename = field_set.output_path.value_or_default(
        field_set.address,
        # Lambdas typically use the .zip suffix, so we use that instead of .pex.
        file_ending="zip",
        use_legacy_format=global_options.options.pants_distdir_legacy_paths,
    )

    # We hardcode the platform value to the appropriate one for each AWS Lambda runtime.
    # (Running the "hello world" lambda in the example code will report the platform, and can be
    # used to verify correctness of these platform strings.)
    py_major, py_minor = field_set.runtime.to_interpreter_version()
    platform = f"linux_x86_64-cp-{py_major}{py_minor}-cp{py_major}{py_minor}"
    # Set the pymalloc ABI flag; this was removed in Python 3.8: https://bugs.python.org/issue36707
    if py_major <= 3 and py_minor < 8:
        platform += "m"
    if (py_major, py_minor) == (2, 7):
        platform += "u"
    pex_request = TwoStepPexFromTargetsRequest(
        PexFromTargetsRequest(
            addresses=[field_set.address],
            internal_only=False,
            entry_point=None,
            output_filename=output_filename,
            platforms=PexPlatforms([platform]),
            additional_args=[
                # Ensure we can resolve manylinux wheels in addition to any AMI-specific wheels.
                "--manylinux=manylinux2014",
                # When we're executing Pex on Linux, allow a local interpreter to be resolved if
                # available and matching the AMI platform.
                "--resolve-local-platforms",
            ],
        )
    )

    pex_result = await Get(TwoStepPex, TwoStepPexFromTargetsRequest, pex_request)
    input_digest = await Get(
        Digest, MergeDigests((pex_result.pex.digest, lambdex_setup.requirements_pex.digest))
    )

    # NB: Lambdex modifies its input pex in-place, so the input file is also the output file.
    result = await Get(
        ProcessResult,
        PexProcess(
            lambdex_setup.requirements_pex,
            argv=("build", "-e", field_set.handler.value, output_filename),
            input_digest=input_digest,
            output_files=(output_filename,),
            description=f"Setting up handler in {output_filename}",
        ),
    )
    return CreatedAWSLambda(
        digest=result.output_digest,
        zip_file_relpath=output_filename,
        runtime=field_set.runtime.value,
        # The AWS-facing handler function is always lambdex_handler.handler, which is the wrapper
        # injected by lambdex that manages invocation of the actual handler.
        handler="lambdex_handler.handler",
    )
Example #14
async def package_pyoxidizer_binary(
    pyoxidizer: PyOxidizer,
    field_set: PyOxidizerFieldSet,
    runner_script: PyoxidizerRunnerScript,
    bash: BashBinary,
) -> BuiltPackage:
    direct_deps = await Get(Targets,
                            DependenciesRequest(field_set.dependencies))
    deps_field_sets = await Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet, direct_deps))
    built_packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in deps_field_sets.field_sets)
    wheel_paths = [
        artifact.relpath for built_pkg in built_packages
        for artifact in built_pkg.artifacts
        if artifact.relpath is not None and artifact.relpath.endswith(".whl")
    ]
    if not wheel_paths:
        raise InvalidTargetException(
            softwrap(f"""
                The `{PyOxidizerTarget.alias}` target {field_set.address} must include
                in its `dependencies` field at least one `python_distribution` target that produces a
                `.whl` file. For example, if using `{GenerateSetupField.alias}=True`, then make sure
                `{WheelField.alias}=True`. See {doc_url('python-distributions')}.
                """))

    config_template = None
    if field_set.template.value is not None:
        config_template_source = await Get(
            HydratedSources, HydrateSourcesRequest(field_set.template))
        digest_contents = await Get(DigestContents, Digest,
                                    config_template_source.snapshot.digest)
        config_template = digest_contents[0].content.decode("utf-8")

    config = PyOxidizerConfig(
        executable_name=field_set.address.target_name,
        entry_point=field_set.entry_point.value,
        wheels=wheel_paths,
        template=config_template,
        unclassified_resources=(None if
                                not field_set.unclassified_resources.value else
                                list(field_set.unclassified_resources.value)),
    )
    rendered_config = config.render()
    logger.debug(
        f"Configuration used for {field_set.address}: {rendered_config}")

    pyoxidizer_pex, config_digest = await MultiGet(
        Get(Pex, PexRequest, pyoxidizer.to_pex_request()),
        Get(
            Digest,
            CreateDigest([
                FileContent("pyoxidizer.bzl", rendered_config.encode("utf-8"))
            ])),
    )
    input_digest = await Get(
        Digest,
        MergeDigests((
            config_digest,
            runner_script.digest,
            *(built_package.digest for built_package in built_packages),
        )),
    )
    pex_process = await Get(
        Process,
        PexProcess(
            pyoxidizer_pex,
            argv=("build", *pyoxidizer.args),
            description=f"Building {field_set.address} with PyOxidizer",
            input_digest=input_digest,
            level=LogLevel.INFO,
            output_directories=("build", ),
        ),
    )
    process_with_caching = dataclasses.replace(
        pex_process,
        argv=(bash.path, runner_script.path, *pex_process.argv),
        append_only_caches={
            **pex_process.append_only_caches,
            "pyoxidizer":
            runner_script.CACHE_PATH,
        },
    )

    result = await Get(ProcessResult, Process, process_with_caching)

    stripped_digest = await Get(Digest,
                                RemovePrefix(result.output_digest, "build"))
    final_snapshot = await Get(
        Snapshot,
        AddPrefix(stripped_digest,
                  field_set.output_path.value_or_default(file_ending=None)),
    )
    return BuiltPackage(
        final_snapshot.digest,
        artifacts=tuple(
            BuiltPackageArtifact(file) for file in final_snapshot.files),
    )
Example #15
async def generate_coverage_reports(
    merged_coverage_data: MergedCoverageData,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    all_used_addresses: Addresses,
) -> CoverageReports:
    """Takes all Python test results and generates a single coverage report."""
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_used_addresses))
    sources = await Get(
        PythonSourceFiles,
        # Coverage sometimes includes non-Python files in its `.coverage` data. We need to
        # ensure that they're present when generating the report. We include all the files included
        # by `pytest_runner.py`.
        PythonSourceFilesRequest(transitive_targets.closure,
                                 include_files=True,
                                 include_resources=True),
    )
    input_digest = await Get(
        Digest,
        MergeDigests((
            merged_coverage_data.coverage_data,
            coverage_config.digest,
            coverage_setup.pex.digest,
            sources.source_files.snapshot.digest,
        )),
    )

    pex_processes = []
    report_types = []
    result_snapshot = await Get(Snapshot, Digest,
                                merged_coverage_data.coverage_data)
    coverage_reports: List[CoverageReport] = []
    for report_type in coverage_subsystem.reports:
        if report_type == CoverageReportType.RAW:
            coverage_reports.append(
                FilesystemCoverageReport(
                    report_type=CoverageReportType.RAW.value,
                    result_snapshot=result_snapshot,
                    directory_to_materialize_to=coverage_subsystem.output_dir,
                    report_file=coverage_subsystem.output_dir / ".coverage",
                ))
            continue
        report_types.append(report_type)
        output_file = (f"coverage.{report_type.value}" if report_type in {
            CoverageReportType.XML, CoverageReportType.JSON
        } else None)
        pex_processes.append(
            PexProcess(
                coverage_setup.pex,
                argv=(report_type.report_name, ),
                input_digest=input_digest,
                output_directories=("htmlcov", )
                if report_type == CoverageReportType.HTML else None,
                output_files=(output_file, ) if output_file else None,
                description=
                f"Generate Pytest {report_type.report_name} coverage report.",
                level=LogLevel.DEBUG,
            ))
    results = await MultiGet(
        Get(ProcessResult, PexProcess, process) for process in pex_processes)
    result_stdouts = tuple(res.stdout for res in results)
    result_snapshots = await MultiGet(
        Get(Snapshot, Digest, res.output_digest) for res in results)

    coverage_reports.extend(
        _get_coverage_report(coverage_subsystem.output_dir, report_type,
                             stdout, snapshot)
        for (report_type, stdout,
             snapshot) in zip(report_types, result_stdouts, result_snapshots))

    return CoverageReports(tuple(coverage_reports))
Example #16
async def setup_black(setup_request: SetupRequest, black: Black,
                      python_setup: PythonSetup) -> Setup:
    # Black requires 3.6+ but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6, and 3.7.
    # However, typed-ast does not understand 3.8, so instead we must run Black with Python 3.8 when
    # relevant. We only do this if <3.8 can't be used, as we don't want a loose requirement like
    # `>=3.6` to result in requiring Python 3.8, which would error if 3.8 is not installed on the
    # machine.
    all_interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (field_set.interpreter_constraints
         for field_set in setup_request.request.field_sets),
        python_setup,
    )
    tool_interpreter_constraints = PexInterpreterConstraints((
        "CPython>=3.8", ) if (
            all_interpreter_constraints.requires_python38_or_newer()
            and black.options.is_default("interpreter_constraints")
        ) else black.interpreter_constraints)
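    # For illustration: if every field set already requires 3.8+ (e.g. `CPython>=3.8`) and the
    # user kept Black's default interpreter constraints, Black runs under `CPython>=3.8`;
    # a looser constraint such as `CPython>=3.6` falls through to Black's own constraints.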

    black_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="black.pex",
            internal_only=True,
            requirements=PexRequirements(black.all_requirements),
            interpreter_constraints=tool_interpreter_constraints,
            entry_point=black.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[black.config] if black.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--black-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in setup_request.request.field_sets),
    )

    source_files, black_pex, config_digest = await MultiGet(
        source_files_request, black_pex_request, config_digest_request)
    source_files_snapshot = (
        source_files.snapshot
        if setup_request.request.prior_formatter_result is None else
        setup_request.request.prior_formatter_result)

    input_digest = await Get(
        Digest,
        MergeDigests(
            (source_files_snapshot.digest, black_pex.digest, config_digest)),
    )

    process = await Get(
        Process,
        PexProcess(
            black_pex,
            argv=generate_args(source_files=source_files,
                               black=black,
                               check_only=setup_request.check_only),
            input_digest=input_digest,
            output_files=source_files_snapshot.files,
            description=
            f"Run Black on {pluralize(len(setup_request.request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return Setup(process, original_digest=source_files_snapshot.digest)
Example #17
async def generate_coverage_reports(
    merged_coverage_data: MergedCoverageData,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    all_used_addresses: Addresses,
) -> CoverageReports:
    """Takes all Python test results and generates a single coverage report."""
    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(all_used_addresses))
    sources = await Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(transitive_targets.closure, include_resources=False),
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                merged_coverage_data.coverage_data,
                coverage_config.digest,
                coverage_setup.pex.digest,
                sources.source_files.snapshot.digest,
            )
        ),
    )

    pex_processes = []
    report_types = []
    result_snapshot = await Get(Snapshot, Digest, merged_coverage_data.coverage_data)
    coverage_reports: List[CoverageReport] = []
    for report_type in coverage_subsystem.reports:
        if report_type == CoverageReportType.RAW:
            coverage_reports.append(
                FilesystemCoverageReport(
                    report_type=CoverageReportType.RAW.value,
                    result_snapshot=result_snapshot,
                    directory_to_materialize_to=coverage_subsystem.output_dir,
                    report_file=coverage_subsystem.output_dir / ".coverage",
                )
            )
            continue
        report_types.append(report_type)
        output_file = (
            f"coverage.{report_type.value}"
            if report_type in {CoverageReportType.XML, CoverageReportType.JSON}
            else None
        )
        pex_processes.append(
            PexProcess(
                coverage_setup.pex,
                # We pass `--ignore-errors` because Pants dynamically injects missing `__init__.py`
                # files and this will cause Coverage to fail.
                argv=(report_type.report_name, "--ignore-errors"),
                input_digest=input_digest,
                output_directories=("htmlcov",) if report_type == CoverageReportType.HTML else None,
                output_files=(output_file,) if output_file else None,
                description=f"Generate Pytest {report_type.report_name} coverage report.",
                level=LogLevel.DEBUG,
            )
        )
    results = await MultiGet(Get(ProcessResult, PexProcess, process) for process in pex_processes)
    result_stdouts = tuple(res.stdout for res in results)
    result_snapshots = await MultiGet(Get(Snapshot, Digest, res.output_digest) for res in results)

    coverage_reports.extend(
        _get_coverage_report(coverage_subsystem.output_dir, report_type, stdout, snapshot)
        for (report_type, stdout, snapshot) in zip(report_types, result_stdouts, result_snapshots)
    )

    return CoverageReports(tuple(coverage_reports))
Example #18
async def export_virtualenv(request: _ExportVenvRequest,
                            python_setup: PythonSetup,
                            pex_pex: PexPEX) -> ExportResult:
    if request.resolve:
        interpreter_constraints = InterpreterConstraints(
            python_setup.resolves_to_interpreter_constraints.get(
                request.resolve, python_setup.interpreter_constraints))
    else:
        interpreter_constraints = InterpreterConstraints.create_from_targets(
            request.root_python_targets,
            python_setup) or InterpreterConstraints(
                python_setup.interpreter_constraints)

    min_interpreter = interpreter_constraints.snap_to_minimum(
        python_setup.interpreter_universe)
    if not min_interpreter:
        err_msg = ((
            f"The resolve '{request.resolve}' (from `[python].resolves`) has invalid interpreter "
            f"constraints, which are set via `[python].resolves_to_interpreter_constraints`: "
            f"{interpreter_constraints}. Could not determine the minimum compatible interpreter."
        ) if request.resolve else (
            "The following interpreter constraints were computed for all the targets for which "
            f"export was requested: {interpreter_constraints}. There is no python interpreter "
            "compatible with these constraints. Please restrict the target set to one that shares "
            "a compatible interpreter."))
        raise ExportError(err_msg)

    requirements_pex = await Get(
        Pex,
        RequirementsPexRequest(
            (tgt.address for tgt in request.root_python_targets),
            internal_only=True,
            hardcoded_interpreter_constraints=min_interpreter,
        ),
    )

    # Get the full python version (including patch #), so we can use it as the venv name.
    res = await Get(
        ProcessResult,
        PexProcess(
            pex=requirements_pex,
            description="Get interpreter version",
            argv=[
                "-c",
                "import sys; print('.'.join(str(x) for x in sys.version_info[0:3]))"
            ],
        ),
    )
    py_version = res.stdout.strip().decode()
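    # For example (hypothetical interpreter): stdout of b"3.9.13\n" yields "3.9.13", which
    # becomes the name of the exported venv directory below.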

    dest = (os.path.join("python", "virtualenvs", path_safe(request.resolve))
            if request.resolve else os.path.join("python", "virtualenv"))

    merged_digest = await Get(
        Digest, MergeDigests([pex_pex.digest, requirements_pex.digest]))
    pex_pex_path = os.path.join("{digest_root}", pex_pex.exe)
    return ExportResult(
        f"virtualenv for the resolve '{request.resolve}' (using {min_interpreter})",
        dest,
        digest=merged_digest,
        post_processing_cmds=[
            PostProcessingCommand(
                [
                    pex_pex_path,
                    os.path.join("{digest_root}", requirements_pex.name),
                    "venv",
                    "--pip",
                    "--collisions-ok",
                    "--remove=all",
                    f"{{digest_root}}/{py_version}",
                ],
                {"PEX_MODULE": "pex.tools"},
            ),
            PostProcessingCommand(["rm", "-f", pex_pex_path]),
        ],
    )
Example #19
async def mypy_typecheck_partition(partition: MyPyPartition,
                                   mypy: MyPy) -> TypecheckResult:
    plugin_target_addresses = await Get(Addresses, UnparsedAddressInputs,
                                        mypy.source_plugins)
    plugin_transitive_targets_request = Get(
        TransitiveTargets, TransitiveTargetsRequest(plugin_target_addresses))
    plugin_transitive_targets, launcher_script = await MultiGet(
        plugin_transitive_targets_request,
        Get(Digest, CreateDigest([LAUNCHER_FILE])))

    plugin_requirements = PexRequirements.create_from_requirement_fields(
        plugin_tgt[PythonRequirementsField]
        for plugin_tgt in plugin_transitive_targets.closure
        if plugin_tgt.has_field(PythonRequirementsField))

    # If the user did not set `--python-version` already, we set it ourselves based on their code's
    # interpreter constraints. This determines what AST is used by MyPy.
    python_version = (
        None if partition.python_version_already_configured else
        partition.interpreter_constraints.minimum_python_version())

    # MyPy requires 3.5+ to run, but uses the typed-ast library to work with 2.7, 3.4, 3.5, 3.6,
    # and 3.7. However, typed-ast does not understand 3.8, so instead we must run MyPy with
    # Python 3.8 when relevant. We only do this if <3.8 can't be used, as we don't want a
    # loose requirement like `>=3.6` to result in requiring Python 3.8, which would error if
    # 3.8 is not installed on the machine.
    tool_interpreter_constraints = PexInterpreterConstraints((
        "CPython>=3.8", ) if (
            mypy.options.is_default("interpreter_constraints")
            and partition.interpreter_constraints.requires_python38_or_newer()
        ) else mypy.interpreter_constraints)

    plugin_sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(plugin_transitive_targets.closure))
    typechecked_sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(partition.closure))

    # Normally, this `requirements.pex` would be merged with mypy.pex via `--pex-path`. However,
    # this will cause a runtime error if the interpreter constraints are different between the
    # PEXes and they have incompatible wheels.
    #
    # Instead, we teach MyPy about the requirements by extracting the distributions from
    # requirements.pex and setting EXTRACTED_WHEELS, which our custom launcher script then
    # looks for.
    #
    # Conventionally, MyPy users might instead set `MYPYPATH` for this. However, doing this
    # results in type checking the requirements themselves.
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            (addr for addr in partition.field_set_addresses),
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
            internal_only=True,
        ),
    )
    mypy_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="mypy.pex",
            internal_only=True,
            sources=launcher_script,
            requirements=PexRequirements(
                itertools.chain(mypy.all_requirements, plugin_requirements)),
            interpreter_constraints=tool_interpreter_constraints,
            entry_point=PurePath(LAUNCHER_FILE.path).stem,
        ),
    )

    config_digest_request = Get(Digest, PathGlobs, config_path_globs(mypy))

    (
        plugin_sources,
        typechecked_sources,
        mypy_pex,
        requirements_pex,
        config_digest,
    ) = await MultiGet(
        plugin_sources_request,
        typechecked_sources_request,
        mypy_pex_request,
        requirements_pex_request,
        config_digest_request,
    )

    typechecked_srcs_snapshot = typechecked_sources.source_files.snapshot
    file_list_path = "__files.txt"
    python_files = "\n".join(
        determine_python_files(
            typechecked_sources.source_files.snapshot.files))
    create_file_list_request = Get(
        Digest,
        CreateDigest([FileContent(file_list_path, python_files.encode())]),
    )

    file_list_digest, extracted_pex_distributions = await MultiGet(
        create_file_list_request,
        Get(ExtractedPexDistributions, Pex, requirements_pex))

    merged_input_files = await Get(
        Digest,
        MergeDigests([
            file_list_digest,
            plugin_sources.source_files.snapshot.digest,
            typechecked_srcs_snapshot.digest,
            mypy_pex.digest,
            extracted_pex_distributions.digest,
            config_digest,
        ]),
    )

    all_used_source_roots = sorted(
        set(
            itertools.chain(plugin_sources.source_roots,
                            typechecked_sources.source_roots)))
    env = {
        "PEX_EXTRA_SYS_PATH":
        ":".join(all_used_source_roots),
        "EXTRACTED_WHEELS":
        ":".join(extracted_pex_distributions.wheel_directory_paths),
    }

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            mypy_pex,
            argv=generate_argv(mypy,
                               file_list_path=file_list_path,
                               python_version=python_version),
            input_digest=merged_input_files,
            extra_env=env,
            description=
            f"Run MyPy on {pluralize(len(typechecked_srcs_snapshot.files), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return TypecheckResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)))
Example #20
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([request.field_set.address]))
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_targets(
        all_targets, python_setup)

    # Defaults to zip_safe=False.
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements([request.field_set.address],
                                               internal_only=True),
    )

    pytest_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            interpreter_constraints=interpreter_constraints,
            entry_point="pytest:main",
            internal_only=True,
            additional_args=(
                # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
                # `importlib_metadata` and thus `zipp`, does not play nicely when doing import
                # magic directly from zip files. `zipp` has pathologically bad behavior with large
                # zipfiles.
                # TODO: this does have a performance cost as the pex must now be expanded to disk.
                # Long term, it would be better to fix Zipp (whose fix would then need to be used
                # by importlib_metadata and then by Pytest). See
                # https://github.com/jaraco/zipp/pull/26.
                "--not-zip-safe",
                # TODO(John Sirois): Support shading python binaries:
                #   https://github.com/pantsbuild/pants/issues/9206
                "--pex-path",
                requirements_pex_request.input.output_filename,
            ),
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Create any assets that the test depends on through the `runtime_package_dependencies` field.
    assets: Tuple[BuiltPackage, ...] = ()
    unparsed_runtime_packages = (request.field_set.runtime_package_dependencies
                                 .to_unparsed_address_inputs())
    if unparsed_runtime_packages.values:
        runtime_package_targets = await Get(Targets, UnparsedAddressInputs,
                                            unparsed_runtime_packages)
        field_sets_per_target = await Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      runtime_package_targets),
        )
        assets = await MultiGet(
            Get(BuiltPackage, PackageFieldSet, field_set)
            for field_set in field_sets_per_target.field_sets)

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))

    pytest_pex, requirements_pex, prepared_sources, field_set_source_files = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            prepared_sources.source_files.snapshot.digest,
            requirements_pex.digest,
            pytest_pex.digest,
            *(binary.digest for binary in assets),
        )),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.options.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        output_files.append(".coverage")
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (
            ".", )
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path]
                                           for cov_path in cov_paths),
        ]
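        # For illustration: a filter of ["src/python/app"] yields
        # ["--cov-report=", "--cov", "src/python/app"]; with no filter this defaults to
        # ["--cov-report=", "--cov", "."].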

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
    }

    extra_env.update(test_extra_env.env)

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = ProcessCacheScope.NEVER if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    process = await Get(
        Process,
        PexProcess(
            pytest_pex,
            argv=(*pytest.options.args, *coverage_args,
                  *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)