Code Example #1
File: rules.py Project: rhysyngsun/pants
async def pylint_lint_partition(partition: PylintPartition,
                                pylint: Pylint) -> LintResult:
    requirements_pex_get = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements(
            (field_set.address for field_set in partition.field_sets),
            # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
            # a different version for the requirements than the other two PEXes, which can result
            # in a PEX runtime error about missing dependencies.
            hardcoded_interpreter_constraints=partition.interpreter_constraints,
            internal_only=True,
            direct_deps_only=True,
        ),
    )

    plugin_requirements = PexRequirements.create_from_requirement_fields(
        plugin_tgt[PythonRequirementsField]
        for plugin_tgt in partition.plugin_targets
        if plugin_tgt.has_field(PythonRequirementsField))
    pylint_pex_get = Get(
        Pex,
        PexRequest(
            output_filename="pylint.pex",
            internal_only=True,
            requirements=PexRequirements(
                [*pylint.all_requirements, *plugin_requirements]),
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )

    prepare_plugin_sources_get = Get(
        StrippedPythonSourceFiles,
        PythonSourceFilesRequest(partition.plugin_targets))
    prepare_python_sources_get = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(partition.targets_with_dependencies))
    field_set_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    (
        pylint_pex,
        requirements_pex,
        prepared_plugin_sources,
        prepared_python_sources,
        field_set_sources,
    ) = await MultiGet(
        pylint_pex_get,
        requirements_pex_get,
        prepare_plugin_sources_get,
        prepare_python_sources_get,
        field_set_sources_get,
    )

    pylint_runner_pex, config_files = await MultiGet(
        Get(
            VenvPex,
            PexRequest(
                output_filename="pylint_runner.pex",
                interpreter_constraints=partition.interpreter_constraints,
                main=pylint.main,
                internal_only=True,
                pex_path=[pylint_pex, requirements_pex],
            ),
        ),
        Get(ConfigFiles, ConfigFilesRequest,
            pylint.config_request(field_set_sources.snapshot.dirs)),
    )

    prefixed_plugin_sources = (await Get(
        Digest,
        AddPrefix(
            prepared_plugin_sources.stripped_source_files.snapshot.digest,
            "__plugins"),
    ) if pylint.source_plugins else EMPTY_DIGEST)

    pythonpath = list(prepared_python_sources.source_roots)
    if pylint.source_plugins:
        # NB: Pylint source plugins must be explicitly loaded via PEX_EXTRA_SYS_PATH. The value must
        # point to the plugin's directory, rather than to a parent's directory, because
        # `load-plugins` takes a module name rather than a path to the module; i.e. `plugin`, but
        # not `path.to.plugin`. (This means users must have specified the parent directory as a
        # source root.)
        pythonpath.append("__plugins")

    input_digest = await Get(
        Digest,
        MergeDigests((
            config_files.snapshot.digest,
            prefixed_plugin_sources,
            prepared_python_sources.source_files.snapshot.digest,
        )),
    )

    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            pylint_runner_pex,
            argv=generate_argv(field_set_sources, pylint),
            input_digest=input_digest,
            extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
            description=
            f"Run Pylint on {pluralize(len(partition.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)))
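
The rule above batches its `Get` requests and awaits them together with `MultiGet`, so independent work runs concurrently instead of serially. A rough standalone analogy using plain asyncio (this is not the Pants engine API; the real engine schedules `Get` requests itself):

import asyncio

async def fetch(name: str, delay: float) -> str:
    # Stand-in for an individual `Get(...)` request.
    await asyncio.sleep(delay)
    return f"{name}-result"

async def rule() -> None:
    # Like `await MultiGet(a, b, c)`: all requests run concurrently and
    # results come back in request order.
    pylint_pex, requirements_pex, sources = await asyncio.gather(
        fetch("pylint_pex", 0.2),
        fetch("requirements_pex", 0.1),
        fetch("sources", 0.3),
    )
    print(pylint_pex, requirements_pex, sources)

asyncio.run(rule())
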
Code Example #2
async def setup_scalapb_shim_classfiles(
    scalapb: ScalaPBSubsystem,
    jdk: InternalJdk,
) -> ScalaPBShimCompiledClassfiles:
    dest_dir = "classfiles"

    scalapb_shim_content = pkgutil.get_data(
        "pants.backend.codegen.protobuf.scala", "ScalaPBShim.scala")
    if not scalapb_shim_content:
        raise AssertionError("Unable to find ScalaPBShim.scala resource.")

    scalapb_shim_source = FileContent("ScalaPBShim.scala",
                                      scalapb_shim_content)

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 ScalapbcToolLockfileSentinel())
    tool_classpath, shim_classpath, source_digest = await MultiGet(
        Get(
            ToolClasspath,
            ToolClasspathRequest(
                prefix="__toolcp",
                artifact_requirements=ArtifactRequirements.from_coordinates([
                    Coordinate(
                        group="org.scala-lang",
                        artifact="scala-compiler",
                        version=SHIM_SCALA_VERSION,
                    ),
                    Coordinate(
                        group="org.scala-lang",
                        artifact="scala-library",
                        version=SHIM_SCALA_VERSION,
                    ),
                    Coordinate(
                        group="org.scala-lang",
                        artifact="scala-reflect",
                        version=SHIM_SCALA_VERSION,
                    ),
                ]),
            ),
        ),
        Get(ToolClasspath,
            ToolClasspathRequest(prefix="__shimcp",
                                 lockfile=lockfile_request)),
        Get(Digest, CreateDigest([scalapb_shim_source,
                                  Directory(dest_dir)])),
    )

    merged_digest = await Get(
        Digest,
        MergeDigests(
            (tool_classpath.digest, shim_classpath.digest, source_digest)))

    process_result = await Get(
        ProcessResult,
        JvmProcess(
            jdk=jdk,
            classpath_entries=tool_classpath.classpath_entries(),
            argv=[
                "scala.tools.nsc.Main",
                "-bootclasspath",
                ":".join(tool_classpath.classpath_entries()),
                "-classpath",
                ":".join(shim_classpath.classpath_entries()),
                "-d",
                dest_dir,
                scalapb_shim_source.path,
            ],
            input_digest=merged_digest,
            extra_jvm_options=scalapb.jvm_options,
            output_directories=(dest_dir, ),
            description="Compile ScalaPB shim with scalac",
            level=LogLevel.DEBUG,
            # NB: We do not use nailgun for this process, since it is launched exactly once.
            use_nailgun=False,
        ),
    )
    stripped_classfiles_digest = await Get(
        Digest, RemovePrefix(process_result.output_digest, dest_dir))
    return ScalaPBShimCompiledClassfiles(digest=stripped_classfiles_digest)
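
The shim source is loaded with `pkgutil.get_data`, which reads a file bundled inside an installed package. A minimal standalone sketch of the same pattern, pointed at a stdlib file purely for demonstration (on a frozen or zipped install the source file may be absent):

import pkgutil

# Read a file that ships inside a package (here: the ctypes package's own
# util.py, chosen only because it reliably exists in a CPython install).
data = pkgutil.get_data("ctypes", "util.py")
if data is None:
    raise AssertionError("Unable to find util.py resource.")
print(f"loaded {len(data)} bytes")
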
Code Example #3
File: shell_command.py Project: hephex/pants
async def prepare_shell_command_process(request: ShellCommandProcessRequest,
                                        shell_setup: ShellSetup,
                                        bash: BashBinary) -> Process:
    shell_command = request.target
    interactive = shell_command.has_field(ShellCommandRunWorkdirField)
    if interactive:
        working_directory = shell_command[
            ShellCommandRunWorkdirField].value or ""
    else:
        working_directory = shell_command.address.spec_path
    command = shell_command[ShellCommandCommandField].value
    timeout = shell_command.get(ShellCommandTimeoutField).value
    tools = shell_command.get(ShellCommandToolsField,
                              default_raw_value=()).value
    outputs = shell_command.get(ShellCommandOutputsField).value or ()

    if not command:
        raise ValueError(
            f"Missing `command` line in `{shell_command.alias}` target {shell_command.address}."
        )

    if interactive:
        command_env = {
            "CHROOT": "{chroot}",
        }
    else:
        if not tools:
            raise ValueError(
                f"Must provide any `tools` used by the `{shell_command.alias}` {shell_command.address}."
            )

        env = await Get(Environment, EnvironmentRequest(["PATH"]))
        search_path = shell_setup.executable_search_path(env)
        tool_requests = [
            BinaryPathRequest(
                binary_name=tool,
                search_path=search_path,
            ) for tool in {*tools, *["mkdir", "ln"]}
            if tool not in BASH_BUILTIN_COMMANDS
        ]
        tool_paths = await MultiGet(
            Get(BinaryPaths, BinaryPathRequest, request)
            for request in tool_requests)

        command_env = {
            "TOOLS":
            " ".join(
                _shell_tool_safe_env_name(tool.binary_name)
                for tool in tool_requests),
        }

        for binary, tool_request in zip(tool_paths, tool_requests):
            if binary.first_path:
                command_env[_shell_tool_safe_env_name(
                    tool_request.binary_name)] = binary.first_path.path
            else:
                raise BinaryNotFoundError.from_request(
                    tool_request,
                    rationale=
                    f"execute `{shell_command.alias}` {shell_command.address}",
                )

    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([shell_command.address]),
    )

    sources, pkgs_per_target = await MultiGet(
        Get(
            SourceFiles,
            SourceFilesRequest(
                sources_fields=[
                    tgt.get(SourcesField)
                    for tgt in transitive_targets.dependencies
                ],
                for_sources_types=(SourcesField, FileSourceField),
                enable_codegen=True,
            ),
        ),
        Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      transitive_targets.dependencies),
        ),
    )

    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in pkgs_per_target.field_sets)

    if interactive or not working_directory or working_directory in sources.snapshot.dirs:
        work_dir = EMPTY_DIGEST
    else:
        work_dir = await Get(Digest,
                             CreateDigest([Directory(working_directory)]))

    input_digest = await Get(
        Digest,
        MergeDigests([
            sources.snapshot.digest, work_dir,
            *(pkg.digest for pkg in packages)
        ]))

    output_files = [f for f in outputs if not f.endswith("/")]
    output_directories = [d for d in outputs if d.endswith("/")]

    if interactive:
        relpath = os.path.relpath(
            working_directory or ".",
            start="/" if os.path.isabs(working_directory) else ".")
        boot_script = f"cd {shlex.quote(relpath)}; " if relpath != "." else ""
    else:
        # Set up the bin_relpath dir with symlinks to all requested tools so that we
        # can use PATH. Force the symlinks to avoid issues with repeat runs using the
        # __run.sh script in the sandbox.
        bin_relpath = ".bin"
        boot_script = ";".join(
            dedent(f"""\
                $mkdir -p {bin_relpath}
                for tool in $TOOLS; do $ln -sf ${{!tool}} {bin_relpath}; done
                export PATH="$PWD/{bin_relpath}"
                """).split("\n"))

    return Process(
        argv=(bash.path, "-c", boot_script + command),
        description=f"Running {shell_command.alias} {shell_command.address}",
        env=command_env,
        input_digest=input_digest,
        output_directories=output_directories,
        output_files=output_files,
        timeout_seconds=timeout,
        working_directory=working_directory,
    )
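
The `_shell_tool_safe_env_name` helper used above is not shown. A plausible sketch (an assumption, not the project's actual implementation) would map a binary name like `g++` or `python3.9` to a name that is valid as a shell variable:

import re

def shell_tool_safe_env_name(binary_name: str) -> str:
    # Replace every character that may not appear in a shell identifier.
    return re.sub(r"\W", "_", binary_name)

assert shell_tool_safe_env_name("python3.9") == "python3_9"
assert shell_tool_safe_env_name("g++") == "g__"
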
Code Example #4
File: rules.py Project: LarryFinn/pants
async def pylint_lint(
    field_sets: PylintFieldSets,
    pylint: Pylint,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> LintResult:
    if pylint.options.skip:
        return LintResult.noop()

    # Pylint needs direct dependencies in the chroot to ensure that imports are valid. However, it
    # doesn't lint those direct dependencies nor does it care about transitive dependencies.
    addresses = []
    for field_set in field_sets:
        addresses.append(field_set.address)
        addresses.extend(field_set.dependencies.value or ())
    targets = await Get[Targets](Addresses(addresses))

    # NB: Pylint output depends upon which Python interpreter version it's run with. We ensure that
    # each target runs with its own interpreter constraints. See
    # http://pylint.pycqa.org/en/latest/faq.html#what-versions-of-python-is-pylint-supporting.
    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (field_set.compatibility for field_set in field_sets), python_setup)
    requirements_pex_request = Get[Pex](PexRequest(
        output_filename="pylint.pex",
        requirements=PexRequirements(pylint.get_requirement_specs()),
        interpreter_constraints=interpreter_constraints,
        entry_point=pylint.get_entry_point(),
    ))

    config_path: Optional[str] = pylint.options.config
    config_snapshot_request = Get[Snapshot](PathGlobs(
        globs=tuple([config_path] if config_path else []),
        glob_match_error_behavior=GlobMatchErrorBehavior.error,
        description_of_origin="the option `--pylint-config`",
    ))

    prepare_python_sources_request = Get[ImportablePythonSources](Targets,
                                                                  targets)
    specified_source_files_request = Get[SourceFiles](
        SpecifiedSourceFilesRequest(
            ((field_set.sources, field_set.origin)
             for field_set in field_sets),
            strip_source_roots=True,
        ))

    requirements_pex, config_snapshot, prepared_python_sources, specified_source_files = cast(
        Tuple[Pex, Snapshot, ImportablePythonSources, SourceFiles],
        await MultiGet([
            requirements_pex_request,
            config_snapshot_request,
            prepare_python_sources_request,
            specified_source_files_request,
        ]),
    )

    input_digest = await Get[Digest](MergeDigests((
        requirements_pex.digest,
        config_snapshot.digest,
        prepared_python_sources.snapshot.digest,
    )), )

    address_references = ", ".join(
        sorted(field_set.address.reference() for field_set in field_sets))

    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./pylint.pex",
        pex_args=generate_args(specified_source_files=specified_source_files,
                               pylint=pylint),
        input_digest=input_digest,
        description=
        f"Run Pylint on {pluralize(len(field_sets), 'target')}: {address_references}.",
    )
    result = await Get[FallibleProcessResult](Process, process)
    return LintResult.from_fallible_process_result(result,
                                                   linter_name="Pylint")
Code Example #5
async def setup_shunit2_for_target(
    request: TestSetupRequest,
    shell_setup: ShellSetup,
    test_subsystem: TestSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    shunit2_download_file = DownloadFile(
        "https://raw.githubusercontent.com/kward/shunit2/b9102bb763cc603b3115ed30a5648bf950548097/shunit2",
        FileDigest(
            "1f11477b7948150d1ca50cdd41d89be4ed2acd137e26d2e0fe23966d0e272cc5",
            40987),
    )
    shunit2_script, transitive_targets, built_package_dependencies, env = await MultiGet(
        Get(Digest, DownloadFile, shunit2_download_file),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.field_set.address])),
        Get(
            BuiltPackageDependencies,
            BuildPackageDependenciesRequest(
                request.field_set.runtime_package_dependencies),
        ),
        Get(Environment, EnvironmentRequest(["PATH"])),
    )

    dependencies_source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(SourcesField) for tgt in transitive_targets.dependencies),
            for_sources_types=(ShellSourceField, FileSourceField,
                               ResourceSourceField),
            enable_codegen=True,
        ),
    )
    dependencies_source_files, field_set_sources = await MultiGet(
        dependencies_source_files_request,
        Get(SourceFiles, SourceFilesRequest([request.field_set.sources])),
    )

    field_set_digest_content = await Get(DigestContents, Digest,
                                         field_set_sources.snapshot.digest)
    # `ShellTestSourceField` validates that there's exactly one file.
    test_file_content = field_set_digest_content[0]
    updated_test_file_content = add_source_shunit2(test_file_content)

    updated_test_digest, runner = await MultiGet(
        Get(Digest, CreateDigest([updated_test_file_content])),
        Get(
            Shunit2Runner,
            Shunit2RunnerRequest(request.field_set.address, test_file_content,
                                 request.field_set.shell),
        ),
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            shunit2_script,
            updated_test_digest,
            dependencies_source_files.snapshot.digest,
            *(pkg.digest for pkg in built_package_dependencies),
        )),
    )

    env_dict = {
        "PATH": create_path_env_var(shell_setup.executable_search_path(env)),
        "SHUNIT_COLOR": "always" if global_options.colors else "none",
        **test_extra_env.env,
    }
    argv = (
        # Zsh requires extra args. See https://github.com/kward/shunit2/#-zsh.
        [
            runner.binary_path.path, "-o", "shwordsplit", "--",
            *field_set_sources.snapshot.files
        ] if runner.shell == Shunit2Shell.zsh else
        [runner.binary_path.path, *field_set_sources.snapshot.files])
    cache_scope = (ProcessCacheScope.PER_SESSION
                   if test_subsystem.force else ProcessCacheScope.SUCCESSFUL)
    process = Process(
        argv=argv,
        input_digest=input_digest,
        description=f"Run shunit2 for {request.field_set.address}.",
        level=LogLevel.DEBUG,
        env=env_dict,
        timeout_seconds=request.field_set.timeout.value,
        cache_scope=cache_scope,
    )
    return TestSetup(process)
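
The `FileDigest` above pins the downloaded shunit2 script to a SHA-256 digest and an exact byte count; a download that differs in either is rejected. A standalone sketch of that check with `hashlib`:

import hashlib

def verify(content: bytes, expected_sha256: str, expected_len: int) -> None:
    # Reject on length first (cheap), then on digest.
    if len(content) != expected_len:
        raise ValueError(f"unexpected length: {len(content)}")
    digest = hashlib.sha256(content).hexdigest()
    if digest != expected_sha256:
        raise ValueError(f"unexpected digest: {digest}")

payload = b"#!/bin/sh\necho shunit2 stand-in\n"
verify(payload, hashlib.sha256(payload).hexdigest(), len(payload))
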
Code Example #6
File: rules.py Project: Spacerat/pants
async def bandit_lint_partition(partition: BanditPartition, bandit: Bandit,
                                lint_subsystem: LintSubsystem) -> LintResult:
    requirements_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="bandit.pex",
            requirements=PexRequirements(bandit.all_requirements),
            interpreter_constraints=(partition.interpreter_constraints
                                     or PexInterpreterConstraints(
                                         bandit.interpreter_constraints)),
            entry_point=bandit.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[bandit.config] if bandit.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--bandit-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    requirements_pex, config_digest, source_files = await MultiGet(
        requirements_pex_request, config_digest_request, source_files_request)

    input_digest = await Get(
        Digest,
        MergeDigests((source_files.snapshot.digest, requirements_pex.digest,
                      config_digest)))

    address_references = ", ".join(
        sorted(field_set.address.spec for field_set in partition.field_sets))
    report_path = (lint_subsystem.reports_dir /
                   "bandit_report.txt" if lint_subsystem.reports_dir else None)
    args = generate_args(
        source_files=source_files,
        bandit=bandit,
        output_file=report_path.name if report_path else None,
    )

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            requirements_pex,
            argv=args,
            input_digest=input_digest,
            description=
            (f"Run Bandit on {pluralize(len(partition.field_sets), 'target')}: "
             f"{address_references}."),
            output_files=(report_path.name, ) if report_path else None,
        ),
    )

    results_file = None
    if report_path:
        report_file_snapshot = await Get(
            Snapshot,
            DigestSubset(result.output_digest, PathGlobs([report_path.name])))
        if len(report_file_snapshot.files) != 1:
            raise Exception(
                f"Unexpected report file snapshot: {report_file_snapshot.files}"
            )
        results_file = LintResultFile(output_path=report_path,
                                      digest=report_file_snapshot.digest)

    return LintResult.from_fallible_process_result(result,
                                                   linter_name="Bandit",
                                                   results_file=results_file)
Code Example #7
File: rules.py Project: jperkelens/pants
async def pylint_lint_partition(partition: PylintPartition, pylint: Pylint) -> LintResult:
    # We build one PEX with Pylint requirements and another with all direct 3rd-party dependencies.
    # Splitting this into two PEXes gives us finer-grained caching. We then merge via `--pex-path`.
    plugin_requirements = PexRequirements.create_from_requirement_fields(
        plugin_tgt[PythonRequirementsField]
        for plugin_tgt in partition.plugin_targets
        if plugin_tgt.has_field(PythonRequirementsField)
    )
    target_requirements = PexRequirements.create_from_requirement_fields(
        tgt[PythonRequirementsField]
        for tgt in partition.targets_with_dependencies
        if tgt.has_field(PythonRequirementsField)
    )
    pylint_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pylint.pex",
            internal_only=True,
            requirements=PexRequirements([*pylint.all_requirements, *plugin_requirements]),
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    requirements_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="requirements.pex",
            internal_only=True,
            requirements=target_requirements,
            interpreter_constraints=partition.interpreter_constraints,
        ),
    )
    # TODO(John Sirois): Support shading python binaries:
    #   https://github.com/pantsbuild/pants/issues/9206
    # Right now any Pylint transitive requirements will shadow corresponding user
    # requirements, which could lead to problems.
    pylint_runner_pex_args = ["--pex-path", ":".join(["pylint.pex", "requirements.pex"])]
    pylint_runner_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pylint_runner.pex",
            internal_only=True,
            entry_point=pylint.entry_point,
            interpreter_constraints=partition.interpreter_constraints,
            additional_args=pylint_runner_pex_args,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[pylint.config] if pylint.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--pylint-config`",
        ),
    )

    prepare_plugin_sources_request = Get(
        StrippedPythonSourceFiles, PythonSourceFilesRequest(partition.plugin_targets),
    )
    prepare_python_sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(partition.targets_with_dependencies),
    )
    field_set_sources_request = Get(
        SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets),
    )

    (
        pylint_pex,
        requirements_pex,
        pylint_runner_pex,
        config_digest,
        prepared_plugin_sources,
        prepared_python_sources,
        field_set_sources,
    ) = await MultiGet(
        pylint_pex_request,
        requirements_pex_request,
        pylint_runner_pex_request,
        config_digest_request,
        prepare_plugin_sources_request,
        prepare_python_sources_request,
        field_set_sources_request,
    )

    prefixed_plugin_sources = (
        await Get(
            Digest,
            AddPrefix(prepared_plugin_sources.stripped_source_files.snapshot.digest, "__plugins"),
        )
        if pylint.source_plugins
        else EMPTY_DIGEST
    )

    pythonpath = list(prepared_python_sources.source_roots)
    if pylint.source_plugins:
        # NB: Pylint source plugins must be explicitly loaded via PEX_EXTRA_SYS_PATH. The value must
        # point to the plugin's directory, rather than to a parent's directory, because
        # `load-plugins` takes a module name rather than a path to the module; i.e. `plugin`, but
        # not `path.to.plugin`. (This means users must have specified the parent directory as a
        # source root.)
        pythonpath.append("__plugins")

    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                pylint_pex.digest,
                requirements_pex.digest,
                pylint_runner_pex.digest,
                config_digest,
                prefixed_plugin_sources,
                prepared_python_sources.source_files.snapshot.digest,
            )
        ),
    )

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            pylint_runner_pex,
            argv=generate_args(source_files=field_set_sources, pylint=pylint),
            input_digest=input_digest,
            extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
            description=f"Run Pylint on {pluralize(len(partition.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.from_fallible_process_result(
        result, partition_description=str(sorted(partition.interpreter_constraints))
    )
Code Example #8
async def run_shellcheck(request: ShellcheckRequest,
                         shellcheck: Shellcheck) -> LintResults:
    if shellcheck.skip:
        return LintResults([], linter_name="Shellcheck")

    # Shellcheck looks at direct dependencies to make sure that every symbol is defined, so we must
    # include those in the run.
    all_dependencies = await MultiGet(
        Get(Targets, DependenciesRequest(field_set.dependencies))
        for field_set in request.field_sets)
    direct_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(
            (field_set.sources for field_set in request.field_sets),
            for_sources_types=(ShellSources, ),
            enable_codegen=True,
        ),
    )
    dependency_sources_get = Get(
        SourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for dependencies in all_dependencies
             for tgt in dependencies),
            for_sources_types=(ShellSources, ),
            enable_codegen=True,
        ),
    )
    config_files_get = Get(ConfigFiles, ConfigFilesRequest,
                           shellcheck.config_request)

    download_shellcheck_get = Get(DownloadedExternalTool, ExternalToolRequest,
                                  shellcheck.get_request(Platform.current))

    direct_sources, dependency_sources, downloaded_shellcheck, config_files = await MultiGet(
        direct_sources_get, dependency_sources_get, download_shellcheck_get,
        config_files_get)
    input_digest = await Get(
        Digest,
        MergeDigests((
            direct_sources.snapshot.digest,
            dependency_sources.snapshot.digest,
            downloaded_shellcheck.digest,
            config_files.snapshot.digest,
        )),
    )

    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                downloaded_shellcheck.exe, *shellcheck.args,
                *direct_sources.snapshot.files
            ],
            input_digest=input_digest,
            description=
            f"Run Shellcheck on {pluralize(len(request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    result = LintResult.from_fallible_process_result(process_result)
    return LintResults([result], linter_name="Shellcheck")
Code Example #9
File: pex_from_targets.py Project: jperkelens/pants
async def pex_from_targets(request: PexFromTargetsRequest,
                           python_setup: PythonSetup) -> PexRequest:
    transitive_targets = await Get(TransitiveTargets, Addresses,
                                   request.addresses)
    all_targets = transitive_targets.closure

    input_digests = []
    if request.additional_sources:
        input_digests.append(request.additional_sources)
    if request.include_source_files:
        prepared_sources = await Get(StrippedPythonSourceFiles,
                                     PythonSourceFilesRequest(all_targets))
        input_digests.append(
            prepared_sources.stripped_source_files.snapshot.digest)
    merged_input_digest = await Get(Digest, MergeDigests(input_digests))

    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (tgt[PythonInterpreterCompatibility] for tgt in all_targets
         if tgt.has_field(PythonInterpreterCompatibility)),
        python_setup,
    )

    exact_reqs = PexRequirements.create_from_requirement_fields(
        (tgt[PythonRequirementsField]
         for tgt in all_targets if tgt.has_field(PythonRequirementsField)),
        additional_requirements=request.additional_requirements,
    )

    requirements = exact_reqs
    description = request.description

    if python_setup.requirement_constraints:
        # In requirement strings Foo_-Bar.BAZ and foo-bar-baz refer to the same project. We let
        # packaging canonicalize for us.
        # See: https://www.python.org/dev/peps/pep-0503/#normalized-names

        exact_req_projects = {
            canonicalize_project_name(Requirement.parse(req).project_name)
            for req in exact_reqs
        }
        constraints_file_contents = await Get(
            DigestContents,
            PathGlobs(
                [python_setup.requirement_constraints],
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin=
                "the option `--python-setup-requirement-constraints`",
            ),
        )
        constraints_file_reqs = set(
            parse_requirements(
                next(iter(constraints_file_contents)).content.decode()))
        constraint_file_projects = {
            canonicalize_project_name(req.project_name)
            for req in constraints_file_reqs
        }
        unconstrained_projects = exact_req_projects - constraint_file_projects
        if unconstrained_projects:
            logger.warning(
                f"The constraints file {python_setup.requirement_constraints} does not contain "
                f"entries for the following requirements: {', '.join(unconstrained_projects)}"
            )

        if python_setup.resolve_all_constraints == ResolveAllConstraintsOption.ALWAYS or (
                python_setup.resolve_all_constraints
                == ResolveAllConstraintsOption.NONDEPLOYABLES
                and request.internal_only):
            if unconstrained_projects:
                logger.warning(
                    "Ignoring resolve_all_constraints setting in [python_setup] scope "
                    "because constraints file does not cover all requirements."
                )
            else:
                requirements = PexRequirements(
                    str(req) for req in constraints_file_reqs)
                description = description or f"Resolving {python_setup.requirement_constraints}"
    elif (python_setup.resolve_all_constraints !=
          ResolveAllConstraintsOption.NEVER
          and python_setup.resolve_all_constraints_was_set_explicitly()):
        raise ValueError(
            f"[python-setup].resolve_all_constraints is set to "
            f"{python_setup.resolve_all_constraints.value}, so "
            f"[python-setup].requirement_constraints must also be provided.")

    return PexRequest(
        output_filename=request.output_filename,
        internal_only=request.internal_only,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        entry_point=request.entry_point,
        sources=merged_input_digest,
        additional_inputs=request.additional_inputs,
        additional_args=request.additional_args,
        description=description,
    )
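
The constraints check above relies on PEP 503 name normalization, under which `Foo_-Bar.BAZ` and `foo-bar-baz` name the same project. A standalone sketch using the `packaging` library (its `Requirement`/`canonicalize_name` API differs slightly from the `pkg_resources` calls in the code above):

from packaging.requirements import Requirement
from packaging.utils import canonicalize_name

exact_req_projects = {canonicalize_name(Requirement(r).name)
                      for r in ["Foo_-Bar.BAZ==1.0"]}
constraint_file_projects = {canonicalize_name(Requirement(r).name)
                            for r in ["foo-bar-baz==1.0", "requests==2.25.1"]}
# Empty set means every exact requirement is covered by the constraints.
print(exact_req_projects - constraint_file_projects)  # set()
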
Code Example #10
File: fmt.py Project: nadeemnazeer/pants
async def fmt(
    console: Console,
    targets: Targets,
    fmt_subsystem: FmtSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Fmt:
    language_target_collection_types = union_membership[LanguageFmtTargets]
    language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_type(
            Targets(target for target in targets
                    if language_target_collection_type.belongs_to_language(
                        target))) for language_target_collection_type in
        language_target_collection_types)
    targets_with_sources: Iterable[TargetsWithSources] = await MultiGet(
        Get(
            TargetsWithSources,
            TargetsWithSourcesRequest(language_target_collection.targets),
        ) for language_target_collection in language_target_collections)
    # NB: We must convert the generic TargetsWithSources objects back into their
    # corresponding LanguageFmtTargets, e.g. back to PythonFmtTargets, in order for the union
    # rule to work.
    valid_language_target_collections: Iterable[LanguageFmtTargets] = tuple(
        language_target_collection_cls(
            Targets(target for target in language_target_collection.targets
                    if target in language_targets_with_sources))
        for language_target_collection_cls,
        language_target_collection, language_targets_with_sources in zip(
            language_target_collection_types, language_target_collections,
            targets_with_sources) if language_targets_with_sources)

    if fmt_subsystem.per_file_caching:
        per_language_results = await MultiGet(
            Get(
                LanguageFmtResults,
                LanguageFmtTargets,
                language_target_collection.__class__(Targets([target])),
            )
            for language_target_collection in valid_language_target_collections
            for target in language_target_collection.targets)
    else:
        per_language_results = await MultiGet(
            Get(LanguageFmtResults, LanguageFmtTargets,
                language_target_collection)
            for language_target_collection in valid_language_target_collections
        )

    individual_results: List[FmtResult] = list(
        itertools.chain.from_iterable(
            language_result.results
            for language_result in per_language_results))

    if not individual_results:
        return Fmt(exit_code=0)

    changed_digests = tuple(language_result.output
                            for language_result in per_language_results
                            if language_result.did_change)
    if changed_digests:
        # NB: this will fail if there are any conflicting changes, which we want to happen rather
        # than silently having one result override the other. In practice, this should never
        # happen because we group each language's formatters into a single digest.
        merged_formatted_digest = await Get(Digest,
                                            MergeDigests(changed_digests))
        workspace.write_digest(merged_formatted_digest)

    if individual_results:
        console.print_stderr("")

    # We group all results for the same formatter so that we can give one final status in the
    # summary. This is only relevant if there were multiple results because of
    # `--per-file-caching`.
    formatter_to_results = defaultdict(set)
    for result in individual_results:
        formatter_to_results[result.formatter_name].add(result)

    for formatter, results in sorted(formatter_to_results.items()):
        if any(result.did_change for result in results):
            sigil = console.red("𐄂")
            status = "made changes"
        elif all(result.skipped for result in results):
            sigil = console.yellow("-")
            status = "skipped"
        else:
            sigil = console.green("✓")
            status = "made no changes"
        console.print_stderr(f"{sigil} {formatter} {status}.")

    # Since the rules to produce FmtResult should use ExecuteRequest, rather than
    # FallibleProcess, we assume that there were no failures.
    return Fmt(exit_code=0)
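
The summary loop above reduces many per-run results to one status line per formatter. A standalone sketch of the same grouping logic, with a stand-in result type in place of FmtResult:

from collections import defaultdict
from dataclasses import dataclass

@dataclass(frozen=True)
class FakeFmtResult:  # stand-in for FmtResult
    formatter_name: str
    did_change: bool
    skipped: bool = False

individual_results = [
    FakeFmtResult("black", did_change=False),
    FakeFmtResult("black", did_change=True),
    FakeFmtResult("isort", did_change=False),
]
formatter_to_results = defaultdict(set)
for result in individual_results:
    formatter_to_results[result.formatter_name].add(result)
for formatter, results in sorted(formatter_to_results.items()):
    if any(r.did_change for r in results):
        status = "made changes"
    elif all(r.skipped for r in results):
        status = "skipped"
    else:
        status = "made no changes"
    print(f"{formatter} {status}.")
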
Code Example #11
File: cloc.py Project: matze999/pants
async def run_cloc(
    console: Console,
    cloc_subsystem: CountLinesOfCodeSubsystem,
    cloc_binary: ClocBinary,
    sources_snapshot: SourcesSnapshot,
) -> CountLinesOfCode:
    """Runs the cloc Perl script."""
    if not sources_snapshot.snapshot.files:
        return CountLinesOfCode(exit_code=0)

    input_files_filename = "input_files.txt"
    input_file_digest = await Get(
        Digest,
        CreateDigest([
            FileContent(input_files_filename,
                        "\n".join(sources_snapshot.snapshot.files).encode())
        ]),
    )
    downloaded_cloc_binary = await Get(
        DownloadedExternalTool, ExternalToolRequest,
        cloc_binary.get_request(Platform.current))
    digest = await Get(
        Digest,
        MergeDigests((input_file_digest, downloaded_cloc_binary.digest,
                      sources_snapshot.snapshot.digest)),
    )

    report_filename = "report.txt"
    ignore_filename = "ignored.txt"

    cmd = (
        "/usr/bin/perl",
        downloaded_cloc_binary.exe,
        "--skip-uniqueness",  # Skip the file uniqueness check.
        f"--ignored={ignore_filename}",  # Write the names and reasons of ignored files to this file.
        f"--report-file={report_filename}",  # Write the output to this file rather than stdout.
        f"--list-file={input_files_filename}",  # Read an exhaustive list of files to process from this file.
    )
    req = Process(
        argv=cmd,
        input_digest=digest,
        output_files=(report_filename, ignore_filename),
        description=
        (f"Count lines of code for {pluralize(len(sources_snapshot.snapshot.files), 'file')}"
         ),
        level=LogLevel.DEBUG,
    )
    exec_result = await Get(ProcessResult, Process, req)

    report_digest_contents = await Get(DigestContents, Digest,
                                       exec_result.output_digest)
    reports = {
        file_content.path: file_content.content.decode()
        for file_content in report_digest_contents
    }

    for line in reports[report_filename].splitlines():
        console.print_stdout(line)

    if cloc_subsystem.ignored:
        console.print_stderr("\nIgnored the following files:")
        for line in reports[ignore_filename].splitlines():
            console.print_stderr(line)

    return CountLinesOfCode(exit_code=0)
Code Example #12
async def generate_python_from_protobuf(
        request: GeneratePythonFromProtobufRequest,
        protoc: Protoc) -> GeneratedSources:
    download_protoc_request = Get(DownloadedExternalTool, ExternalToolRequest,
                                  protoc.get_request(Platform.current))

    output_dir = "_generated_files"
    create_output_dir_request = Get(Digest,
                                    CreateDigest([Directory(output_dir)]))

    # Protoc needs all transitive dependencies on `protobuf_libraries` to work properly. It won't
    # actually generate those dependencies; it only needs to look at their .proto files to work
    # with imports.
    transitive_targets = await Get(
        TransitiveTargets, Addresses([request.protocol_target.address]))
    # NB: By stripping the source roots, we avoid having to set the value `--proto_path`
    # for Protobuf imports to be discoverable.
    all_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for tgt in transitive_targets.closure),
            for_sources_types=(ProtobufSources, ),
        ),
    )
    target_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest([request.protocol_target[ProtobufSources]]))

    (
        downloaded_protoc_binary,
        empty_output_dir,
        all_sources_stripped,
        target_sources_stripped,
    ) = await MultiGet(
        download_protoc_request,
        create_output_dir_request,
        all_stripped_sources_request,
        target_stripped_sources_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            all_sources_stripped.snapshot.digest,
            downloaded_protoc_binary.digest,
            empty_output_dir,
        )),
    )

    result = await Get(
        ProcessResult,
        Process(
            (
                downloaded_protoc_binary.exe,
                "--python_out",
                output_dir,
                *target_sources_stripped.snapshot.files,
            ),
            input_digest=input_digest,
            description=
            f"Generating Python sources from {request.protocol_target.address}.",
            level=LogLevel.DEBUG,
            output_directories=(output_dir, ),
        ),
    )

    # We must do some path manipulation on the output digest for it to look like normal sources,
    # including adding back a source root.
    py_source_root = request.protocol_target.get(PythonSourceRootField).value
    if py_source_root:
        # Verify that the python source root specified by the target is in fact a source root.
        source_root_request = SourceRootRequest(PurePath(py_source_root))
    else:
        # The target didn't specify a python source root, so use the protobuf_library's source root.
        source_root_request = SourceRootRequest.for_target(
            request.protocol_target)

    normalized_digest, source_root = await MultiGet(
        Get(Digest, RemovePrefix(result.output_digest, output_dir)),
        Get(SourceRoot, SourceRootRequest, source_root_request),
    )

    source_root_restored = (
        await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
        if source_root.path != "."
        else await Get(Snapshot, Digest, normalized_digest))
    return GeneratedSources(source_root_restored)
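
The final fix-up above strips the scratch output directory and re-adds a source root so the generated files land where normal sources live. The real `RemovePrefix`/`AddPrefix` operate on digests; a standalone analogy on plain path strings:

from pathlib import PurePath

def remove_prefix(path: str, prefix: str) -> str:
    return str(PurePath(path).relative_to(prefix))

def add_prefix(path: str, prefix: str) -> str:
    return str(PurePath(prefix) / path)

generated = "_generated_files/foo/bar_pb2.py"
normalized = remove_prefix(generated, "_generated_files")  # foo/bar_pb2.py
print(add_prefix(normalized, "src/python"))  # src/python/foo/bar_pb2.py
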
Code Example #13
File: test.py Project: Spacerat/pants
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetsToValidFieldSets,
            TargetsToValidFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                error_if_no_valid_targets=True,
                expect_single_field_set=True,
            ),
        )
        field_set = targets_to_valid_field_sets.field_sets[0]
        request = await Get(TestDebugRequest, TestFieldSet, field_set)
        debug_result = interactive_runner.run(request.process)
        return Test(debug_result.exit_code)

    targets_to_valid_field_sets = await Get(
        TargetsToValidFieldSets,
        TargetsToValidFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            error_if_no_valid_targets=False,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(AddressAndTestResult, WrappedTestFieldSet(field_set))
        for field_set in field_sets_with_sources)

    # Print details.
    for result in results:
        if test_subsystem.options.output == ShowOutput.NONE or (
                test_subsystem.options.output == ShowOutput.FAILED
                and result.test_result.status == Status.SUCCESS):
            continue
        has_output = result.test_result.stdout or result.test_result.stderr
        if has_output:
            status = (console.green("✓") if result.test_result.status
                      == Status.SUCCESS else console.red("𐄂"))
            console.print_stderr(f"{status} {result.address}")
        if result.test_result.stdout:
            console.print_stderr(result.test_result.stdout)
        if result.test_result.stderr:
            console.print_stderr(result.test_result.stderr)
        if has_output and result != results[-1]:
            console.print_stderr("")

    # Print summary
    console.print_stderr("")
    for result in results:
        color = console.green if result.test_result.status == Status.SUCCESS else console.red
        # The right-align logic sees the color control codes as characters, so we have
        # to account for that. In f-strings the alignment field widths must be literals,
        # so we have to indirect via a call to .format().
        right_align = 19 if console.use_colors else 10
        format_str = f"{{addr:80}}.....{{result:>{right_align}}}"
        console.print_stderr(
            format_str.format(addr=result.address.spec,
                              result=color(result.test_result.status.value)))

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.test_result.xml_results for result in results
                     if result.test_result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        all_coverage_data: Iterable[CoverageData] = [
            result.test_result.coverage_data for result in results
            if result.test_result.coverage_data is not None
        ]

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]] = {
                collection_cls.element_type: collection_cls
                for collection_cls in union_membership.get(
                    CoverageDataCollection)
            }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            desktop.ui_open(console, interactive_runner, coverage_report_files)

    exit_code = (PANTS_FAILED_EXIT_CODE if any(
        res.test_result.status == Status.FAILURE
        for res in results) else PANTS_SUCCEEDED_EXIT_CODE)

    return Test(exit_code)
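
The right-align comment above is worth seeing in isolation: ANSI color escapes count toward f-string field widths, so the colored column must be padded wider to line up. A runnable demonstration:

GREEN = "\x1b[32m"   # 5 characters
RESET = "\x1b[0m"    # 4 characters

def green(s: str) -> str:
    return f"{GREEN}{s}{RESET}"

use_colors = True
# The escapes add 9 invisible characters, hence 19 instead of 10.
right_align = 19 if use_colors else 10
format_str = f"{{addr:80}}.....{{result:>{right_align}}}"
print(format_str.format(addr="src/app/tests:tests", result=green("SUCCESS")))
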
Code Example #14
async def create_docker_build_context(
        request: DockerBuildContextRequest,
        docker_options: DockerOptions) -> DockerBuildContext:
    # Get all targets to include in context.
    transitive_targets = await Get(TransitiveTargets,
                                   TransitiveTargetsRequest([request.address]))
    docker_image = transitive_targets.roots[0]

    # Get all dependencies for the root target.
    root_dependencies = await Get(
        Targets, DependenciesRequest(docker_image.get(Dependencies)))

    # Get all file sources from the root dependencies. That includes any non-file sources that can
    # be "codegen"ed into a file source.
    sources_request = Get(
        SourceFiles,
        SourceFilesRequest(
            sources_fields=[
                tgt.get(SourcesField) for tgt in root_dependencies
            ],
            for_sources_types=(
                DockerContextFilesSourcesField,
                FileSourceField,
            ),
            enable_codegen=True,
        ),
    )

    embedded_pkgs_per_target_request = Get(
        FieldSetsPerTarget,
        FieldSetsPerTargetRequest(PackageFieldSet,
                                  transitive_targets.dependencies),
    )

    sources, embedded_pkgs_per_target, dockerfile_info = await MultiGet(
        sources_request,
        embedded_pkgs_per_target_request,
        Get(DockerfileInfo, DockerfileInfoRequest(docker_image.address)),
    )

    # Package binary dependencies for build context.
    embedded_pkgs = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set)
        for field_set in embedded_pkgs_per_target.field_sets
        # Exclude docker images, unless build_upstream_images is true.
        if request.build_upstream_images or not isinstance(
            getattr(field_set, "source", None), DockerImageSourceField))

    if request.build_upstream_images:
        images_str = ", ".join(a.tags[0] for p in embedded_pkgs
                               for a in p.artifacts
                               if isinstance(a, BuiltDockerImage))
        if images_str:
            logger.debug(f"Built upstream Docker images: {images_str}")
        else:
            logger.debug("Did not build any upstream Docker images")

    packages_str = ", ".join(a.relpath for p in embedded_pkgs
                             for a in p.artifacts if a.relpath)
    if packages_str:
        logger.debug(f"Built packages for Docker image: {packages_str}")
    else:
        logger.debug("Did not build any packages for Docker image")

    embedded_pkgs_digest = [
        built_package.digest for built_package in embedded_pkgs
    ]
    all_digests = (dockerfile_info.digest, sources.snapshot.digest,
                   *embedded_pkgs_digest)

    # Merge all digests to get the final docker build context digest.
    context_request = Get(Snapshot, MergeDigests(d for d in all_digests if d))

    # Requests for build args and env
    build_args_request = Get(DockerBuildArgs,
                             DockerBuildArgsRequest(docker_image))
    build_env_request = Get(DockerBuildEnvironment,
                            DockerBuildEnvironmentRequest(docker_image))
    context, build_args, build_env = await MultiGet(context_request,
                                                    build_args_request,
                                                    build_env_request)

    if request.build_upstream_images:
        # Update build arg values for FROM image build args.

        # Get the FROM image build args with defined values in the Dockerfile.
        dockerfile_build_args = {
            k: v
            for k, v in
            dockerfile_info.from_image_build_args.to_dict().items() if v
        }

        # Parse the build args values into Address instances.
        from_image_addresses = await Get(
            Addresses,
            UnparsedAddressInputs(
                dockerfile_build_args.values(),
                owning_address=dockerfile_info.address,
                description_of_origin="TODO(#14468)",
            ),
        )
        # Map those addresses to the corresponding built image ref (tag).
        address_to_built_image_tag = {
            field_set.address: image.tags[0]
            for field_set, built in zip(embedded_pkgs_per_target.field_sets,
                                        embedded_pkgs)
            for image in built.artifacts if isinstance(image, BuiltDockerImage)
        }
        # Create the FROM image build args.
        from_image_build_args = [
            f"{arg_name}={address_to_built_image_tag[addr]}" for arg_name, addr
            in zip(dockerfile_build_args.keys(), from_image_addresses)
        ]
        # Merge all build args.
        build_args = DockerBuildArgs.from_strings(*build_args,
                                                  *from_image_build_args)

    return DockerBuildContext.create(
        build_args=build_args,
        snapshot=context,
        dockerfile_info=dockerfile_info,
        build_env=build_env,
    )
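
The FROM-image rewrite above pairs the Dockerfile's build arg names with the resolved addresses by position (`zip` over the dict's keys and the `Addresses` result), then substitutes each address's built image tag. A standalone sketch with hypothetical values:

# Hypothetical data standing in for dockerfile_info.from_image_build_args,
# the resolved from_image_addresses, and the built image tags.
dockerfile_build_args = {"BASE_IMAGE": "src/docker:base"}
from_image_addresses = ["src/docker:base"]  # resolved in request order
address_to_built_image_tag = {"src/docker:base": "base:latest"}

from_image_build_args = [
    f"{arg_name}={address_to_built_image_tag[addr]}"
    for arg_name, addr in zip(dockerfile_build_args.keys(), from_image_addresses)
]
print(from_image_build_args)  # ['BASE_IMAGE=base:latest']
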
Code Example #15
async def create_pex_binary_run_request(field_set: PexBinaryFieldSet,
                                        pex_binary_defaults: PexBinaryDefaults,
                                        pex_env: PexEnvironment) -> RunRequest:
    entry_point, transitive_targets = await MultiGet(
        Get(
            ResolvedPexEntryPoint,
            ResolvePexEntryPointRequest(field_set.entry_point),
        ),
        Get(TransitiveTargets, TransitiveTargetsRequest([field_set.address])),
    )

    addresses = [field_set.address]
    interpreter_constraints = await Get(
        InterpreterConstraints, InterpreterConstraintsRequest(addresses))

    pex_filename = (field_set.address.generated_name.replace(".", "_")
                    if field_set.address.generated_name else
                    field_set.address.target_name)
    pex_get = Get(
        Pex,
        PexFromTargetsRequest(
            [field_set.address],
            output_filename=f"{pex_filename}.pex",
            internal_only=True,
            include_source_files=False,
            # Note that the file for first-party entry points is not in the PEX itself. In that
            # case, it's loaded by setting `PEX_EXTRA_SYS_PATH`.
            main=entry_point.val or field_set.script.value,
            additional_args=(
                *field_set.generate_additional_args(pex_binary_defaults),
                # N.B.: Since we cobble together the runtime environment via PEX_EXTRA_SYS_PATH
                # below, it's important for any app that re-executes itself that these environment
                # variables are not stripped.
                "--no-strip-pex-env",
            ),
        ),
    )
    sources_get = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(transitive_targets.closure,
                                 include_files=True))
    pex, sources = await MultiGet(pex_get, sources_get)

    local_dists = await Get(
        LocalDistsPex,
        LocalDistsPexRequest(
            [field_set.address],
            internal_only=True,
            interpreter_constraints=interpreter_constraints,
            sources=sources,
        ),
    )

    merged_digest = await Get(
        Digest,
        MergeDigests([
            pex.digest,
            local_dists.pex.digest,
            local_dists.remaining_sources.source_files.snapshot.digest,
        ]),
    )

    def in_chroot(relpath: str) -> str:
        return os.path.join("{chroot}", relpath)

    complete_pex_env = pex_env.in_workspace()
    args = complete_pex_env.create_argv(in_chroot(pex.name), python=pex.python)

    chrooted_source_roots = [in_chroot(sr) for sr in sources.source_roots]
    extra_env = {
        **complete_pex_env.environment_dict(python_configured=pex.python is not None),
        "PEX_PATH":
        in_chroot(local_dists.pex.name),
        "PEX_EXTRA_SYS_PATH":
        os.pathsep.join(chrooted_source_roots),
    }

    return RunRequest(digest=merged_digest, args=args, extra_env=extra_env)
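
The `extra_env` built above makes first-party sources importable at runtime: each source root is rewritten into the sandbox via `in_chroot` and joined with `os.pathsep` for `PEX_EXTRA_SYS_PATH`. A standalone sketch ("{chroot}" is a placeholder the process runner substitutes; shown literally here):

import os

def in_chroot(relpath: str) -> str:
    return os.path.join("{chroot}", relpath)

source_roots = ["src/python", "tests/python"]
extra_env = {
    "PEX_PATH": in_chroot("local_dists.pex"),
    "PEX_EXTRA_SYS_PATH": os.pathsep.join(in_chroot(sr) for sr in source_roots),
}
print(extra_env)
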
Code Example #16
File: pytest_runner.py Project: Thangiee/pants
async def setup_pytest_for_target(
    config: PythonTestConfiguration,
    pytest: PyTest,
    test_options: TestOptions,
    python_setup: PythonSetup,
) -> TestTargetSetup:
    # TODO: Rather than consuming the TestOptions subsystem, the TestRunner should pass on coverage
    # configuration via #7490.

    test_addresses = Addresses((config.address, ))

    transitive_targets = await Get[TransitiveTargets](Addresses,
                                                      test_addresses)
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (tgt[PythonInterpreterCompatibility] for tgt in all_targets
         if tgt.has_field(PythonInterpreterCompatibility)),
        python_setup,
    )

    # Ensure all pexes we merge via PEX_PATH to form the test runner use the interpreter constraints
    # of the tests. This is handled by CreatePexFromTargetClosure, but we must pass this through for
    # CreatePex requests.
    pex_request = functools.partial(
        PexRequest, interpreter_constraints=interpreter_constraints)

    # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
    # `importlib_metadata` and thus `zipp`, does not play nicely when doing import magic directly
    # from zip files. `zipp` has pathologically bad behavior with large zipfiles.
    # TODO: this does have a performance cost as the pex must now be expanded to disk. Long term,
    # it would be better to fix Zipp (whose fix would then need to be used by importlib_metadata
    # and then by Pytest). See https://github.com/jaraco/zipp/pull/26.
    additional_args_for_pytest = ("--not-zip-safe", )

    run_coverage = test_options.values.run_coverage
    plugin_file_digest: Optional[Digest] = (await Get[Digest](
        InputFilesContent, COVERAGE_PLUGIN_INPUT) if run_coverage else None)

    pytest_pex_request = pex_request(
        output_filename="pytest.pex",
        requirements=PexRequirements(pytest.get_requirement_strings()),
        additional_args=additional_args_for_pytest,
        sources=plugin_file_digest,
    )

    requirements_pex_request = PexFromTargetsRequest(
        addresses=test_addresses,
        output_filename="requirements.pex",
        include_source_files=False,
        additional_args=additional_args_for_pytest,
    )

    test_runner_pex_request = pex_request(
        output_filename="test_runner.pex",
        entry_point="pytest:main",
        interpreter_constraints=interpreter_constraints,
        additional_args=(
            "--pex-path",
            # TODO(John Sirois): Support shading python binaries:
            #   https://github.com/pantsbuild/pants/issues/9206
            # Right now any pytest transitive requirements will shadow corresponding user
            # requirements which will lead to problems when APIs that are used by either
            # `pytest:main` or the tests themselves break between the two versions.
            ":".join((pytest_pex_request.output_filename,
                      requirements_pex_request.output_filename)),
        ),
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    specified_source_files_request = SpecifiedSourceFilesRequest(
        [(config.sources, config.origin)], strip_source_roots=True)

    # TODO(John Sirois): Support exploiting concurrency better:
    #   https://github.com/pantsbuild/pants/issues/9294
    # Some awkward code follows in order to execute 5-6 items concurrently given the current state
    # of MultiGet typing / API. Improve this since we should encourage full concurrency in general.
    requests: List[Get[Any]] = [
        Get[Pex](PexRequest, pytest_pex_request),
        Get[Pex](PexFromTargetsRequest, requirements_pex_request),
        Get[Pex](PexRequest, test_runner_pex_request),
        Get[ImportablePythonSources](Targets(all_targets)),
        Get[SourceFiles](SpecifiedSourceFilesRequest,
                         specified_source_files_request),
    ]
    if run_coverage:
        requests.append(
            Get[CoverageConfig](CoverageConfigRequest(
                Targets((tgt for tgt in all_targets
                         if tgt.has_field(PythonSources))),
                is_test_time=True,
            )))

    (
        pytest_pex,
        requirements_pex,
        test_runner_pex,
        prepared_sources,
        specified_source_files,
        *rest,
    ) = cast(
        Union[Tuple[Pex, Pex, Pex, ImportablePythonSources, SourceFiles],
              Tuple[Pex, Pex, Pex, ImportablePythonSources, SourceFiles,
                    CoverageConfig], ],
        await MultiGet(requests),
    )

    digests_to_merge = [
        prepared_sources.snapshot.digest,
        requirements_pex.digest,
        pytest_pex.digest,
        test_runner_pex.digest,
    ]
    if run_coverage:
        coverage_config = rest[0]
        digests_to_merge.append(coverage_config.digest)
    merged_input_files = await Get[Digest](MergeDigests(digests_to_merge))

    coverage_args = []
    if run_coverage:
        coverage_args = [
            "--cov-report=",  # To not generate any output. https://pytest-cov.readthedocs.io/en/latest/config.html
        ]
        for package in config.coverage.determine_packages_to_cover(
                specified_source_files=specified_source_files):
            coverage_args.extend(["--cov", package])

    specified_source_file_names = sorted(specified_source_files.snapshot.files)
    return TestTargetSetup(
        test_runner_pex=test_runner_pex,
        args=(*pytest.options.args, *coverage_args,
              *specified_source_file_names),
        input_files_digest=merged_input_files,
        timeout_seconds=config.timeout.calculate_from_global_options(pytest),
        xml_dir=pytest.options.junit_xml_dir,
        junit_family=pytest.options.junit_family,
    )
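
Example #16 uses functools.partial to pre-bind interpreter_constraints so every PexRequest it builds agrees on them. A self-contained sketch of that pre-binding pattern, with a stand-in dataclass in place of the real PexRequest:

import functools
from dataclasses import dataclass
from typing import Tuple

@dataclass(frozen=True)
class FakePexRequest:  # stand-in for the real PexRequest
    output_filename: str
    interpreter_constraints: Tuple[str, ...] = ()

# Bind the constraints once; every request created via the partial agrees.
pex_request = functools.partial(
    FakePexRequest, interpreter_constraints=("CPython>=3.7",)
)
pytest_req = pex_request(output_filename="pytest.pex")
runner_req = pex_request(output_filename="test_runner.pex")
assert pytest_req.interpreter_constraints == runner_req.interpreter_constraints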
Code Example #17
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    interactive_runner: InteractiveRunner,
    workspace: Workspace,
    union_membership: UnionMembership,
) -> Test:
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets)
        exit_code = 0
        for debug_request in debug_requests:
            if debug_request.process is None:
                continue
            debug_result = interactive_runner.run(debug_request.process)
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)

    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    field_sets_with_sources = await Get(
        FieldSetsWithSources,
        FieldSetsWithSourcesRequest(targets_to_valid_field_sets.field_sets))

    results = await MultiGet(
        Get(EnrichedTestResult, TestFieldSet, field_set)
        for field_set in field_sets_with_sources)

    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code == 0:
            sigil = console.green("✓")
            status = "succeeded"
        else:
            sigil = console.red("𐄂")
            status = "failed"
            exit_code = cast(int, result.exit_code)
        console.print_stderr(f"{sigil} {result.address} {status}.")

    merged_xml_results = await Get(
        Digest,
        MergeDigests(result.xml_results.digest for result in results
                     if result.xml_results),
    )
    workspace.write_digest(merged_xml_results)

    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, `type` objects aren't orderable, so we sort on `str(type(...))`.
        all_coverage_data = sorted(
            (result.coverage_data
             for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )

        coverage_types_to_collection_types: Dict[
            Type[CoverageData], Type[CoverageDataCollection]] = {
                collection_cls.element_type: collection_cls
                for collection_cls in union_membership.get(
                    CoverageDataCollection)
            }
        coverage_collections: List[CoverageDataCollection] = []
        for data_cls, data in itertools.groupby(all_coverage_data,
                                                lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (console, xml and html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections)

        coverage_report_files: List[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)

        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles,
                OpenFilesRequest(coverage_report_files,
                                 error_if_open_not_found=False))
            for process in open_files.processes:
                interactive_runner.run(process)

    return Test(exit_code)
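
The coverage branch above sorts results by str(type(...)) before calling itertools.groupby, because groupby only groups adjacent items. A minimal demonstration of why the pre-sort (with the same key) is required:

import itertools

data = ["xml-2", "console-1", "xml-1", "console-2"]
key = lambda item: item.split("-")[0]

# Without sorting, groupby would emit the "xml" group twice; sort with the
# same key function first so equal keys are adjacent.
data.sort(key=key)
for group_key, group in itertools.groupby(data, key=key):
    print(group_key, list(group))
# console ['console-1', 'console-2']
# xml ['xml-1', 'xml-2']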
Code Example #18
File: setup_py.py Project: nadeemnazeer/pants
async def generate_chroot(request: SetupPyChrootRequest) -> SetupPyChroot:
    exported_target = request.exported_target
    exported_addr = exported_target.target.address

    owned_deps, transitive_targets = await MultiGet(
        Get(OwnedDependencies, DependencyOwner(exported_target)),
        Get(TransitiveTargets, TransitiveTargetsRequest([exported_target.target.address])),
    )

    # files() targets aren't owned by a single exported target - they aren't code, so
    # we allow them to be in multiple dists. This is helpful for, e.g., embedding
    # a standard license file in a dist.
    files_targets = (tgt for tgt in transitive_targets.closure if tgt.has_field(FilesSources))
    targets = Targets(itertools.chain((od.target for od in owned_deps), files_targets))

    sources, requirements = await MultiGet(
        Get(SetupPySources, SetupPySourcesRequest(targets, py2=request.py2)),
        Get(ExportedTargetRequirements, DependencyOwner(exported_target)),
    )

    # Generate the kwargs for the setup() call. In addition to using the kwargs that are either
    # explicitly provided or generated via a user's plugin, we add additional kwargs based on the
    # resolved requirements and sources.
    target = exported_target.target
    resolved_setup_kwargs = await Get(SetupKwargs, ExportedTarget, exported_target)
    setup_kwargs = resolved_setup_kwargs.kwargs.copy()
    # NB: We are careful to not overwrite these values, but we also don't expect them to have been
    # set. The user must have gone out of their way to use a `SetupKwargs` plugin, and to have
    # specified `SetupKwargs(_allow_banned_keys=True)`.
    setup_kwargs.update(
        {
            "package_dir": {"": CHROOT_SOURCE_ROOT, **setup_kwargs.get("package_dir", {})},
            "packages": (*sources.packages, *(setup_kwargs.get("packages", []))),
            "namespace_packages": (
                *sources.namespace_packages,
                *setup_kwargs.get("namespace_packages", []),
            ),
            "package_data": {**dict(sources.package_data), **setup_kwargs.get("package_data", {})},
            "install_requires": (*requirements, *setup_kwargs.get("install_requires", [])),
        }
    )

    # Add any `pex_binary` targets from `setup_py().with_binaries()` to the dist's entry points.
    key_to_binary_spec = exported_target.provides.binaries
    binaries = await Get(
        Targets, UnparsedAddressInputs(key_to_binary_spec.values(), owning_address=target.address)
    )
    entry_point_requests = []
    for binary in binaries:
        if not binary.has_fields([PexEntryPointField, DeprecatedPexBinarySources]):
            raise InvalidEntryPoint(
                "Expected addresses to `pex_binary` targets in `.with_binaries()` for the "
                f"`provides` field for {exported_addr}, but found {binary.address} with target "
                f"type {binary.alias}."
            )
        entry_point = binary[PexEntryPointField].value
        url = "https://python-packaging.readthedocs.io/en/latest/command-line-scripts.html#the-console-scripts-entry-point"
        if not entry_point:
            raise InvalidEntryPoint(
                "Every `pex_binary` used in `.with_binaries()` for the `provides` field for "
                f"{exported_addr} must explicitly set the `entry_point` field, but "
                f"{binary.address} left the field off. Set `entry_point` to either "
                f"`app.py:func` or the longhand `path.to.app:func`. See {url}."
            )
        if ":" not in entry_point:
            raise InvalidEntryPoint(
                "Every `pex_binary` used in `with_binaries()` for the `provides()` field for "
                f"{exported_addr} must end in the format `:my_func` for the `entry_point` field, "
                f"but {binary.address} set it to {repr(entry_point)}. For example, set "
                f"`entry_point='{entry_point}:main'. See {url}."
            )
        entry_point_requests.append(
            ResolvePexEntryPointRequest(
                binary[PexEntryPointField], binary[DeprecatedPexBinarySources]
            )
        )
    binary_entry_points = await MultiGet(
        Get(ResolvedPexEntryPoint, ResolvePexEntryPointRequest, request)
        for request in entry_point_requests
    )
    for key, binary_entry_point in zip(key_to_binary_spec.keys(), binary_entry_points):
        entry_points = setup_kwargs["entry_points"] = setup_kwargs.get("entry_points", {})
        console_scripts = entry_points["console_scripts"] = entry_points.get("console_scripts", [])
        console_scripts.append(f"{key}={binary_entry_point.val}")

    # Generate the setup script.
    setup_py_content = SETUP_BOILERPLATE.format(
        target_address_spec=target.address.spec,
        setup_kwargs_str=distutils_repr(setup_kwargs),
    ).encode()
    files_to_create = [
        FileContent("setup.py", setup_py_content),
        FileContent("MANIFEST.in", "include *.py".encode()),
    ]
    extra_files_digest, src_digest = await MultiGet(
        Get(Digest, CreateDigest(files_to_create)),
        # Nest the sources under the src/ prefix.
        Get(Digest, AddPrefix(sources.digest, CHROOT_SOURCE_ROOT)),
    )

    chroot_digest = await Get(Digest, MergeDigests((src_digest, extra_files_digest)))
    return SetupPyChroot(chroot_digest, FinalizedSetupKwargs(setup_kwargs, address=target.address))
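
The setup_kwargs.update(...) call above merges generated values with any user-provided kwargs without clobbering them: generated entries are spliced in ahead of whatever the user already set. A small sketch of that merge, with hypothetical values:

# Hypothetical user-provided kwargs and generated values.
setup_kwargs = {"packages": ("user_pkg",), "install_requires": ("requests",)}
generated_packages = ("gen_pkg_a", "gen_pkg_b")
generated_requirements = ("ansicolors==1.1.8",)

setup_kwargs.update(
    {
        # Keep user entries by splicing them in after the generated ones.
        "packages": (*generated_packages, *setup_kwargs.get("packages", ())),
        "install_requires": (
            *generated_requirements,
            *setup_kwargs.get("install_requires", ()),
        ),
    }
)
print(setup_kwargs["packages"])  # ('gen_pkg_a', 'gen_pkg_b', 'user_pkg')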
Code Example #19
File: pytest_runner.py Project: Spacerat/pants
async def setup_pytest_for_target(
    field_set: PythonTestFieldSet,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
) -> TestTargetSetup:
    test_addresses = Addresses((field_set.address,))

    transitive_targets = await Get(TransitiveTargets, Addresses, test_addresses)
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (
            tgt[PythonInterpreterCompatibility]
            for tgt in all_targets
            if tgt.has_field(PythonInterpreterCompatibility)
        ),
        python_setup,
    )

    # Ensure all pexes we merge via PEX_PATH to form the test runner use the interpreter constraints
    # of the tests. This is handled by CreatePexFromTargetClosure, but we must pass this through for
    # CreatePex requests.
    pex_request = functools.partial(PexRequest, interpreter_constraints=interpreter_constraints)

    # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
    # `importlib_metadata` and thus `zipp`, does not play nicely when doing import magic directly
    # from zip files. `zipp` has pathologically bad behavior with large zipfiles.
    # TODO: this does have a performance cost as the pex must now be expanded to disk. Long term,
    # it would be better to fix Zipp (whose fix would then need to be used by importlib_metadata
    # and then by Pytest). See https://github.com/jaraco/zipp/pull/26.
    additional_args_for_pytest = ("--not-zip-safe",)

    pytest_pex_request = Get(
        Pex,
        PexRequest,
        pex_request(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            additional_args=additional_args_for_pytest,
        ),
    )

    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest(
            addresses=test_addresses,
            output_filename="requirements.pex",
            include_source_files=False,
            additional_args=additional_args_for_pytest,
        ),
    )

    test_runner_pex_request = Get(
        Pex,
        PexRequest,
        pex_request(
            output_filename="test_runner.pex",
            entry_point="pytest:main",
            interpreter_constraints=interpreter_constraints,
            additional_args=(
                "--pex-path",
                # TODO(John Sirois): Support shading python binaries:
                #   https://github.com/pantsbuild/pants/issues/9206
                # Right now any pytest transitive requirements will shadow corresponding user
                # requirements which will lead to problems when APIs that are used by either
                # `pytest:main` or the tests themselves break between the two versions.
                ":".join(
                    (
                        pytest_pex_request.subject.output_filename,
                        requirements_pex_request.subject.output_filename,
                    )
                ),
            ),
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles, PythonSourceFilesRequest(all_targets, include_files=True)
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(SourceFiles, SourceFilesRequest([field_set.sources]))

    (
        pytest_pex,
        requirements_pex,
        test_runner_pex,
        prepared_sources,
        field_set_source_files,
    ) = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        test_runner_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                coverage_config.digest,
                prepared_sources.source_files.snapshot.digest,
                requirements_pex.digest,
                pytest_pex.digest,
                test_runner_pex.digest,
            )
        ),
    )

    coverage_args = []
    if test_subsystem.use_coverage:
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path] for cov_path in cov_paths),
        ]
    return TestTargetSetup(
        test_runner_pex=test_runner_pex,
        args=(*pytest.options.args, *coverage_args, *field_set_source_files.files),
        input_digest=input_digest,
        source_roots=prepared_sources.source_roots,
        timeout_seconds=field_set.timeout.calculate_from_global_options(pytest),
        xml_dir=pytest.options.junit_xml_dir,
        junit_family=pytest.options.junit_family,
        execution_slot_variable=pytest.options.execution_slot_var,
    )
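
The coverage flags above are built by interleaving --cov with each path via itertools.chain.from_iterable. The same construction in isolation, with hypothetical filter paths:

import itertools

cov_paths = ("src/python", "tests")  # hypothetical filter paths
coverage_args = [
    "--cov-report=",  # an empty value suppresses pytest-cov's own report output
    *itertools.chain.from_iterable(["--cov", path] for path in cov_paths),
]
print(coverage_args)
# ['--cov-report=', '--cov', 'src/python', '--cov', 'tests']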
Code Example #20
File: pex_from_targets.py Project: codealchemy/pants
async def create_pex_from_targets(request: PexFromTargetsRequest) -> PexRequest:
    interpreter_constraints = await Get(
        InterpreterConstraints,
        InterpreterConstraintsRequest,
        request.to_interpreter_constraints_request(),
    )

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))

    sources_digests = []
    if request.additional_sources:
        sources_digests.append(request.additional_sources)
    if request.include_source_files:
        sources = await Get(PythonSourceFiles, PythonSourceFilesRequest(transitive_targets.closure))
    else:
        sources = PythonSourceFiles.empty()

    additional_inputs_digests = []
    if request.additional_inputs:
        additional_inputs_digests.append(request.additional_inputs)
    additional_args = request.additional_args
    if request.include_local_dists:
        local_dists = await Get(
            LocalDistsPex,
            LocalDistsPexRequest(
                request.addresses,
                internal_only=request.internal_only,
                interpreter_constraints=interpreter_constraints,
                sources=sources,
            ),
        )
        remaining_sources = local_dists.remaining_sources
        additional_inputs_digests.append(local_dists.pex.digest)
        additional_args += ("--requirements-pex", local_dists.pex.name)
    else:
        remaining_sources = sources

    remaining_sources_stripped = await Get(
        StrippedPythonSourceFiles, PythonSourceFiles, remaining_sources
    )
    sources_digests.append(remaining_sources_stripped.stripped_source_files.snapshot.digest)

    merged_sources_digest, additional_inputs = await MultiGet(
        Get(Digest, MergeDigests(sources_digests)),
        Get(Digest, MergeDigests(additional_inputs_digests)),
    )

    description = request.description

    if request.include_requirements:
        requirements = await Get(PexRequirements, _PexRequirementsRequest(request.addresses))
    else:
        requirements = PexRequirements()

    if requirements:
        repository_pex = await Get(
            OptionalPex,
            _RepositoryPexRequest(
                request.addresses,
                requirements=requirements,
                hardcoded_interpreter_constraints=request.hardcoded_interpreter_constraints,
                platforms=request.platforms,
                complete_platforms=request.complete_platforms,
                internal_only=request.internal_only,
                additional_lockfile_args=request.additional_lockfile_args,
            ),
        )
        requirements = dataclasses.replace(requirements, repository_pex=repository_pex.maybe_pex)

    return PexRequest(
        output_filename=request.output_filename,
        internal_only=request.internal_only,
        layout=request.layout,
        requirements=requirements,
        interpreter_constraints=interpreter_constraints,
        platforms=request.platforms,
        complete_platforms=request.complete_platforms,
        main=request.main,
        sources=merged_sources_digest,
        additional_inputs=additional_inputs,
        additional_args=additional_args,
        description=description,
    )
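
dataclasses.replace is how the rule above attaches the optional repository PEX to an otherwise frozen PexRequirements value. A sketch with a stand-in dataclass:

import dataclasses
from typing import Optional, Tuple

@dataclasses.dataclass(frozen=True)
class FakeRequirements:  # stand-in for the real PexRequirements
    req_strings: Tuple[str, ...] = ()
    repository_pex: Optional[str] = None

reqs = FakeRequirements(req_strings=("ansicolors==1.1.8",))
# replace() returns a copy with selected fields overridden; the frozen
# original is left untouched.
updated = dataclasses.replace(reqs, repository_pex="repository.pex")
print(reqs.repository_pex, updated.repository_pex)  # None repository.pex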
Code Example #21
async def generate_python_from_protobuf(
    request: GeneratePythonFromProtobufRequest,
    protoc: Protoc,
    grpc_python_plugin: GrpcPythonPlugin,
    python_protobuf_subsystem: PythonProtobufSubsystem,
) -> GeneratedSources:
    download_protoc_request = Get(DownloadedExternalTool, ExternalToolRequest,
                                  protoc.get_request(Platform.current))

    output_dir = "_generated_files"
    create_output_dir_request = Get(Digest,
                                    CreateDigest([Directory(output_dir)]))

    # Protoc needs all transitive dependencies on `protobuf_libraries` to work properly. It won't
    # actually generate those dependencies; it only needs to look at their .proto files to work
    # with imports.
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([request.protocol_target.address]))

    # NB: By stripping the source roots, we avoid having to set the value `--proto_path`
    # for Protobuf imports to be discoverable.
    all_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            (tgt.get(Sources) for tgt in transitive_targets.closure),
            for_sources_types=(ProtobufSources, ),
        ),
    )
    target_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest([request.protocol_target[ProtobufSources]]))

    (
        downloaded_protoc_binary,
        empty_output_dir,
        all_sources_stripped,
        target_sources_stripped,
    ) = await MultiGet(
        download_protoc_request,
        create_output_dir_request,
        all_stripped_sources_request,
        target_stripped_sources_request,
    )

    protoc_gen_mypy_script = "protoc-gen-mypy"
    mypy_pex = None
    if python_protobuf_subsystem.mypy_plugin:
        mypy_pex = await Get(
            VenvPex,
            VenvPexRequest(
                bin_names=[protoc_gen_mypy_script],
                pex_request=PexRequest(
                    output_filename="mypy_protobuf.pex",
                    internal_only=True,
                    requirements=PexRequirements(
                        [python_protobuf_subsystem.mypy_plugin_version]),
                    # TODO(John Sirois): Fix these interpreter constraints to track the actual
                    #  python requirement of the mypy_plugin_version or else plumb an option for
                    #  manually setting the constraint to track what mypy_plugin_version needs:
                    #  https://github.com/pantsbuild/pants/issues/11565
                    # Here we guess a constraint that will likely work with any mypy_plugin_version
                    # selected.
                    interpreter_constraints=PexInterpreterConstraints(
                        ["CPython>=3.5"]),
                ),
            ),
        )

    downloaded_grpc_plugin = (
        await Get(
            DownloadedExternalTool,
            ExternalToolRequest,
            grpc_python_plugin.get_request(Platform.current),
        )
        if request.protocol_target.get(ProtobufGrpcToggle).value
        else None
    )

    unmerged_digests = [
        all_sources_stripped.snapshot.digest,
        downloaded_protoc_binary.digest,
        empty_output_dir,
    ]
    if mypy_pex:
        unmerged_digests.append(mypy_pex.digest)
    if downloaded_grpc_plugin:
        unmerged_digests.append(downloaded_grpc_plugin.digest)
    input_digest = await Get(Digest, MergeDigests(unmerged_digests))

    argv = [downloaded_protoc_binary.exe, "--python_out", output_dir]
    if mypy_pex:
        argv.extend([
            f"--plugin=protoc-gen-mypy={mypy_pex.bin[protoc_gen_mypy_script].argv0}",
            "--mypy_out",
            output_dir,
        ])
    if downloaded_grpc_plugin:
        argv.extend([
            f"--plugin=protoc-gen-grpc={downloaded_grpc_plugin.exe}",
            "--grpc_out", output_dir
        ])
    argv.extend(target_sources_stripped.snapshot.files)

    result = await Get(
        ProcessResult,
        Process(
            argv,
            input_digest=input_digest,
            description=f"Generating Python sources from {request.protocol_target.address}.",
            level=LogLevel.DEBUG,
            output_directories=(output_dir, ),
        ),
    )

    # We must do some path manipulation on the output digest for it to look like normal sources,
    # including adding back a source root.
    py_source_root = request.protocol_target.get(PythonSourceRootField).value
    if py_source_root:
        # Verify that the python source root specified by the target is in fact a source root.
        source_root_request = SourceRootRequest(PurePath(py_source_root))
    else:
        # The target didn't specify a python source root, so use the protobuf_library's source root.
        source_root_request = SourceRootRequest.for_target(
            request.protocol_target)

    normalized_digest, source_root = await MultiGet(
        Get(Digest, RemovePrefix(result.output_digest, output_dir)),
        Get(SourceRoot, SourceRootRequest, source_root_request),
    )

    source_root_restored = (
        await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
        if source_root.path != "."
        else await Get(Snapshot, Digest, normalized_digest)
    )
    return GeneratedSources(source_root_restored)
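
The final step strips the scratch output_dir prefix and re-nests the generated files under the target's source root. RemovePrefix and AddPrefix operate on digests, but the path arithmetic is the same as this pathlib sketch (paths are hypothetical):

from pathlib import PurePosixPath

output_dir = "_generated_files"
generated = PurePosixPath(output_dir) / "models" / "foo_pb2.py"
source_root = "src/python"  # hypothetical source root

# RemovePrefix analogue: strip the scratch directory...
stripped = generated.relative_to(output_dir)
# ...AddPrefix analogue: nest under the source root so the output looks
# like an ordinary first-party source file.
relocated = PurePosixPath(source_root) / stripped
print(relocated)  # src/python/models/foo_pb2.py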
Code Example #22
File: rules.py Project: pantsbuild/example-plugin
async def run_shellcheck(request: ShellcheckRequest,
                         shellcheck: Shellcheck) -> LintResults:
    if shellcheck.options.skip:
        return LintResults([], linter_name="Shellcheck")

    # Shellcheck looks at direct dependencies to make sure that every symbol is defined, so we must
    # include those in the run.
    all_dependencies = await MultiGet(
        Get(Targets, DependenciesRequest(field_set.dependencies))
        for field_set in request.field_sets)
    # Now that we have all dependencies, we flatten the results into a single list of `BashSources`
    # fields and we filter out all targets without a `BashSources` field registered because those
    # dependencies are irrelevant to Shellcheck.
    dependencies_sources_fields = [
        tgt[BashSources] for dependencies in all_dependencies
        for tgt in dependencies if tgt.has_field(BashSources)
    ]

    sources_request = Get(
        SourceFiles,
        SourceFilesRequest([
            *(field_set.sources for field_set in request.field_sets),
            *dependencies_sources_fields,
        ]),
    )

    download_shellcheck_request = Get(
        DownloadedExternalTool,
        ExternalToolRequest,
        shellcheck.get_request(Platform.current),
    )

    # If the user specified `--shellcheck-config`, we search for the file they specified with
    # `PathGlobs` to include it in the `input_digest`. We error if the file cannot be found.
    # See https://www.pantsbuild.org/v2.0/docs/rules-api-file-system.
    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[shellcheck.options.config]
            if shellcheck.options.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--shellcheck-config`",
        ),
    )

    sources, downloaded_shellcheck, config_digest = await MultiGet(
        sources_request, download_shellcheck_request, config_digest_request)

    # The Process needs one single `Digest`, so we merge everything together. See
    # https://www.pantsbuild.org/v2.0/docs/rules-api-file-system.
    input_digest = await Get(
        Digest,
        MergeDigests((sources.snapshot.digest, downloaded_shellcheck.digest,
                      config_digest)),
    )

    # We use `FallibleProcessResult`, rather than `ProcessResult`, because we're okay with the
    # Process failing.
    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                downloaded_shellcheck.exe,
                *shellcheck.options.args,
                *sources.snapshot.files,
            ],
            input_digest=input_digest,
            description=f"Run Shellcheck on {pluralize(len(request.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    result = LintResult.from_fallible_process_result(process_result)
    return LintResults([result], linter_name="Shellcheck")
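
The nested comprehension near the top of Example #22 flattens the per-field-set dependency lists into one list while filtering on a field check. The same shape in isolation, with a hypothetical predicate in place of tgt.has_field(BashSources):

# One dependency list per field set, as MultiGet would return them.
all_dependencies = [["tgt_a", "tgt_b"], ["tgt_b", "tgt_c"]]

def has_bash_sources(tgt: str) -> bool:
    return tgt != "tgt_c"  # hypothetical stand-in for tgt.has_field(...)

dependencies_sources = [
    tgt
    for dependencies in all_dependencies  # outer loop: each field set's deps
    for tgt in dependencies               # inner loop: each dependency target
    if has_bash_sources(tgt)
]
print(dependencies_sources)  # ['tgt_a', 'tgt_b', 'tgt_b']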
Code Example #23
async def get_helm_chart(request: HelmChartRequest,
                         subsystem: HelmSubsystem) -> HelmChart:
    dependencies, source_files, metadata = await MultiGet(
        Get(Targets, DependenciesRequest(request.field_set.dependencies)),
        Get(
            HelmChartSourceFiles,
            HelmChartSourceFilesRequest,
            HelmChartSourceFilesRequest.for_field_set(
                request.field_set,
                include_metadata=False,
                include_resources=True,
                include_files=True,
            ),
        ),
        Get(HelmChartMetadata, HelmChartMetaSourceField,
            request.field_set.chart),
    )

    third_party_artifacts = await Get(
        FetchedHelmArtifacts,
        FetchHelmArfifactsRequest,
        FetchHelmArfifactsRequest.for_targets(
            dependencies,
            description_of_origin=request.field_set.address.spec),
    )

    first_party_subcharts = await MultiGet(
        Get(HelmChart, HelmChartRequest, HelmChartRequest.from_target(target))
        for target in dependencies if HelmChartFieldSet.is_applicable(target))
    third_party_charts = await MultiGet(
        Get(HelmChart, FetchedHelmArtifact, artifact)
        for artifact in third_party_artifacts)

    subcharts = [*first_party_subcharts, *third_party_charts]
    subcharts_digest = EMPTY_DIGEST
    if subcharts:
        logger.debug(
            f"Found {pluralize(len(subcharts), 'subchart')} as direct dependencies on Helm chart at: {request.field_set.address}"
        )

        merged_subcharts = await Get(
            Digest,
            MergeDigests([chart.snapshot.digest for chart in subcharts]))
        subcharts_digest = await Get(Digest,
                                     AddPrefix(merged_subcharts, "charts"))

        # Update subchart dependencies in the metadata and re-render it.
        remotes = subsystem.remotes()
        subchart_map: dict[str, HelmChart] = {
            chart.metadata.name: chart
            for chart in subcharts
        }
        updated_dependencies: OrderedSet[HelmChartDependency] = OrderedSet()
        for dep in metadata.dependencies:
            updated_dep = dep

            if not dep.repository and remotes.default_registry:
                # If the dependency hasn't specified a repository, then we choose the registry with the 'default' alias.
                default_remote = remotes.default_registry
                updated_dep = dataclasses.replace(
                    updated_dep, repository=default_remote.address)
            elif dep.repository and dep.repository.startswith("@"):
                remote = next(remotes.get(dep.repository))
                updated_dep = dataclasses.replace(updated_dep,
                                                  repository=remote.address)

            if dep.name in subchart_map:
                updated_dep = dataclasses.replace(
                    updated_dep,
                    version=subchart_map[dep.name].metadata.version)

            updated_dependencies.add(updated_dep)

        # Include the explicitly provided subcharts in the set of dependencies if not already present.
        updated_dependencies_names = {dep.name for dep in updated_dependencies}
        remaining_subcharts = [
            chart for chart in subcharts
            if chart.metadata.name not in updated_dependencies_names
        ]
        for chart in remaining_subcharts:
            if chart.artifact:
                dependency = HelmChartDependency(
                    name=chart.artifact.name,
                    version=chart.artifact.version,
                    repository=chart.artifact.location_url,
                )
            else:
                dependency = HelmChartDependency(
                    name=chart.metadata.name, version=chart.metadata.version)
            updated_dependencies.add(dependency)

        # Update metadata with the information about charts' dependencies.
        metadata = dataclasses.replace(
            metadata, dependencies=tuple(updated_dependencies))

    # Re-render the Chart.yaml file with the updated dependencies.
    metadata_digest, sources_without_metadata = await MultiGet(
        Get(Digest, HelmChartMetadata, metadata),
        Get(
            Digest,
            DigestSubset(
                source_files.snapshot.digest,
                PathGlobs([
                    "**/*", *(f"!**/{filename}"
                              for filename in HELM_CHART_METADATA_FILENAMES)
                ]),
            ),
        ),
    )

    # Merge all the digests that make up the chart's content.
    content_digest = await Get(
        Digest,
        MergeDigests(
            [metadata_digest, sources_without_metadata, subcharts_digest]))

    chart_snapshot = await Get(Snapshot,
                               AddPrefix(content_digest, metadata.name))
    return HelmChart(address=request.field_set.address,
                     metadata=metadata,
                     snapshot=chart_snapshot)
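
The dependency rewriting above relies on OrderedSet to deduplicate HelmChartDependency entries while preserving first-seen order. If that class isn't at hand, dict.fromkeys gives the same order-preserving dedup for hashable items:

deps = ["postgresql", "redis", "postgresql", "kafka"]
# dict keys preserve insertion order (Python 3.7+), so this keeps the first
# occurrence of each item and drops later duplicates.
unique_in_order = list(dict.fromkeys(deps))
print(unique_in_order)  # ['postgresql', 'redis', 'kafka']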
Code Example #24
File: pytest_runner.py Project: nadeemnazeer/pants
async def setup_pytest_for_target(
    request: TestSetupRequest,
    pytest: PyTest,
    test_subsystem: TestSubsystem,
    python_setup: PythonSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    test_extra_env: TestExtraEnv,
    global_options: GlobalOptions,
) -> TestSetup:
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest([request.field_set.address]))
    all_targets = transitive_targets.closure

    interpreter_constraints = PexInterpreterConstraints.create_from_targets(
        all_targets, python_setup)

    # Defaults to zip_safe=False.
    requirements_pex_request = Get(
        Pex,
        PexFromTargetsRequest,
        PexFromTargetsRequest.for_requirements([request.field_set.address],
                                               internal_only=True),
    )

    pytest_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            interpreter_constraints=interpreter_constraints,
            entry_point="pytest:main",
            internal_only=True,
            additional_args=(
                # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
                # `importlib_metadata` and thus `zipp`, does not play nicely when doing import
                # magic directly from zip files. `zipp` has pathologically bad behavior with large
                # zipfiles.
                # TODO: this does have a performance cost as the pex must now be expanded to disk.
                # Long term, it would be better to fix Zipp (whose fix would then need to be used
                # by importlib_metadata and then by Pytest). See
                # https://github.com/jaraco/zipp/pull/26.
                "--not-zip-safe",
                # TODO(John Sirois): Support shading python binaries:
                #   https://github.com/pantsbuild/pants/issues/9206
                "--pex-path",
                requirements_pex_request.input.output_filename,
            ),
        ),
    )

    prepared_sources_request = Get(
        PythonSourceFiles,
        PythonSourceFilesRequest(all_targets, include_files=True))

    # Create any assets that the test depends on through the `runtime_package_dependencies` field.
    assets: Tuple[BuiltPackage, ...] = ()
    unparsed_runtime_packages = (request.field_set.runtime_package_dependencies
                                 .to_unparsed_address_inputs())
    if unparsed_runtime_packages.values:
        runtime_package_targets = await Get(Targets, UnparsedAddressInputs,
                                            unparsed_runtime_packages)
        field_sets_per_target = await Get(
            FieldSetsPerTarget,
            FieldSetsPerTargetRequest(PackageFieldSet,
                                      runtime_package_targets),
        )
        assets = await MultiGet(
            Get(BuiltPackage, PackageFieldSet, field_set)
            for field_set in field_sets_per_target.field_sets)

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    field_set_source_files_request = Get(
        SourceFiles, SourceFilesRequest([request.field_set.sources]))

    pytest_pex, requirements_pex, prepared_sources, field_set_source_files = await MultiGet(
        pytest_pex_request,
        requirements_pex_request,
        prepared_sources_request,
        field_set_source_files_request,
    )

    input_digest = await Get(
        Digest,
        MergeDigests((
            coverage_config.digest,
            prepared_sources.source_files.snapshot.digest,
            requirements_pex.digest,
            pytest_pex.digest,
            *(binary.digest for binary in assets),
        )),
    )

    add_opts = [f"--color={'yes' if global_options.options.colors else 'no'}"]
    output_files = []

    results_file_name = None
    if pytest.options.junit_xml_dir and not request.is_debug:
        results_file_name = f"{request.field_set.address.path_safe_spec}.xml"
        add_opts.extend((f"--junitxml={results_file_name}", "-o",
                         f"junit_family={pytest.options.junit_family}"))
        output_files.append(results_file_name)

    coverage_args = []
    if test_subsystem.use_coverage and not request.is_debug:
        output_files.append(".coverage")
        cov_paths = coverage_subsystem.filter if coverage_subsystem.filter else (".",)
        coverage_args = [
            "--cov-report=",  # Turn off output.
            *itertools.chain.from_iterable(["--cov", cov_path]
                                           for cov_path in cov_paths),
        ]

    extra_env = {
        "PYTEST_ADDOPTS": " ".join(add_opts),
        "PEX_EXTRA_SYS_PATH": ":".join(prepared_sources.source_roots),
    }

    extra_env.update(test_extra_env.env)

    # Cache test runs only if they are successful, or not at all if `--test-force`.
    cache_scope = ProcessCacheScope.NEVER if test_subsystem.force else ProcessCacheScope.SUCCESSFUL
    process = await Get(
        Process,
        PexProcess(
            pytest_pex,
            argv=(*pytest.options.args, *coverage_args,
                  *field_set_source_files.files),
            extra_env=extra_env,
            input_digest=input_digest,
            output_files=output_files,
            timeout_seconds=request.field_set.timeout.calculate_from_global_options(pytest),
            execution_slot_variable=pytest.options.execution_slot_var,
            description=f"Run Pytest for {request.field_set.address}",
            level=LogLevel.DEBUG,
            cache_scope=cache_scope,
        ),
    )
    return TestSetup(process, results_file_name=results_file_name)
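
Example #24 passes its per-run flags to Pytest through the PYTEST_ADDOPTS environment variable and exposes source roots via PEX_EXTRA_SYS_PATH. A sketch of how those two variables are assembled (the option values and paths are hypothetical):

import os

add_opts = ["--color=yes", "--junitxml=results.xml", "-o", "junit_family=xunit2"]
source_roots = ["src/python", "tests"]

extra_env = {
    # Pytest reads extra command-line options from this variable.
    "PYTEST_ADDOPTS": " ".join(add_opts),
    # PEX prepends these entries to sys.path inside the running PEX.
    "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots),
}
print(extra_env)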
Code Example #25
async def generate_scala_from_protobuf(
    request: GenerateScalaFromProtobufRequest,
    protoc: Protoc,
    scalapb: ScalaPBSubsystem,
    shim_classfiles: ScalaPBShimCompiledClassfiles,
    jdk: InternalJdk,
) -> GeneratedSources:
    output_dir = "_generated_files"
    toolcp_relpath = "__toolcp"
    shimcp_relpath = "__shimcp"
    plugins_relpath = "__plugins"
    protoc_relpath = "__protoc"

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 ScalapbcToolLockfileSentinel())
    (
        downloaded_protoc_binary,
        tool_classpath,
        empty_output_dir,
        transitive_targets,
        inherit_env,
    ) = await MultiGet(
        Get(DownloadedExternalTool, ExternalToolRequest,
            protoc.get_request(Platform.current)),
        Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
        Get(Digest, CreateDigest([Directory(output_dir)])),
        Get(TransitiveTargets,
            TransitiveTargetsRequest([request.protocol_target.address])),
        # Need PATH so that ScalaPB can invoke `mkfifo`.
        Get(Environment, EnvironmentRequest(requested=["PATH"])),
    )

    # NB: By stripping the source roots, we avoid having to set the value `--proto_path`
    # for Protobuf imports to be discoverable.
    all_sources_stripped, target_sources_stripped = await MultiGet(
        Get(
            StrippedSourceFiles,
            SourceFilesRequest(tgt[ProtobufSourceField]
                               for tgt in transitive_targets.closure
                               if tgt.has_field(ProtobufSourceField)),
        ),
        Get(StrippedSourceFiles,
            SourceFilesRequest([request.protocol_target[ProtobufSourceField]
                                ])),
    )

    merged_jvm_plugins_digest = EMPTY_DIGEST
    maybe_jvm_plugins_setup_args: tuple[str, ...] = ()
    maybe_jvm_plugins_output_args: tuple[str, ...] = ()
    jvm_plugins = scalapb.jvm_plugins
    if jvm_plugins:
        materialized_jvm_plugins = await Get(
            MaterializedJvmPlugins, MaterializeJvmPluginsRequest(jvm_plugins))
        merged_jvm_plugins_digest = materialized_jvm_plugins.digest
        maybe_jvm_plugins_setup_args = materialized_jvm_plugins.setup_args(
            plugins_relpath)
        maybe_jvm_plugins_output_args = tuple(
            f"--{plugin.name}_out={output_dir}"
            for plugin in materialized_jvm_plugins.plugins)

    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
        shimcp_relpath: shim_classfiles.digest,
        plugins_relpath: merged_jvm_plugins_digest,
        protoc_relpath: downloaded_protoc_binary.digest,
    }

    input_digest = await Get(
        Digest,
        MergeDigests([all_sources_stripped.snapshot.digest, empty_output_dir]))

    result = await Get(
        ProcessResult,
        JvmProcess(
            jdk=jdk,
            classpath_entries=[
                *tool_classpath.classpath_entries(toolcp_relpath),
                shimcp_relpath
            ],
            argv=[
                "org.pantsbuild.backend.scala.scalapb.ScalaPBShim",
                f"--protoc={os.path.join(protoc_relpath, downloaded_protoc_binary.exe)}",
                *maybe_jvm_plugins_setup_args,
                f"--scala_out={output_dir}",
                *maybe_jvm_plugins_output_args,
                *target_sources_stripped.snapshot.files,
            ],
            input_digest=input_digest,
            extra_immutable_input_digests=extra_immutable_input_digests,
            extra_nailgun_keys=extra_immutable_input_digests,
            description=f"Generating Scala sources from {request.protocol_target.address}.",
            level=LogLevel.DEBUG,
            output_directories=(output_dir, ),
            extra_env=inherit_env,
        ),
    )

    normalized_digest, source_root = await MultiGet(
        Get(Digest, RemovePrefix(result.output_digest, output_dir)),
        Get(SourceRoot, SourceRootRequest,
            SourceRootRequest.for_target(request.protocol_target)),
    )

    source_root_restored = (
        await Get(Snapshot, AddPrefix(normalized_digest, source_root.path))
        if source_root.path != "."
        else await Get(Snapshot, Digest, normalized_digest)
    )
    return GeneratedSources(source_root_restored)
Code Example #26
async def setup(
    request: SetupRequest,
    isort: Isort,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> Setup:
    requirements_pex_request = Get[Pex](PexRequest(
        output_filename="isort.pex",
        requirements=PexRequirements(isort.get_requirement_specs()),
        interpreter_constraints=PexInterpreterConstraints(
            isort.default_interpreter_constraints),
        entry_point=isort.get_entry_point(),
    ))

    config_path: Optional[List[str]] = isort.options.config
    config_snapshot_request = Get[Snapshot](PathGlobs(
        globs=config_path or (),
        glob_match_error_behavior=GlobMatchErrorBehavior.error,
        conjunction=GlobExpansionConjunction.all_match,
        description_of_origin="the option `--isort-config`",
    ))

    all_source_files_request = Get[SourceFiles](AllSourceFilesRequest(
        field_set.sources for field_set in request.field_sets))
    specified_source_files_request = Get[SourceFiles](
        SpecifiedSourceFilesRequest((field_set.sources, field_set.origin)
                                    for field_set in request.field_sets))

    requests: List[Get] = [
        requirements_pex_request,
        config_snapshot_request,
        specified_source_files_request,
    ]
    if request.field_sets.prior_formatter_result is None:
        requests.append(all_source_files_request)
    requirements_pex, config_snapshot, specified_source_files, *rest = cast(
        Union[Tuple[Pex, Snapshot, SourceFiles],
              Tuple[Pex, Snapshot, SourceFiles, SourceFiles]],
        await MultiGet(requests),
    )

    all_source_files_snapshot = (request.field_sets.prior_formatter_result
                                 if request.field_sets.prior_formatter_result
                                 else rest[0].snapshot)

    input_digest = await Get[Digest](MergeDigests(
        (all_source_files_snapshot.digest, requirements_pex.digest,
         config_snapshot.digest)))

    address_references = ", ".join(
        sorted(field_set.address.reference()
               for field_set in request.field_sets))

    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./isort.pex",
        pex_args=generate_args(
            specified_source_files=specified_source_files,
            isort=isort,
            check_only=request.check_only,
        ),
        input_digest=input_digest,
        output_files=all_source_files_snapshot.files,
        description=f"Run isort on {pluralize(len(request.field_sets), 'target')}: {address_references}.",
    )
    return Setup(process, original_digest=all_source_files_snapshot.digest)
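
Examples #16 and #26 append an optional request to the batch and then cast the result to a Union of the two possible tuple shapes; cast is purely for the type checker and does nothing at runtime. A runnable sketch of that unpacking pattern, with a fake stand-in for MultiGet:

from typing import List, Tuple, Union, cast

def fake_multiget(requests: List[str]) -> List[str]:
    return [f"{r}-result" for r in requests]  # stand-in for await MultiGet(...)

need_all_sources = True
requests = ["pex", "config", "specified"]
if need_all_sources:
    requests.append("all")

# cast() is a runtime no-op: it only tells the checker that the result is one
# of two tuple shapes, depending on whether the optional request was added.
pex, config, specified, *rest = cast(
    Union[Tuple[str, str, str], Tuple[str, str, str, str]],
    fake_multiget(requests),
)
print(pex, config, specified, rest[0] if rest else None)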
Code Example #27
File: rules.py Project: scalameta/pants
async def setup(
    setup_request: SetupRequest,
    docformatter: Docformatter,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> Setup:
    requirements_pex_request = Get[Pex](
        PexRequest(
            output_filename="docformatter.pex",
            requirements=PexRequirements(docformatter.get_requirement_specs()),
            interpreter_constraints=PexInterpreterConstraints(
                docformatter.default_interpreter_constraints
            ),
            entry_point=docformatter.get_entry_point(),
        )
    )

    all_source_files_request = Get[SourceFiles](
        AllSourceFilesRequest(field_set.sources for field_set in setup_request.request.field_sets)
    )
    specified_source_files_request = Get[SourceFiles](
        SpecifiedSourceFilesRequest(
            (field_set.sources, field_set.origin) for field_set in setup_request.request.field_sets
        )
    )

    requests = requirements_pex_request, specified_source_files_request
    all_source_files, requirements_pex, specified_source_files = (
        await MultiGet(all_source_files_request, *requests)
        if setup_request.request.prior_formatter_result is None
        else (SourceFiles(EMPTY_SNAPSHOT), *await MultiGet(*requests))
    )
    all_source_files_snapshot = (
        all_source_files.snapshot
        if setup_request.request.prior_formatter_result is None
        else setup_request.request.prior_formatter_result
    )

    input_digest = await Get[Digest](
        MergeDigests((all_source_files_snapshot.digest, requirements_pex.digest))
    )

    address_references = ", ".join(
        sorted(field_set.address.reference() for field_set in setup_request.request.field_sets)
    )

    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./docformatter.pex",
        pex_args=generate_args(
            specified_source_files=specified_source_files,
            docformatter=docformatter,
            check_only=setup_request.check_only,
        ),
        input_digest=input_digest,
        output_files=all_source_files_snapshot.files,
        description=(
            f"Run Docformatter on {pluralize(len(setup_request.request.field_sets), 'target')}: "
            f"{address_references}."
        ),
    )
    return Setup(process, original_digest=all_source_files_snapshot.digest)
Code Example #28
File: rules.py Project: leigh-johnson/pants
async def bandit_lint_partition(partition: BanditPartition, bandit: Bandit,
                                lint_subsystem: LintSubsystem) -> LintResult:
    bandit_pex_get = Get(
        VenvPex,
        PexRequest(
            output_filename="bandit.pex",
            internal_only=True,
            requirements=PexRequirements(bandit.all_requirements),
            interpreter_constraints=partition.interpreter_constraints,
            main=bandit.main,
        ),
    )

    config_files_get = Get(ConfigFiles, ConfigFilesRequest,
                           bandit.config_request)
    source_files_get = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    bandit_pex, config_files, source_files = await MultiGet(
        bandit_pex_get, config_files_get, source_files_get)

    input_digest = await Get(
        Digest,
        MergeDigests(
            (source_files.snapshot.digest, config_files.snapshot.digest)))

    report_file_name = "bandit_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            bandit_pex,
            argv=generate_args(source_files=source_files,
                               bandit=bandit,
                               report_file_name=report_file_name),
            input_digest=input_digest,
            description=f"Run Bandit on {pluralize(len(partition.field_sets), 'file')}.",
            output_files=(report_file_name, ) if report_file_name else None,
            level=LogLevel.DEBUG,
        ),
    )

    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Bandit report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
Code Example #29
async def build_local_dists(
    request: LocalDistsPexRequest,
) -> LocalDistsPex:

    transitive_targets = await Get(TransitiveTargets, TransitiveTargetsRequest(request.addresses))
    applicable_targets = [
        tgt for tgt in transitive_targets.closure if PythonDistributionFieldSet.is_applicable(tgt)
    ]

    python_dist_field_sets = [
        PythonDistributionFieldSet.create(target) for target in applicable_targets
    ]

    dists = await MultiGet(
        [Get(BuiltPackage, PackageFieldSet, field_set) for field_set in python_dist_field_sets]
    )

    # The primary use-case of the "local dists" feature is to support consuming native extensions
    # as wheels without having to publish them first.
    # It doesn't seem very useful to consume locally-built sdists, and it makes it hard to
    # reason about possible sys.path collisions between the in-repo sources and whatever the
    # sdist will place on the sys.path when it's installed.
    # So for now we simply ignore sdists, with a warning if necessary.
    provided_files = set()
    wheels = []

    all_contents = await MultiGet(Get(DigestContents, Digest, dist.digest) for dist in dists)
    for dist, contents, tgt in zip(dists, all_contents, applicable_targets):
        artifacts = {(a.relpath or "") for a in dist.artifacts}
        # A given local dist might build a wheel and an sdist (and maybe other artifacts -
        # we don't know what setup command was run...)
        # As long as there is a wheel, we can ignore the other artifacts.
        wheel = next((art for art in artifacts if art.endswith(".whl")), None)
        if wheel:
            wheel_content = next(content for content in contents if content.path == wheel)
            wheels.append(wheel)
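            # A .whl is just a zip archive; listing its contents in memory
            # records every file this dist will provide once installed.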
            buf = BytesIO()
            buf.write(wheel_content.content)
            buf.seek(0)
            with zipfile.ZipFile(buf) as zf:
                provided_files.update(zf.namelist())
        else:
            logger.warning(
                f"Encountered a dependency on the {tgt.alias} target at {tgt.address.spec}, but "
                "this target does not produce a Python wheel artifact. Therefore this target's "
                "code will be used directly from sources, without a distribution being built, "
                "and therefore any native extensions in it will not be built.\n\n"
                f"See {doc_url('python-distributions')} for details on how to set up a {tgt.alias} "
                "target to produce a wheel."
            )

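    # Merge every dist's outputs, then subset down to just the built wheels;
    # sdists and any other artifacts are dropped here.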
    dists_digest = await Get(Digest, MergeDigests([dist.digest for dist in dists]))
    wheels_digest = await Get(Digest, DigestSubset(dists_digest, PathGlobs(["**/*.whl"])))

    dists_pex = await Get(
        Pex,
        PexRequest(
            output_filename="local_dists.pex",
            requirements=PexRequirements(wheels),
            interpreter_constraints=request.interpreter_constraints,
            additional_inputs=wheels_digest,
            internal_only=request.internal_only,
        ),
    )

    # We check source roots in reverse lexicographic order,
    # so we'll find the innermost root that matches.
    source_roots = list(reversed(sorted(request.sources.source_roots)))
    remaining_sources = set(request.sources.source_files.files)
    unrooted_files_set = set(request.sources.source_files.unrooted_files)
    for source in request.sources.source_files.files:
        if source not in unrooted_files_set:
            for source_root in source_roots:
                source_relpath = fast_relpath_optional(source, source_root)
                if source_relpath is not None and source_relpath in provided_files:
                    remaining_sources.remove(source)
    remaining_sources_snapshot = await Get(
        Snapshot,
        DigestSubset(
            request.sources.source_files.snapshot.digest, PathGlobs(sorted(remaining_sources))
        ),
    )
    subtracted_sources = PythonSourceFiles(
        SourceFiles(remaining_sources_snapshot, request.sources.source_files.unrooted_files),
        request.sources.source_roots,
    )

    return LocalDistsPex(dists_pex, subtracted_sources)
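The reverse lexicographic scan above is worth a concrete illustration. The following is a self-contained sketch with a simplified, hypothetical stand-in for Pants' fast_relpath_optional helper; it shows why sorting the roots in reverse finds the innermost matching root first.

import os
from typing import Optional

def fast_relpath_optional(path: str, start: str) -> Optional[str]:
    # Hypothetical simplification of the Pants helper: the path relative to
    # `start` if `path` lives under it, else None.
    prefix = start.rstrip("/") + "/"
    return os.path.relpath(path, start) if path.startswith(prefix) else None

# "src/python/project" sorts after "src/python", so reversing the sort order
# checks the innermost root first.
source_roots = sorted(["src/python", "src/python/project"], reverse=True)
for root in source_roots:
    rel = fast_relpath_optional("src/python/project/app.py", root)
    if rel is not None:
        print(f"{root} -> {rel}")  # src/python/project -> app.py
        break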
Code Example #30
File: coverage_py.py Project: rhysyngsun/pants
async def generate_coverage_reports(
    merged_coverage_data: MergedCoverageData,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage_subsystem: CoverageSubsystem,
    all_used_addresses: Addresses,
) -> CoverageReports:
    """Takes all Python test results and generates a single coverage report."""
    transitive_targets = await Get(
        TransitiveTargets, TransitiveTargetsRequest(all_used_addresses))
    sources = await Get(
        PythonSourceFiles,
        # Coverage sometimes includes non-Python files in its `.coverage` data. We need to
        # ensure that they're present when generating the report, so we include the same
        # set of files that `pytest_runner.py` does.
        PythonSourceFilesRequest(transitive_targets.closure,
                                 include_files=True,
                                 include_resources=True),
    )
    input_digest = await Get(
        Digest,
        MergeDigests((
            merged_coverage_data.coverage_data,
            coverage_config.digest,
            sources.source_files.snapshot.digest,
        )),
    )

    pex_processes = []
    report_types = []
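    # Snapshot the merged `.coverage` data file up front; the RAW report type
    # materializes it as-is rather than running a `coverage` subcommand.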
    result_snapshot = await Get(Snapshot, Digest,
                                merged_coverage_data.coverage_data)
    coverage_reports: List[CoverageReport] = []
    for report_type in coverage_subsystem.reports:
        if report_type == CoverageReportType.RAW:
            coverage_reports.append(
                FilesystemCoverageReport(
                    report_type=CoverageReportType.RAW.value,
                    result_snapshot=result_snapshot,
                    directory_to_materialize_to=coverage_subsystem.output_dir,
                    report_file=coverage_subsystem.output_dir / ".coverage",
                ))
            continue
        report_types.append(report_type)
        output_file = (
            f"coverage.{report_type.value}"
            if report_type in {CoverageReportType.XML, CoverageReportType.JSON}
            else None
        )
        pex_processes.append(
            VenvPexProcess(
                coverage_setup.pex,
                argv=(report_type.report_name,
                      f"--rcfile={coverage_config.path}"),
                input_digest=input_digest,
                output_directories=("htmlcov", )
                if report_type == CoverageReportType.HTML else None,
                output_files=(output_file, ) if output_file else None,
                description=f"Generate Pytest {report_type.report_name} coverage report.",
                level=LogLevel.DEBUG,
            ))
    results = await MultiGet(
        Get(ProcessResult, VenvPexProcess, process)
        for process in pex_processes)
    result_stdouts = tuple(res.stdout for res in results)
    result_snapshots = await MultiGet(
        Get(Snapshot, Digest, res.output_digest) for res in results)

    coverage_reports.extend(
        _get_coverage_report(coverage_subsystem.output_dir, report_type,
                             stdout, snapshot)
        for (report_type, stdout,
             snapshot) in zip(report_types, result_stdouts, result_snapshots))

    return CoverageReports(tuple(coverage_reports))
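As a rough guide (an assumption based on coverage.py's CLI, not something stated in the snippet), the argv built above corresponds to invoking coverage.py's report subcommands directly: `coverage xml` writes coverage.xml, `coverage json` writes coverage.json, `coverage html` writes the htmlcov/ directory, and `coverage report` prints a summary to stdout.

import subprocess

# Roughly what each VenvPexProcess above executes, expressed as direct
# coverage.py CLI calls (the rcfile path here is illustrative):
for subcommand in ("report", "xml", "json", "html"):
    subprocess.run(["coverage", subcommand, "--rcfile=.coveragerc"], check=True)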