Example #1
def main_filespecs():
  build_root, goals, args = pop_build_root_and_goals(
    '[build root path] [filespecs]*', sys.argv[1:])

  # Create PathGlobs for each arg relative to the buildroot.
  path_globs = PathGlobs.create('', include=args, exclude=[])
  visualize_build_request(build_root, goals, path_globs)
Example #2
def descendant_addresses_to_globs(address_mapper, descendant_addresses):
    """Given a DescendantAddresses object, return a PathGlobs object for matching build files.

    This allows us to limit our AddressFamily requests to directories that contain build files.
    """
    pattern = address_mapper.build_pattern
    return PathGlobs.create_from_specs(descendant_addresses.directory, [pattern, join("**", pattern)])
Example #3
    def path_globs(self):
        """Creates a `PathGlobs` object for the paths matched by these Sources.

        This field may be projected to request the content of the files for this Sources object.
        """
        return PathGlobs.create(
            self.spec_path, files=self.files, globs=self.globs, rglobs=self.rglobs, zglobs=self.zglobs
        )
Example #4
  def test_gather_snapshot_of_pathglobs(self):
    project_tree = self.mk_example_fs_tree()
    scheduler = self.mk_scheduler(project_tree=project_tree, tasks=create_snapshot_tasks(project_tree))
    snapshot_archive_root = os.path.join(project_tree.build_root, '.snapshots')

    request = scheduler.execution_request([Snapshot],
                                          [PathGlobs.create('', globs=['fs_test/a/b/*'])])
    LocalSerialEngine(scheduler).reduce(request)

    root_entries = scheduler.root_entries(request).items()
    self.assertEquals(1, len(root_entries))
    state = self.assertFirstEntryIsReturn(root_entries, scheduler)
    snapshot = state.value
    self.assert_archive_files(['fs_test/a/b/1.txt', 'fs_test/a/b/2'], snapshot,
                              snapshot_archive_root)
Example #5
  def test_gather_snapshot_of_pathglobs(self):
    project_tree = self.mk_example_fs_tree()
    scheduler = self.mk_scheduler(project_tree=project_tree)
    empty_step_context = StepContext(node_builder=None, project_tree=project_tree, node_states=[], inline_nodes=False)

    request = scheduler.execution_request([Snapshot],
                                          [PathGlobs.create('', globs=['fs_test/a/b/*'])])
    LocalSerialEngine(scheduler).reduce(request)

    root_entries = scheduler.root_entries(request).items()
    self.assertEquals(1, len(root_entries))
    state = self.assertFirstEntryIsReturn(root_entries, scheduler)
    snapshot = state.value
    self.assert_archive_files(['fs_test/a/b/1.txt', 'fs_test/a/b/2'], snapshot,
                              empty_step_context)
Example #6
def _spec_to_globs(address_mapper, specs):
  """Given a Specs object, return a PathGlobs object for the build files that it matches."""
  patterns = set()
  for spec in specs.dependencies:
    if type(spec) is DescendantAddresses:
      patterns.update(join(spec.directory, '**', pattern)
                      for pattern in address_mapper.build_patterns)
    elif type(spec) in (SiblingAddresses, SingleAddress):
      patterns.update(join(spec.directory, pattern)
                      for pattern in address_mapper.build_patterns)
    elif type(spec) is AscendantAddresses:
      patterns.update(join(f, pattern)
                      for pattern in address_mapper.build_patterns
                      for f in _recursive_dirname(spec.directory))
    else:
      raise ValueError('Unrecognized Spec type: {}'.format(spec))
  return PathGlobs.create('', include=patterns, exclude=address_mapper.build_ignore_patterns)
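Both this example and the next rely on a `_recursive_dirname` helper to turn an AscendantAddresses spec into a pattern for every ancestor directory. A minimal sketch of the assumed behavior (a hypothetical reimplementation, not necessarily the project's exact code):

import os


def _recursive_dirname(directory):
  """Yield the directory itself and each of its ancestors, ending with '' (the build root)."""
  while directory:
    yield directory
    directory = os.path.dirname(directory)
  yield ''

# e.g. list(_recursive_dirname('a/b/c')) == ['a/b/c', 'a/b', 'a', '']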
Example #7
def spec_to_globs(address_mapper, spec):
  """Given a Spec object, return a PathGlobs object for the build files that it matches."""
  if type(spec) is DescendantAddresses:
    directory = spec.directory
    patterns = [join('**', pattern) for pattern in address_mapper.build_patterns]
  elif type(spec) in (SiblingAddresses, SingleAddress):
    directory = spec.directory
    patterns = address_mapper.build_patterns
  elif type(spec) is AscendantAddresses:
    directory = ''
    patterns = [
      join(f, pattern)
      for pattern in address_mapper.build_patterns
      for f in _recursive_dirname(spec.directory)
    ]
  else:
    raise ValueError('Unrecognized Spec type: {}'.format(spec))
  return PathGlobs.create(directory, include=patterns, exclude=[])
Example #8
  def test_failed_command_propagates_throw(self):
    scheduler = self.mk_scheduler_in_example_fs([
      # subject to files / product of subject to files for snapshot.
      SnapshottedProcess.create(product_type=Concatted,
                                binary_type=ShellFailCommand,
                                input_selectors=tuple(),
                                input_conversion=empty_process_request,
                                output_conversion=fail_process_result),
      SingletonRule(ShellFailCommand, ShellFailCommand()),
    ])

    request = scheduler.execution_request([Concatted],
                                          [PathGlobs.create('', include=['fs_test/a/b/*'])])
    root_entries = scheduler.execute(request).root_products

    self.assertEquals(1, len(root_entries))
    self.assertFirstEntryIsThrow(root_entries,
                                 in_msg='Running ShellFailCommand failed with non-zero exit code: 1')
Example #9
  def test_failed_output_conversion_propagates_throw(self):
    scheduler = self.mk_scheduler_in_example_fs([
      # subject to files / product of subject to files for snapshot.
      SnapshottedProcess.create(product_type=Concatted,
                                binary_type=ShellCatToOutFile,
                                input_selectors=(Select(Snapshot),),
                                input_conversion=file_list_to_args_for_cat_with_snapshot_subjects_and_output_file,
                                output_conversion=fail_process_result),
      SingletonRule(ShellCatToOutFile, ShellCatToOutFile()),
    ])

    request = scheduler.execution_request([Concatted],
                                          [PathGlobs.create('', include=['fs_test/a/b/*'])])
    root_entries = scheduler.execute(request).root_products

    self.assertEquals(1, len(root_entries))
    self.assertFirstEntryIsThrow(root_entries,
                                 in_msg='Failed in output conversion!')
Example #10
def parse_address_family(address_mapper, directory):
  """Given an AddressMapper and a directory, return an AddressFamily.

  The AddressFamily may be empty, but it will not be None.
  """
  patterns = tuple(join(directory.path, p) for p in address_mapper.build_patterns)
  path_globs = PathGlobs.create('',
                                include=patterns,
                                exclude=address_mapper.build_ignore_patterns)
  files_content = yield Get(FilesContent, PathGlobs, path_globs)

  if not files_content:
    raise ResolveError('Directory "{}" does not contain build files.'.format(directory.path))
  address_maps = []
  for filecontent_product in files_content.dependencies:
    address_maps.append(AddressMap.parse(filecontent_product.path,
                                         filecontent_product.content,
                                         address_mapper.parser))
  yield AddressFamily.create(directory.path, address_maps)
Example #11
  def test_integration_concat_with_snapshot_subjects_test(self):
    scheduler = self.mk_scheduler_in_example_fs([
      # subject to files / product of subject to files for snapshot.
      SnapshottedProcess.create(product_type=Concatted,
                                binary_type=ShellCatToOutFile,
                                input_selectors=(Select(Snapshot),),
                                input_conversion=file_list_to_args_for_cat_with_snapshot_subjects_and_output_file,
                                output_conversion=process_result_to_concatted_from_outfile),
      SingletonRule(ShellCatToOutFile, ShellCatToOutFile()),
    ])

    request = scheduler.execution_request([Concatted],
                                          [PathGlobs.create('', include=['fs_test/a/b/*'])])

    root_entries = scheduler.execute(request).root_products
    self.assertEquals(1, len(root_entries))
    state = self.assertFirstEntryIsReturn(root_entries, scheduler)
    concatted = state.value

    self.assertEqual(Concatted('one\ntwo\n'), concatted)
Example #12
  def test_javac_compilation_example(self):
    sources = PathGlobs.create('', files=['scheduler_inputs/src/java/simple/Simple.java'])

    scheduler = self.mk_scheduler_in_example_fs([
      SnapshottedProcess.create(ClasspathEntry,
                                Javac,
                                (Select(Files), Select(Snapshot), SelectLiteral(JavaOutputDir('build'), JavaOutputDir)),
                                java_sources_to_javac_args,
                                process_result_to_classpath_entry),
      [Javac, [], Javac]
    ])

    request = scheduler.execution_request(
      [ClasspathEntry],
      [sources])
    LocalSerialEngine(scheduler).reduce(request)

    root_entries = scheduler.root_entries(request).items()
    self.assertEquals(1, len(root_entries))
    state = self.assertFirstEntryIsReturn(root_entries, scheduler)
    classpath_entry = state.value
    self.assertIsInstance(classpath_entry, ClasspathEntry)
    self.assertTrue(os.path.exists(os.path.join(classpath_entry.path, 'simple', 'Simple.class')))
Example #13
  def test_javac_compilation_example(self):
    sources = PathGlobs.create('', include=['scheduler_inputs/src/java/simple/Simple.java'])

    scheduler = self.mk_scheduler_in_example_fs([
      SnapshottedProcess.create(ClasspathEntry,
                                Javac,
                                (Select(Snapshot), Select(JavaOutputDir)),
                                java_sources_to_javac_args,
                                process_result_to_classpath_entry),
      SingletonRule(JavaOutputDir, JavaOutputDir('build')),
      SingletonRule(Javac, Javac()),
    ])

    request = scheduler.execution_request(
      [ClasspathEntry],
      [sources])
    root_entries = scheduler.execute(request).root_products

    self.assertEquals(1, len(root_entries))
    state = self.assertFirstEntryIsReturn(root_entries, scheduler)
    classpath_entry = state.value
    self.assertIsInstance(classpath_entry, ClasspathEntry)
    self.assertTrue(os.path.exists(os.path.join(classpath_entry.path, 'simple', 'Simple.class')))
Example #14
    def console_output(self, targets):
        input_snapshots = tuple(
            target.sources_snapshot(scheduler=self.context._scheduler)
            for target in targets)
        input_files = {
            f
            for snapshot in input_snapshots for f in snapshot.files
        }

        # TODO: Work out a nice library-like utility for writing an argfile, as this will be common.
        with temporary_dir() as tmpdir:
            list_file = os.path.join(tmpdir, "input_files_list")
            with open(list_file, "w") as list_file_out:
                for input_file in sorted(input_files):
                    list_file_out.write(input_file)
                    list_file_out.write("\n")
            list_file_snapshot = self.context._scheduler.capture_snapshots(
                (PathGlobsAndRoot(
                    PathGlobs(("input_files_list", )),
                    tmpdir,
                ), ))[0]

        cloc_binary = self.context._scheduler.product_request(
            DownloadedExternalTool,
            [ClocBinary.global_instance().get_request(Platform.current)])[0]
        cloc_snapshot = self.context._scheduler.product_request(
            Snapshot, [cloc_binary.digest])[0]

        input_digest = self.context._scheduler.merge_directories(
            tuple(s.digest for s in input_snapshots + (
                cloc_snapshot,
                list_file_snapshot,
            )))

        cmd = (
            "/usr/bin/perl",
            cloc_binary.exe,
            "--skip-uniqueness",
            "--ignored=ignored",
            "--list-file=input_files_list",
            "--report-file=report",
        )

        # The cloc script reaches into $PATH to look up perl. Let's assume it's in /usr/bin.
        req = Process(
            argv=cmd,
            input_files=input_digest,
            output_files=("ignored", "report"),
            description="cloc",
        )
        exec_result = self.context.execute_process_synchronously_or_raise(
            req, "cloc", (WorkUnitLabel.TOOL, ))

        files_content_tuple = self.context._scheduler.product_request(
            FilesContent, [exec_result.output_digest])[0].dependencies

        files_content = {
            fc.path: fc.content.decode()
            for fc in files_content_tuple
        }
        for line in files_content["report"].split("\n"):
            yield line

        if self.get_options().ignored:
            yield "Ignored the following files:"
            for line in files_content["ignored"].split("\n"):
                yield line
Example #15
async def merge_coverage_data(
    data_collection: PytestCoverageDataCollection,
    coverage_setup: CoverageSetup,
    coverage_config: CoverageConfig,
    coverage: CoverageSubsystem,
    source_roots: AllSourceRoots,
) -> MergedCoverageData:
    if len(data_collection) == 1 and not coverage.global_report:
        coverage_data = data_collection[0]
        return MergedCoverageData(coverage_data.digest,
                                  (coverage_data.address, ))

    coverage_digest_gets = []
    coverage_data_file_paths = []
    addresses = []
    for data in data_collection:
        # We prefix each .coverage file with its corresponding address to avoid collisions.
        coverage_digest_gets.append(
            Get(Digest,
                AddPrefix(data.digest, prefix=data.address.path_safe_spec)))
        coverage_data_file_paths.append(
            f"{data.address.path_safe_spec}/.coverage")
        addresses.append(data.address)

    if coverage.global_report:
        # It's important to set the `branch` value in the empty base report to the value it will
        # have when running on real inputs, so that the reports are of the same type, and can be
        # merged successfully. Otherwise we may get "Can't combine arc data with line data" errors.
        # See https://github.com/pantsbuild/pants/issues/14542 .
        config_contents = await Get(DigestContents, Digest,
                                    coverage_config.digest)
        branch = get_branch_value_from_config(
            config_contents[0]) if config_contents else False
        global_coverage_base_dir = PurePath("__global_coverage__")
        global_coverage_config_path = global_coverage_base_dir / "pyproject.toml"
        global_coverage_config_content = toml.dumps({
            "tool": {
                "coverage": {
                    "run": {
                        "relative_files":
                        True,
                        "source":
                        [source_root.path for source_root in source_roots],
                        "branch":
                        branch,
                    }
                }
            }
        }).encode()

        no_op_exe_py_path = global_coverage_base_dir / "no-op-exe.py"

        all_sources_digest, no_op_exe_py_digest, global_coverage_config_digest = await MultiGet(
            Get(
                Digest,
                PathGlobs(globs=[
                    f"{source_root.path}/**/*.py"
                    for source_root in source_roots
                ]),
            ),
            Get(
                Digest,
                CreateDigest(
                    [FileContent(path=str(no_op_exe_py_path), content=b"")])),
            Get(
                Digest,
                CreateDigest([
                    FileContent(
                        path=str(global_coverage_config_path),
                        content=global_coverage_config_content,
                    ),
                ]),
            ),
        )
        extra_sources_digest = await Get(
            Digest, MergeDigests((all_sources_digest, no_op_exe_py_digest)))
        input_digest = await Get(
            Digest,
            MergeDigests(
                (extra_sources_digest, global_coverage_config_digest)))
        result = await Get(
            ProcessResult,
            VenvPexProcess(
                coverage_setup.pex,
                argv=("run", "--rcfile", str(global_coverage_config_path),
                      str(no_op_exe_py_path)),
                input_digest=input_digest,
                output_files=(".coverage", ),
                description="Create base global Pytest coverage report.",
                level=LogLevel.DEBUG,
            ),
        )
        coverage_digest_gets.append(
            Get(
                Digest,
                AddPrefix(digest=result.output_digest,
                          prefix=str(global_coverage_base_dir))))
        coverage_data_file_paths.append(
            str(global_coverage_base_dir / ".coverage"))
    else:
        extra_sources_digest = EMPTY_DIGEST

    input_digest = await Get(
        Digest, MergeDigests(await MultiGet(coverage_digest_gets)))
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            coverage_setup.pex,
            # We tell combine to keep the original input files, to aid debugging in the sandbox.
            argv=("combine", "--keep", *sorted(coverage_data_file_paths)),
            input_digest=input_digest,
            output_files=(".coverage", ),
            description=f"Merge {len(coverage_data_file_paths)} Pytest coverage reports.",
            level=LogLevel.DEBUG,
        ),
    )
    return MergedCoverageData(
        await Get(Digest,
                  MergeDigests((result.output_digest, extra_sources_digest))),
        tuple(addresses),
    )
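The prefix-then-merge step above (see the comment about avoiding collisions between the per-address `.coverage` files) is a reusable pattern. A minimal sketch of just that fragment, assuming the standard `pants.engine.fs` intrinsics and that it runs inside an engine rule:

from pants.engine.fs import AddPrefix, Digest, MergeDigests
from pants.engine.rules import Get, MultiGet


async def merge_with_unique_prefixes(digests_and_prefixes) -> Digest:
    # Give each digest its own directory so identically named files cannot collide,
    # then merge the prefixed digests into a single input digest.
    prefixed = await MultiGet(
        Get(Digest, AddPrefix(digest, prefix=prefix)) for digest, prefix in digests_and_prefixes
    )
    return await Get(Digest, MergeDigests(prefixed))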
Example #16
async def compile_avro_source(
    request: CompileAvroSourceRequest,
    jdk: InternalJdk,
    avro_tools: AvroSubsystem,
) -> CompiledAvroSource:
    output_dir = "_generated_files"
    toolcp_relpath = "__toolcp"

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 AvroToolLockfileSentinel())
    tool_classpath, subsetted_input_digest, empty_output_dir = await MultiGet(
        Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
        Get(
            Digest,
            DigestSubset(
                request.digest,
                PathGlobs(
                    [request.path],
                    glob_match_error_behavior=GlobMatchErrorBehavior.error,
                    conjunction=GlobExpansionConjunction.all_match,
                    description_of_origin="the Avro source file name",
                ),
            ),
        ),
        Get(Digest, CreateDigest([Directory(output_dir)])),
    )

    input_digest = await Get(
        Digest,
        MergeDigests([
            subsetted_input_digest,
            empty_output_dir,
        ]),
    )

    extra_immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
    }

    def make_avro_process(
        args: Iterable[str],
        *,
        overridden_input_digest: Digest | None = None,
        overridden_output_dir: str | None = None,
    ) -> JvmProcess:

        return JvmProcess(
            jdk=jdk,
            argv=(
                "org.apache.avro.tool.Main",
                *args,
            ),
            classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
            input_digest=(overridden_input_digest if overridden_input_digest
                          is not None else input_digest),
            extra_jvm_options=avro_tools.jvm_options,
            extra_immutable_input_digests=extra_immutable_input_digests,
            extra_nailgun_keys=extra_immutable_input_digests,
            description="Generating Java sources from Avro source.",
            level=LogLevel.DEBUG,
            output_directories=(overridden_output_dir
                                if overridden_output_dir else output_dir, ),
        )

    path = PurePath(request.path)
    if path.suffix == ".avsc":
        result = await Get(
            ProcessResult,
            JvmProcess,
            make_avro_process(["compile", "schema", request.path, output_dir]),
        )
    elif path.suffix == ".avpr":
        result = await Get(
            ProcessResult,
            JvmProcess,
            make_avro_process(
                ["compile", "protocol", request.path, output_dir]),
        )
    elif path.suffix == ".avdl":
        idl_output_dir = "__idl"
        avpr_path = os.path.join(idl_output_dir,
                                 str(path.with_suffix(".avpr")))
        idl_output_dir_digest = await Get(
            Digest, CreateDigest([Directory(os.path.dirname(avpr_path))]))
        idl_input_digest = await Get(
            Digest, MergeDigests([input_digest, idl_output_dir_digest]))
        idl_result = await Get(
            ProcessResult,
            JvmProcess,
            make_avro_process(
                ["idl", request.path, avpr_path],
                overridden_input_digest=idl_input_digest,
                overridden_output_dir=idl_output_dir,
            ),
        )
        generated_files_dir = await Get(Digest,
                                        CreateDigest([Directory(output_dir)]))
        protocol_input_digest = await Get(
            Digest,
            MergeDigests([idl_result.output_digest, generated_files_dir]))
        result = await Get(
            ProcessResult,
            JvmProcess,
            make_avro_process(
                ["compile", "protocol", avpr_path, output_dir],
                overridden_input_digest=protocol_input_digest,
            ),
        )
    else:
        raise AssertionError(
            f"Avro backend does not support files with extension `{path.suffix}`: {path}"
        )

    normalized_digest = await Get(
        Digest, RemovePrefix(result.output_digest, output_dir))
    return CompiledAvroSource(normalized_digest)
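The `DigestSubset` request above is how the input digest gets narrowed to just the one source file before the tool runs. A stripped-down sketch of that step in isolation (the helper name is hypothetical; it assumes the same `pants.engine.fs` types and an enclosing engine rule):

from pants.engine.fs import Digest, DigestSubset, GlobMatchErrorBehavior, PathGlobs
from pants.engine.rules import Get


async def subset_to_single_file(digest: Digest, relpath: str) -> Digest:
    # Error out (rather than silently yielding an empty digest) if the path is not in the digest.
    return await Get(
        Digest,
        DigestSubset(
            digest,
            PathGlobs(
                [relpath],
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
                description_of_origin=f"the expected input file {relpath}",
            ),
        ),
    )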
Example #17
def get_contents(globs: Iterable[str]) -> Set[FileContent]:
    return set(rule_runner.request(DigestContents, [PathGlobs(globs)]))
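A small variation on the same pattern, decoding the matched files into a path-to-text mapping (a sketch assuming the same `rule_runner` fixture; the helper name and import paths are assumptions):

from typing import Dict, Iterable

from pants.engine.fs import DigestContents, PathGlobs


def get_text_contents(rule_runner, globs: Iterable[str]) -> Dict[str, str]:
    # DigestContents is a sequence of FileContent(path, content) entries.
    digest_contents = rule_runner.request(DigestContents, [PathGlobs(globs)])
    return {fc.path: fc.content.decode() for fc in digest_contents}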
Example #18
async def determine_finalized_setup_kwargs(request: GenerateSetupPyRequest) -> FinalizedSetupKwargs:
    exported_target = request.exported_target
    sources = request.sources
    requirements = await Get(ExportedTargetRequirements, DependencyOwner(exported_target))

    # Generate the kwargs for the setup() call. In addition to using the kwargs that are either
    # explicitly provided or generated via a user's plugin, we add additional kwargs based on the
    # resolved requirements and sources.
    target = exported_target.target
    resolved_setup_kwargs = await Get(SetupKwargs, ExportedTarget, exported_target)
    setup_kwargs = resolved_setup_kwargs.kwargs.copy()

    # NB: We are careful to not overwrite these values, but we also don't expect them to have been
    # set. The user must have gone out of their way to use a `SetupKwargs` plugin, and to have
    # specified `SetupKwargs(_allow_banned_keys=True)`.
    setup_kwargs.update(
        {
            "packages": (*sources.packages, *(setup_kwargs.get("packages", []))),
            "namespace_packages": (
                *sources.namespace_packages,
                *setup_kwargs.get("namespace_packages", []),
            ),
            "package_data": {**dict(sources.package_data), **setup_kwargs.get("package_data", {})},
            "install_requires": (*requirements, *setup_kwargs.get("install_requires", [])),
        }
    )

    long_description_path = exported_target.target.get(LongDescriptionPathField).value

    if "long_description" in setup_kwargs and long_description_path:
        raise InvalidFieldException(
            f"The {repr(LongDescriptionPathField.alias)} field of the "
            f"target {exported_target.target.address} is set, but "
            f"'long_description' is already provided explicitly in "
            f"the provides=setup_py() field. You may only set one "
            f"of these two values."
        )

    if long_description_path:
        digest_contents = await Get(
            DigestContents,
            PathGlobs(
                [long_description_path],
                description_of_origin=(
                    f"the {LongDescriptionPathField.alias} "
                    f"field of {exported_target.target.address}"
                ),
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
            ),
        )
        long_description_content = digest_contents[0].content.decode()
        setup_kwargs.update({"long_description": long_description_content})

    # Resolve entry points from python_distribution(entry_points=...) and from
    # python_distribution(provides=setup_py(entry_points=...)).
    resolved_from_entry_points_field, resolved_from_provides_field = await MultiGet(
        Get(
            ResolvedPythonDistributionEntryPoints,
            ResolvePythonDistributionEntryPointsRequest(
                entry_points_field=exported_target.target.get(PythonDistributionEntryPointsField)
            ),
        ),
        Get(
            ResolvedPythonDistributionEntryPoints,
            ResolvePythonDistributionEntryPointsRequest(
                provides_field=exported_target.target.get(PythonProvidesField)
            ),
        ),
    )

    def _format_entry_points(
        resolved: ResolvedPythonDistributionEntryPoints,
    ) -> dict[str, dict[str, str]]:
        return {
            category: {ep_name: ep_val.entry_point.spec for ep_name, ep_val in entry_points.items()}
            for category, entry_points in resolved.val.items()
        }

    # Gather entry points with source description for any error messages when merging them.
    exported_addr = exported_target.target.address
    entry_point_sources = {
        f"{exported_addr}'s field `entry_points`": _format_entry_points(
            resolved_from_entry_points_field
        ),
        f"{exported_addr}'s field `provides=setup_py()`": _format_entry_points(
            resolved_from_provides_field
        ),
    }

    # Merge all collected entry points and add them to the dist's entry points.
    all_entry_points = merge_entry_points(*list(entry_point_sources.items()))
    if all_entry_points:
        setup_kwargs["entry_points"] = {
            category: [f"{name} = {entry_point}" for name, entry_point in entry_points.items()]
            for category, entry_points in all_entry_points.items()
        }

    return FinalizedSetupKwargs(setup_kwargs, address=target.address)
Example #19
def read_file() -> str:
    digest_contents = rule_runner.request(DigestContents, [PathGlobs(["4.txt"])])
    assert len(digest_contents) == 1
    return digest_contents[0].content.decode()
Example #20
File: fs_test.py Project: wiwa/pants
    def test_remove_prefix(self) -> None:
        # Set up files:
        relevant_files = (
            "characters/dark_tower/roland",
            "characters/dark_tower/susannah",
        )
        all_files = (
            "books/dark_tower/gunslinger",
            "characters/altered_carbon/kovacs",
            *relevant_files,
            "index",
        )

        with temporary_dir() as temp_dir:
            safe_file_dump(os.path.join(temp_dir, "index"),
                           "books\ncharacters\n")
            safe_file_dump(
                os.path.join(temp_dir, "characters", "altered_carbon",
                             "kovacs"),
                "Envoy",
                makedirs=True,
            )

            tower_dir = os.path.join(temp_dir, "characters", "dark_tower")
            safe_file_dump(os.path.join(tower_dir, "roland"),
                           "European Burmese",
                           makedirs=True)
            safe_file_dump(os.path.join(tower_dir, "susannah"),
                           "Not sure actually",
                           makedirs=True)

            safe_file_dump(
                os.path.join(temp_dir, "books", "dark_tower", "gunslinger"),
                "1982",
                makedirs=True,
            )

            snapshot, snapshot_with_extra_files = self.scheduler.capture_snapshots(
                (
                    PathGlobsAndRoot(PathGlobs(["characters/dark_tower/*"]),
                                     temp_dir),
                    PathGlobsAndRoot(PathGlobs(["**"]), temp_dir),
                ))
            # Check that we got the full snapshots that we expect
            assert snapshot.files == relevant_files
            assert snapshot_with_extra_files.files == all_files

            # Strip empty prefix:
            zero_prefix_stripped_digest = self.request_single_product(
                Digest,
                RemovePrefix(snapshot.digest, ""),
            )
            assert snapshot.digest == zero_prefix_stripped_digest

            # Strip a non-empty prefix shared by all files:
            stripped_digest = self.request_single_product(
                Digest,
                RemovePrefix(snapshot.digest, "characters/dark_tower"),
            )
            assert stripped_digest == Digest(
                fingerprint=
                "71e788fc25783c424db555477071f5e476d942fc958a5d06ffc1ed223f779a8c",
                serialized_bytes_length=162,
            )

            expected_snapshot = assert_single_element(
                self.scheduler.capture_snapshots(
                    (PathGlobsAndRoot(PathGlobs(["*"]), tower_dir), )))
            assert expected_snapshot.files == ("roland", "susannah")
            assert stripped_digest == expected_snapshot.digest

            # Try to strip a prefix which isn't shared by all files:
            with self.assertRaisesWithMessageContaining(
                    Exception,
                    "Cannot strip prefix characters/dark_tower from root directory Digest(Fingerprint<28c47f77"
                    "867f0c8d577d2ada2f06b03fc8e5ef2d780e8942713b26c5e3f434b8>, 243) - root directory "
                    "contained non-matching directory named: books and file named: index",
            ):
                self.request_single_product(
                    Digest,
                    RemovePrefix(snapshot_with_extra_files.digest,
                                 "characters/dark_tower"),
                )
Example #21
File: fs_test.py Project: wiwa/pants
def path_globs(globs) -> PathGlobs:
    if isinstance(globs, PathGlobs):
        return globs
    return PathGlobs(globs)
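A usage sketch for the normalizer above: both call forms produce an equivalent `PathGlobs`, so callers can pass either raw glob strings or a pre-built object (this assumes `PathGlobs` keeps dataclass value equality, and the import path is an assumption):

from pants.engine.fs import PathGlobs

raw = path_globs(["src/**/*.py"])
prebuilt = path_globs(PathGlobs(["src/**/*.py"]))
assert raw == prebuilt  # equivalent either way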
Example #22
    def _compile_hermetic(self, jvm_options, ctx, classes_dir, jar_file,
                          compiler_bridge_classpath_entry,
                          dependency_classpath, scalac_classpath_entries):
        zinc_relpath = fast_relpath(self._zinc.zinc.path, get_buildroot())

        snapshots = [
            ctx.target.sources_snapshot(self.context._scheduler),
        ]

        # scala_library() targets with java_sources have circular dependencies on those java source
        # files, and we provide them to the same zinc command line that compiles the scala, so we need
        # to make sure those source files are available in the hermetic execution sandbox.
        java_sources_targets = getattr(ctx.target, 'java_sources', [])
        java_sources_snapshots = [
            tgt.sources_snapshot(self.context._scheduler)
            for tgt in java_sources_targets
        ]
        snapshots.extend(java_sources_snapshots)

        # Ensure the dependencies and compiler bridge jars are available in the execution sandbox.
        relevant_classpath_entries = (
            dependency_classpath + [
                compiler_bridge_classpath_entry,
                self._nailgun_server_classpath_entry(
                ),  # We include nailgun-server, to use it to start servers when needed from the hermetic execution case.
            ])
        directory_digests = [
            entry.directory_digest for entry in relevant_classpath_entries
            if entry.directory_digest
        ]
        if len(directory_digests) != len(relevant_classpath_entries):
            for dep in relevant_classpath_entries:
                if not dep.directory_digest:
                    raise AssertionError(
                        "ClasspathEntry {} didn't have a Digest, so won't be present for hermetic "
                        "execution of zinc".format(dep))
        directory_digests.extend(
            classpath_entry.directory_digest
            for classpath_entry in scalac_classpath_entries)

        if self._zinc.use_native_image:
            if jvm_options:
                raise ValueError(
                    "`{}` got non-empty jvm_options when running with a graal native-image, but this is "
                    "unsupported. jvm_options received: {}".format(
                        self.options_scope, safe_shlex_join(jvm_options)))
            native_image_path, native_image_snapshot = self._zinc.native_image(
                self.context)
            native_image_snapshots = [
                native_image_snapshot.directory_digest,
            ]
            scala_boot_classpath = [
                classpath_entry.path
                for classpath_entry in scalac_classpath_entries
            ] + [
                # We include rt.jar on the scala boot classpath because the compiler usually gets its
                # contents from the VM it is executing in, but not in the case of a native image. This
                # resolves a `object java.lang.Object in compiler mirror not found.` error.
                '.jdk/jre/lib/rt.jar',
                # The same goes for the jce.jar, which provides javax.crypto.
                '.jdk/jre/lib/jce.jar',
            ]
            image_specific_argv = [
                native_image_path,
                '-java-home',
                '.jdk',
                f'-Dscala.boot.class.path={os.pathsep.join(scala_boot_classpath)}',
                '-Dscala.usejavacp=true',
            ]
        else:
            native_image_snapshots = []
            # TODO: Lean on distribution for the bin/java appending here
            image_specific_argv = ['.jdk/bin/java'] + jvm_options + [
                '-cp', zinc_relpath, Zinc.ZINC_COMPILE_MAIN
            ]

        argfile_snapshot, = self.context._scheduler.capture_snapshots([
            PathGlobsAndRoot(
                PathGlobs([fast_relpath(ctx.args_file, get_buildroot())]),
                get_buildroot(),
            ),
        ])

        relpath_to_analysis = fast_relpath(ctx.analysis_file, get_buildroot())
        merged_local_only_scratch_inputs = self._compute_local_only_inputs(
            classes_dir, relpath_to_analysis, jar_file)

        # TODO: Extract something common from Executor._create_command to make the command line
        argv = image_specific_argv + [f'@{argfile_snapshot.files[0]}']

        merged_input_digest = self.context._scheduler.merge_directories(
            [self._zinc.zinc.directory_digest] +
            [s.directory_digest for s in snapshots] + directory_digests +
            native_image_snapshots + [
                self.post_compile_extra_resources_digest(ctx),
                argfile_snapshot.directory_digest
            ])

        # NB: We always capture the output jar, but if classpath jars are not used, we additionally
        # capture loose classes from the workspace. This is because we need to both:
        #   1) allow loose classes as an input to dependent compiles
        #   2) allow jars to be materialized at the end of the run.
        output_directories = () if self.get_options().use_classpath_jars else (
            classes_dir, )

        req = ExecuteProcessRequest(
            argv=tuple(argv),
            input_files=merged_input_digest,
            output_files=(jar_file, relpath_to_analysis),
            output_directories=output_directories,
            description=f"zinc compile for {ctx.target.address.spec}",
            unsafe_local_only_files_because_we_favor_speed_over_correctness_for_this_rule=merged_local_only_scratch_inputs,
            jdk_home=self._zinc.underlying_dist.home,
            is_nailgunnable=True,
        )
        res = self.context.execute_process_synchronously_or_raise(
            req, self.name(), [WorkUnitLabel.COMPILER])

        # TODO: Materialize as a batch in do_compile or somewhere
        self.context._scheduler.materialize_directory(
            DirectoryToMaterialize(res.output_directory_digest))

        # TODO: This should probably return a ClasspathEntry rather than a Digest
        return res.output_directory_digest
Example #23
File: pex.py Project: pyranja/pants
async def create_pex(
    request: PexRequest,
    pex_bin: DownloadedPexBin,
    python_setup: PythonSetup,
    python_repos: PythonRepos,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
    pex_build_environment: PexBuildEnvironment,
    platform: Platform,
    log_level: LogLevel,
) -> Pex:
    """Returns a PEX with the given requirements, optional entry point, optional interpreter
    constraints, and optional requirement constraints."""

    argv = [
        "--output-file",
        request.output_filename,
        # NB: In setting `--no-pypi`, we rely on the default value of `--python-repos-indexes`
        # including PyPI, which will override `--no-pypi` and result in using PyPI in the default
        # case. Why set `--no-pypi`, then? We need to do this so that
        # `--python-repos-repos=['custom_url']` will only point to that index and not include PyPI.
        "--no-pypi",
        *(f"--index={index}" for index in python_repos.indexes),
        *(f"--repo={repo}" for repo in python_repos.repos),
        *request.additional_args,
    ]

    # NB: If `--platform` is specified, this signals that the PEX should not be built locally.
    # `--interpreter-constraint` only makes sense in the context of building locally. These two
    # flags are mutually exclusive. See https://github.com/pantsbuild/pex/issues/957.
    if request.platforms:
        # TODO(#9560): consider validating that these platforms are valid with the interpreter
        #  constraints.
        argv.extend(request.platforms.generate_pex_arg_list())
    else:
        argv.extend(request.interpreter_constraints.generate_pex_arg_list())

    pex_debug = PexDebug(log_level)
    argv.extend(pex_debug.iter_pex_args())

    if python_setup.resolver_jobs:
        argv.extend(["--jobs", python_setup.resolver_jobs])

    if python_setup.manylinux:
        argv.extend(["--manylinux", python_setup.manylinux])
    else:
        argv.append("--no-manylinux")

    if request.entry_point is not None:
        argv.extend(["--entry-point", request.entry_point])

    if python_setup.requirement_constraints is not None:
        argv.extend(["--constraints", python_setup.requirement_constraints])

    source_dir_name = "source_files"
    argv.append(f"--sources-directory={source_dir_name}")

    argv.extend(request.requirements)

    constraint_file_snapshot = EMPTY_SNAPSHOT
    if python_setup.requirement_constraints is not None:
        constraint_file_snapshot = await Get[Snapshot](
            PathGlobs(
                [python_setup.requirement_constraints],
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin="the option `--python-setup-requirement-constraints`",
            )
        )

    sources_digest_as_subdir = await Get[Digest](
        AddPrefix(request.sources or EMPTY_DIGEST, source_dir_name)
    )
    additional_inputs_digest = request.additional_inputs or EMPTY_DIGEST

    merged_digest = await Get[Digest](
        MergeDigests(
            (
                pex_bin.digest,
                sources_digest_as_subdir,
                additional_inputs_digest,
                constraint_file_snapshot.digest,
            )
        )
    )

    # NB: PEX outputs are platform dependent so in order to get a PEX that we can use locally, without
    # cross-building, we specify that our PEX command be run on the current local platform. When we
    # support cross-building through CLI flags we can configure requests that build a PEX for our
    # local platform that are able to execute on a different platform, but for now in order to
    # guarantee correct build we need to restrict this command to execute on the same platform type
    # that the output is intended for. The correct way to interpret the keys
    # (execution_platform_constraint, target_platform_constraint) of this dictionary is "The output of
    # this command is intended for `target_platform_constraint` iff it is run on `execution_platform
    # constraint`".
    description = request.description
    if description is None:
        if request.requirements:
            description = (
                f"Building {request.output_filename} with "
                f"{pluralize(len(request.requirements), 'requirement')}: "
                f"{', '.join(request.requirements)}"
            )
        else:
            description = f"Building {request.output_filename}"
    process = MultiPlatformProcess(
        {
            (
                PlatformConstraint(platform.value),
                PlatformConstraint(platform.value),
            ): pex_bin.create_process(
                python_setup=python_setup,
                subprocess_encoding_environment=subprocess_encoding_environment,
                pex_build_environment=pex_build_environment,
                pex_args=argv,
                input_digest=merged_digest,
                description=description,
                output_files=(request.output_filename,),
            )
        }
    )

    result = await Get[ProcessResult](MultiPlatformProcess, process)

    if pex_debug.might_log:
        lines = result.stderr.decode().splitlines()
        if lines:
            pex_debug.log(f"Debug output from Pex for: {process}")
            for line in lines:
                pex_debug.log(line)

    return Pex(digest=result.output_digest, output_filename=request.output_filename)
Example #24
async def compile_wsdl_source(
    request: CompileWsdlSourceRequest,
    jdk: InternalJdk,
    jaxws: JaxWsTools,
) -> CompiledWsdlSource:
    output_dir = "_generated_files"
    toolcp_relpath = "__toolcp"

    lockfile_request = await Get(GenerateJvmLockfileFromTool,
                                 JaxWsToolsLockfileSentinel())
    tool_classpath, subsetted_input_digest, empty_output_dir = await MultiGet(
        Get(
            ToolClasspath,
            ToolClasspathRequest(lockfile=lockfile_request),
        ),
        Get(
            Digest,
            DigestSubset(
                request.digest,
                PathGlobs(
                    [request.path],
                    glob_match_error_behavior=GlobMatchErrorBehavior.error,
                    conjunction=GlobExpansionConjunction.all_match,
                    description_of_origin="the WSDL file name",
                ),
            ),
        ),
        Get(Digest, CreateDigest([Directory(output_dir)])),
    )

    input_digest = await Get(
        Digest, MergeDigests([subsetted_input_digest, empty_output_dir]))

    immutable_input_digests = {
        toolcp_relpath: tool_classpath.digest,
    }

    jaxws_args = [
        "-d",
        output_dir,
        "-encoding",
        "utf8",
        "-keep",
        "-Xnocompile",
        "-B-XautoNameResolution",
    ]
    if request.module:
        jaxws_args.extend(["-m", request.module])
    if request.package:
        jaxws_args.extend(["-p", request.package])

    jaxws_process = JvmProcess(
        jdk=jdk,
        argv=[
            "com.sun.tools.ws.WsImport",
            *jaxws_args,
            request.path,
        ],
        classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
        input_digest=input_digest,
        extra_immutable_input_digests=immutable_input_digests,
        extra_nailgun_keys=immutable_input_digests,
        description="Generating Java sources from WSDL source",
        level=LogLevel.DEBUG,
        output_directories=(output_dir, ),
    )
    jaxws_result = await Get(ProcessResult, JvmProcess, jaxws_process)

    normalized_digest = await Get(
        Digest, RemovePrefix(jaxws_result.output_digest, output_dir))
    return CompiledWsdlSource(normalized_digest)
Example #25
def _get_snapshot(self):
    """Returns a Snapshot of the input globs"""
    return self._scheduler_session.product_request(
        Snapshot, subjects=[PathGlobs(self._invalidation_globs)])[0]
Example #26
async def flake8_lint_partition(
    partition: Flake8Partition, flake8: Flake8, lint_subsystem: LintSubsystem
) -> LintResult:
    requirements_pex_request = Get(
        Pex,
        PexRequest(
            output_filename="flake8.pex",
            internal_only=True,
            requirements=PexRequirements(flake8.all_requirements),
            interpreter_constraints=(
                partition.interpreter_constraints
                or PexInterpreterConstraints(flake8.interpreter_constraints)
            ),
            entry_point=flake8.entry_point,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[flake8.config] if flake8.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--flake8-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles, SourceFilesRequest(field_set.sources for field_set in partition.field_sets)
    )

    requirements_pex, config_digest, source_files = await MultiGet(
        requirements_pex_request, config_digest_request, source_files_request
    )

    input_digest = await Get(
        Digest,
        MergeDigests((source_files.snapshot.digest, requirements_pex.digest, config_digest)),
    )

    report_file_name = "flake8_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        PexProcess(
            requirements_pex,
            argv=generate_args(
                source_files=source_files, flake8=flake8, report_file_name=report_file_name,
            ),
            input_digest=input_digest,
            output_files=(report_file_name,) if report_file_name else None,
            description=f"Run Flake8 on {pluralize(len(partition.field_sets), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )

    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Flake8 report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result, partition_description=str(sorted(partition.interpreter_constraints)), report=report
    )
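The config lookup above illustrates an optional-file idiom: an empty glob list resolves to the empty digest, while a configured-but-missing file is a hard error attributed to the originating option. A minimal sketch of that idiom on its own (hypothetical helper, assuming the same `pants.engine.fs` types and an enclosing engine rule):

from typing import Optional

from pants.engine.fs import Digest, GlobMatchErrorBehavior, PathGlobs
from pants.engine.rules import Get


async def maybe_config_digest(config_path: Optional[str], option_name: str) -> Digest:
    # No config path -> empty globs -> empty digest; a configured but missing file is an error.
    return await Get(
        Digest,
        PathGlobs(
            globs=[config_path] if config_path else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin=f"the option `{option_name}`",
        ),
    )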
Example #27
    def test_strip_prefix(self):
        # Set up files:

        relevant_files = (
            'characters/dark_tower/roland',
            'characters/dark_tower/susannah',
        )
        all_files = (
            'books/dark_tower/gunslinger',
            'characters/altered_carbon/kovacs',
        ) + relevant_files + ('index', )

        with temporary_dir() as temp_dir:
            safe_file_dump(os.path.join(temp_dir, 'index'),
                           'books\ncharacters\n')
            safe_file_dump(
                os.path.join(temp_dir, "characters", "altered_carbon",
                             "kovacs"),
                "Envoy",
                makedirs=True,
            )

            tower_dir = os.path.join(temp_dir, "characters", "dark_tower")
            safe_file_dump(os.path.join(tower_dir, "roland"),
                           "European Burmese",
                           makedirs=True)
            safe_file_dump(os.path.join(tower_dir, "susannah"),
                           "Not sure actually",
                           makedirs=True)

            safe_file_dump(
                os.path.join(temp_dir, "books", "dark_tower", "gunslinger"),
                "1982",
                makedirs=True,
            )

            snapshot, snapshot_with_extra_files = self.scheduler.capture_snapshots(
                (
                    PathGlobsAndRoot(PathGlobs(("characters/dark_tower/*", )),
                                     temp_dir),
                    PathGlobsAndRoot(PathGlobs(("**", )), temp_dir),
                ))
            # Check that we got the full snapshots that we expect
            self.assertEquals(snapshot.files, relevant_files)
            self.assertEquals(snapshot_with_extra_files.files, all_files)

            # Strip empty prefix:
            zero_prefix_stripped_digest = assert_single_element(
                self.scheduler.product_request(
                    Digest,
                    [
                        DirectoryWithPrefixToStrip(snapshot.directory_digest,
                                                   "")
                    ],
                ))
            self.assertEquals(snapshot.directory_digest,
                              zero_prefix_stripped_digest)

            # Strip a non-empty prefix shared by all files:
            stripped_digest = assert_single_element(
                self.scheduler.product_request(
                    Digest,
                    [
                        DirectoryWithPrefixToStrip(snapshot.directory_digest,
                                                   "characters/dark_tower")
                    ],
                ))
            self.assertEquals(
                stripped_digest,
                Digest(
                    fingerprint=
                    '71e788fc25783c424db555477071f5e476d942fc958a5d06ffc1ed223f779a8c',
                    serialized_bytes_length=162,
                ))
            expected_snapshot = assert_single_element(
                self.scheduler.capture_snapshots(
                    (PathGlobsAndRoot(PathGlobs(("*", )), tower_dir), )))
            self.assertEquals(expected_snapshot.files, ('roland', 'susannah'))
            self.assertEquals(stripped_digest,
                              expected_snapshot.directory_digest)

            # Try to strip a prefix which isn't shared by all files:
            with self.assertRaisesWithMessageContaining(
                    Exception,
                    "Cannot strip prefix characters/dark_tower from root directory Digest(Fingerprint<28c47f77867f0c8d577d2ada2f06b03fc8e5ef2d780e8942713b26c5e3f434b8>, 243) - root directory contained non-matching directory named: books and file named: index"
            ):
                self.scheduler.product_request(Digest, [
                    DirectoryWithPrefixToStrip(
                        snapshot_with_extra_files.directory_digest,
                        "characters/dark_tower")
                ])
Example #28
def path_globs(self, filename_glob: str) -> PathGlobs:
    return PathGlobs(
        [os.path.join(d, "**", filename_glob) for d in self.dirs])
Example #29
def specs(filespecs):
    if isinstance(filespecs, PathGlobs):
        return filespecs
    else:
        return PathGlobs(include=filespecs)
Example #30
File: graph.py Project: patricklaw/pants
async def find_owners(owners_request: OwnersRequest) -> Owners:
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))

    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources
                                     if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)

    # Walk up the buildroot looking for targets that would conceivably claim changed sources.
    # For live files, we use ExpandedTargets, which causes more precise, often file-level, targets
    # to be created. For deleted files we use UnexpandedTargets, which have the original declared
    # glob.
    live_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in live_dirs)
    deleted_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in deleted_dirs)
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(
        Get(Targets, AddressSpecs(live_candidate_specs)),
        Get(UnexpandedTargets, AddressSpecs(deleted_candidate_specs)),
    )

    matching_addresses: OrderedSet[Address] = OrderedSet()
    unmatched_sources = set(owners_request.sources)
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files

        build_file_addresses = await MultiGet(
            Get(BuildFileAddress, Address, tgt.address)
            for tgt in candidate_tgts)

        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            matching_files = set(
                matches_filespec(candidate_tgt.get(Sources).filespec,
                                 paths=sources_set))
            # Also consider secondary ownership, meaning it's not a `Sources` field with primary
            # ownership, but the target still should match the file. We can't use `tgt.get()`
            # because this is a mixin, and there technically may be >1 field.
            secondary_owner_fields = tuple(
                field  # type: ignore[misc]
                for field in candidate_tgt.field_values.values()
                if isinstance(field, SecondaryOwnerMixin))
            for secondary_owner_field in secondary_owner_fields:
                matching_files.update(
                    matches_filespec(secondary_owner_field.filespec,
                                     paths=sources_set))
            if not matching_files and bfa.rel_path not in sources_set:
                continue

            unmatched_sources -= matching_files
            matching_addresses.add(candidate_tgt.address)

    if (unmatched_sources and owners_request.owners_not_found_behavior !=
            OwnersNotFoundBehavior.ignore):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources],
            owners_request.owners_not_found_behavior)

    return Owners(matching_addresses)
Example #31
def buildfile_path_globs_for_dir(address_mapper, directory):
    patterns = address_mapper.build_patterns
    return BuildFileGlobs(
        PathGlobs.create(directory.path, include=patterns, exclude=()))
Example #32
File: sources.py Project: benjyw/pants
  def path_globs(self):
    """Creates a `PathGlobs` object for the paths matched by these Sources.

    This field may be projected to request the content of the files for this Sources object.
    """
    return PathGlobs.create(self.spec_path, include=self.filespecs, exclude=(self.excludes or []))
Example #33
def is_changed_snapshot() -> bool:
    new_snapshot = rule_runner.request(Snapshot, [PathGlobs(["a/*"])])
    return (new_snapshot.digest != original_snapshot.digest
            and new_snapshot.files == ("a/3.txt", "a/4.txt.ln", "a/new_file.txt")
            and new_snapshot.dirs == ("a", "a/b"))
Example #34
def to_path_globs(self, relpath):
  """Return a PathGlobs representing the included and excluded Files for these patterns."""
  return PathGlobs.create(relpath, self._file_globs, self._excluded_file_globs)
Example #35
async def create_pex(
    request: PexRequest,
    python_setup: PythonSetup,
    python_repos: PythonRepos,
    platform: Platform,
    pex_runtime_env: PexRuntimeEnvironment,
) -> Pex:
    """Returns a PEX with the given settings."""

    argv = [
        "--output-file",
        request.output_filename,
        # NB: In setting `--no-pypi`, we rely on the default value of `--python-repos-indexes`
        # including PyPI, which will override `--no-pypi` and result in using PyPI in the default
        # case. Why set `--no-pypi`, then? We need to do this so that
        # `--python-repos-repos=['custom_url']` will only point to that index and not include PyPI.
        "--no-pypi",
        *(f"--index={index}" for index in python_repos.indexes),
        *(f"--repo={repo}" for repo in python_repos.repos),
        "--resolver-version",
        python_setup.resolver_version.value,
        *request.additional_args,
    ]

    python: Optional[PythonExecutable] = None

    # NB: If `--platform` is specified, this signals that the PEX should not be built locally.
    # `--interpreter-constraint` only makes sense in the context of building locally. These two
    # flags are mutually exclusive. See https://github.com/pantsbuild/pex/issues/957.
    if request.platforms:
        # TODO(#9560): consider validating that these platforms are valid with the interpreter
        #  constraints.
        argv.extend(request.platforms.generate_pex_arg_list())
    else:
        # NB: If it's an internal_only PEX, we do our own lookup of the interpreter based on the
        # interpreter constraints, and then will run the PEX with that specific interpreter. We
        # will have already validated that there were no platforms.
        # Otherwise, we let Pex resolve the constraints.
        if request.internal_only:
            python = await Get(PythonExecutable, PexInterpreterConstraints,
                               request.interpreter_constraints)
        else:
            argv.extend(
                request.interpreter_constraints.generate_pex_arg_list())

    argv.append("--no-emit-warnings")

    if python_setup.resolver_jobs:
        argv.extend(["--jobs", str(python_setup.resolver_jobs)])

    if python_setup.manylinux:
        argv.extend(["--manylinux", python_setup.manylinux])
    else:
        argv.append("--no-manylinux")

    if request.entry_point is not None:
        argv.extend(["--entry-point", request.entry_point])

    if python_setup.requirement_constraints is not None:
        argv.extend(["--constraints", python_setup.requirement_constraints])

    source_dir_name = "source_files"
    argv.append(f"--sources-directory={source_dir_name}")

    argv.extend(request.requirements)

    constraint_file_digest = EMPTY_DIGEST
    if python_setup.requirement_constraints is not None:
        constraint_file_digest = await Get(
            Digest,
            PathGlobs(
                [python_setup.requirement_constraints],
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
                conjunction=GlobExpansionConjunction.all_match,
                description_of_origin="the option `--python-setup-requirement-constraints`",
            ),
        )

    sources_digest_as_subdir = await Get(
        Digest, AddPrefix(request.sources or EMPTY_DIGEST, source_dir_name))
    additional_inputs_digest = request.additional_inputs or EMPTY_DIGEST

    merged_digest = await Get(
        Digest,
        MergeDigests((
            sources_digest_as_subdir,
            additional_inputs_digest,
            constraint_file_digest,
        )),
    )

    description = request.description
    if description is None:
        if request.requirements:
            description = (
                f"Building {request.output_filename} with "
                f"{pluralize(len(request.requirements), 'requirement')}: "
                f"{', '.join(request.requirements)}")
        else:
            description = f"Building {request.output_filename}"

    process = await Get(
        Process,
        PexCliProcess(
            python=python,
            argv=argv,
            additional_input_digest=merged_digest,
            description=description,
            output_files=[request.output_filename],
        ),
    )

    # NB: Building a Pex is platform dependent, so in order to get a PEX that we can use locally
    # without cross-building, we specify that our PEX command should be run on the current local
    # platform.
    result = await Get(ProcessResult, MultiPlatformProcess({platform: process}))

    if pex_runtime_env.verbosity > 0:
        log_output = result.stderr.decode()
        if log_output:
            logger.info("%s", log_output)

    return Pex(digest=result.output_digest,
               name=request.output_filename,
               python=python)
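A minimal sketch of how a downstream rule typically requests a PEX through the rule above; the names mirror `PexRequest`, `PexRequirements`, and `PexInterpreterConstraints` as used elsewhere in these examples, and the requirement and constraint strings are purely illustrative.

# Hypothetical downstream rule (imports as in the other examples on this page).
async def fetch_tool_pex() -> Pex:
    return await Get(
        Pex,
        PexRequest(
            output_filename="tool.pex",
            internal_only=True,  # resolve for the local interpreter only
            requirements=PexRequirements(["some-tool==1.0"]),  # illustrative requirement
            interpreter_constraints=PexInterpreterConstraints(["CPython>=3.7"]),
        ),
    )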
Example #36
 def specs(relative_to, *filespecs):
   return PathGlobs.create(relative_to, include=filespecs)
Example #37
async def lint_with_regex_patterns(
        request: RegexLintRequest,
        regex_lint_subsystem: RegexLintSubsystem) -> LintResults:
    multi_matcher = regex_lint_subsystem.get_multi_matcher()
    if multi_matcher is None:
        return LintResults((), linter_name=request.name)

    file_to_content_pattern_names_and_encoding = {}
    for fp in request.file_paths:
        content_pattern_names, encoding = multi_matcher.get_applicable_content_pattern_names(
            fp)
        if content_pattern_names and encoding:
            file_to_content_pattern_names_and_encoding[fp] = (
                content_pattern_names, encoding)

    digest_contents = await Get(
        DigestContents,
        PathGlobs(globs=file_to_content_pattern_names_and_encoding.keys()))

    result = []
    for file_content in digest_contents:
        content_patterns, encoding = file_to_content_pattern_names_and_encoding[
            file_content.path]
        result.append(
            multi_matcher.check_content(file_content.path,
                                        file_content.content, content_patterns,
                                        encoding))

    stdout = ""
    detail_level = regex_lint_subsystem.detail_level
    num_matched_all = 0
    num_nonmatched_some = 0
    for rmr in sorted(result, key=lambda rmr: rmr.path):
        if not rmr.matching and not rmr.nonmatching:
            continue
        if detail_level == DetailLevel.names:
            if rmr.nonmatching:
                stdout += f"{rmr.path}\n"
            continue

        if rmr.nonmatching:
            icon = "X"
            num_nonmatched_some += 1
        else:
            icon = "V"
            num_matched_all += 1
        matched_msg = " Matched: {}".format(",".join(
            rmr.matching)) if rmr.matching else ""
        nonmatched_msg = (" Didn't match: {}".format(",".join(rmr.nonmatching))
                          if rmr.nonmatching else "")
        if detail_level == DetailLevel.all or (
                detail_level == DetailLevel.nonmatching and nonmatched_msg):
            stdout += f"{icon} {rmr.path}:{matched_msg}{nonmatched_msg}\n"

    if detail_level not in (DetailLevel.none, DetailLevel.names):
        if stdout:
            stdout += "\n"
        stdout += f"{num_matched_all} files matched all required patterns.\n"
        stdout += f"{num_nonmatched_some} files failed to match at least one required pattern."

    exit_code = PANTS_FAILED_EXIT_CODE if num_nonmatched_some else PANTS_SUCCEEDED_EXIT_CODE
    return LintResults((LintResult(exit_code, stdout, ""), ),
                       linter_name=request.name)
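The branching above keys off a `DetailLevel` enum. A minimal sketch of that setting, restricted to the members this example actually references (the real subsystem may define more):

from enum import Enum

class DetailLevel(Enum):
    """Sketch of the detail-level options referenced above."""
    none = "none"                # no per-file output
    names = "names"              # print only the paths that failed
    nonmatching = "nonmatching"  # print details only for failing files
    all = "all"                  # print details for every checked file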
Example #38
 def assert_pg_equals(self, pathglobs, relative_to, filespecs):
   self.assertEquals(PathGlobs(tuple(pathglobs)), PathGlobs.create_from_specs(relative_to, filespecs))
Example #39
async def merge_coverage_data(
    data_collection: PytestCoverageDataCollection,
    coverage_setup: CoverageSetup,
    coverage: CoverageSubsystem,
    source_roots: AllSourceRoots,
) -> MergedCoverageData:
    if len(data_collection) == 1 and not coverage.global_report:
        coverage_data = data_collection[0]
        return MergedCoverageData(coverage_data.digest, (coverage_data.address,))

    coverage_digest_gets = []
    coverage_data_file_paths = []
    addresses = []
    for data in data_collection:
        # We prefix each .coverage file with its corresponding address to avoid collisions.
        coverage_digest_gets.append(
            Get(Digest, AddPrefix(data.digest, prefix=data.address.path_safe_spec))
        )
        coverage_data_file_paths.append(f"{data.address.path_safe_spec}/.coverage")
        addresses.append(data.address)

    if coverage.global_report:
        global_coverage_base_dir = PurePath("__global_coverage__")

        global_coverage_config_path = global_coverage_base_dir / "pyproject.toml"
        global_coverage_config_content = toml.dumps(
            {
                "tool": {
                    "coverage": {
                        "run": {
                            "relative_files": True,
                            "source": list(source_root.path for source_root in source_roots),
                        }
                    }
                }
            }
        ).encode()

        no_op_exe_py_path = global_coverage_base_dir / "no-op-exe.py"

        all_sources_digest, no_op_exe_py_digest, global_coverage_config_digest = await MultiGet(
            Get(
                Digest,
                PathGlobs(globs=[f"{source_root.path}/**/*.py" for source_root in source_roots]),
            ),
            Get(Digest, CreateDigest([FileContent(path=str(no_op_exe_py_path), content=b"")])),
            Get(
                Digest,
                CreateDigest(
                    [
                        FileContent(
                            path=str(global_coverage_config_path),
                            content=global_coverage_config_content,
                        ),
                    ]
                ),
            ),
        )
        extra_sources_digest = await Get(
            Digest, MergeDigests((all_sources_digest, no_op_exe_py_digest))
        )
        input_digest = await Get(
            Digest, MergeDigests((extra_sources_digest, global_coverage_config_digest))
        )
        result = await Get(
            ProcessResult,
            VenvPexProcess(
                coverage_setup.pex,
                argv=("run", "--rcfile", str(global_coverage_config_path), str(no_op_exe_py_path)),
                input_digest=input_digest,
                output_files=(".coverage",),
                description="Create base global Pytest coverage report.",
                level=LogLevel.DEBUG,
            ),
        )
        coverage_digest_gets.append(
            Get(
                Digest, AddPrefix(digest=result.output_digest, prefix=str(global_coverage_base_dir))
            )
        )
        coverage_data_file_paths.append(str(global_coverage_base_dir / ".coverage"))
    else:
        extra_sources_digest = EMPTY_DIGEST

    input_digest = await Get(Digest, MergeDigests(await MultiGet(coverage_digest_gets)))
    result = await Get(
        ProcessResult,
        VenvPexProcess(
            coverage_setup.pex,
            argv=("combine", *sorted(coverage_data_file_paths)),
            input_digest=input_digest,
            output_files=(".coverage",),
            description=f"Merge {len(coverage_data_file_paths)} Pytest coverage reports.",
            level=LogLevel.DEBUG,
        ),
    )
    return MergedCoverageData(
        await Get(Digest, MergeDigests((result.output_digest, extra_sources_digest))),
        tuple(addresses),
    )
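For reference, roughly what the generated global coverage config above serializes to, assuming a single source root at `src/python` (purely illustrative; the exact `toml.dumps` formatting may differ).

import toml

config = toml.dumps(
    {"tool": {"coverage": {"run": {"relative_files": True, "source": ["src/python"]}}}}
)
# config is roughly:
# [tool.coverage.run]
# relative_files = true
# source = ["src/python"]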
Example #40
def ascendant_addresses_to_globs(address_mapper, ascendant_addresses):
    """Given an AscendantAddresses object, return a PathGlobs object for matching build files."""
    pattern = address_mapper.build_pattern
    patterns = [join(f, pattern) for f in _recursive_dirname(ascendant_addresses.directory)]
    return PathGlobs.create_from_specs("", patterns)
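A minimal sketch of the `_recursive_dirname` helper this example assumes: it yields the directory itself, each ancestor, and finally the build root (the empty string).

import os

def _recursive_dirname(f):
    """Yield f and each of its parent directories, ending with ''."""
    while f:
        yield f
        f = os.path.dirname(f)
    yield ''

# list(_recursive_dirname('a/b/c')) == ['a/b/c', 'a/b', 'a', '']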
Example #41
async def coursier_resolve_lockfile(
    bash: BashBinary,
    coursier: Coursier,
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolvedLockfile:
    """Run `coursier fetch ...` against a list of Maven coordinates and capture the result.

    This rule does two things in a single Process invocation:

        * Runs `coursier fetch` to let Coursier do the heavy lifting of resolving
          dependencies and downloading resolved artifacts (jars, etc).
        * Copies the resolved artifacts into the Process output directory, capturing
          the artifacts as content-addressed `Digest`s.

    It's important that this happens in the same process, since the process isn't
    guaranteed to run on the same machine as the rule, nor is a subsequent process
    invocation.  This guarantees that whatever Coursier resolved, it was fully
    captured into Pants' content addressed artifact storage.

    Note however that we still get the benefit of Coursier's "global" cache if it
    had already been run on the machine where the `coursier fetch` runs, so rerunning
    `coursier fetch` tends to be fast in practice.

    Finally, this rule bundles up the result into a `CoursierResolvedLockfile`.  This
    data structure encapsulates everything necessary to either materialize the
    resolved dependencies to a classpath for Java invocations, or to write the
    lockfile out to the workspace to hermetically freeze the result of the resolve.
    """

    if len(artifact_requirements) == 0:
        return CoursierResolvedLockfile(entries=())

    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        Process(
            argv=[
                bash.path,
                coursier.wrapper_script,
                coursier.coursier.exe,
                coursier_report_file_name,
                *(req.to_coord_str() for req in artifact_requirements),
            ],
            input_digest=coursier.digest,
            output_directories=("classpath", ),
            output_files=(coursier_report_file_name, ),
            description=(
                "Running `coursier fetch` against "
                f"{pluralize(len(artifact_requirements), 'requirement')}: "
                f"{', '.join(req.to_coord_str() for req in artifact_requirements)}"
            ),
            level=LogLevel.DEBUG,
        ),
    )
    report_digest = await Get(
        Digest,
        DigestSubset(process_result.output_digest,
                     PathGlobs([coursier_report_file_name])))
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)

    artifact_file_names = tuple(
        PurePath(dep["file"]).name for dep in report["dependencies"])
    artifact_output_paths = tuple(f"classpath/{file_name}"
                                  for file_name in artifact_file_names)
    artifact_digests = await MultiGet(
        Get(
            Digest,
            DigestSubset(process_result.output_digest, PathGlobs(
                [output_path]))) for output_path in artifact_output_paths)
    stripped_artifact_digests = await MultiGet(
        Get(Digest, RemovePrefix(artifact_digest, "classpath"))
        for artifact_digest in artifact_digests)
    artifact_file_digests = await MultiGet(
        Get(FileDigest, ExtractFileDigest(stripped_artifact_digest, file_name))
        for stripped_artifact_digest, file_name in zip(
            stripped_artifact_digests, artifact_file_names))
    return CoursierResolvedLockfile(entries=tuple(
        CoursierLockfileEntry(
            coord=Coordinate.from_coord_str(dep["coord"]),
            direct_dependencies=Coordinates(
                Coordinate.from_coord_str(dd)
                for dd in dep["directDependencies"]),
            dependencies=Coordinates(
                Coordinate.from_coord_str(d) for d in dep["dependencies"]),
            file_name=file_name,
            file_digest=artifact_file_digest,
        ) for dep, file_name, artifact_file_digest in zip(
            report["dependencies"], artifact_file_names,
            artifact_file_digests)))
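For orientation, a sketch of the report shape the rule above parses: only the keys it actually reads are shown, and the coordinates and file path are purely illustrative.

example_report = {
    "dependencies": [
        {
            "coord": "org.example:lib:1.0.0",
            "file": "/home/user/.cache/coursier/jars/lib-1.0.0.jar",  # only the file name is used
            "directDependencies": ["org.example:dep:2.1.0"],
            "dependencies": ["org.example:dep:2.1.0"],
        },
    ],
}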
Example #42
File: rules.py Project: zomglings/pants
async def bandit_lint_partition(partition: BanditPartition, bandit: Bandit,
                                lint_subsystem: LintSubsystem) -> LintResult:
    bandit_pex_request = Get(
        VenvPex,
        PexRequest(
            output_filename="bandit.pex",
            internal_only=True,
            requirements=PexRequirements(bandit.all_requirements),
            interpreter_constraints=partition.interpreter_constraints,
            main=bandit.main,
        ),
    )

    config_digest_request = Get(
        Digest,
        PathGlobs(
            globs=[bandit.config] if bandit.config else [],
            glob_match_error_behavior=GlobMatchErrorBehavior.error,
            description_of_origin="the option `--bandit-config`",
        ),
    )

    source_files_request = Get(
        SourceFiles,
        SourceFilesRequest(field_set.sources
                           for field_set in partition.field_sets))

    bandit_pex, config_digest, source_files = await MultiGet(
        bandit_pex_request, config_digest_request, source_files_request)

    input_digest = await Get(
        Digest, MergeDigests((source_files.snapshot.digest, config_digest)))

    report_file_name = "bandit_report.txt" if lint_subsystem.reports_dir else None

    result = await Get(
        FallibleProcessResult,
        VenvPexProcess(
            bandit_pex,
            argv=generate_args(source_files=source_files,
                               bandit=bandit,
                               report_file_name=report_file_name),
            input_digest=input_digest,
            description=f"Run Bandit on {pluralize(len(partition.field_sets), 'file')}.",
            output_files=(report_file_name, ) if report_file_name else None,
            level=LogLevel.DEBUG,
        ),
    )

    report = None
    if report_file_name:
        report_digest = await Get(
            Digest,
            DigestSubset(
                result.output_digest,
                PathGlobs(
                    [report_file_name],
                    glob_match_error_behavior=GlobMatchErrorBehavior.warn,
                    description_of_origin="Bandit report file",
                ),
            ),
        )
        report = LintReport(report_file_name, report_digest)

    return LintResult.from_fallible_process_result(
        result,
        partition_description=str(
            sorted(str(c) for c in partition.interpreter_constraints)),
        report=report,
    )
Example #43
File: structs.py Project: RobinTec/pants
 def to_path_globs(self, relpath):
   """Return two PathGlobs representing the included and excluded Files for these patterns."""
   return (
       PathGlobs.create_from_specs(relpath, self._filespecs),
       PathGlobs.create_from_specs(relpath, self._excluded_filespecs)
     )
Example #44
async def coursier_fetch_one_coord(
    bash: BashBinary,
    coursier: Coursier,
    request: CoursierLockfileEntry,
) -> ResolvedClasspathEntry:
    """Run `coursier fetch --intrasitive` to fetch a single artifact.

    This rule exists to permit efficient subsetting of a "global" classpath
    in the form of a lockfile.  Callers can determine what subset of dependencies
    from the lockfile are needed for a given target, then request those
    lockfile entries individually.

    By fetching only one entry at a time, we maximize our cache efficiency.  If instead
    we fetched the entire subset that the caller wanted, there would be a different cache
    key for every possible subset.

    This rule also guarantees exact reproducibility.  If all caches have been
    removed, `coursier fetch` will re-download the artifact, and this rule will
    confirm that what was downloaded matches exactly (by content digest) what
    was specified in the lockfile (what Coursier originally downloaded).
    """
    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        Process(
            argv=[
                bash.path,
                coursier.wrapper_script,
                coursier.coursier.exe,
                coursier_report_file_name,
                "--intransitive",
                request.coord.to_coord_str(),
            ],
            input_digest=coursier.digest,
            output_directories=("classpath", ),
            output_files=(coursier_report_file_name, ),
            description="Run coursier resolve",
            level=LogLevel.DEBUG,
        ),
    )
    report_digest = await Get(
        Digest,
        DigestSubset(process_result.output_digest,
                     PathGlobs([coursier_report_file_name])))
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)

    report_deps = report["dependencies"]
    if len(report_deps) == 0:
        raise CoursierError(
            "Coursier fetch report has no dependencies (i.e. nothing was fetched)."
        )
    elif len(report_deps) > 1:
        raise CoursierError(
            "Coursier fetch report has multiple dependencies, but exactly 1 was expected."
        )

    dep = report_deps[0]

    resolved_coord = Coordinate.from_coord_str(dep["coord"])
    if resolved_coord != request.coord:
        raise CoursierError(
            f'Coursier resolved coord "{resolved_coord.to_coord_str()}" does not match requested coord "{request.coord.to_coord_str()}".'
        )

    file_path = PurePath(dep["file"])
    classpath_dest = f"classpath/{file_path.name}"

    resolved_file_digest = await Get(
        Digest,
        DigestSubset(process_result.output_digest,
                     PathGlobs([classpath_dest])))
    stripped_digest = await Get(
        Digest, RemovePrefix(resolved_file_digest, "classpath"))
    file_digest = await Get(
        FileDigest,
        ExtractFileDigest(stripped_digest, file_path.name),
    )
    if file_digest != request.file_digest:
        raise CoursierError(
            f"Coursier fetch for '{resolved_coord}' succeeded, but fetched artifact {file_digest} did not match the expected artifact: {request.file_digest}."
        )
    return ResolvedClasspathEntry(coord=request.coord,
                                  file_name=file_path.name,
                                  digest=stripped_digest)
Example #45
def buildfile_path_globs_for_dir(address_mapper, directory):
  patterns = address_mapper.build_patterns
  return BuildFileGlobs(PathGlobs.create(directory.path, include=patterns, exclude=()))
Example #46
File: rules.py Project: pyranja/pants
async def flake8_lint(
    field_sets: Flake8FieldSets,
    flake8: Flake8,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> LintResult:
    if flake8.options.skip:
        return LintResult.noop()

    # NB: Flake8 output depends upon which Python interpreter version it's run with. We ensure that
    # each target runs with its own interpreter constraints. See
    # http://flake8.pycqa.org/en/latest/user/invocation.html.
    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (field_set.compatibility for field_set in field_sets), python_setup)
    requirements_pex_request = Get[Pex](PexRequest(
        output_filename="flake8.pex",
        requirements=PexRequirements(flake8.get_requirement_specs()),
        interpreter_constraints=interpreter_constraints,
        entry_point=flake8.get_entry_point(),
    ))

    config_path: Optional[str] = flake8.options.config
    config_snapshot_request = Get[Snapshot](PathGlobs(
        globs=[config_path] if config_path else [],
        glob_match_error_behavior=GlobMatchErrorBehavior.error,
        description_of_origin="the option `--flake8-config`",
    ))

    all_source_files_request = Get[SourceFiles](AllSourceFilesRequest(
        field_set.sources for field_set in field_sets))
    specified_source_files_request = Get[SourceFiles](
        SpecifiedSourceFilesRequest(
            (field_set.sources, field_set.origin) for field_set in field_sets))

    requirements_pex, config_snapshot, all_source_files, specified_source_files = cast(
        Tuple[Pex, Snapshot, SourceFiles, SourceFiles],
        await MultiGet([
            requirements_pex_request,
            config_snapshot_request,
            all_source_files_request,
            specified_source_files_request,
        ]),
    )

    input_digest = await Get[Digest](MergeDigests(
        (all_source_files.snapshot.digest, requirements_pex.digest,
         config_snapshot.digest)))

    address_references = ", ".join(
        sorted(field_set.address.reference() for field_set in field_sets))

    process = requirements_pex.create_process(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path=f"./flake8.pex",
        pex_args=generate_args(specified_source_files=specified_source_files,
                               flake8=flake8),
        input_digest=input_digest,
        description=f"Run Flake8 on {pluralize(len(field_sets), 'target')}: {address_references}.",
    )
    result = await Get[FallibleProcessResult](Process, process)
    return LintResult.from_fallible_process_result(result,
                                                   linter_name="Flake8")
Example #47
File: graph.py Project: anubnair/pants
def descendant_addresses_to_globs(descendant_addresses):
  """Given a DescendantAddresses object, return a PathGlobs object for matching directories."""
  return PathGlobs.create(Dirs, descendant_addresses.directory, globs=['.', '*', '**/*'])
Example #48
File: test_fs.py Project: neven7/pants
 def specs(self, ftype, relative_to, *filespecs):
     return PathGlobs.create_from_specs(ftype, relative_to, filespecs)
Example #49
def calculate_package_search_path(jvm_package_name, source_roots):
  """Return PathGlobs to match directories where the given JVMPackageName might exist."""
  rel_package_dir = jvm_package_name.name.replace('.', os_sep)
  specs = [os_path_join(srcroot, rel_package_dir) for srcroot in source_roots.srcroots]
  return PathGlobs.create_from_specs('', specs)
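A worked example of the mapping above, assuming source roots `['src/java', 'src/scala']` and the package `org.pantsbuild.example` (both purely illustrative).

import os

rel_package_dir = "org.pantsbuild.example".replace(".", os.sep)
specs = [os.path.join(srcroot, rel_package_dir) for srcroot in ["src/java", "src/scala"]]
# On POSIX: ['src/java/org/pantsbuild/example', 'src/scala/org/pantsbuild/example']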
Example #50
File: structs.py Project: lgirault/pants
 def to_path_globs(self, relpath, conjunction):
   """Return a PathGlobs representing the included and excluded Files for these patterns."""
   return PathGlobs(
     include=tuple(os.path.join(relpath, glob) for glob in self._file_globs),
     exclude=tuple(os.path.join(relpath, exclude) for exclude in self._excluded_file_globs),
     conjunction=conjunction)
Example #51
File: test_fs.py Project: ericxsun/pants
 def specs(self, ftype, relative_to, *filespecs):
   return PathGlobs.create_from_specs(ftype, relative_to, filespecs)
Example #52
 def to_path_globs(self, relpath):
     """Return two PathGlobs representing the included and excluded Files for these patterns."""
     return PathGlobs.create(relpath, self._file_globs,
                             self._excluded_file_globs)
Example #53
def main_filespecs():
  build_root, goals, args = pop_build_root_and_goals('[build root path] [filespecs]*', sys.argv[1:])

  # Create PathGlobs for each arg relative to the buildroot.
  path_globs = [PathGlobs.create('', globs=[arg]) for arg in args]
  visualize_build_request(build_root, goals, path_globs)
Example #54
def run_black(
    wrapped_target: FormattablePythonTarget,
    black: Black,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> FmtResult:
    config_path = black.get_options().config
    # Guard against a missing config file: only glob the path if one was provided.
    config_snapshot = yield Get(
        Snapshot, PathGlobs(include=(config_path,) if config_path else ()))

    resolved_requirements_pex = yield Get(
        Pex,
        CreatePex(
            output_filename="black.pex",
            requirements=tuple(black.get_requirement_specs()),
            interpreter_constraints=tuple(
                black.default_interpreter_constraints),
            entry_point=black.get_entry_point(),
        ))
    target = wrapped_target.target
    sources_digest = target.sources.snapshot.directory_digest

    all_input_digests = [
        sources_digest,
        resolved_requirements_pex.directory_digest,
        config_snapshot.directory_digest,
    ]
    merged_input_files = yield Get(
        Digest,
        DirectoriesToMerge(directories=tuple(all_input_digests)),
    )

    # The exclude option from Black only works on recursive invocations,
    # so call black with the directories in which the files are present
    # and passing the full file names with the include option
    dirs: Set[str] = set()
    for filename in target.sources.snapshot.files:
        dirs.add(f"{Path(filename).parent}")
    pex_args = tuple(sorted(dirs))
    if config_path:
        pex_args += ("--config", config_path)
    if target.sources.snapshot.files:
        pex_args += ("--include", "|".join(
            re.escape(f) for f in target.sources.snapshot.files))

    request = resolved_requirements_pex.create_execute_request(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./black.pex",
        pex_args=pex_args,
        input_files=merged_input_files,
        output_files=target.sources.snapshot.files,
        description=f'Run Black for {target.address.reference()}',
    )

    result = yield Get(ExecuteProcessResult, ExecuteProcessRequest, request)

    yield FmtResult(
        digest=result.output_directory_digest,
        stdout=result.stdout.decode(),
        stderr=result.stderr.decode(),
    )
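A small illustration of how the `--include` pattern built above behaves: the escaped file names are joined with `|`, so Black only formats the listed files even though it is invoked on their parent directories (file names are illustrative).

import re

files = ("src/app.py", "src/util/io.py")
include_regex = "|".join(re.escape(f) for f in files)
assert re.search(include_regex, "src/util/io.py")
assert re.search(include_regex, "src/other.py") is None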