Code Example #1
File: changed.py Project: wisechengyi/pants
async def find_owners(
    build_configuration: BuildConfiguration,
    address_mapper: AddressMapper,
    changed_request: ChangedRequest,
) -> ChangedAddresses:
    owners = await Get[Owners](OwnersRequest(sources=changed_request.sources))

    # If the ChangedRequest does not require dependees, then we're done.
    if changed_request.include_dependees == IncludeDependeesOption.NONE:
        return ChangedAddresses(owners.addresses)

    # Otherwise: find dependees.
    all_addresses = await Get[Addresses](AddressSpecs(
        (DescendantAddresses(""), )))
    all_structs = [
        s.value for s in await MultiGet(Get[HydratedStruct](Address, a)
                                        for a in all_addresses)
    ]

    bfa = build_configuration.registered_aliases()
    graph = _DependentGraph.from_iterable(
        target_types_from_build_file_aliases(bfa), address_mapper, all_structs)
    if changed_request.include_dependees == IncludeDependeesOption.DIRECT:
        return ChangedAddresses(
            Addresses(graph.dependents_of_addresses(owners.addresses)))
    return ChangedAddresses(
        Addresses(graph.transitive_dependents_of_addresses(owners.addresses)))
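Example #1 defers the dependee expansion to `_DependentGraph`, which distinguishes direct from transitive dependents. A minimal sketch of that distinction over a plain adjacency mapping (the function names and graph encoding here are illustrative, not Pants APIs):

from collections import deque
from typing import Dict, Iterable, Set


def direct_dependents(graph: Dict[str, Set[str]], roots: Iterable[str]) -> Set[str]:
    """Targets that directly depend on any of `roots`."""
    root_set = set(roots)
    return {addr for addr, deps in graph.items() if deps & root_set}


def transitive_dependents(graph: Dict[str, Set[str]], roots: Iterable[str]) -> Set[str]:
    """The fixed point of direct_dependents: everything reachable via reverse edges."""
    result: Set[str] = set()
    frontier = deque(roots)
    while frontier:
        for addr in direct_dependents(graph, [frontier.popleft()]):
            if addr not in result:
                result.add(addr)
                frontier.append(addr)
    return result


# Each target maps to the addresses it depends on.
graph = {"a": set(), "b": {"a"}, "c": {"b"}}
assert direct_dependents(graph, ["a"]) == {"b"}
assert transitive_dependents(graph, ["a"]) == {"b", "c"}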
Code Example #2
File: run_setup_py.py Project: rahuliyer95/pants
async def get_exporting_owner(
        owned_dependency: OwnedDependency) -> ExportedTarget:
    """Find the exported target that owns the given target (and therefore exports it).

    The owner of T (i.e., the exported target in whose artifact T's code is published) is:

     1. An exported target that depends on T (or is T itself).
 2. The closest filesystem ancestor of T among those satisfying 1.

    If there are multiple such exported targets at the same degree of ancestry, the ownership
    is ambiguous and an error is raised. If there is no exported target that depends on T
    and is its ancestor, then there is no owner and an error is raised.
    """
    hydrated_target = owned_dependency.hydrated_target
    ancestor_addrs = AscendantAddresses(
        hydrated_target.adaptor.address.spec_path)
    ancestor_tgts = await Get[HydratedTargets](AddressSpecs(
        (ancestor_addrs, )))
    # Note that addresses sort by (spec_path, target_name), and all these targets are
    # ancestors of the given target, i.e., their spec_paths are all prefixes. So sorting by
    # address will effectively sort by closeness of ancestry to the given target.
    exported_ancestor_tgts = sorted(
        [t.adaptor for t in ancestor_tgts if _is_exported(t)],
        key=lambda adaptor: adaptor.address,
        reverse=True,
    )
    exported_ancestor_iter = iter(exported_ancestor_tgts)
    for exported_ancestor in exported_ancestor_iter:
        tht = await Get[TransitiveHydratedTargets](Addresses(
            [exported_ancestor.address]))
        if hydrated_target in tht.closure:
            owner = exported_ancestor
            # Find any exported siblings of owner that also depend on hydrated_target. They
            # have the same spec_path as owner, so they must immediately follow it in
            # exported_ancestor_iter.
            sibling_owners = []
            sibling = next(exported_ancestor_iter, None)
            while sibling and sibling.address.spec_path == owner.address.spec_path:
                tht = await Get[TransitiveHydratedTargets](Addresses(
                    [sibling.address]))
                if hydrated_target in tht.closure:
                    sibling_owners.append(sibling)
                sibling = next(exported_ancestor_iter, None)
            if sibling_owners:
                raise AmbiguousOwnerError(
                    f"Exporting owners for {hydrated_target.adaptor.address.reference()} are "
                    f"ambiguous. Found {exported_ancestor.address.reference()} and "
                    f"{len(sibling_owners)} others: "
                    f'{", ".join(so.address.reference() for so in sibling_owners)}'
                )
            return ExportedTarget(HydratedTarget(owner))
    raise NoOwnerError(
        f"No exported target owner found for {hydrated_target.adaptor.address.reference()}"
    )
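The key insight in Example #2 is the sorting comment: because every candidate's spec_path is a prefix of the target's own path, sorting addresses as (spec_path, target_name) tuples and reversing puts the closest ancestor first. A tiny self-contained illustration (the addresses are made up):

# All candidates are ancestors of src/python/foo/bar, so their spec_paths are
# prefixes of it; a reversed lexicographic sort yields closest-ancestor-first.
candidates = [
    ("src/python", "lib"),
    ("src/python/foo", "foo-dist"),
    ("src", "all"),
]
closest_first = sorted(candidates, reverse=True)
assert closest_first == [
    ("src/python/foo", "foo-dist"),
    ("src/python", "lib"),
    ("src", "all"),
]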
Code Example #3
async def find_owners(owners_request: OwnersRequest) -> Owners:
    sources_set = FrozenOrderedSet(owners_request.sources)
    dirs_set = FrozenOrderedSet(
        os.path.dirname(source) for source in sources_set)

    # Walk up the buildroot looking for targets that would conceivably claim changed sources.
    candidate_specs = tuple(AscendantAddresses(directory=d) for d in dirs_set)
    candidate_targets = await Get[HydratedTargets](
        AddressSpecs(candidate_specs))

    # Match the source globs against the expanded candidate targets.
    def owns_any_source(legacy_target: HydratedTarget) -> bool:
        """Given a `HydratedTarget` instance, check if it owns the given source file."""
        target_kwargs = legacy_target.adaptor.kwargs()

        # Handle `sources`-declaring targets.
        # NB: Deleted files can only be matched against the 'filespec' (i.e., `PathGlobs`) for a target,
        # so we don't actually call `fileset.matches` here.
        # TODO: This matching logic should be implemented using the rust `fs` crate for two reasons:
        #  1) having two implementations isn't great
        #  2) we're expanding sources via HydratedTarget, but it isn't necessary to do that to match
        target_sources = target_kwargs.get("sources", None)
        return bool(target_sources) and any_matches_filespec(
            paths=sources_set, spec=target_sources.filespec)

    build_file_addresses = await MultiGet(
        Get[BuildFileAddress](Address, ht.adaptor.address)
        for ht in candidate_targets)
    owners = Addresses(
        ht.adaptor.address
        for ht, bfa in zip(candidate_targets, build_file_addresses)
        if LegacyAddressMapper.any_is_declaring_file(bfa, sources_set)
        or owns_any_source(ht))
    return Owners(owners)
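Example #3 builds one `AscendantAddresses` spec per changed directory, so candidate targets are gathered from every enclosing directory up to the build root. A standalone sketch of which directories that walk covers (the helper is mine, not a Pants API):

import os
from typing import List


def ascendant_dirs(source: str) -> List[str]:
    """Every directory from the source's own dir up to the build root ("")."""
    dirs = []
    d = os.path.dirname(source)
    while d:
        dirs.append(d)
        d = os.path.dirname(d)
    dirs.append("")  # the build root itself may hold an owning target
    return dirs


assert ascendant_dirs("src/python/foo/bar.py") == [
    "src/python/foo", "src/python", "src", ""]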
Code Example #4
async def create_python_awslambda(
    lambda_tgt_adaptor: PythonAWSLambdaAdaptor,
    lambdex_setup: LambdexSetup,
    python_setup: PythonSetup,
    subprocess_encoding_environment: SubprocessEncodingEnvironment,
) -> CreatedAWSLambda:
    # TODO: We must enforce that everything is built for Linux, no matter the local platform.
    pex_filename = f"{lambda_tgt_adaptor.address.target_name}.pex"
    pex_request = LegacyPexFromTargetsRequest(
        addresses=Addresses([lambda_tgt_adaptor.address]),
        entry_point=None,
        output_filename=pex_filename,
    )

    pex = await Get[Pex](LegacyPexFromTargetsRequest, pex_request)
    merged_input_files = await Get[Digest](DirectoriesToMerge(
        directories=(pex.directory_digest,
                     lambdex_setup.requirements_pex.directory_digest)))

    # NB: Lambdex modifies its input pex in-place, so the input file is also the output file.
    lambdex_args = ("build", "-e", lambda_tgt_adaptor.handler, pex_filename)
    process_request = lambdex_setup.requirements_pex.create_execute_request(
        python_setup=python_setup,
        subprocess_encoding_environment=subprocess_encoding_environment,
        pex_path="./lambdex.pex",
        pex_args=lambdex_args,
        input_files=merged_input_files,
        output_files=(pex_filename, ),
        description=f"Run Lambdex for {lambda_tgt_adaptor.address.reference()}",
    )
    result = await Get[ExecuteProcessResult](ExecuteProcessRequest,
                                             process_request)
    return CreatedAWSLambda(digest=result.output_directory_digest,
                            name=pex_filename)
Code Example #5
async def create_python_binary(
        config: PythonBinaryConfiguration) -> CreatedBinary:
    entry_point: Optional[str]
    if config.entry_point.value is not None:
        entry_point = config.entry_point.value
    else:
        source_files = await Get[SourceFiles](AllSourceFilesRequest(
            [config.sources], strip_source_roots=True))
        # NB: `PythonBinarySources` enforces that we have 0-1 sources.
        if len(source_files.files) == 1:
            module_name = source_files.files[0]
            entry_point = PythonBinary.translate_source_path_to_py_module_specifier(
                module_name)
        else:
            entry_point = None

    output_filename = f"{config.address.target_name}.pex"
    two_step_pex = await Get[TwoStepPex](TwoStepPexFromTargetsRequest(
        PexFromTargetsRequest(
            addresses=Addresses([config.address]),
            entry_point=entry_point,
            output_filename=output_filename,
            additional_args=config.generate_additional_args(),
            description=f"Building {output_filename}",
        )))
    pex = two_step_pex.pex
    return CreatedBinary(digest=pex.directory_digest,
                         binary_name=pex.output_filename)
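Example #5 (and the two variants that follow) derives the PEX entry point from a single stripped source file via `PythonBinary.translate_source_path_to_py_module_specifier`. A plausible standalone equivalent, assuming the conventional path-to-module rule (my reconstruction, not the Pants implementation):

import os


def source_path_to_module(path: str) -> str:
    """'foo/bar/baz.py' -> 'foo.bar.baz' (source roots assumed already stripped)."""
    module_path, _ = os.path.splitext(path)
    return module_path.replace(os.sep, ".")


assert source_path_to_module("foo/bar/baz.py") == "foo.bar.baz"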
Code Example #6
File: python_create_binary.py Project: revl/pants
async def create_python_binary(
        python_binary_adaptor: PythonBinaryAdaptor) -> CreatedBinary:
    # TODO(#8420) This way of calculating the entry point works but is a bit hackish.
    if hasattr(python_binary_adaptor, "entry_point"):
        entry_point = python_binary_adaptor.entry_point
    else:
        sources = await Get[SourceFiles](AllSourceFilesRequest(
            [python_binary_adaptor], strip_source_roots=True))
        # NB: `python_binary` targets may have 0-1 sources. This is enforced by
        # `PythonBinaryAdaptor`.
        if len(sources.snapshot.files) == 1:
            module_name = sources.snapshot.files[0]
            entry_point = PythonBinary.translate_source_path_to_py_module_specifier(
                module_name)
        else:
            entry_point = None

    request = CreatePexFromTargetClosure(
        addresses=Addresses((python_binary_adaptor.address, )),
        entry_point=entry_point,
        output_filename=f"{python_binary_adaptor.address.target_name}.pex",
    )

    pex = await Get[Pex](CreatePexFromTargetClosure, request)
    return CreatedBinary(digest=pex.directory_digest,
                         binary_name=pex.output_filename)
Code Example #7
async def create_python_binary(fields: PythonBinaryFields) -> CreatedBinary:
    entry_point: Optional[str]
    if fields.entry_point.value is not None:
        entry_point = fields.entry_point.value
    else:
        # TODO: rework determine_source_files.py to work with the Target API. It should take the
        #  Sources AsyncField as input, rather than TargetAdaptor.
        sources_result = await Get[SourcesResult](SourcesRequest,
                                                  fields.sources.request)
        stripped_sources = await Get[SourceRootStrippedSources](
            StripSnapshotRequest(sources_result.snapshot))
        source_files = stripped_sources.snapshot.files
        # NB: `PythonBinarySources` enforces that we have 0-1 sources.
        if len(source_files) == 1:
            module_name = source_files[0]
            entry_point = PythonBinary.translate_source_path_to_py_module_specifier(
                module_name)
        else:
            entry_point = None

    request = CreatePexFromTargetClosure(
        addresses=Addresses([fields.address]),
        entry_point=entry_point,
        output_filename=f"{fields.address.target_name}.pex",
    )

    pex = await Get[Pex](CreatePexFromTargetClosure, request)
    return CreatedBinary(digest=pex.directory_digest,
                         binary_name=pex.output_filename)
Code Example #8
def single_target_run(
    self,
    *,
    console: MockConsole,
    program_text: bytes,
    address_spec: str,
) -> Run:
    workspace = Workspace(self.scheduler)
    interactive_runner = InteractiveRunner(self.scheduler)
    BuildRoot().path = self.build_root
    res = run_rule(
        run,
        rule_args=[
            console,
            workspace,
            interactive_runner,
            BuildRoot(),
            Addresses([Address.parse(address_spec)]),
            MockOptions(args=[]),
        ],
        mock_gets=[
            MockGet(
                product_type=CreatedBinary,
                subject_type=Address,
                mock=lambda _: self.create_mock_binary(program_text),
            ),
        ],
    )
    return cast(Run, res)
Code Example #9
File: graph_test.py Project: tushar19/pants
    def test_transitive_targets(self) -> None:
        t1 = MockTarget({}, address=Address.parse(":t1"))
        t2 = MockTarget({Dependencies.alias: [t1.address]},
                        address=Address.parse(":t2"))
        d1 = MockTarget({Dependencies.alias: [t1.address]},
                        address=Address.parse(":d1"))
        d2 = MockTarget({Dependencies.alias: [t2.address]},
                        address=Address.parse(":d2"))
        d3 = MockTarget({}, address=Address.parse(":d3"))
        root = MockTarget(
            {Dependencies.alias: [d1.address, d2.address, d3.address]},
            address=Address.parse(":root"),
        )

        # TODO: possibly figure out how to deduplicate this when developing utilities for testing
        #  with the Target API.
        self.add_to_build_file(
            "",
            dedent("""\
                target(name='t1')
                target(name='t2', dependencies=[':t1'])
                target(name='d1', dependencies=[':t1'])
                target(name='d2', dependencies=[':t2'])
                target(name='d3')
                target(name='root', dependencies=[':d1', ':d2', ':d3'])
                """),
        )

        direct_deps = self.request_single_product(
            Targets,
            Addresses(root[Dependencies].value)  # type: ignore[arg-type]
        )
        assert direct_deps == Targets([d1, d2, d3])

        transitive_target = self.request_single_product(
            TransitiveTarget, WrappedTarget(root))
        assert transitive_target.root == root
        assert {
            dep_transitive_target.root
            for dep_transitive_target in transitive_target.dependencies
        } == {d1, d2, d3}

        transitive_targets = self.request_single_product(
            TransitiveTargets, Addresses([root.address, d2.address]))
        assert transitive_targets.roots == (root, d2)
        assert transitive_targets.closure == FrozenOrderedSet(
            [root, d2, d1, d3, t2, t1])
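The final assertion in Example #9 pins down the closure's ordering: roots first, then dependencies breadth-first in first-visit order. A minimal sketch of that traversal, using a dict as an insertion-ordered set:

from collections import deque
from typing import Dict, Iterable, List


def closure(deps: Dict[str, List[str]], roots: Iterable[str]) -> List[str]:
    """Breadth-first closure preserving first-visit order, like FrozenOrderedSet."""
    visited: Dict[str, None] = {}
    queue: deque = deque()
    for root in roots:
        if root not in visited:
            visited[root] = None
            queue.append(root)
    while queue:
        for dep in deps[queue.popleft()]:
            if dep not in visited:
                visited[dep] = None
                queue.append(dep)
    return list(visited)


deps = {"t1": [], "t2": ["t1"], "d1": ["t1"], "d2": ["t2"], "d3": [],
        "root": ["d1", "d2", "d3"]}
assert closure(deps, ["root", "d2"]) == ["root", "d2", "d1", "d3", "t2", "t1"]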
Code Example #10
def addresses_from_address_families(address_families, spec):
  """Given a list of AddressFamilies and a Spec, return matching Addresses."""
  if type(spec) in (DescendantAddresses, SiblingAddresses, AscendantAddresses):
    addresses = tuple(a for af in address_families for a in af.addressables.keys())
  elif type(spec) is SingleAddress:
    addresses = tuple(a
                      for af in address_families
                      for a in af.addressables.keys() if a.target_name == spec.name)
  else:
    raise ValueError('Unrecognized Spec type: {}'.format(spec))
  return Addresses(addresses)
Code Example #11
File: graph.py Project: neven7/pants
def address_from_address_family(address_family, single_address):
    """Given an AddressFamily and a SingleAddress, return an Addresses object containing the Address.

    Raises an exception if the SingleAddress does not match an existing Address.
    """
    name = single_address.name
    if name is None:
        name = os_path_basename(single_address.directory)
    if name not in address_family.objects_by_name:
        _raise_did_you_mean(address_family, single_address.name)
    return Addresses(tuple([Address(address_family.namespace, name)]))
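Example #11 falls back to `_raise_did_you_mean` when the name lookup fails. A hedged sketch of that behavior using difflib (the exception type and message format are illustrative, not the Pants source):

import difflib
from typing import Iterable


def raise_did_you_mean(known_names: Iterable[str], requested: str) -> None:
    """Fail, suggesting the closest-matching known target names, if any."""
    suggestions = difflib.get_close_matches(requested, list(known_names), n=3)
    hint = f" Did you mean: {', '.join(suggestions)}?" if suggestions else ""
    raise ValueError(f"Target {requested!r} not found.{hint}")


try:
    raise_did_you_mean(["server", "service", "client"], "servce")
except ValueError as e:
    assert "service" in str(e)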
Code Example #12
File: repl.py Project: wisechengyi/pants
async def run_python_repl(repl: PythonRepl) -> ReplBinary:
    targets = await Get[TransitiveHydratedTargets](Addresses, repl.addresses)
    python_addresses = Addresses(
        ht.adaptor.address for ht in targets.closure
        if isinstance(ht.adaptor, PythonTargetAdaptor))
    create_pex = CreatePexFromTargetClosure(
        addresses=python_addresses,
        output_filename="python-repl.pex",
    )

    repl_pex = await Get[Pex](CreatePexFromTargetClosure, create_pex)
    return ReplBinary(
        digest=repl_pex.directory_digest,
        binary_name=repl_pex.output_filename,
    )
Code Example #13
async def dependencies(
    console: Console,
    addresses: Addresses,
    options: DependenciesOptions,
) -> Dependencies:
    if options.values.transitive:
        transitive_targets = await Get[TransitiveTargets](Addresses, addresses)
        targets = Targets(transitive_targets.closure -
                          FrozenOrderedSet(transitive_targets.roots))
    else:
        target_roots = await Get[Targets](Addresses, addresses)
        targets = await Get[Targets](Addresses(
            itertools.chain.from_iterable(
                tgt.get(DependenciesField).value or ()
                for tgt in target_roots)))

    include_3rdparty = options.values.type in [
        DependencyType.THIRD_PARTY,
        DependencyType.SOURCE_AND_THIRD_PARTY,
    ]
    include_source = options.values.type in [
        DependencyType.SOURCE,
        DependencyType.SOURCE_AND_THIRD_PARTY,
    ]

    address_strings = set()
    third_party_requirements: Set[str] = set()
    for tgt in targets:
        if include_source:
            address_strings.add(tgt.address.spec)
        if include_3rdparty:
            if tgt.has_field(PythonRequirementsField):
                third_party_requirements.update(
                    str(python_req.requirement)
                    for python_req in tgt[PythonRequirementsField].value)
            if tgt.has_field(JarsField):
                third_party_requirements.update(
                    f"{jar.org}:{jar.name}:{jar.rev}" if jar.rev is not None
                    else f"{jar.org}:{jar.name}"
                    for jar in tgt[JarsField].value)

    with options.line_oriented(console) as print_stdout:
        for address in sorted(address_strings):
            print_stdout(address)
        for requirement_string in sorted(third_party_requirements):
            print_stdout(requirement_string)

    return Dependencies(exit_code=0)
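The jar-coordinate expression folded into Example #13's comprehension is easier to read as a helper. A small extraction (the dataclass stands in for the real jar dependency object):

from dataclasses import dataclass
from typing import Optional


@dataclass
class Jar:
    org: str
    name: str
    rev: Optional[str] = None


def jar_coordinate(jar: Jar) -> str:
    """org:name:rev when a revision is pinned, else org:name."""
    if jar.rev is not None:
        return f"{jar.org}:{jar.name}:{jar.rev}"
    return f"{jar.org}:{jar.name}"


assert jar_coordinate(Jar("com.example", "lib", "1.0")) == "com.example:lib:1.0"
assert jar_coordinate(Jar("com.example", "lib")) == "com.example:lib"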
Code Example #14
File: run_setup_py.py Project: rahuliyer95/pants
async def get_requirements(
        dep_owner: DependencyOwner) -> ExportedTargetRequirements:
    tht = await Get[TransitiveHydratedTargets](Addresses(
        [dep_owner.exported_target.hydrated_target.adaptor.address]))

    ownable_tgts = [tgt for tgt in tht.closure if is_ownable_target(tgt)]
    owners = await MultiGet(Get[ExportedTarget](OwnedDependency(ht))
                            for ht in ownable_tgts)
    owned_by_us: Set[HydratedTarget] = set()
    owned_by_others: Set[HydratedTarget] = set()
    for tgt, owner in zip(ownable_tgts, owners):
        (owned_by_us
         if owner == dep_owner.exported_target else owned_by_others).add(tgt)

    # Get all 3rdparty deps of our owned deps.
    #
    # Note that we need only consider requirements that are direct dependencies of our owned deps:
    # If T depends on R indirectly, then it must be via some direct deps U1, U2, ... For each such U,
    # if U is in the owned deps then we'll pick up R through U. And if U is not in the owned deps
    # then it's owned by an exported target ET, and so R will be in the requirements for ET, and we
    # will require ET.
    #
    # TODO: Note that this logic doesn't account for indirection via dep aggregator targets, of type
    #  `target`. But we don't have those in v2 (yet) anyway. Plus, as we move towards buildgen and/or
    #  stricter build graph hygiene, it makes sense to require that targets directly declare their
    #  true dependencies. Plus, in the specific realm of setup-py, since we must exclude indirect
    #  deps across exported target boundaries, it's not a big stretch to just insist that
    #  requirements must be direct deps.
    direct_deps_addrs = tuple(
        {dep for ht in owned_by_us for dep in ht.adaptor.dependencies})
    direct_deps_tgts = await MultiGet(Get[HydratedTarget](Address, a)
                                      for a in direct_deps_addrs)
    reqs = PexRequirements.create_from_adaptors(tgt.adaptor
                                                for tgt in direct_deps_tgts)
    req_strs = list(reqs.requirements)

    # Add the requirements on any exported targets on which we depend.
    exported_targets_we_depend_on = await MultiGet(
        Get[ExportedTarget](OwnedDependency(ht)) for ht in owned_by_others)
    req_strs.extend(
        sorted(et.hydrated_target.adaptor.provides.requirement
               for et in set(exported_targets_we_depend_on)))

    return ExportedTargetRequirements(tuple(req_strs))
Code Example #15
File: repl.py Project: wisechengyi/pants
async def run_ipython_repl(repl: IPythonRepl, ipython: IPython) -> ReplBinary:
    targets = await Get[TransitiveHydratedTargets](Addresses, repl.addresses)
    python_addresses = Addresses(
        ht.adaptor.address for ht in targets.closure
        if isinstance(ht.adaptor, PythonTargetAdaptor))

    create_pex = CreatePexFromTargetClosure(
        addresses=python_addresses,
        output_filename="ipython-repl.pex",
        entry_point=ipython.get_entry_point(),
        additional_requirements=ipython.get_requirement_specs(),
    )

    repl_pex = await Get[Pex](CreatePexFromTargetClosure, create_pex)
    return ReplBinary(
        digest=repl_pex.directory_digest,
        binary_name=repl_pex.output_filename,
    )
Code Example #16
File: run_setup_py.py Project: rahuliyer95/pants
async def get_owned_dependencies(
        dependency_owner: DependencyOwner) -> OwnedDependencies:
    """Find the dependencies of dependency_owner that are owned by it.

    Includes dependency_owner itself.
    """
    tht = await Get[TransitiveHydratedTargets](Addresses(
        [dependency_owner.exported_target.hydrated_target.adaptor.address]))
    ownable_targets = [
        tgt for tgt in tht.closure
        if isinstance(tgt.adaptor, (PythonTargetAdaptor, ResourcesAdaptor))
    ]
    owners = await MultiGet(Get[ExportedTarget](OwnedDependency(ht))
                            for ht in ownable_targets)
    owned_dependencies = [
        tgt for owner, tgt in zip(owners, ownable_targets)
        if owner == dependency_owner.exported_target
    ]
    return OwnedDependencies(OwnedDependency(t) for t in owned_dependencies)
Code Example #17
async def run(
    console: Console,
    workspace: Workspace,
    runner: InteractiveRunner,
    build_root: BuildRoot,
    addresses: Addresses,
    options: RunOptions,
) -> Run:
    address = addresses.expect_single()
    binary = await Get[CreatedBinary](Address, address)

    with temporary_dir(root_dir=PurePath(build_root.path,
                                         ".pants.d").as_posix(),
                       cleanup=True) as tmpdir:
        path_relative_to_build_root = PurePath(tmpdir).relative_to(
            build_root.path).as_posix()
        workspace.materialize_directory(
            DirectoryToMaterialize(binary.digest,
                                   path_prefix=path_relative_to_build_root))

        console.write_stdout(f"Running target: {address}\n")
        full_path = PurePath(tmpdir, binary.binary_name).as_posix()
        run_request = InteractiveProcessRequest(
            argv=(full_path, *options.values.args),
            run_in_workspace=True,
        )

        try:
            result = runner.run_local_interactive_process(run_request)
            exit_code = result.process_exit_code
            if result.process_exit_code == 0:
                console.write_stdout(f"{address} ran successfully.\n")
            else:
                console.write_stderr(
                    f"{address} failed with code {result.process_exit_code}!\n"
                )

        except Exception as e:
            console.write_stderr(
                f"Exception when attempting to run {address}: {e!r}\n")
            exit_code = -1

    return Run(exit_code)
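Example #17 creates its temp dir under `<build_root>/.pants.d` precisely so the directory can be re-expressed relative to the build root for materialization. The pathlib arithmetic in isolation (paths are illustrative; PurePosixPath keeps the sketch platform-independent):

from pathlib import PurePosixPath

build_root = "/repo"
tmpdir = f"{build_root}/.pants.d/tmpabc123"  # shape of temporary_dir(root_dir=...)

# Workspace.materialize_directory takes a build-root-relative path_prefix.
path_relative_to_build_root = PurePosixPath(tmpdir).relative_to(build_root).as_posix()
assert path_relative_to_build_root == ".pants.d/tmpabc123"

# The binary is then executed from its absolute path inside the temp dir.
full_path = PurePosixPath(tmpdir, "app.pex").as_posix()
assert full_path == "/repo/.pants.d/tmpabc123/app.pex"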
Code Example #18
File: list_targets_test.py Project: tushar19/pants
def run_goal(
    targets: List[MockTarget],
    *,
    show_documented: bool = False,
    show_provides: bool = False,
    provides_columns: Optional[str] = None,
) -> Tuple[str, str]:
    console = MockConsole(use_colors=False)
    run_rule(
        list_targets,
        rule_args=[
            Addresses(tgt.address for tgt in targets),
            MockOptions(
                documented=show_documented,
                provides=show_provides,
                provides_columns=provides_columns or "address,artifact_id",
            ),
            console,
        ],
        mock_gets=[MockGet(product_type=Targets, subject_type=Addresses, mock=lambda _: targets)],
    )
    return cast(str, console.stdout.getvalue()), cast(str, console.stderr.getvalue())
Code Example #19
File: build_files.py Project: wisechengyi/pants
def strip_address_origins(
        addresses_with_origins: AddressesWithOrigins) -> Addresses:
    return Addresses(address_with_origin.address
                     for address_with_origin in addresses_with_origins)
Code Example #20
File: run_setup_py.py Project: rahuliyer95/pants
async def run_setup_pys(
    targets_with_origins: HydratedTargetsWithOrigins,
    options: SetupPyOptions,
    console: Console,
    python_setup: PythonSetup,
    distdir: DistDir,
    workspace: Workspace,
) -> SetupPy:
    """Run setup.py commands on all exported targets addressed."""
    args = tuple(options.values.args)
    validate_args(args)

    # Get all exported targets, ignoring any non-exported targets that happened to be
    # globbed over, but erroring on any explicitly-requested non-exported targets.

    exported_targets: List[ExportedTarget] = []
    explicit_nonexported_targets: List[HydratedTarget] = []

    for hydrated_target_with_origin in targets_with_origins:
        target = hydrated_target_with_origin.target
        if _is_exported(target):
            exported_targets.append(ExportedTarget(target))
        elif isinstance(hydrated_target_with_origin.origin, SingleAddress):
            explicit_nonexported_targets.append(target)
    if explicit_nonexported_targets:
        raise TargetNotExported(
            "Cannot run setup.py on these targets, because they have no `provides=` clause: "
            f'{", ".join(so.adaptor.address.reference() for so in explicit_nonexported_targets)}'
        )

    if options.values.transitive:
        # Expand out to all owners of the entire dep closure.
        tht = await Get[TransitiveHydratedTargets](Addresses(
            et.hydrated_target.adaptor.address for et in exported_targets))
        owners = await MultiGet(Get[ExportedTarget](OwnedDependency(ht))
                                for ht in tht.closure if is_ownable_target(ht))
        exported_targets = list(set(owners))

    py2 = is_python2(
        (target_with_origin.target.adaptor.compatibility
         for target_with_origin in targets_with_origins
         if isinstance(target_with_origin.target.adaptor, PythonTargetAdaptor)
         ),
        python_setup,
    )
    chroots = await MultiGet(
        Get[SetupPyChroot](SetupPyChrootRequest(target, py2))
        for target in exported_targets)

    # If args were provided, run setup.py with them; otherwise just dump the chroots.
    if args:
        setup_py_results = await MultiGet(
            Get[RunSetupPyResult](RunSetupPyRequest(exported_target, chroot,
                                                    tuple(args)))
            for exported_target, chroot in zip(exported_targets, chroots))

        for exported_target, setup_py_result in zip(exported_targets,
                                                    setup_py_results):
            addr = exported_target.hydrated_target.adaptor.address.reference()
            console.print_stderr(
                f"Writing dist for {addr} under {distdir.relpath}/.")
            workspace.materialize_directory(
                DirectoryToMaterialize(setup_py_result.output,
                                       path_prefix=str(distdir.relpath)))
    else:
        # Just dump the chroot.
        for exported_target, chroot in zip(exported_targets, chroots):
            addr = exported_target.hydrated_target.adaptor.address.reference()
            provides = exported_target.hydrated_target.adaptor.provides
            setup_py_dir = distdir.relpath / f"{provides.name}-{provides.version}"
            console.print_stderr(
                f"Writing setup.py chroot for {addr} to {setup_py_dir}")
            workspace.materialize_directory(
                DirectoryToMaterialize(chroot.digest,
                                       path_prefix=str(setup_py_dir)))

    return SetupPy(0)
Code Example #21
async def setup_pytest_for_target(
    config: PythonTestConfiguration,
    pytest: PyTest,
    test_options: TestOptions,
    python_setup: PythonSetup,
) -> TestTargetSetup:
    # TODO: Rather than consuming the TestOptions subsystem, the TestRunner should pass on coverage
    # configuration via #7490.

    test_addresses = Addresses((config.address,))

    # TODO(John Sirois): PexInterpreterConstraints are gathered in the same way by the
    #  `create_pex_from_target_closure` rule, factor up.
    transitive_targets = await Get[TransitiveTargets](Addresses, test_addresses)
    all_targets = transitive_targets.closure

    # TODO: factor this up? It's mostly duplicated with pex_from_targets.py.
    python_targets = []
    resource_targets = []
    for tgt in all_targets:
        if tgt.has_field(PythonSources):
            python_targets.append(tgt)
        # NB: PythonRequirementsFileSources is a subclass of FilesSources. We filter it out so that
        # requirements.txt is not included in the PEX and so that irrelevant changes to it (e.g.
        # whitespace changes) do not invalidate the PEX.
        if tgt.has_field(ResourcesSources) or (
            tgt.has_field(FilesSources) and not tgt.has_field(PythonRequirementsFileSources)
        ):
            resource_targets.append(tgt)

    interpreter_constraints = PexInterpreterConstraints.create_from_compatibility_fields(
        (tgt.get(PythonInterpreterCompatibility) for tgt in python_targets), python_setup
    )

    # Ensure all pexes we merge via PEX_PATH to form the test runner use the interpreter constraints
    # of the tests. This is handled by CreatePexFromTargetClosure, but we must pass this through for
    # CreatePex requests.
    pex_request = functools.partial(PexRequest, interpreter_constraints=interpreter_constraints)

    # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
    # `importlib_metadata` and thus `zipp`, does not play nicely when doing import magic directly
    # from zip files. `zipp` has pathologically bad behavior with large zipfiles.
    # TODO: this does have a performance cost as the pex must now be expanded to disk. Long term,
    # it would be better to fix Zipp (whose fix would then need to be used by importlib_metadata
    # and then by Pytest). See https://github.com/jaraco/zipp/pull/26.
    additional_args_for_pytest = ("--not-zip-safe",)

    run_coverage = test_options.values.run_coverage
    plugin_file_digest: Optional[Digest] = (
        await Get[Digest](InputFilesContent, COVERAGE_PLUGIN_INPUT) if run_coverage else None
    )

    pytest_pex_request = pex_request(
        output_filename="pytest.pex",
        requirements=PexRequirements(pytest.get_requirement_strings()),
        additional_args=additional_args_for_pytest,
        sources=plugin_file_digest,
    )

    requirements_pex_request = LegacyPexFromTargetsRequest(
        addresses=test_addresses,
        output_filename="requirements.pex",
        include_source_files=False,
        additional_args=additional_args_for_pytest,
    )

    test_runner_pex_request = pex_request(
        output_filename="test_runner.pex",
        entry_point="pytest:main",
        interpreter_constraints=interpreter_constraints,
        additional_args=(
            "--pex-path",
            # TODO(John Sirois): Support shading python binaries:
            #   https://github.com/pantsbuild/pants/issues/9206
            # Right now any pytest transitive requirements will shadow corresponding user
            # requirements which will lead to problems when APIs that are used by either
            # `pytest:main` or the tests themselves break between the two versions.
            ":".join(
                (pytest_pex_request.output_filename, requirements_pex_request.output_filename)
            ),
        ),
    )

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    specified_source_files_request = SpecifiedSourceFilesRequest(
        [(config.sources, config.origin)], strip_source_roots=True
    )

    # TODO(John Sirois): Support exploiting concurrency better:
    #   https://github.com/pantsbuild/pants/issues/9294
    # Some awkward code follows in order to execute 5-6 items concurrently given the current state
    # of MultiGet typing / API. Improve this since we should encourage full concurrency in general.
    requests: List[Get[Any]] = [
        Get[Pex](PexRequest, pytest_pex_request),
        Get[Pex](LegacyPexFromTargetsRequest, requirements_pex_request),
        Get[Pex](PexRequest, test_runner_pex_request),
        Get[ImportablePythonSources](Targets(python_targets + resource_targets)),
        Get[SourceFiles](SpecifiedSourceFilesRequest, specified_source_files_request),
    ]
    if run_coverage:
        # TODO: update coverage to use the Target API. Also, add tests.
        hydrated_python_targets = await Get[HydratedTargets](
            Addresses(tgt.address for tgt in python_targets)
        )
        requests.append(
            Get[Coveragerc](
                CoveragercRequest(HydratedTargets(hydrated_python_targets), test_time=True)
            ),
        )

    (
        pytest_pex,
        requirements_pex,
        test_runner_pex,
        prepared_sources,
        specified_source_files,
        *rest,
    ) = cast(
        Union[
            Tuple[Pex, Pex, Pex, ImportablePythonSources, SourceFiles],
            Tuple[Pex, Pex, Pex, ImportablePythonSources, SourceFiles, Coveragerc],
        ],
        await MultiGet(requests),
    )

    directories_to_merge = [
        prepared_sources.snapshot.directory_digest,
        requirements_pex.directory_digest,
        pytest_pex.directory_digest,
        test_runner_pex.directory_digest,
    ]
    if run_coverage:
        coveragerc = rest[0]
        directories_to_merge.append(coveragerc.digest)

    merged_input_files = await Get[Digest](
        DirectoriesToMerge(directories=tuple(directories_to_merge))
    )

    coverage_args = []
    if run_coverage:
        coverage_args = [
            "--cov-report=",  # To not generate any output. https://pytest-cov.readthedocs.io/en/latest/config.html
        ]
        for package in config.coverage.determine_packages_to_cover(
            specified_source_files=specified_source_files
        ):
            coverage_args.extend(["--cov", package])

    specified_source_file_names = sorted(specified_source_files.snapshot.files)
    return TestTargetSetup(
        test_runner_pex=test_runner_pex,
        args=(*pytest.options.args, *coverage_args, *specified_source_file_names),
        input_files_digest=merged_input_files,
        timeout_seconds=config.timeout.calculate_from_global_options(pytest),
    )
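The heterogeneous `MultiGet` in Example #21 (five or six differently-typed requests gathered at once, with a cast to recover the tuple type) has the same shape as asyncio.gather over mixed coroutines. A self-contained analogy, with made-up fetchers:

import asyncio
from typing import Any, List, Tuple, cast


async def fetch_count() -> int:
    return 42


async def fetch_name() -> str:
    return "pytest.pex"


async def main() -> None:
    # Like `requests: List[Get[Any]]` above: a conditional extra request loses
    # precise typing, so the gathered tuple is cast back after the await.
    coros: List[Any] = [fetch_count(), fetch_name()]
    include_extra = True
    if include_extra:
        coros.append(fetch_name())
    count, name, *rest = cast(
        Tuple[Any, ...], tuple(await asyncio.gather(*coros)))
    assert (count, name) == (42, "pytest.pex")
    assert rest == (["pytest.pex"] if include_extra else [])


asyncio.run(main())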
Code Example #22
File: graph.py Project: neven7/pants
def addresses_from_address_family(address_family):
    """Given an AddressFamily, return an Addresses objects containing all of its `addressables`."""
    return Addresses(tuple(address_family.addressables.keys()))
Code Example #23
File: graph.py Project: neven7/pants
def addresses_from_address_families(address_families):
    """Given a list of AddressFamilies, return an Addresses object containing all addressables."""
    return Addresses(
        tuple(a for af in address_families for a in af.addressables.keys()))
Code Example #24
File: python_test_runner.py Project: revl/pants
async def setup_pytest_for_target(
    adaptor_with_origin: PythonTestsAdaptorWithOrigin,
    pytest: PyTest,
    test_options: TestOptions,
    python_setup: PythonSetup,
) -> TestTargetSetup:
    # TODO: Rather than consuming the TestOptions subsystem, the TestRunner should pass on coverage
    # configuration via #7490.

    adaptor = adaptor_with_origin.adaptor
    test_addresses = Addresses((adaptor.address,))

    # TODO(John Sirois): PexInterpreterConstraints are gathered in the same way by the
    #  `create_pex_from_target_closure` rule, factor up.
    transitive_hydrated_targets = await Get[TransitiveHydratedTargets](Addresses, test_addresses)
    all_targets = transitive_hydrated_targets.closure
    all_target_adaptors = [t.adaptor for t in all_targets]
    interpreter_constraints = PexInterpreterConstraints.create_from_adaptors(
        adaptors=all_target_adaptors, python_setup=python_setup
    )

    # Ensure all pexes we merge via PEX_PATH to form the test runner use the interpreter constraints
    # of the tests. This is handled by CreatePexFromTargetClosure, but we must pass this through for
    # CreatePex requests.
    create_pex = functools.partial(CreatePex, interpreter_constraints=interpreter_constraints)

    # NB: We set `--not-zip-safe` because Pytest plugin discovery, which uses
    # `importlib_metadata` and thus `zipp`, does not play nicely when doing import magic directly
    # from zip files. `zipp` has pathologically bad behavior with large zipfiles.
    # TODO: this does have a performance cost as the pex must now be expanded to disk. Long term,
    # it would be better to fix Zipp (whose fix would then need to be used by importlib_metadata
    # and then by Pytest). See https://github.com/jaraco/zipp/pull/26.
    additional_args_for_pytest = ("--not-zip-safe",)

    run_coverage = test_options.values.run_coverage
    plugin_file_digest: Optional[Digest] = (
        await Get[Digest](InputFilesContent, get_coverage_plugin_input()) if run_coverage else None
    )
    pytest_pex = await Get[Pex](
        CreatePex,
        create_pex(
            output_filename="pytest.pex",
            requirements=PexRequirements(pytest.get_requirement_strings()),
            additional_args=additional_args_for_pytest,
            input_files_digest=plugin_file_digest,
        ),
    )

    requirements_pex = await Get[Pex](
        CreatePexFromTargetClosure(
            addresses=test_addresses,
            output_filename="requirements.pex",
            include_source_files=False,
            additional_args=additional_args_for_pytest,
        )
    )

    test_runner_pex = await Get[Pex](
        CreatePex,
        create_pex(
            output_filename="test_runner.pex",
            entry_point="pytest:main",
            interpreter_constraints=interpreter_constraints,
            additional_args=(
                "--pex-path",
                ":".join(
                    pex_request.output_filename
                    # TODO(John Sirois): Support shading python binaries:
                    #   https://github.com/pantsbuild/pants/issues/9206
                    # Right now any pytest transitive requirements will shadow corresponding user
                    # requirements which will lead to problems when APIs that are used by either
                    # `pytest:main` or the tests themselves break between the two versions.
                    for pex_request in (pytest_pex, requirements_pex)
                ),
            ),
        ),
    )

    chrooted_sources = await Get[ChrootedPythonSources](HydratedTargets(all_targets))
    directories_to_merge = [
        chrooted_sources.snapshot.directory_digest,
        requirements_pex.directory_digest,
        pytest_pex.directory_digest,
        test_runner_pex.directory_digest,
    ]

    # Get the file names for the test_target so that we can specify to Pytest precisely which files
    # to test, rather than using auto-discovery.
    specified_source_files = await Get[SourceFiles](
        SpecifiedSourceFilesRequest([adaptor_with_origin], strip_source_roots=True)
    )
    specified_source_file_names = specified_source_files.snapshot.files

    coverage_args = []
    if run_coverage:
        coveragerc = await Get[Coveragerc](
            CoveragercRequest(HydratedTargets(all_targets), test_time=True)
        )
        directories_to_merge.append(coveragerc.digest)
        packages_to_cover = get_packages_to_cover(
            target=adaptor, specified_source_files=specified_source_files,
        )
        coverage_args = [
            "--cov-report=",  # To not generate any output. https://pytest-cov.readthedocs.io/en/latest/config.html
        ]
        for package in packages_to_cover:
            coverage_args.extend(["--cov", package])
    merged_input_files = await Get[Digest](
        DirectoriesToMerge(directories=tuple(directories_to_merge))
    )

    timeout_seconds = calculate_timeout_seconds(
        timeouts_enabled=pytest.options.timeouts,
        target_timeout=getattr(adaptor, "timeout", None),
        timeout_default=pytest.options.timeout_default,
        timeout_maximum=pytest.options.timeout_maximum,
    )

    return TestTargetSetup(
        test_runner_pex=test_runner_pex,
        args=(*pytest.options.args, *coverage_args, *sorted(specified_source_file_names)),
        input_files_digest=merged_input_files,
        timeout_seconds=timeout_seconds,
    )
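Example #24 closes with `calculate_timeout_seconds`, which Example #21 has since folded into the configuration object. The clamping semantics it suggests, as a hedged standalone sketch (my reconstruction, not the Pants source):

from typing import Optional


def calculate_timeout_seconds(
    *,
    timeouts_enabled: bool,
    target_timeout: Optional[int],
    timeout_default: Optional[int],
    timeout_maximum: Optional[int],
) -> Optional[int]:
    """Pick the target's timeout (or the default), clamped to the maximum."""
    if not timeouts_enabled:
        return None
    timeout = target_timeout if target_timeout is not None else timeout_default
    if timeout is None:
        return None
    if timeout_maximum is not None:
        return min(timeout, timeout_maximum)
    return timeout


assert calculate_timeout_seconds(
    timeouts_enabled=True, target_timeout=600,
    timeout_default=60, timeout_maximum=300) == 300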