Code example #1
async def get_helm_chart(request: HelmChartRequest,
                         subsystem: HelmSubsystem) -> HelmChart:
    dependencies, source_files, metadata = await MultiGet(
        Get(Targets, DependenciesRequest(request.field_set.dependencies)),
        Get(
            HelmChartSourceFiles,
            HelmChartSourceFilesRequest,
            HelmChartSourceFilesRequest.for_field_set(
                request.field_set,
                include_metadata=False,
                include_resources=True,
                include_files=True,
            ),
        ),
        Get(HelmChartMetadata, HelmChartMetaSourceField,
            request.field_set.chart),
    )

    third_party_artifacts = await Get(
        FetchedHelmArtifacts,
        FetchHelmArfifactsRequest,
        FetchHelmArfifactsRequest.for_targets(
            dependencies,
            description_of_origin=request.field_set.address.spec),
    )

    first_party_subcharts = await MultiGet(
        Get(HelmChart, HelmChartRequest, HelmChartRequest.from_target(target))
        for target in dependencies if HelmChartFieldSet.is_applicable(target))
    third_party_charts = await MultiGet(
        Get(HelmChart, FetchedHelmArtifact, artifact)
        for artifact in third_party_artifacts)

    subcharts = [*first_party_subcharts, *third_party_charts]
    subcharts_digest = EMPTY_DIGEST
    if subcharts:
        logger.debug(
            f"Found {pluralize(len(subcharts), 'subchart')} as direct dependencies on Helm chart at: {request.field_set.address}"
        )

        merged_subcharts = await Get(
            Digest,
            MergeDigests([chart.snapshot.digest for chart in subcharts]))
        subcharts_digest = await Get(Digest,
                                     AddPrefix(merged_subcharts, "charts"))

        # Update subchart dependencies in the metadata and re-render it.
        remotes = subsystem.remotes()
        subchart_map: dict[str, HelmChart] = {
            chart.metadata.name: chart
            for chart in subcharts
        }
        updated_dependencies: OrderedSet[HelmChartDependency] = OrderedSet()
        for dep in metadata.dependencies:
            updated_dep = dep

            if not dep.repository and remotes.default_registry:
                # If the dependency hasn't specified a repository, then we choose the registry with the 'default' alias.
                default_remote = remotes.default_registry
                updated_dep = dataclasses.replace(
                    updated_dep, repository=default_remote.address)
            elif dep.repository and dep.repository.startswith("@"):
                remote = next(remotes.get(dep.repository))
                updated_dep = dataclasses.replace(updated_dep,
                                                  repository=remote.address)

            if dep.name in subchart_map:
                updated_dep = dataclasses.replace(
                    updated_dep,
                    version=subchart_map[dep.name].metadata.version)

            updated_dependencies.add(updated_dep)

        # Include the explicitly provided subcharts in the set of dependencies if not already present.
        updated_dependencies_names = {dep.name for dep in updated_dependencies}
        remaining_subcharts = [
            chart for chart in subcharts
            if chart.metadata.name not in updated_dependencies_names
        ]
        for chart in remaining_subcharts:
            if chart.artifact:
                dependency = HelmChartDependency(
                    name=chart.artifact.name,
                    version=chart.artifact.version,
                    repository=chart.artifact.location_url,
                )
            else:
                dependency = HelmChartDependency(
                    name=chart.metadata.name, version=chart.metadata.version)
            updated_dependencies.add(dependency)

        # Update metadata with the information about charts' dependencies.
        metadata = dataclasses.replace(
            metadata, dependencies=tuple(updated_dependencies))

    # Re-render the Chart.yaml file with the updated dependencies.
    metadata_digest, sources_without_metadata = await MultiGet(
        Get(Digest, HelmChartMetadata, metadata),
        Get(
            Digest,
            DigestSubset(
                source_files.snapshot.digest,
                PathGlobs([
                    "**/*", *(f"!**/{filename}"
                              for filename in HELM_CHART_METADATA_FILENAMES)
                ]),
            ),
        ),
    )

    # Merge all the digests that make up the chart's content.
    content_digest = await Get(
        Digest,
        MergeDigests(
            [metadata_digest, sources_without_metadata, subcharts_digest]))

    chart_snapshot = await Get(Snapshot,
                               AddPrefix(content_digest, metadata.name))
    return HelmChart(address=request.field_set.address,
                     metadata=metadata,
                     snapshot=chart_snapshot)
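
The rule above accumulates `HelmChartDependency` values in an `OrderedSet` so the re-rendered `Chart.yaml` lists dependencies in a stable, duplicate-free order. A minimal sketch of that pattern, assuming Pants' `pants.util.ordered_set` module and a hypothetical frozen dataclass standing in for `HelmChartDependency`:

from dataclasses import dataclass

from pants.util.ordered_set import OrderedSet


@dataclass(frozen=True)
class Dep:  # hypothetical stand-in; OrderedSet members must be hashable
    name: str
    version: str


deps = OrderedSet([Dep("redis", "1.0"), Dep("postgres", "2.0")])
deps.add(Dep("redis", "1.0"))  # duplicate: ignored, original order kept
print([d.name for d in deps])  # ['redis', 'postgres']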
Code example #2
        def all_imports(self):
            """Return all imports for this package, including any test imports.

            :rtype: list of string
            """
            return list(OrderedSet(self.imports + self.test_imports + self.x_test_imports))
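
A quick sketch of the property this helper relies on (assuming Pants' `pants.util.ordered_set` module, from which these snippets draw `OrderedSet`): it de-duplicates while keeping first-seen order, so the combined import list is deterministic.

from pants.util.ordered_set import OrderedSet

imports = ["fmt", "os"]
test_imports = ["os", "testing"]
x_test_imports = ["testing", "strings"]

# Duplicates collapse to their first occurrence.
print(list(OrderedSet(imports + test_imports + x_test_imports)))
# ['fmt', 'os', 'testing', 'strings']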
Code example #3
 def collector(dep):
     return OrderedSet([dep])
Code example #4
    def split_args(self, args: Sequence[str]) -> SplitArgs:
        """Split the specified arg list (or sys.argv if unspecified).

        args[0] is ignored.

        Returns a SplitArgs tuple.
        """
        goals: OrderedSet[str] = OrderedSet()
        scope_to_flags: Dict[str, List[str]] = {}

        def add_scope(s: str) -> None:
            # Force the scope to appear, even if empty.
            if s not in scope_to_flags:
                scope_to_flags[s] = []

        specs = []
        passthru = []

        self._unconsumed_args = list(reversed(args))
        # The first token is the binary name, so skip it.
        self._unconsumed_args.pop()

        def assign_flag_to_scope(flg: str, default_scope: str) -> None:
            flag_scope, descoped_flag = self._descope_flag(
                flg, default_scope=default_scope)
            if flag_scope not in scope_to_flags:
                scope_to_flags[flag_scope] = []
            scope_to_flags[flag_scope].append(descoped_flag)

        global_flags = self._consume_flags()

        add_scope(GLOBAL_SCOPE)
        for flag in global_flags:
            assign_flag_to_scope(flag, GLOBAL_SCOPE)
        scope, flags = self._consume_scope()
        while scope:
            if not self._check_for_help_request(scope.lower()):
                add_scope(scope)
                goals.add(scope.partition(".")[0])
                for flag in flags:
                    assign_flag_to_scope(flag, scope)
            scope, flags = self._consume_scope()

        while self._unconsumed_args and not self._at_double_dash():
            arg = self._unconsumed_args.pop()
            if arg.startswith("-"):
                # We assume any args here are in global scope.
                if not self._check_for_help_request(arg):
                    assign_flag_to_scope(arg, GLOBAL_SCOPE)
            elif self.likely_a_spec(arg):
                specs.append(arg)
            elif arg not in self._known_scopes:
                self._unknown_scopes.append(arg)

        if self._at_double_dash():
            self._unconsumed_args.pop()
            passthru = list(reversed(self._unconsumed_args))

        if self._unknown_scopes:
            self._help_request = UnknownGoalHelp(tuple(self._unknown_scopes))

        if not goals and not self._help_request:
            self._help_request = NoGoalHelp()

        if isinstance(self._help_request, OptionsHelp):
            self._help_request = dataclasses.replace(self._help_request,
                                                     scopes=tuple(goals))
        return SplitArgs(
            goals=list(goals),
            scope_to_flags=scope_to_flags,
            specs=specs,
            passthru=passthru,
            unknown_scopes=self._unknown_scopes,
        )
Code example #5
File: java_thrifty_gen.py Project: wisechengyi/pants
 def synthetic_target_extra_dependencies(self, target, target_workdir):
     deps = OrderedSet(
         self.resolve_deps([self.get_options().thrifty_runtime]))
     deps.update(target.dependencies)
     return deps
Code example #6
 def synthetic_target_extra_dependencies(self, target, target_workdir):
     deps = OrderedSet(self._thrift_dependencies_for_target(target))
     deps.update(target.dependencies)
     return deps
Code example #7
    def _compute_missing_deps(self, src_tgt, actual_deps):
        """Computes deps that are used by the compiler but not specified in a BUILD file.

        These deps are bugs waiting to happen: the code may happen to compile because the dep was
        brought in some other way (e.g., by some other root target), but that is obviously fragile.

        Note that in practice we're OK with reliance on indirect deps that are only brought in
        transitively. E.g., in Scala, type inference can bring in such a dep subtly. Fortunately these
        cases aren't as fragile as a completely missing dependency. It's still a good idea to have
        explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
        easy to find and reason about.

        - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
          compiler.

        Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:

        - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
          to map to a target (because its target isn't in the total set of targets in play,
          and we don't want to parse every BUILD file in the workspace just to find it).

        - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
                                   on dep_tgt but has a transitive dep on it.

        All paths in the input and output are absolute.
        """
        analyzer = self._analyzer

        def must_be_explicit_dep(dep):
            # We don't require explicit deps on the java runtime, so we shouldn't consider that
            # a missing dep.
            return dep not in analyzer.bootstrap_jar_classfiles and not dep.startswith(
                self._distribution.real_home
            )

        def target_or_java_dep_in_targets(target, targets):
            # We want to check if the target is in the targets collection
            #
            # However, for the special case of scala_library that has a java_sources
            # reference we're ok if that exists in targets even if the scala_library does not.

            if target in targets:
                return True
            elif isinstance(target, ScalaLibrary):
                return any(t in targets for t in target.java_sources)
            else:
                return False

        # Find deps that are actual but not specified.
        missing_file_deps = OrderedSet()  # (src, src).
        missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.

        targets_by_file = analyzer.targets_by_file(self.context.targets())
        for actual_dep in filter(must_be_explicit_dep, actual_deps):
            actual_dep_tgts = targets_by_file.get(actual_dep)
            # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
            # to be in our declared deps to be OK.
            if actual_dep_tgts is None:
                missing_file_deps.add((src_tgt, actual_dep))
            elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
                # Obviously intra-target deps are fine.
                canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
                if canonical_actual_dep_tgt not in src_tgt.dependencies:
                    # The canonical dep is the only one a direct dependency makes sense on.
                    # TODO: Get rid of src usage here. We don't have a way to map class
                    # files back to source files when using jdeps. I think we can get away without
                    # listing the src file directly and just list the target which has the transitive
                    # dep.
                    missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                        (src_tgt, actual_dep)
                    )

        return (list(missing_file_deps), list(missing_direct_tgt_deps_map.items()))
Code example #8
File: config_test.py Project: patricklaw/pants
 def test_sections(self) -> None:
     expected_sections = OrderedSet(
         [*FILE_2.expected_options.keys(), *FILE_1.expected_options.keys()])
     assert self.config.sections() == list(expected_sections)
     for section in expected_sections:
         assert self.config.has_section(section) is True
Code example #9
async def addresses_from_raw_specs_without_file_owners(
    specs: RawSpecsWithoutFileOwners,
    build_file_options: BuildFileOptions,
    specs_filter: SpecsFilter,
) -> Addresses:
    matched_addresses: OrderedSet[Address] = OrderedSet()
    filtering_disabled = specs.filter_by_global_options is False

    literal_wrapped_targets = await _determine_literal_addresses_from_raw_specs(
        specs.address_literals,
        description_of_origin=specs.description_of_origin)
    matched_addresses.update(
        wrapped_tgt.target.address for wrapped_tgt in literal_wrapped_targets
        if filtering_disabled or specs_filter.matches(wrapped_tgt.target))
    if not (specs.dir_literals or specs.dir_globs or specs.recursive_globs
            or specs.ancestor_globs):
        return Addresses(matched_addresses)

    # Resolve all globs.
    build_file_globs, validation_globs = specs.to_build_file_path_globs_tuple(
        build_patterns=build_file_options.patterns,
        build_ignore_patterns=build_file_options.ignores,
    )
    build_file_paths, _ = await MultiGet(
        Get(Paths, PathGlobs, build_file_globs),
        Get(Paths, PathGlobs, validation_globs),
    )

    dirnames = {os.path.dirname(f) for f in build_file_paths.files}
    address_families = await MultiGet(
        Get(AddressFamily, AddressFamilyDir(d)) for d in dirnames)
    base_addresses = Addresses(
        itertools.chain.from_iterable(
            address_family.addresses_to_target_adaptors
            for address_family in address_families))

    target_parametrizations_list = await MultiGet(
        Get(
            _TargetParametrizations,
            _TargetParametrizationsRequest(
                base_address,
                description_of_origin=specs.description_of_origin),
        ) for base_address in base_addresses)
    residence_dir_to_targets = defaultdict(list)
    for target_parametrizations in target_parametrizations_list:
        for tgt in target_parametrizations.all:
            residence_dir_to_targets[tgt.residence_dir].append(tgt)

    def valid_tgt(
        tgt: Target, spec: DirLiteralSpec | DirGlobSpec | RecursiveGlobSpec
        | AncestorGlobSpec
    ) -> bool:
        if not spec.matches_target_generators and isinstance(
                tgt, TargetGenerator):
            return False
        return filtering_disabled or specs_filter.matches(tgt)

    for glob_spec in specs.glob_specs():
        for residence_dir in residence_dir_to_targets:
            if not glob_spec.matches_target_residence_dir(residence_dir):
                continue
            matched_addresses.update(
                tgt.address for tgt in residence_dir_to_targets[residence_dir]
                if valid_tgt(tgt, glob_spec))

    return Addresses(sorted(matched_addresses))
Code example #10
File: external_module.py Project: patricklaw/pants
async def resolve_external_module_to_go_packages(
    request: ResolveExternalGoModuleToPackagesRequest,
) -> ResolveExternalGoModuleToPackagesResult:
    module_path = request.path
    assert module_path
    module_version = request.version
    assert module_version

    downloaded_module = await Get(
        DownloadedExternalModule,
        DownloadExternalModuleRequest(path=module_path,
                                      version=module_version),
    )
    sources_digest = await Get(
        Digest, AddPrefix(downloaded_module.digest, "__sources__"))

    # TODO: Super hacky merge of go.sum from both digests. We should really just pass in the fully-resolved
    # go.sum and use that, but this allows the go.sum from the downloaded module to have some effect. Not sure
    # if that is the right call, but hackity hack!
    left_digest_contents = await Get(DigestContents, Digest, sources_digest)
    left_go_sum_contents = b""
    for fc in left_digest_contents:
        if fc.path == "__sources__/go.sum":
            left_go_sum_contents = fc.content
            break

    go_sum_only_digest = await Get(
        Digest, DigestSubset(request.go_sum_digest, PathGlobs(["go.sum"])))
    go_sum_prefixed_digest = await Get(
        Digest, AddPrefix(go_sum_only_digest, "__sources__"))
    right_digest_contents = await Get(DigestContents, Digest,
                                      go_sum_prefixed_digest)
    right_go_sum_contents = b""
    for fc in right_digest_contents:
        if fc.path == "__sources__/go.sum":
            right_go_sum_contents = fc.content
            break
    go_sum_contents = left_go_sum_contents + b"\n" + right_go_sum_contents
    go_sum_digest = await Get(
        Digest,
        CreateDigest([
            FileContent(
                path="__sources__/go.sum",
                content=go_sum_contents,
            )
        ]),
    )

    sources_digest_no_go_sum = await Get(
        Digest,
        DigestSubset(
            sources_digest,
            PathGlobs(
                ["!__sources__/go.sum", "__sources__/**"],
                conjunction=GlobExpansionConjunction.all_match,
                glob_match_error_behavior=GlobMatchErrorBehavior.error,
                description_of_origin="FUNKY",
            ),
        ),
    )

    input_digest = await Get(
        Digest, MergeDigests([sources_digest_no_go_sum, go_sum_digest]))

    result = await Get(
        ProcessResult,
        GoSdkProcess(
            input_digest=input_digest,
            command=("list", "-json", "./..."),
            working_dir="__sources__",
            description=
            f"Resolve packages in Go external module {module_path}@{module_version}",
        ),
    )

    packages: OrderedSet[ResolvedGoPackage] = OrderedSet()
    for metadata in ijson.items(result.stdout, "", multiple_values=True):
        package = ResolvedGoPackage.from_metadata(
            metadata, module_path=module_path, module_version=module_version)
        packages.add(package)

    return ResolveExternalGoModuleToPackagesResult(
        packages=FrozenOrderedSet(packages))
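
The result is wrapped in `FrozenOrderedSet`, the immutable counterpart of `OrderedSet`. A small sketch under the same module assumption; because it is hashable, it can be stored on the frozen dataclasses that rules pass around:

from pants.util.ordered_set import FrozenOrderedSet

packages = FrozenOrderedSet(["pkg/b", "pkg/a", "pkg/b"])
print(list(packages))                   # ['pkg/b', 'pkg/a']
print(isinstance(hash(packages), int))  # True: hashable, unlike the mutable OrderedSet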
Code example #11
def test_clear() -> None:
    set1 = OrderedSet("abracadabra")
    set1.clear()

    assert len(set1) == 0
    assert set1 == OrderedSet()
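
For reference, a sketch of what OrderedSet("abracadabra") holds before clear() (same assumption): the string is consumed as an iterable of characters and de-duplicated in first-seen order.

from pants.util.ordered_set import OrderedSet

set1 = OrderedSet("abracadabra")
print(list(set1))  # ['a', 'b', 'r', 'c', 'd']
print(len(set1))   # 5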
Code example #12
File: hash_utils_test.py Project: codealchemy/pants
 def test_rejects_ordered_collections(self):
     with pytest.raises(TypeError, match=r"CoercingEncoder does not support OrderedDict inputs"):
         json_hash(OrderedDict([("a", 3)]))
     with pytest.raises(TypeError, match=r"CoercingEncoder does not support OrderedSet inputs"):
         json_hash(OrderedSet([3]))
Code example #13
def test_tuples(cls: OrderedSetCls) -> None:
    tup = ("tuple", 1)
    set1 = OrderedSet([tup])
    assert set1.index(tup) == 0
    assert set1[0] == tup
Code example #14
    def split_args(self, args: Sequence[str]) -> SplitArgs:
        """Split the specified arg list (or sys.argv if unspecified).

        args[0] is ignored.

        Returns a SplitArgs tuple.
        """
        goals: OrderedSet[str] = OrderedSet()
        scope_to_flags: DefaultDict[str, list[str]] = defaultdict(list)
        specs: list[str] = []
        passthru: list[str] = []
        unknown_scopes: list[str] = []
        builtin_goal: str | None = None

        def add_scope(s: str) -> None:
            # Force the scope to appear, even if empty.
            if s not in scope_to_flags:
                scope_to_flags[s] = []

        def add_goal(scope: str) -> str:
            """Returns the scope name to assign flags to."""
            scope_info = self._known_goal_scopes.get(scope)
            if not scope_info:
                unknown_scopes.append(scope)
                add_scope(scope)
                return scope

            nonlocal builtin_goal
            if scope_info.is_builtin and not builtin_goal:
                # Get scope from info in case we hit an aliased builtin goal.
                builtin_goal = scope_info.scope
            else:
                goals.add(scope_info.scope)
            add_scope(scope_info.scope)

            # Use builtin goal as default scope for args.
            return builtin_goal or scope_info.scope

        self._unconsumed_args = list(reversed(args))
        # The first token is the binary name, so skip it.
        self._unconsumed_args.pop()

        def assign_flag_to_scope(flg: str, default_scope: str) -> None:
            flag_scope, descoped_flag = self._descope_flag(
                flg, default_scope=default_scope)
            scope_to_flags[flag_scope].append(descoped_flag)

        global_flags = self._consume_flags()
        add_scope(GLOBAL_SCOPE)
        for flag in global_flags:
            assign_flag_to_scope(flag, GLOBAL_SCOPE)

        scope, flags = self._consume_scope()
        while scope:
            # `add_goal` returns the currently active scope to assign flags to.
            scope = add_goal(scope)
            for flag in flags:
                assign_flag_to_scope(
                    flag,
                    GLOBAL_SCOPE if self.is_level_short_arg(flag) else scope)
            scope, flags = self._consume_scope()

        while self._unconsumed_args and not self._at_standalone_double_dash():
            if self._at_flag():
                arg = self._unconsumed_args.pop()
                # We assume any args here are in global scope.
                assign_flag_to_scope(arg, GLOBAL_SCOPE)
                continue

            arg = self._unconsumed_args.pop()
            if self.likely_a_spec(arg):
                specs.append(arg)
            else:
                add_goal(arg)

        if not builtin_goal:
            if unknown_scopes and UNKNOWN_GOAL_NAME in self._known_goal_scopes:
                builtin_goal = UNKNOWN_GOAL_NAME
            elif not goals and NO_GOAL_NAME in self._known_goal_scopes:
                builtin_goal = NO_GOAL_NAME

        if self._at_standalone_double_dash():
            self._unconsumed_args.pop()
            passthru = list(reversed(self._unconsumed_args))

        for goal in goals:
            si = self._known_goal_scopes[goal]
            if (si.deprecated_scope and goal == si.deprecated_scope
                    and si.subsystem_cls
                    and si.deprecated_scope_removal_version):
                warn_or_error(
                    si.deprecated_scope_removal_version,
                    f"the {si.deprecated_scope} goal",
                    f"The {si.deprecated_scope} goal was renamed to {si.subsystem_cls.options_scope}",
                )

        return SplitArgs(
            builtin_goal=builtin_goal,
            goals=list(goals),
            unknown_goals=unknown_scopes,
            scope_to_flags=dict(scope_to_flags),
            specs=specs,
            passthru=passthru,
        )
Code example #15
async def find_owners(owners_request: OwnersRequest) -> Owners:
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))

    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources
                                     if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)

    def create_live_and_deleted_gets(
        *, filter_by_global_options: bool
    ) -> tuple[Get[FilteredTargets | Targets, RawSpecsWithoutFileOwners], Get[
            UnexpandedTargets, RawSpecsWithoutFileOwners], ]:
        """Walk up the buildroot looking for targets that would conceivably claim changed sources.

        For live files, we use Targets, which causes generated targets to be used rather than their
        target generators. For deleted files we use UnexpandedTargets, which have the original
        declared `sources` globs from target generators.

        We ignore unrecognized files, which can happen e.g. when finding owners for deleted files.
        """
        live_raw_specs = RawSpecsWithoutFileOwners(
            ancestor_globs=tuple(
                AncestorGlobSpec(directory=d) for d in live_dirs),
            filter_by_global_options=filter_by_global_options,
            description_of_origin="<owners rule - unused>",
            unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
        )
        live_get: Get[FilteredTargets | Targets, RawSpecsWithoutFileOwners] = (
            Get(FilteredTargets, RawSpecsWithoutFileOwners,
                live_raw_specs) if filter_by_global_options else Get(
                    Targets, RawSpecsWithoutFileOwners, live_raw_specs))
        deleted_get = Get(
            UnexpandedTargets,
            RawSpecsWithoutFileOwners(
                ancestor_globs=tuple(
                    AncestorGlobSpec(directory=d) for d in deleted_dirs),
                filter_by_global_options=filter_by_global_options,
                description_of_origin="<owners rule - unused>",
                unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
            ),
        )
        return live_get, deleted_get

    live_get, deleted_get = create_live_and_deleted_gets(
        filter_by_global_options=owners_request.filter_by_global_options)
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(
        live_get, deleted_get)

    matching_addresses: OrderedSet[Address] = OrderedSet()
    unmatched_sources = set(owners_request.sources)
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files

        build_file_addresses = await MultiGet(
            Get(
                BuildFileAddress,
                BuildFileAddressRequest(
                    tgt.address,
                    description_of_origin="<owners rule - cannot trigger>"),
            ) for tgt in candidate_tgts)

        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            matching_files = set(
                matches_filespec(candidate_tgt.get(SourcesField).filespec,
                                 paths=sources_set))
            # Also consider secondary ownership, meaning it's not a `SourcesField` field with
            # primary ownership, but the target still should match the file. We can't use
            # `tgt.get()` because this is a mixin, and there technically may be >1 field.
            secondary_owner_fields = tuple(
                field for field in candidate_tgt.field_values.values()
                if isinstance(field, SecondaryOwnerMixin))
            for secondary_owner_field in secondary_owner_fields:
                matching_files.update(
                    matches_filespec(secondary_owner_field.filespec,
                                     paths=sources_set))
            if not matching_files and bfa.rel_path not in sources_set:
                continue

            unmatched_sources -= matching_files
            matching_addresses.add(candidate_tgt.address)

    if (unmatched_sources and owners_request.owners_not_found_behavior !=
            OwnersNotFoundBehavior.ignore):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources],
            owners_request.owners_not_found_behavior)

    return Owners(matching_addresses)
Code example #16
async def resolve_specs_paths(specs: Specs) -> SpecsPaths:
    """Resolve all files matching the given specs.

    All matched targets will use their `sources` field. Certain specs like FileLiteralSpec will
    also match against all their files, regardless of whether a target owns them.

    Ignores win out over includes, with these edge cases:

    * Ignored paths: the resolved paths should be excluded.
    * Ignored targets: their `sources` should be excluded.
    * File owned by a target that gets filtered out, e.g. via `--tag`. See
      https://github.com/pantsbuild/pants/issues/15478.
    """

    unfiltered_include_targets, ignore_targets, include_paths, ignore_paths = await MultiGet(
        Get(
            Targets, RawSpecs,
            dataclasses.replace(specs.includes,
                                filter_by_global_options=False)),
        Get(Targets, RawSpecs, specs.ignores),
        Get(Paths, PathGlobs, specs.includes.to_specs_paths_path_globs()),
        Get(Paths, PathGlobs, specs.ignores.to_specs_paths_path_globs()),
    )

    filtered_include_targets = await Get(FilteredTargets, Targets,
                                         unfiltered_include_targets)
    include_targets_sources_paths = await MultiGet(
        Get(SourcesPaths, SourcesPathsRequest(tgt[SourcesField]))
        for tgt in filtered_include_targets if tgt.has_field(SourcesField))

    ignore_targets_sources_paths = await MultiGet(
        Get(SourcesPaths, SourcesPathsRequest(tgt[SourcesField]))
        for tgt in ignore_targets if tgt.has_field(SourcesField))

    result_paths = OrderedSet(
        itertools.chain.from_iterable(
            paths.files for paths in include_targets_sources_paths), )
    result_paths.update(include_paths.files)
    result_paths.difference_update(
        itertools.chain.from_iterable(
            paths.files for paths in ignore_targets_sources_paths))
    result_paths.difference_update(ignore_paths.files)

    # If include paths were given, we need to also remove any paths from filtered out targets
    # (e.g. via `--tag`), per https://github.com/pantsbuild/pants/issues/15478.
    if include_paths.files:
        filtered_out_include_targets = FrozenOrderedSet(
            unfiltered_include_targets).difference(
                FrozenOrderedSet(filtered_include_targets))
        filtered_include_targets_sources_paths = await MultiGet(
            Get(SourcesPaths, SourcesPathsRequest(tgt[SourcesField]))
            for tgt in filtered_out_include_targets
            if tgt.has_field(SourcesField))
        result_paths.difference_update(
            itertools.chain.from_iterable(
                paths.files
                for paths in filtered_include_targets_sources_paths))

    dirs = OrderedSet(
        itertools.chain.from_iterable(
            recursive_dirname(os.path.dirname(f))
            for f in result_paths)) - {""}
    return SpecsPaths(tuple(sorted(result_paths)), tuple(sorted(dirs)))
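
This rule leans on `OrderedSet`'s set-algebra operations. A minimal sketch (same assumption): `update`, `difference_update`, and the `-` operator mirror the built-in `set` API while keeping deterministic iteration order.

from pants.util.ordered_set import OrderedSet

result_paths = OrderedSet(["a.py", "b.py"])
result_paths.update(["c.py", "a.py"])     # extend, ignoring duplicates
result_paths.difference_update(["b.py"])  # drop ignored paths
print(list(result_paths))                 # ['a.py', 'c.py']
print(list(OrderedSet(["src", ""]) - {""}))  # ['src']: set difference works too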
Code example #17
 def __init__(self, binary_name: str, paths: Iterable[BinaryPath] | None = None):
     self.binary_name = binary_name
     self.paths = tuple(OrderedSet(paths) if paths else ())
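
A sketch of the constructor idiom above (same assumption): round-tripping through `OrderedSet` before `tuple()` drops accidental duplicates while preserving the caller's ordering.

from pants.util.ordered_set import OrderedSet

paths = ["/usr/bin/python3", "/usr/local/bin/python3", "/usr/bin/python3"]
print(tuple(OrderedSet(paths)))
# ('/usr/bin/python3', '/usr/local/bin/python3')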
Code example #18
File: rules.py Project: briespoke/pants
 def add_task(product_type, rule):
     # TODO(#7311): make a defaultdict-like wrapper for OrderedDict if more widely used.
     if product_type not in serializable_rules:
         serializable_rules[product_type] = OrderedSet()
     serializable_rules[product_type].add(rule)
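
The TODO above could plausibly be addressed with `collections.defaultdict`; a hypothetical sketch, not the project's actual fix:

from collections import defaultdict

from pants.util.ordered_set import OrderedSet

serializable_rules = defaultdict(OrderedSet)

def add_task(product_type, rule):
    # defaultdict materializes the OrderedSet on first access,
    # removing the explicit membership check from the original.
    serializable_rules[product_type].add(rule)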
Code example #19
async def find_owners(owners_request: OwnersRequest) -> Owners:
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))

    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources
                                     if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)

    # Walk up the buildroot looking for targets that would conceivably claim changed sources.
    # For live files, we use expanded Targets, which have file level precision but which are
    # only created for existing files. For deleted files we use UnexpandedTargets, which have
    # the original declared glob.
    live_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in live_dirs)
    deleted_candidate_specs = tuple(
        AscendantAddresses(directory=d) for d in deleted_dirs)
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(
        Get(Targets, AddressSpecs(live_candidate_specs)),
        Get(UnexpandedTargets, AddressSpecs(deleted_candidate_specs)),
    )

    matching_addresses: OrderedSet[Address] = OrderedSet()
    unmatched_sources = set(owners_request.sources)
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files

        build_file_addresses = await MultiGet(
            Get(BuildFileAddress, Address, tgt.address)
            for tgt in candidate_tgts)

        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            matching_files = set(
                matches_filespec(candidate_tgt.get(Sources).filespec,
                                 paths=sources_set))
            # Also consider secondary ownership, meaning it's not a `Sources` field with primary
            # ownership, but the target still should match the file. We can't use `tgt.get()`
            # because this is a mixin, and there technically may be >1 field.
            secondary_owner_fields = tuple(
                field  # type: ignore[misc]
                for field in candidate_tgt.field_values.values()
                if isinstance(field, SecondaryOwnerMixin))
            for secondary_owner_field in secondary_owner_fields:
                matching_files.update(
                    matches_filespec(secondary_owner_field.filespec,
                                     paths=sources_set))
            if not matching_files and bfa.rel_path not in sources_set:
                continue

            unmatched_sources -= matching_files
            matching_addresses.add(candidate_tgt.address)

    if (unmatched_sources and owners_request.owners_not_found_behavior !=
            OwnersNotFoundBehavior.ignore):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources],
            owners_request.owners_not_found_behavior)

    return Owners(matching_addresses)
Code example #20
 def synthetic_target_extra_dependencies(self, target, target_workdir):
     deps = OrderedSet()
     deps.update(self.javadeps)
     return deps
Code example #21
File: junit_run.py Project: wisechengyi/pants
    def run_tests(self, fail_fast, test_targets, output_dir, coverage, complete_test_registry):
        test_registry = complete_test_registry.filter(test_targets)
        if test_registry.empty:
            return TestResult.successful

        coverage.instrument(output_dir)

        def parse_error_handler(parse_error):
            # Just log and move on since the result is only used to characterize failures, and raising
            # an error here would just distract from the underlying test failures.
            self.context.log.error(
                "Error parsing test result file {path}: {cause}".format(
                    path=parse_error.xml_path, cause=parse_error.cause
                )
            )

        # The 'instrument_classpath' product below will be `None` if not set, and we'll default
        # back to runtime_classpath.
        classpath_product = self.context.products.get_data("instrument_classpath")

        result = 0
        for batch_id, (properties, batch) in enumerate(self._iter_batches(test_registry)):
            (
                workdir,
                platform,
                target_jvm_options,
                target_env_vars,
                concurrency,
                threads,
            ) = properties

            batch_output_dir = output_dir
            if self._batched:
                batch_output_dir = os.path.join(batch_output_dir, f"batch-{batch_id}")

            run_modifications = coverage.run_modifications(batch_output_dir)
            self.context.log.debug(f"run_modifications: {run_modifications}")

            extra_jvm_options = run_modifications.extra_jvm_options

            # Batches of test classes will likely exist within the same targets: dedupe them.
            relevant_targets = {test_registry.get_owning_target(t) for t in batch}

            complete_classpath = OrderedSet()
            complete_classpath.update(run_modifications.classpath_prepend)
            complete_classpath.update(JUnit.global_instance().runner_classpath(self.context))
            complete_classpath.update(
                self.classpath(relevant_targets, classpath_product=classpath_product)
            )

            distribution = self.preferred_jvm_distribution([platform], self._strict_jvm_version)

            # Override cmdline args with values from junit_test() target that specify concurrency:
            args = self._args(fail_fast, batch_output_dir) + ["-xmlreport"]

            if concurrency is not None:
                args = remove_arg(args, "-default-parallel")
                if concurrency == JUnitTests.CONCURRENCY_SERIAL:
                    args = ensure_arg(args, "-default-concurrency", param="SERIAL")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_CLASSES")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_METHODS:
                    args = ensure_arg(args, "-default-concurrency", param="PARALLEL_METHODS")
                elif concurrency == JUnitTests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                    args = ensure_arg(
                        args, "-default-concurrency", param="PARALLEL_CLASSES_AND_METHODS"
                    )

            if threads is not None:
                args = remove_arg(args, "-parallel-threads", has_param=True)
                args += ["-parallel-threads", str(threads)]

            batch_test_specs = [test.render_test_spec() for test in batch]
            with argfile.safe_args(batch_test_specs, self.get_options()) as batch_tests:
                with self.chroot(relevant_targets, workdir) as chroot:
                    self.context.log.debug(f"CWD = {chroot}")
                    self.context.log.debug(f"platform = {platform}")
                    with environment_as(**dict(target_env_vars)):
                        subprocess_result = self.spawn_and_wait(
                            relevant_targets,
                            executor=SubprocessExecutor(distribution),
                            distribution=distribution,
                            classpath=complete_classpath,
                            main=JUnit.RUNNER_MAIN,
                            jvm_options=self.jvm_options
                            + list(platform.jvm_options)
                            + extra_jvm_options
                            + list(target_jvm_options),
                            args=args + batch_tests,
                            workunit_factory=self.context.new_workunit,
                            workunit_name="run",
                            workunit_labels=[WorkUnitLabel.TEST],
                            cwd=chroot,
                            synthetic_jar_dir=batch_output_dir,
                            create_synthetic_jar=self.synthetic_classpath,
                        )
                        self.context.log.debug(
                            "JUnit subprocess exited with result ({})".format(subprocess_result)
                        )
                        result += abs(subprocess_result)

                tests_info = self.parse_test_info(
                    batch_output_dir, parse_error_handler, ["classname"]
                )
                for test_name, test_info in tests_info.items():
                    test_item = Test(test_info["classname"], test_name)
                    test_target = test_registry.get_owning_target(test_item)
                    self.report_all_info_for_single_test(
                        self.options_scope, test_target, test_name, test_info
                    )

                if result != 0 and fail_fast:
                    break

        if result == 0:
            return TestResult.successful

        # NB: If the TestRegistry fails to find the owning target of a failed test, the target key in
        # this dictionary will be None: helper methods in this block account for that.
        target_to_failed_test = parse_failed_targets(test_registry, output_dir, parse_error_handler)

        def sort_owning_target(t):
            return t.address.spec if t else ""

        failed_targets = sorted(target_to_failed_test, key=sort_owning_target)
        error_message_lines = []
        if self._failure_summary:

            def render_owning_target(t):
                return t.address.reference() if t else "<Unknown Target>"

            for target in failed_targets:
                error_message_lines.append(f"\n{(' ' * 4)}{render_owning_target(target)}")
                for test in sorted(target_to_failed_test[target]):
                    error_message_lines.append(f"{' ' * 8}{test.classname}#{test.methodname}")
        error_message_lines.append(
            "\njava {main} ... exited non-zero ({code}); {failed} failed {targets}.".format(
                main=JUnit.RUNNER_MAIN,
                code=result,
                failed=len(failed_targets),
                targets=pluralize(len(failed_targets), "target"),
            )
        )
        return TestResult(
            msg="\n".join(error_message_lines), rc=result, failed_targets=failed_targets
        )
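
JVM classpath resolution is order-sensitive (the first matching entry wins), which is why `complete_classpath` above is built as an `OrderedSet` rather than a plain `set`. A small sketch (same assumption):

from pants.util.ordered_set import OrderedSet

complete_classpath = OrderedSet()
complete_classpath.update(["instrument.jar"])                # prepends win
complete_classpath.update(["runner.jar", "instrument.jar"])  # duplicate ignored
print(list(complete_classpath))  # ['instrument.jar', 'runner.jar']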
Code example #22
async def run_go_tests(field_set: GoTestFieldSet,
                       test_subsystem: TestSubsystem,
                       go_test_subsystem: GoTestSubsystem) -> TestResult:
    maybe_pkg_analysis, maybe_pkg_digest, dependencies = await MultiGet(
        Get(FallibleFirstPartyPkgAnalysis,
            FirstPartyPkgAnalysisRequest(field_set.address)),
        Get(FallibleFirstPartyPkgDigest,
            FirstPartyPkgDigestRequest(field_set.address)),
        Get(Targets, DependenciesRequest(field_set.dependencies)),
    )

    def compilation_failure(exit_code: int, stdout: str | None,
                            stderr: str | None) -> TestResult:
        return TestResult(
            exit_code=exit_code,
            stdout=stdout or "",
            stderr=stderr or "",
            stdout_digest=EMPTY_FILE_DIGEST,
            stderr_digest=EMPTY_FILE_DIGEST,
            address=field_set.address,
            output_setting=test_subsystem.output,
            result_metadata=None,
        )

    if maybe_pkg_analysis.analysis is None:
        assert maybe_pkg_analysis.stderr is not None
        return compilation_failure(maybe_pkg_analysis.exit_code, None,
                                   maybe_pkg_analysis.stderr)
    if maybe_pkg_digest.pkg_digest is None:
        assert maybe_pkg_digest.stderr is not None
        return compilation_failure(maybe_pkg_digest.exit_code, None,
                                   maybe_pkg_digest.stderr)

    pkg_analysis = maybe_pkg_analysis.analysis
    pkg_digest = maybe_pkg_digest.pkg_digest
    import_path = pkg_analysis.import_path

    testmain = await Get(
        GeneratedTestMain,
        GenerateTestMainRequest(
            pkg_digest.digest,
            FrozenOrderedSet(
                os.path.join(".", pkg_analysis.dir_path, name)
                for name in pkg_analysis.test_go_files),
            FrozenOrderedSet(
                os.path.join(".", pkg_analysis.dir_path, name)
                for name in pkg_analysis.xtest_go_files),
            import_path,
            field_set.address,
        ),
    )

    if testmain.failed_exit_code_and_stderr is not None:
        _exit_code, _stderr = testmain.failed_exit_code_and_stderr
        return compilation_failure(_exit_code, None, _stderr)

    if not testmain.has_tests and not testmain.has_xtests:
        return TestResult.skip(field_set.address,
                               output_setting=test_subsystem.output)

    # Construct the build request for the package under test.
    maybe_test_pkg_build_request = await Get(
        FallibleBuildGoPackageRequest,
        BuildGoPackageTargetRequest(field_set.address, for_tests=True),
    )
    if maybe_test_pkg_build_request.request is None:
        assert maybe_test_pkg_build_request.stderr is not None
        return compilation_failure(maybe_test_pkg_build_request.exit_code,
                                   None, maybe_test_pkg_build_request.stderr)
    test_pkg_build_request = maybe_test_pkg_build_request.request

    main_direct_deps = [test_pkg_build_request]

    if testmain.has_xtests:
        # Build a synthetic package for xtests where the import path is the same as the package under test
        # but with "_test" appended.
        #
        # Subset the direct dependencies to only the dependencies used by the xtest code. (Dependency
        # inference will have included all of the regular, test, and xtest dependencies of the package in
        # the build graph.) Moreover, ensure that any import of the package under test is on the _test_
        # version of the package that was just built.
        dep_by_import_path = {
            dep.import_path: dep
            for dep in test_pkg_build_request.direct_dependencies
        }
        direct_dependencies: OrderedSet[BuildGoPackageRequest] = OrderedSet()
        for xtest_import in pkg_analysis.xtest_imports:
            if xtest_import == pkg_analysis.import_path:
                direct_dependencies.add(test_pkg_build_request)
            elif xtest_import in dep_by_import_path:
                direct_dependencies.add(dep_by_import_path[xtest_import])

        xtest_pkg_build_request = BuildGoPackageRequest(
            import_path=f"{import_path}_test",
            digest=pkg_digest.digest,
            dir_path=pkg_analysis.dir_path,
            go_file_names=pkg_analysis.xtest_go_files,
            s_file_names=(),  # TODO: Are there .s files for xtest?
            direct_dependencies=tuple(direct_dependencies),
            minimum_go_version=pkg_analysis.minimum_go_version,
            embed_config=pkg_digest.xtest_embed_config,
        )
        main_direct_deps.append(xtest_pkg_build_request)

    # Generate the synthetic main package which imports the test and/or xtest packages.
    maybe_built_main_pkg = await Get(
        FallibleBuiltGoPackage,
        BuildGoPackageRequest(
            import_path="main",
            digest=testmain.digest,
            dir_path="",
            go_file_names=(GeneratedTestMain.TEST_MAIN_FILE, ),
            s_file_names=(),
            direct_dependencies=tuple(main_direct_deps),
            minimum_go_version=pkg_analysis.minimum_go_version,
        ),
    )
    if maybe_built_main_pkg.output is None:
        assert maybe_built_main_pkg.stderr is not None
        return compilation_failure(maybe_built_main_pkg.exit_code,
                                   maybe_built_main_pkg.stdout,
                                   maybe_built_main_pkg.stderr)
    built_main_pkg = maybe_built_main_pkg.output

    main_pkg_a_file_path = built_main_pkg.import_paths_to_pkg_a_files["main"]
    import_config = await Get(
        ImportConfig,
        ImportConfigRequest(built_main_pkg.import_paths_to_pkg_a_files))
    linker_input_digest = await Get(
        Digest, MergeDigests([built_main_pkg.digest, import_config.digest]))
    binary = await Get(
        LinkedGoBinary,
        LinkGoBinaryRequest(
            input_digest=linker_input_digest,
            archives=(main_pkg_a_file_path, ),
            import_config_path=import_config.CONFIG_PATH,
            output_filename=
            "./test_runner",  # TODO: Name test binary the way that `go` does?
            description=f"Link Go test binary for {field_set.address}",
        ),
    )

    # To emulate Go's test runner, we set the working directory to the path of the `go_package`.
    # This allows tests to open dependencies on `file` targets regardless of where they are
    # located. See https://dave.cheney.net/2016/05/10/test-fixtures-in-go.
    working_dir = field_set.address.spec_path
    binary_with_prefix, files_sources = await MultiGet(
        Get(Digest, AddPrefix(binary.digest, working_dir)),
        Get(
            SourceFiles,
            SourceFilesRequest(
                (dep.get(SourcesField) for dep in dependencies),
                for_sources_types=(FileSourceField, ),
                enable_codegen=True,
            ),
        ),
    )
    test_input_digest = await Get(
        Digest,
        MergeDigests((binary_with_prefix, files_sources.snapshot.digest)))

    cache_scope = (ProcessCacheScope.PER_SESSION
                   if test_subsystem.force else ProcessCacheScope.SUCCESSFUL)

    result = await Get(
        FallibleProcessResult,
        Process(
            [
                "./test_runner",
                *transform_test_args(go_test_subsystem.args,
                                     field_set.timeout.value),
            ],
            input_digest=test_input_digest,
            description=f"Run Go tests: {field_set.address}",
            cache_scope=cache_scope,
            working_directory=working_dir,
            level=LogLevel.DEBUG,
        ),
    )
    return TestResult.from_fallible_process_result(result, field_set.address,
                                                   test_subsystem.output)
Code example #23
File: jar_publish.py Project: wisechengyi/pants
        def stage_artifacts(tgt, jar, version, tag, changelog):
            publications = OrderedSet()

            # TODO Remove this once we fix https://github.com/pantsbuild/pants/issues/1229
            if (
                not self.context.products.get("jars").has(tgt)
                and not self.get_options().individual_plugins
            ):
                raise TaskError(
                    "Expected to find a primary artifact for {} but there was no jar for it.".format(
                        tgt.address.reference()
                    )
                )

            # TODO Remove this guard once we fix https://github.com/pantsbuild/pants/issues/1229;
            # there should always be a primary artifact.
            if self.context.products.get("jars").has(tgt):
                self._copy_artifact(tgt, jar, version, typename="jars")
                publications.add(self.Publication(name=jar.name, classifier=None, ext="jar"))

                self.create_source_jar(tgt, jar, version)
                publications.add(self.Publication(name=jar.name, classifier="sources", ext="jar"))

                # don't request docs unless they are available for all transitive targets
                # TODO: doc products should be checked by an independent jar'ing task, and
                # conditionally enabled; see https://github.com/pantsbuild/pants/issues/568
                doc_jar = self.create_doc_jar(tgt, jar, version)
                if doc_jar:
                    publications.add(
                        self.Publication(name=jar.name, classifier="javadoc", ext="jar")
                    )

                if self.publish_changelog:
                    changelog_path = self.artifact_path(
                        jar, version, suffix="-CHANGELOG", extension="txt"
                    )
                    with safe_open(changelog_path, "w") as changelog_file:
                        changelog_file.write(changelog)
                    publications.add(
                        self.Publication(name=jar.name, classifier="CHANGELOG", ext="txt")
                    )

            # Process any extra jars that might have been previously generated for this target, or a
            # target that it was derived from.
            for extra_product, extra_config in (self.get_options().publish_extras or {}).items():
                override_name = jar.name
                if "override_name" in extra_config:
                    # If the supplied string has a '{target_provides_name}' in it, replace it with the
                    # current jar name. If not, the string will be taken verbatim.
                    override_name = extra_config["override_name"].format(
                        target_provides_name=jar.name
                    )

                classifier = None
                suffix = ""
                if "classifier" in extra_config:
                    classifier = extra_config["classifier"]
                    suffix = f"-{classifier}"

                extension = extra_config.get("extension", "jar")

                extra_pub = self.Publication(
                    name=override_name, classifier=classifier, ext=extension
                )

                # A lot of flexibility is allowed in parameterizing the extra artifact; ensure those
                # parameters lead to a unique publication.
                # TODO(John Sirois): Check this much earlier.
                if extra_pub in publications:
                    raise TaskError(
                        "publish_extra for '{0}' must override one of name, classifier or "
                        "extension with a non-default value.".format(extra_product)
                    )

                # Build a list of targets to check. This list will consist of the current target, plus the
                # entire derived_from chain.
                target_list = [tgt]
                target = tgt
                while target.derived_from != target:
                    target_list.append(target.derived_from)
                    target = target.derived_from
                for cur_tgt in target_list:
                    if self.context.products.get(extra_product).has(cur_tgt):
                        self._copy_artifact(
                            cur_tgt,
                            jar,
                            version,
                            typename=extra_product,
                            suffix=suffix,
                            extension=extension,
                            override_name=override_name,
                        )
                        publications.add(extra_pub)

            pom_path = self.artifact_path(jar, version, extension="pom")
            PomWriter(get_pushdb, tag).write(tgt, path=pom_path)
            return publications
Code example #24
 def sections(self) -> list[str]:
     ret: OrderedSet[str] = OrderedSet()
     for cfg in self._configs:
         ret.update(cfg.sections())
     return list(ret)
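
A sketch of the merge semantics above (same assumption): `update()` appends only unseen sections, so config files listed earlier keep precedence in the returned ordering.

from pants.util.ordered_set import OrderedSet

ret = OrderedSet()
for sections in (["GLOBAL", "test"], ["test", "lint"]):
    ret.update(sections)
print(list(ret))  # ['GLOBAL', 'test', 'lint']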
Code example #25
File: rules.py Project: hephex/pants
async def infer_java_dependencies_via_imports(
    request: InferJavaSourceDependencies,
    java_infer_subsystem: JavaInferSubsystem,
    first_party_dep_map: FirstPartySymbolMapping,
    third_party_artifact_mapping: ThirdPartyPackageToArtifactMapping,
    available_artifacts: AvailableThirdPartyArtifacts,
) -> InferredDependencies:
    if (not java_infer_subsystem.imports
            and not java_infer_subsystem.consumed_types
            and not java_infer_subsystem.third_party_imports):
        return InferredDependencies([])

    address = request.sources_field.address
    wrapped_tgt = await Get(WrappedTarget, Address, address)
    explicitly_provided_deps, analysis = await MultiGet(
        Get(ExplicitlyProvidedDependencies,
            DependenciesRequest(wrapped_tgt.target[Dependencies])),
        Get(JavaSourceDependencyAnalysis,
            SourceFilesRequest([request.sources_field])),
    )

    types: OrderedSet[str] = OrderedSet()
    if java_infer_subsystem.imports:
        types.update(
            dependency_name(imp.name, imp.is_static)
            for imp in analysis.imports)
    if java_infer_subsystem.consumed_types:
        package = analysis.declared_package

        # 13545: `analysis.consumed_types` may be unqualified (package-local or imported) or qualified
        # (prefixed by package name). The heuristic for now is that if there's a `.` in the type name, it's
        # probably fully qualified. This should be fine in practice.
        maybe_qualify_types = (
            f"{package}.{consumed_type}"
            if package and "." not in consumed_type
            else consumed_type
            for consumed_type in analysis.consumed_types
        )

        types.update(maybe_qualify_types)

    dep_map = first_party_dep_map.symbols

    dependencies: OrderedSet[Address] = OrderedSet()

    for typ in types:
        first_party_matches = dep_map.addresses_for_symbol(typ)
        third_party_matches: FrozenOrderedSet[Address] = FrozenOrderedSet()
        if java_infer_subsystem.third_party_imports:
            third_party_matches = find_artifact_mapping(
                typ, third_party_artifact_mapping, available_artifacts)
        matches = first_party_matches.union(third_party_matches)
        if not matches:
            continue

        explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
            matches,
            address,
            import_reference="type",
            context=f"The target {address} imports `{typ}`",
        )
        maybe_disambiguated = explicitly_provided_deps.disambiguated(matches)
        if maybe_disambiguated:
            dependencies.add(maybe_disambiguated)

    return InferredDependencies(dependencies)
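The package-qualification heuristic above is easy to exercise in isolation. A sketch with a hypothetical qualify helper that mirrors the generator expression:

def qualify(package, consumed_types):
    """Prefix dot-free (package-local) type names with the declared package.

    Names already containing a '.' are assumed to be fully qualified.
    """
    return [
        f"{package}.{t}" if package and "." not in t else t
        for t in consumed_types
    ]


assert qualify("org.example", ["Foo", "com.other.Bar"]) == [
    "org.example.Foo",
    "com.other.Bar",
]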
Code Example #26
File: setup_py.py Project: wisechengyi/pants
    def reduced_dependencies(self, exported_target):
        """Calculates the reduced transitive dependencies for an exported target.

        The reduced set of dependencies will be just those transitive dependencies "owned" by
        the `exported_target`.

        A target is considered "owned" if:
        1. It's "3rdparty" and "directly reachable" from `exported_target` by at least 1 path.
        2. It's not "3rdparty" and not "directly reachable" by any of `exported_target`'s "3rdparty"
           dependencies.

        Here "3rdparty" refers to targets identified as either `is_third_party` or `is_exported`.

        And in this context "directly reachable" means the target can be reached by following a series
        of dependency links from the `exported_target`, never crossing another exported target and
        staying within the `exported_target` address space.  It's the latter restriction that allows for
        unambiguous ownership of exportable targets and mirrors the BUILD file convention of targets
        only being able to own sources in their filesystem subtree.  The single ambiguous case that can
        arise is when there is more than one exported target in the same BUILD file family that can
        "directly reach" a target in its address space.

        :raises: `UnExportedError` if the given `exported_target` is not, in fact, exported.
        :raises: `NoOwnerError` if a transitive dependency is found with no proper owning exported
                 target.
        :raises: `AmbiguousOwnerError` if there is more than one viable exported owner target for a
                 given transitive dependency.
        """
        # The strategy adopted requires 3 passes:
        # 1.) Walk the exported target to collect provisional owned exportable targets, but _not_
        #     3rdparty since these may be introduced by exported subgraphs we discover in later steps!
        # 2.) Determine the owner of each target collected in 1 by walking the ancestor chain to find
        #     the closest exported target.  The ancestor chain is just all targets whose spec path is
        #     a prefix of the descendant.  In other words, all targets in descendant's BUILD file family
        #     (its siblings), all targets in its parent directory BUILD file family, and so on.
        # 3.) Finally walk the exported target once more, replacing each visited dependency with its
        #     owner.

        if not self.is_exported(exported_target):
            raise self.UnExportedError(
                "Cannot calculate reduced dependencies for a non-exported "
                "target, given: {}".format(exported_target))

        owner_by_owned_python_target = OrderedDict()

        # Only check ownership on the original target graph.
        original_exported_target = exported_target.derived_from

        def collect_potentially_owned_python_targets(current):
            if current.is_original:
                # We can't know the owner in the 1st pass.
                owner_by_owned_python_target[current] = None
            return current == exported_target or not self.is_exported(current)

        self._walk(original_exported_target, collect_potentially_owned_python_targets)

        for owned in owner_by_owned_python_target:
            if self.requires_export(owned) and not self.is_exported(owned):
                potential_owners = set()
                for potential_owner in self._ancestor_iterator.iter_target_siblings_and_ancestors(owned):
                    if self.is_exported(potential_owner) and owned in self._closure(potential_owner):
                        potential_owners.add(potential_owner)
                if not potential_owners:
                    raise self.NoOwnerError(
                        "No exported target owner found for {}".format(owned))
                owner = potential_owners.pop()
                if potential_owners:
                    ambiguous_owners = [
                        o for o in potential_owners
                        if o.address.spec_path == owner.address.spec_path
                    ]
                    if ambiguous_owners:
                        raise self.AmbiguousOwnerError(
                            "Owners for {} are ambiguous.  Found {} and "
                            "{} others: {}".format(owned, owner,
                                                   len(ambiguous_owners),
                                                   ambiguous_owners))
                owner_by_owned_python_target[owned] = owner

        reduced_dependencies = OrderedSet()

        def collect_reduced_dependencies(current):
            if current == exported_target:
                return True
            else:
                # The provider will be one of:
                # 1. `None`, ie: a 3rdparty requirement we should collect.
                # 2. `exported_target`, ie: a local exportable target owned by `exported_target` that we
                #    should collect
                # 3. Or else a local exportable target owned by some other exported target in which case
                #    we should collect the exported owner.
                owner = owner_by_owned_python_target.get(current)
                if owner is None or owner == exported_target:
                    reduced_dependencies.add(current)
                else:
                    reduced_dependencies.add(owner)
                return owner == exported_target or not self.requires_export(current)

        self._walk(exported_target, collect_reduced_dependencies)
        return OrderedSet(d for d in reduced_dependencies if d.is_original)
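The ancestor chain used in pass 2 is driven purely by spec-path prefixes: every prefix directory of the owned target's spec path is a candidate BUILD file family. A sketch of that walk, with a hypothetical helper standing in for the real _ancestor_iterator:

import os


def ancestor_spec_paths(spec_path):
    """Yield spec paths from the target's own directory up to the build root."""
    while spec_path:
        yield spec_path
        spec_path = os.path.dirname(spec_path)
    yield ""  # the build root itself


assert list(ancestor_spec_paths("src/python/app/lib")) == [
    "src/python/app/lib",
    "src/python/app",
    "src/python",
    "src",
    "",
]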
Code Example #27
    def _process_target(
        self,
        current_target,
        modulizable_target_set,
        resource_target_map,
        runtime_classpath,
        zinc_args_for_target,
    ):
        """
        :type current_target:pants.build_graph.target.Target
        """
        info = {
            # "targets" here holds this target's direct dependencies.
            "targets": [],
            "source_dependencies_in_classpath": [],
            "libraries": [],
            "roots": [],
            "id": current_target.id,
            "target_type": ExportDepAsJar._get_target_type(
                current_target, resource_target_map, runtime_classpath
            ),
            "is_synthetic": current_target.is_synthetic,
            "pants_target_type": self._get_pants_target_alias(type(current_target)),
            "is_target_root": current_target in modulizable_target_set,
            "transitive": current_target.transitive,
            "scope": str(current_target.scope),
            "scalac_args": self._extract_arguments_with_prefix_from_zinc_args(
                zinc_args_for_target, "-S"
            ),
            "javac_args": self._extract_arguments_with_prefix_from_zinc_args(
                zinc_args_for_target, "-C"
            ),
            "extra_jvm_options": current_target.payload.get_field_value("extra_jvm_options", []),
        }

        def iter_transitive_jars(jar_lib):
            """
            :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
            :rtype: :class:`collections.Iterator` of
                    :class:`pants.java.jar.M2Coordinate`
            """
            if runtime_classpath:
                jar_products = runtime_classpath.get_artifact_classpath_entries_for_targets(
                    (jar_lib,)
                )
                for _, jar_entry in jar_products:
                    coordinate = jar_entry.coordinate
                    # We drop classifier and type_ since those fields are represented in the global
                    # libraries dict and here we just want the key into that dict (see `_jar_id`).
                    yield M2Coordinate(org=coordinate.org, name=coordinate.name, rev=coordinate.rev)

        def _full_library_set_for_target(target):
            """Get the full library set for a target, including jar dependencies and jars of the
            library itself."""
            libraries = set()
            if isinstance(target, JarLibrary):
                jars = set()
                for jar in target.jar_dependencies:
                    jars.add(M2Coordinate(jar.org, jar.name, jar.rev))
                # Add all the jars pulled in by this jar_library
                jars.update(iter_transitive_jars(target))
                libraries = [self._jar_id(jar) for jar in jars]
            else:
                libraries.add(target.id)
            return libraries

        if not current_target.is_synthetic:
            info["globs"] = current_target.globs_relative_to_buildroot()

        def _dependencies_needed_in_classpath(target):
            if isinstance(target, JvmTarget):
                return list(
                    DependencyContext.global_instance().dependencies_respecting_strict_deps(target)
                )
            else:
                return list(target.closure())

        dependencies_needed_in_classpath = _dependencies_needed_in_classpath(current_target)

        libraries_for_target = {self._jar_id(jar) for jar in iter_transitive_jars(current_target)}
        for dep in self._dependencies_to_include_in_libraries(
            current_target, modulizable_target_set, dependencies_needed_in_classpath
        ):
            libraries_for_target.update(_full_library_set_for_target(dep))
        info["libraries"].extend(libraries_for_target)

        info["roots"] = [
            {
                "source_root": os.path.realpath(source_root_package_prefix[0]),
                "package_prefix": source_root_package_prefix[1],
            }
            for source_root_package_prefix in self._source_roots_for_target(current_target)
        ]

        for dep in current_target.dependencies:
            if dep in modulizable_target_set:
                info["targets"].append(dep.address.spec)

        if isinstance(current_target, ScalaLibrary):
            for dep in current_target.java_sources:
                info["targets"].append(dep.address.spec)

        if isinstance(current_target, JvmTarget):
            info["excludes"] = [self._exclude_id(exclude) for exclude in current_target.excludes]
            info["platform"] = current_target.platform.name
            if hasattr(current_target, "runtime_platform"):
                info["runtime_platform"] = current_target.runtime_platform.name

        transitive_targets = OrderedSet(
            [
                dep.address.spec
                for dep in dependencies_needed_in_classpath
                if dep in modulizable_target_set
            ]
        )
        transitive_targets.update(info["targets"])
        info["source_dependencies_in_classpath"] = [dep for dep in transitive_targets]

        return info
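The scalac/javac argument extraction above relies on zinc's routing prefixes ("-S" for scalac, "-C" for javac). A hypothetical reimplementation of the helper, for illustration only; the real method lives on the task class and may handle more edge cases:

def extract_args_with_prefix(zinc_args, prefix):
    """Return the args carrying the given routing prefix, with the prefix stripped."""
    return [arg[len(prefix):] for arg in zinc_args if arg.startswith(prefix)]


zinc_args = ["-S-deprecation", "-C-parameters", "-S-unchecked", "-d", "out"]
assert extract_args_with_prefix(zinc_args, "-S") == ["-deprecation", "-unchecked"]
assert extract_args_with_prefix(zinc_args, "-C") == ["-parameters"]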
Code Example #28
File: setup_py.py Project: wisechengyi/pants
    def execute(self):
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return t.is_original and self.has_provides(
                t) and not is_local_python_dist(t)

        exported_python_targets = OrderedSet(
            t for t in self.context.target_roots if is_exported_python_target(t)
        )

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported
        # targets in play during the creation phase. That then allows a tsort of these exported
        # targets in the run phase, ensuring an exported target is, for example
        # (--run="sdist upload"), uploaded before any exported target that depends on it.

        created: Dict[PythonTarget, Path] = {}

        def create(exported_python_target):
            if exported_python_target not in created:
                self.context.log.info(
                    "Creating setup.py project for {}".format(
                        exported_python_target))
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = Path(setup_dir)
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})

        setup_runner = SetupPyRunner.Factory.create(
            scope=self,
            interpreter=interpreter,
            pex_file_path=os.path.join(self.workdir, self.fingerprint,
                                       "setup-py-runner.pex"),
        )
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        "Running sdist against {}".format(setup_dir))
                    sdist = setup_runner.sdist(setup_dir)
                    tgz_name = sdist.name
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info("Writing {}".format(sdist_path))
                    shutil.move(sdist, sdist_path)
                    safe_rmtree(str(setup_dir))
                    python_dists[exported_python_target] = sdist_path
                else:
                    self.context.log.info("Running {} against {}".format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    try:
                        setup_runner.run_setup_command(
                            source_dir=setup_dir, setup_command=split_command)
                    except SetupPyRunner.CommandFailure as e:
                        raise TaskError(f"Install failed: {e}")
                    python_dists[exported_python_target] = setup_dir
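The reversed(sort_targets(...)) loop above processes targets dependency-first, which is what guarantees an exported target is uploaded before its dependents. A sketch of the same ordering using the standard-library graphlib (the example graph is made up):

from graphlib import TopologicalSorter

# Each key depends on its values: 'app' depends on 'lib', which depends on 'core'.
graph = {"app": {"lib"}, "lib": {"core"}, "core": set()}

# static_order() yields dependencies before dependents, so 'core' would be
# uploaded before anything that depends on it.
order = list(TopologicalSorter(graph).static_order())
assert order.index("core") < order.index("lib") < order.index("app")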
Code Example #29
    def findbugs(self, target):
        runtime_classpaths = self.context.products.get_data(
            "runtime_classpath")
        runtime_classpath = runtime_classpaths.get_for_targets(
            target.closure(bfs=True))
        aux_classpath = OrderedSet(jar for conf, jar in runtime_classpath
                                   if conf == "default")

        target_jars = OrderedSet(
            jar for conf, jar in runtime_classpaths.get_for_target(target)
            if conf == "default")

        bug_counts = {"error": 0, "high": 0, "normal": 0, "low": 0}

        if not target_jars:
            self.context.log.info("  No jars to be analyzed")
            return bug_counts

        output_dir = os.path.join(self.workdir, target.id)
        safe_mkdir(output_dir)
        output_file = os.path.join(output_dir, "findbugsXml.xml")

        aux_classpath_file = os.path.join(
            self.workdir, "{}.classpath".format(os.path.basename(output_dir)))
        with open(aux_classpath_file, "w") as f:
            f.write("\n".join(aux_classpath - target_jars))

        args = [
            "-auxclasspathFromFile",
            aux_classpath_file,
            "-projectName",
            target.address.spec,
            "-xml:withMessages",
            "-effort:{}".format(self.get_options().effort),
            "-{}".format(self.get_options().threshold),
            "-nested:{}".format(
                "true" if self.get_options().nested else "false"),
            "-output",
            output_file,
            "-noClassOk",
        ]

        if self.get_options().exclude_filter_file:
            args.extend([
                "-exclude",
                os.path.join(get_buildroot(),
                             self.get_options().exclude_filter_file)
            ])

        if self.get_options().include_filter_file:
            args.extend([
                "-include",
                os.path.join(get_buildroot(),
                             self.get_options().include_filter_file)
            ])

        if self.get_options().max_rank:
            args.extend(["-maxRank", str(self.get_options().max_rank)])

        if self.get_options().relaxed:
            args.extend(["-relaxed"])

        if self.debug:
            args.extend(["-progress"])

        args.extend(target_jars)

        # Try to run SpotBugs with the same Java version as the target.
        # The minimum JDK for SpotBugs is JDK 1.8.
        min_jdk_version = max(target.platform.target_level,
                              Revision.lenient("1.8"))
        if min_jdk_version.components[0] == 1:
            max_jdk_version = Revision(min_jdk_version.components[0],
                                       min_jdk_version.components[1], "9999")
        else:
            max_jdk_version = Revision(min_jdk_version.components[0], "9999")

        self.set_distribution(minimum_version=min_jdk_version,
                              maximum_version=max_jdk_version,
                              jdk=True)

        result = self.runjava(
            classpath=self.tool_classpath("findbugs"),
            main=self._FINDBUGS_MAIN,
            jvm_options=self.get_options().jvm_options,
            args=args,
            workunit_name="findbugs",
            workunit_labels=[WorkUnitLabel.LINT],
        )
        if result != 0:
            raise TaskError(
                "java {main} ... exited non-zero ({result})".format(
                    main=self._FINDBUGS_MAIN, result=result))

        xml = XmlParser.from_file(output_file)
        for error in xml.parsed.getElementsByTagName("Error"):
            self.context.log.warn(
                "Error: {msg}".format(
                    msg=error.getElementsByTagName("ErrorMessage")[0].firstChild.data))
            bug_counts["error"] += 1

        for bug_instance in xml.parsed.getElementsByTagName("BugInstance"):
            bug_rank = bug_instance.getAttribute("rank")
            if int(bug_rank) <= self._HIGH_PRIORITY_LOWEST_RANK:
                priority = "high"
            elif int(bug_rank) <= self._NORMAL_PRIORITY_LOWEST_RANK:
                priority = "normal"
            else:
                priority = "low"
            bug_counts[priority] += 1

            source_line = bug_instance.getElementsByTagName("Class")[0].getElementsByTagName(
                "SourceLine")[0]
            self.context.log.warn(
                "Bug[{priority}]: {type} {desc} {line}".format(
                    priority=priority,
                    type=bug_instance.getAttribute("type"),
                    desc=bug_instance.getElementsByTagName("LongMessage")[0].firstChild.data,
                    line=source_line.getElementsByTagName("Message")[0].firstChild.data,
                ))

        return bug_counts
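The rank-to-priority bucketing above can be isolated into a small pure function. The threshold values here are assumptions chosen to match SpotBugs' documented rank groups, not necessarily the task's actual class constants:

HIGH_PRIORITY_LOWEST_RANK = 4    # assumed: SpotBugs ranks 1-4 are "scariest"
NORMAL_PRIORITY_LOWEST_RANK = 9  # assumed: ranks 5-9 are "scary"


def bucket_priority(rank):
    """Map a numeric bug rank (lower is worse) to a priority bucket."""
    if rank <= HIGH_PRIORITY_LOWEST_RANK:
        return "high"
    if rank <= NORMAL_PRIORITY_LOWEST_RANK:
        return "normal"
    return "low"


assert [bucket_priority(r) for r in (1, 4, 5, 9, 10)] == [
    "high", "high", "normal", "normal", "low",
]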
Code Example #30
File: round_engine.py Project: wiwa/pants
    def __init__(self, context):
        self._dependencies = OrderedSet()
        self._optional_dependencies = OrderedSet()
        self._context = context
        self._producer_infos_by_product_type = None