Code example #1
    def _compute_sources(self, target):
        relative_sources = OrderedSet()
        source_roots = OrderedSet()

        def capture_and_relativize_to_source_root(source):
            source_root = self.context.source_roots.find_by_path(source)
            if not source_root:
                source_root = self.context.source_roots.find_by_path(target.address.spec_path)
            source_roots.add(source_root.path)
            return fast_relpath(source, source_root.path)

        if target.payload.get_field_value("ordered_sources"):
            # Re-match the filespecs against the sources in order to apply them in the literal order
            # they were specified in.
            filespecs = target.globs_relative_to_buildroot()
            excludes = filespecs.get("excludes", [])
            for filespec in filespecs.get("globs", []):
                sources = [
                    s
                    for s in target.sources_relative_to_buildroot()
                    if globs_matches([s], [filespec], excludes)
                ]
                if len(sources) != 1:
                    raise TargetDefinitionException(
                        target,
                        "With `ordered_sources=True`, expected one match for each file literal, "
                        "but got: {} for literal `{}`.".format(sources, filespec),
                    )
                relative_sources.add(capture_and_relativize_to_source_root(sources[0]))
        else:
            # Otherwise, use the default (unspecified) snapshot ordering.
            for source in target.sources_relative_to_buildroot():
                relative_sources.add(capture_and_relativize_to_source_root(source))
        return relative_sources, source_roots
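The re-matching loop above exists because snapshot ordering is unspecified: each file literal from the BUILD file is matched back against the expanded sources so that the order written in the BUILD file wins. A minimal, self-contained sketch of the same idea, using stdlib fnmatch in place of Pants' globs_matches (the helper name is illustrative, not part of Pants):

from fnmatch import fnmatch

def match_in_literal_order(sources, literals):
    """Yield the single source matching each literal, in literal order."""
    for literal in literals:
        matches = [s for s in sources if fnmatch(s, literal)]
        if len(matches) != 1:
            raise ValueError(f"expected one match for {literal!r}, got {matches}")
        yield matches[0]

# BUILD-file order ("b.proto" before "a.proto") is preserved in the output.
print(list(match_in_literal_order(["a.proto", "b.proto"], ["b.proto", "a.proto"])))
# -> ['b.proto', 'a.proto']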
Code example #2
    def compute_pantsd_invalidation_globs(buildroot, bootstrap_options):
        """Computes the merged value of the `--pantsd-invalidation-globs` option.

        Combines --pythonpath and --pants-config-files entries that are inside the
        {buildroot} dir with the invalidation globs provided by users.
        """
        invalidation_globs = OrderedSet()
        globs = [
            *sys.path,
            *bootstrap_options.pythonpath,
            *bootstrap_options.pants_config_files,
            "!*.pyc",
            "!__pycache__/",
            *bootstrap_options.pantsd_invalidation_globs,
        ]

        for glob in globs:
            if glob.startswith("!"):
                invalidation_globs.add(glob)
                continue

            glob_relpath = (
                fast_relpath_optional(glob, buildroot) if os.path.isabs(glob) else glob
            )
            if glob_relpath:
                invalidation_globs.update([glob_relpath, glob_relpath + "/**"])
            else:
                logger.debug(
                    f"Changes to {glob}, outside of the buildroot, will not be invalidated."
                )

        return list(invalidation_globs)
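fast_relpath_optional above presumably returns the glob relative to the buildroot, or None when it falls outside it, which is what makes the logging branch reachable. A stdlib approximation of that contract, assuming POSIX paths (relpath_optional is our name, not the Pants API):

import os

def relpath_optional(path, root):
    """Return `path` relative to `root`, or None if it lies outside `root`."""
    rel = os.path.relpath(path, root)
    outside = rel == os.pardir or rel.startswith(os.pardir + os.sep)
    return None if outside else rel

assert relpath_optional("/repo/src/app.py", "/repo") == "src/app.py"
assert relpath_optional("/elsewhere/app.py", "/repo") is None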
Code example #3
    def execute(self, **kwargs):
        # NB: kwargs are for testing and pass-through to underlying subprocess process spawning.

        go_targets = OrderedSet(target for target in self.context.target_roots
                                if self.is_go(target))
        args = self.get_passthru_args()
        if not go_targets or not args:
            template = yellow(
                "The pants `{goal}` goal expects at least one go target and at least one "
                "pass-through argument to be specified, call with:\n") + green(
                    "  ./pants {goal} {targets} -- {args}")
            msg = template.format(
                goal=self.options_scope,
                targets=(green(" ".join(t.address.reference()
                                        for t in go_targets))
                         if go_targets else red("[missing go targets]")),
                args=green(" ".join(args))
                if args else red("[missing pass-through args]"),
            )
            raise self.MissingArgsError(msg)

        go_path = OrderedSet()
        import_paths = OrderedSet()
        for target in go_targets:
            self.ensure_workspace(target)
            go_path.add(self.get_gopath(target))
            import_paths.add(target.import_path)

        self.execute_with_go_env(os.pathsep.join(go_path), list(import_paths),
                                 args, **kwargs)
Code example #4
File: setup_py.py Project: wisechengyi/pants
 def install_requires(cls, reduced_dependencies):
     install_requires = OrderedSet()
     for dep in reduced_dependencies:
         if cls.is_requirements(dep):
             for req in dep.payload.requirements:
                 install_requires.add(str(req.requirement))
         elif cls.has_provides(dep):
             install_requires.add(dep.provides.requirement)
     return install_requires
Code example #5
 def executable_search_path(self) -> Tuple[str, ...]:
     result = OrderedSet()
     for entry in self.options.executable_search_paths:
         if entry == "<PATH>":
             path = os.environ.get("PATH")
             if path:
                 for path_entry in path.split(os.pathsep):
                     result.add(path_entry)
         else:
             result.add(entry)
     return tuple(result)
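What OrderedSet buys here is de-duplication of PATH entries with first-seen order preserved. Since Python 3.7 an insertion-ordered dict gives the same guarantee, so a dependency-free equivalent could look like this sketch:

import os

def dedup_path_entries(path: str) -> tuple:
    # dict.fromkeys drops duplicates while keeping first-seen order,
    # mirroring the OrderedSet usage above.
    return tuple(dict.fromkeys(path.split(os.pathsep)))

print(dedup_path_entries(os.pathsep.join(["/usr/bin", "/usr/local/bin", "/usr/bin"])))
# -> ('/usr/bin', '/usr/local/bin')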
Code example #6
    def strict_dependencies(self, dep_context):
        """
        :param dep_context: A DependencyContext with configuration for the request.
        :return: targets that this target "strictly" depends on. This set of dependencies contains
          only directly declared dependencies, with two exceptions:
            1) aliases are expanded transitively
            2) the strict_dependencies of targets exported by strict_dependencies are
          included (transitively).
        :rtype: list of Target
        """
        strict_deps = self._cached_strict_dependencies_map.get(
            dep_context, None)
        if strict_deps is None:
            default_predicate = self._closure_dep_predicate(
                {self}, **dep_context.target_closure_kwargs)
            # TODO(#5977): this branch needs testing!
            if not default_predicate:

                def default_predicate(*args, **kwargs):
                    return True

            def dep_predicate(source, dependency):
                if not default_predicate(source, dependency):
                    return False

                # Always expand aliases.
                if type(source) in dep_context.alias_types:
                    return True

                # Traverse other dependencies if they are exported.
                if source._dep_is_exported(dependency):
                    return True
                return False

            dep_addresses = [
                d.address for d in self.dependencies
                if default_predicate(self, d)
            ]
            result = self._build_graph.transitive_subgraph_of_addresses_bfs(
                addresses=dep_addresses, dep_predicate=dep_predicate)

            strict_deps = OrderedSet()
            for declared in result:
                if type(declared) in dep_context.alias_types:
                    continue
                if isinstance(declared, dep_context.types_with_closure):
                    strict_deps.update(
                        declared.closure(bfs=True,
                                         **dep_context.target_closure_kwargs))
                strict_deps.add(declared)

            strict_deps = list(strict_deps)
            self._cached_strict_dependencies_map[dep_context] = strict_deps
        return strict_deps
Code example #7
    def find_all_relevant_resources_targets(self):
        # NB: Ordering isn't relevant here, because it is applied during the dep walk to
        # consume from the runtime_classpath.
        def is_jvm_target(target):
            return isinstance(target, JvmTarget)

        jvm_targets = self.context.targets(predicate=is_jvm_target)

        all_resources_tgts = OrderedSet()
        for target in Target.closure_for_targets(jvm_targets, bfs=True):
            if isinstance(target, Resources):
                all_resources_tgts.add(target)
        return all_resources_tgts
Code example #8
File: parse_search_dirs.py Project: wiwa/pants
    def _filter_existing_dirs(self, dir_candidates, compiler_exe):
        real_dirs = OrderedSet()

        for maybe_existing_dir in dir_candidates:
            # Could use a `seen_dir_paths` set if we want to avoid pinging the fs for duplicate entries.
            if is_readable_dir(maybe_existing_dir):
                real_dirs.add(os.path.realpath(maybe_existing_dir))
            else:
                logger.debug(
                    "non-existent or non-accessible directory at {} while "
                    "parsing directories from {}".format(
                        maybe_existing_dir, compiler_exe))

        return list(real_dirs)
Code example #9
File: context.py Project: wiwa/pants
    def _unfiltered_targets(self, **kwargs):
        def _collect_targets(root_targets, **kwargs):
            return Target.closure_for_targets(target_roots=root_targets,
                                              **kwargs)

        target_set = _collect_targets(self.target_roots, **kwargs)

        synthetics = OrderedSet()
        for synthetic_address in self.build_graph.synthetic_addresses:
            if self.build_graph.get_concrete_derived_from(
                    synthetic_address) in target_set:
                synthetics.add(self.build_graph.get_target(synthetic_address))
        target_set.update(_collect_targets(synthetics, **kwargs))

        return target_set
Code example #10
File: products.py Project: wiwa/pants
class RootedProducts:
    """File products of a build that have a concept of a 'root' directory.

    E.g., classfiles, under a root package directory.

    :API: public
    """
    def __init__(self, root):
        """
        :API: public
        """
        self._root = root
        self._rel_paths = OrderedSet()

    def add_abs_paths(self, abs_paths):
        """
        :API: public
        """
        for abs_path in abs_paths:
            self._rel_paths.add(fast_relpath(abs_path, self._root))

    def add_rel_paths(self, rel_paths):
        """
        :API: public
        """
        self._rel_paths.update(rel_paths)

    def root(self):
        """
        :API: public
        """
        return self._root

    def rel_paths(self):
        """
        :API: public
        """
        return self._rel_paths

    def abs_paths(self):
        """
        :API: public
        """
        for relpath in self._rel_paths:
            yield os.path.join(self._root, relpath)

    def __bool__(self):
        # __bool__ must return an actual bool, not the underlying OrderedSet.
        return bool(self._rel_paths)
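A short usage sketch (root and class-file paths are made up for illustration): relative paths are stored once, and absolute paths are derived lazily from the root.

products = RootedProducts("/repo/.pants.d/classes")
products.add_rel_paths(["com/example/Foo.class", "com/example/Bar.class"])
for abs_path in products.abs_paths():
    print(abs_path)
# /repo/.pants.d/classes/com/example/Foo.class
# /repo/.pants.d/classes/com/example/Bar.class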
Code example #11
    def execute_codegen(self, target, target_workdir):
        sources_by_base = self._calculate_sources(target)
        sources = target.sources_relative_to_buildroot()

        bases = OrderedSet()
        # Note that the root import must come first, otherwise protoc can get confused
        # when trying to resolve imports from the root against the import's source root.
        if self.get_options().import_from_root:
            bases.add(".")
        bases.update(sources_by_base.keys())
        bases.update(self._proto_path_imports([target]))

        gen_flag = "--java_out"

        gen = "{0}={1}".format(gen_flag, target_workdir)

        args = [self.protobuf_binary, gen]

        if self.plugins:
            for plugin in self.plugins:
                args.append("--{0}_out={1}".format(plugin, target_workdir))

        for base in bases:
            args.append("--proto_path={0}".format(base))

        args.extend(sources)

        # Tack on extra path entries. These can be used to find protoc plugins.
        protoc_environ = os.environ.copy()
        if self._extra_paths:
            protoc_environ["PATH"] = os.pathsep.join(
                self._extra_paths + protoc_environ["PATH"].split(os.pathsep))

        # Note: The test_source_ordering integration test scrapes this output, so modify it with care.
        self.context.log.debug("Executing: {0}".format("\\\n  ".join(args)))
        with self.context.new_workunit(name="protoc",
                                       labels=[WorkUnitLabel.TOOL],
                                       cmd=" ".join(args)) as workunit:
            result = subprocess.call(
                args,
                env=protoc_environ,
                stdout=workunit.output("stdout"),
                stderr=workunit.output("stderr"),
            )
            if result != 0:
                raise TaskError("{} ... exited non-zero ({})".format(
                    self.protobuf_binary, result))
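Prepending entries to PATH so a subprocess can discover helper binaries (here, protoc plugins) is a small reusable step. A sketch of it in isolation (the helper name is ours, not part of Pants):

import os

def env_with_prepended_path(extra_paths):
    """Return a copy of the environment with `extra_paths` prepended to PATH."""
    env = os.environ.copy()
    existing = env["PATH"].split(os.pathsep) if env.get("PATH") else []
    env["PATH"] = os.pathsep.join([*extra_paths, *existing])
    return env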
Code example #12
File: pex_build_util.py Project: revl/pants
    def resolve_distributions(self, reqs, platforms=None):
        """Multi-platform dependency resolution.

        :param reqs: A list of :class:`PythonRequirement` to resolve.
        :param platforms: A list of platform strings to resolve requirements for.
                          Defaults to the platforms specified by PythonSetup.
        :returns: List of :class:`pex.resolver.ResolvedDistribution` instances meeting requirements for
                  the given platforms.
        """
        deduped_reqs = OrderedSet(reqs)
        find_links = OrderedSet()
        for req in deduped_reqs:
            if req.repository:
                find_links.add(req.repository)

        return self._resolve_multi(deduped_reqs,
                                   platforms=platforms,
                                   find_links=find_links)
Code example #13
File: java_thrifty_gen.py Project: wisechengyi/pants
    def _compute_include_paths(self, target):
        """Computes the set of paths that thrifty uses to lookup imports.

        The IDL files under these paths are not compiled, but they are required to compile
        downstream IDL files.

        :param target: the JavaThriftyLibrary target to compile.
        :return: an ordered set of directories to pass along to thrifty.
        """
        paths = OrderedSet()
        paths.add(os.path.join(get_buildroot(), target.target_base))

        def collect_paths(dep):
            if not dep.has_sources(".thrift"):
                return
            paths.add(os.path.join(get_buildroot(), dep.target_base))

        collect_paths(target)
        target.walk(collect_paths)
        return paths
Code example #14
File: build_graph.py Project: wiwa/pants
    def transitive_subgraph_of_addresses_bfs(self,
                                             addresses,
                                             predicate=None,
                                             dep_predicate=None):
        """Returns the transitive dependency closure of `addresses` using BFS.

        :API: public

        :param list<Address> addresses: The closure of `addresses` will be walked.
        :param function predicate: If this parameter is not given, no Targets will be filtered
          out of the closure.  If it is given, any Target which fails the predicate will not be
          walked, nor will its dependencies.  Thus predicate effectively trims out any subgraph
          that would only be reachable through Targets that fail the predicate.
        :param function dep_predicate: Takes two parameters, the current target and the dependency of
          the current target. If this parameter is not given, no dependencies will be filtered
          when traversing the closure. If it is given, when the predicate fails, the edge to the dependency
          will not be expanded.
        """
        walk = self._walk_factory(dep_predicate)

        ordered_closure = OrderedSet()
        to_walk = deque((0, addr) for addr in addresses)
        while len(to_walk) > 0:
            level, address = to_walk.popleft()

            if not walk.expand_once(address, level):
                continue

            target = self._target_by_address[address]
            if predicate and not predicate(target):
                continue
            if walk.do_work_once(address):
                ordered_closure.add(target)
            for dep_address in self._target_dependencies_by_address[address]:
                if walk.expanded_or_worked(dep_address):
                    continue
                if walk.dep_predicate(target,
                                      self._target_by_address[dep_address],
                                      level):
                    to_walk.append((level + 1, dep_address))
        return ordered_closure
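Stripped of the memoizing walk bookkeeping and the two predicates, the loop above is a standard BFS that emits each node exactly once, in discovery order, while tracking its depth. A self-contained skeleton of that core:

from collections import deque

def bfs_closure(roots, edges):
    """Return nodes reachable from `roots`, in BFS discovery order.

    `edges` maps node -> iterable of dependency nodes. This is a simplified
    stand-in for the traversal above, without predicates or re-walk handling.
    """
    seen = set(roots)
    closure = []
    to_walk = deque((0, node) for node in roots)
    while to_walk:
        level, node = to_walk.popleft()
        closure.append(node)
        for dep in edges.get(node, ()):
            if dep not in seen:
                seen.add(dep)
                to_walk.append((level + 1, dep))
    return closure

print(bfs_closure(["a"], {"a": ["b", "c"], "b": ["c", "d"]}))
# -> ['a', 'b', 'c', 'd']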
Code example #15
 def _compute_transitive_source_dependencies(
     self,
     target: Target,
     info_entry: Tuple[str, ...],
     modulizable_target_set: FrozenOrderedSet[Target],
 ) -> Tuple[str, ...]:
     if self._is_strict_deps(target):
         return info_entry
     else:
         transitive_targets = OrderedSet(info_entry)
         self.context.build_graph.walk_transitive_dependency_graph(
             addresses=[target.address],
             predicate=lambda d: d in modulizable_target_set,
             work=lambda d: transitive_targets.add(d.address.spec),
         )
         return tuple(transitive_targets)
Code example #16
    def _compute_missing_deps(self, src_tgt, actual_deps):
        """Computes deps that are used by the compiler but not specified in a BUILD file.

        These deps are bugs waiting to happen: the code may happen to compile because the dep was
        brought in some other way (e.g., by some other root target), but that is obviously fragile.

        Note that in practice we're OK with reliance on indirect deps that are only brought in
        transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
        cases aren't as fragile as a completely missing dependency. It's still a good idea to have
        explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
        easy to find and reason about.

        - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
          compiler.

        Returns a tuple (missing_file_deps, missing_direct_tgt_deps) where:

        - missing_file_deps: a list of dep_files where src_tgt requires dep_file, and we're unable
          to map to a target (because its target isn't in the total set of targets in play,
          and we don't want to parse every BUILD file in the workspace just to find it).

        - missing_direct_tgt_deps: a list of dep_tgts where src_tgt is missing a direct dependency
                                   on dep_tgt but has a transitive dep on it.

        All paths in the input and output are absolute.
        """
        analyzer = self._analyzer

        def must_be_explicit_dep(dep):
            # We don't require explicit deps on the java runtime, so we shouldn't consider that
            # a missing dep.
            return dep not in analyzer.bootstrap_jar_classfiles and not dep.startswith(
                self._distribution.real_home
            )

        def target_or_java_dep_in_targets(target, targets):
            # We want to check if the target is in the targets collection
            #
            # However, for the special case of scala_library that has a java_sources
            # reference we're ok if that exists in targets even if the scala_library does not.

            if target in targets:
                return True
            elif isinstance(target, ScalaLibrary):
                return any(t in targets for t in target.java_sources)
            else:
                return False

        # Find deps that are actual but not specified.
        missing_file_deps = OrderedSet()  # (src_tgt, dep_file) pairs.
        missing_direct_tgt_deps_map = defaultdict(list)  # The same, but for direct deps.

        targets_by_file = analyzer.targets_by_file(self.context.targets())
        for actual_dep in filter(must_be_explicit_dep, actual_deps):
            actual_dep_tgts = targets_by_file.get(actual_dep)
            # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
            # to be in our declared deps to be OK.
            if actual_dep_tgts is None:
                missing_file_deps.add((src_tgt, actual_dep))
            elif not target_or_java_dep_in_targets(src_tgt, actual_dep_tgts):
                # Obviously intra-target deps are fine.
                canonical_actual_dep_tgt = next(iter(actual_dep_tgts))
                if canonical_actual_dep_tgt not in src_tgt.dependencies:
                    # The canonical dep is the only one a direct dependency makes sense on.
                    # TODO get rid of src usage here. We don't have a way to map class
                    # files back to source files when using jdeps. I think we can get away without
                    # listing the src file directly and just list the target which has the
                    # transitive dep.
                    missing_direct_tgt_deps_map[(src_tgt, canonical_actual_dep_tgt)].append(
                        (src_tgt, actual_dep)
                    )

        return (list(missing_file_deps), list(missing_direct_tgt_deps_map.items()))
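The bookkeeping at the end uses a common grouping idiom: each piece of evidence is bucketed under the missing edge it supports via defaultdict(list), then flattened with .items(). A minimal sketch of the idiom on its own:

from collections import defaultdict

def group_by_edge(evidence):
    """Bucket (edge, detail) pairs into edge -> [details], keeping insertion order."""
    grouped = defaultdict(list)
    for edge, detail in evidence:
        grouped[edge].append(detail)
    return list(grouped.items())

print(group_by_edge([(("a", "b"), "a.py"), (("a", "b"), "a2.py"), (("a", "c"), "a.py")]))
# -> [(('a', 'b'), ['a.py', 'a2.py']), (('a', 'c'), ['a.py'])]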
Code example #17
File: round_engine.py Project: wiwa/pants
class RoundManager:
    """
    :API: public
    """
    class MissingProductError(KeyError):
        """Indicates a required product type is provided by non-one."""

    @staticmethod
    def _index_products():
        producer_info_by_product_type = defaultdict(OrderedSet)
        for goal in Goal.all():
            for task_type in goal.task_types():
                for product_type in task_type.product_types():
                    producer_info = ProducerInfo(product_type, task_type, goal)
                    producer_info_by_product_type[product_type].add(
                        producer_info)
        return producer_info_by_product_type

    def __init__(self, context):
        self._dependencies = OrderedSet()
        self._optional_dependencies = OrderedSet()
        self._context = context
        self._producer_infos_by_product_type = None

    def require(self, product_type):
        """Schedules the tasks that produce product_type to be executed before the requesting task.

        There must be at least one task that produces the required product type, or the
        dependencies will not be satisfied.

        :API: public
        """
        self._dependencies.add(product_type)
        self._context.products.require(product_type)

    def optional_product(self, product_type):
        """Schedules tasks, if any, that produce product_type to be executed before the requesting
        task.

        There need not be any tasks that produce the required product type.  All this method
        guarantees is that if there are any then they will be executed before the requesting task.

        :API: public
        """
        self._optional_dependencies.add(product_type)
        self.require(product_type)

    def require_data(self, product_type):
        """Schedules the tasks that produce product_type to be executed before the requesting task.

        There must be at least one task that produces the required product type, or the
        dependencies will not be satisfied.

        :API: public
        """
        self._dependencies.add(product_type)
        self._context.products.require_data(product_type)

    def optional_data(self, product_type):
        """Schedules tasks, if any, that produce product_type to be executed before the requesting
        task.

        There need not be any tasks that produce the required product type.  All this method
        guarantees is that if there are any then they will be executed before the requesting task.

        :API: public
        """
        self._optional_dependencies.add(product_type)
        self.require_data(product_type)

    def get_dependencies(self):
        """Returns the set of data dependencies as producer infos corresponding to data
        requirements."""
        producer_infos = OrderedSet()
        for product_type in self._dependencies:
            producer_infos.update(
                self._get_producer_infos_by_product_type(product_type))
        return producer_infos

    def _get_producer_infos_by_product_type(self, product_type):
        if self._producer_infos_by_product_type is None:
            self._producer_infos_by_product_type = self._index_products()

        producer_infos = self._producer_infos_by_product_type[product_type]
        if not producer_infos and product_type not in self._optional_dependencies:
            raise self.MissingProductError(
                "No producers registered for '{0}'".format(product_type))
        return producer_infos
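_index_products builds its reverse index lazily, on the first lookup, mapping each product type to its producers with duplicates dropped and registration order kept. A minimal sketch of that index shape, with an insertion-ordered dict standing in for OrderedSet and plain strings standing in for ProducerInfo:

from collections import defaultdict

def index_producers(registrations):
    """Map product type -> producers, de-duplicated, in registration order."""
    index = defaultdict(dict)
    for product_type, producer in registrations:
        index[product_type].setdefault(producer, None)
    return {ptype: list(producers) for ptype, producers in index.items()}

print(index_producers([("jars", "JarTask"), ("jars", "JarTask"), ("docs", "DocTask")]))
# -> {'jars': ['JarTask'], 'docs': ['DocTask']}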
Code example #18
        def process_target(current_target):
            """
            :type current_target:pants.build_graph.target.Target
            """
            def get_target_type(tgt):
                def is_test(t):
                    return isinstance(t, JUnitTests) or isinstance(
                        t, PythonTests)

                if is_test(tgt):
                    return SourceRootTypes.TEST
                else:
                    if (isinstance(tgt, Resources)
                            and tgt in resource_target_map
                            and is_test(resource_target_map[tgt])):
                        return SourceRootTypes.TEST_RESOURCE
                    elif isinstance(tgt, Resources):
                        return SourceRootTypes.RESOURCE
                    else:
                        return SourceRootTypes.SOURCE

            info = {
                "targets": [],
                "libraries": [],
                "roots": [],
                "id": current_target.id,
                "target_type": get_target_type(current_target),
                # NB: is_code_gen should be removed when export format advances to 1.1.0 or higher
                "is_code_gen": current_target.is_synthetic,
                "is_synthetic": current_target.is_synthetic,
                "pants_target_type": self._get_pants_target_alias(type(current_target)),
            }

            if not current_target.is_synthetic:
                info["globs"] = current_target.globs_relative_to_buildroot()
                if self.get_options().sources:
                    info["sources"] = list(
                        current_target.sources_relative_to_buildroot())

            info["transitive"] = current_target.transitive
            info["scope"] = str(current_target.scope)
            info["is_target_root"] = current_target in target_roots_set

            if isinstance(current_target, PythonRequirementLibrary):
                reqs = current_target.payload.get_field_value(
                    "requirements", set())
                """:type : set[pants.python.python_requirement.PythonRequirement]"""
                info["requirements"] = [req.key for req in reqs]

            if isinstance(current_target, PythonTarget):
                interpreter_for_target = self._interpreter_cache.select_interpreter_for_targets(
                    [current_target])
                if interpreter_for_target is None:
                    raise TaskError(
                        "Unable to find suitable interpreter for {}".format(
                            current_target.address))
                python_interpreter_targets_mapping[
                    interpreter_for_target].append(current_target)
                info["python_interpreter"] = str(
                    interpreter_for_target.identity)

            def iter_transitive_jars(jar_lib):
                """
                :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
                :rtype: :class:`collections.Iterator` of
                        :class:`pants.java.jar.M2Coordinate`
                """
                if classpath_products:
                    jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
                        (jar_lib, ))
                    for _, jar_entry in jar_products:
                        coordinate = jar_entry.coordinate
                        # We drop classifier and type_ since those fields are represented in the global
                        # libraries dict and here we just want the key into that dict (see `_jar_id`).
                        yield M2Coordinate(org=coordinate.org,
                                           name=coordinate.name,
                                           rev=coordinate.rev)

            target_libraries = OrderedSet()
            if isinstance(current_target, JarLibrary):
                target_libraries = OrderedSet(
                    iter_transitive_jars(current_target))
            for dep in current_target.dependencies:
                info["targets"].append(dep.address.spec)
                if isinstance(dep, JarLibrary):
                    for jar in dep.jar_dependencies:
                        target_libraries.add(
                            M2Coordinate(jar.org, jar.name, jar.rev))
                    # Add all the jars pulled in by this jar_library
                    target_libraries.update(iter_transitive_jars(dep))
                if isinstance(dep, Resources):
                    resource_target_map[dep] = current_target

            if isinstance(current_target, ScalaLibrary):
                for dep in current_target.java_sources:
                    info["targets"].append(dep.address.spec)
                    process_target(dep)

            if isinstance(current_target, JvmTarget):
                info["excludes"] = [
                    self._exclude_id(exclude)
                    for exclude in current_target.excludes
                ]
                info["platform"] = current_target.platform.name
                if hasattr(current_target, "runtime_platform"):
                    info["runtime_platform"] = current_target.runtime_platform.name

            info["roots"] = [{
                "source_root": source_root_package_prefix[0],
                "package_prefix": source_root_package_prefix[1],
            } for source_root_package_prefix in self._source_roots_for_target(
                current_target)]

            if classpath_products:
                info["libraries"] = [
                    self._jar_id(lib) for lib in target_libraries
                ]
            targets_map[current_target.address.spec] = info
Code example #19
File: jar_publish.py Project: wisechengyi/pants
        def stage_artifacts(tgt, jar, version, tag, changelog):
            publications = OrderedSet()

            # TODO Remove this once we fix https://github.com/pantsbuild/pants/issues/1229
            if (
                not self.context.products.get("jars").has(tgt)
                and not self.get_options().individual_plugins
            ):
                raise TaskError(
                    "Expected to find a primary artifact for {} but there was no jar for it.".format(
                        tgt.address.reference()
                    )
                )

            # TODO Remove this guard once we fix https://github.com/pantsbuild/pants/issues/1229, there
            # should always be a primary artifact.
            if self.context.products.get("jars").has(tgt):
                self._copy_artifact(tgt, jar, version, typename="jars")
                publications.add(self.Publication(name=jar.name, classifier=None, ext="jar"))

                self.create_source_jar(tgt, jar, version)
                publications.add(self.Publication(name=jar.name, classifier="sources", ext="jar"))

                # don't request docs unless they are available for all transitive targets
                # TODO: doc products should be checked by an independent jar'ing task, and
                # conditionally enabled; see https://github.com/pantsbuild/pants/issues/568
                doc_jar = self.create_doc_jar(tgt, jar, version)
                if doc_jar:
                    publications.add(
                        self.Publication(name=jar.name, classifier="javadoc", ext="jar")
                    )

                if self.publish_changelog:
                    changelog_path = self.artifact_path(
                        jar, version, suffix="-CHANGELOG", extension="txt"
                    )
                    with safe_open(changelog_path, "w") as changelog_file:
                        changelog_file.write(changelog)
                    publications.add(
                        self.Publication(name=jar.name, classifier="CHANGELOG", ext="txt")
                    )

            # Process any extra jars that might have been previously generated for this target, or a
            # target that it was derived from.
            for extra_product, extra_config in (self.get_options().publish_extras or {}).items():
                override_name = jar.name
                if "override_name" in extra_config:
                    # If the supplied string has a '{target_provides_name}' in it, replace it with the
                    # current jar name. If not, the string will be taken verbatim.
                    override_name = extra_config["override_name"].format(
                        target_provides_name=jar.name
                    )

                classifier = None
                suffix = ""
                if "classifier" in extra_config:
                    classifier = extra_config["classifier"]
                    suffix = f"-{classifier}"

                extension = extra_config.get("extension", "jar")

                extra_pub = self.Publication(
                    name=override_name, classifier=classifier, ext=extension
                )

                # A lot of flexibility is allowed in parameterizing the extra artifact, ensure those
                # parameters lead to a unique publication.
                # TODO(John Sirois): Check this much earlier.
                if extra_pub in publications:
                    raise TaskError(
                        "publish_extra for '{0}' must override one of name, classifier or "
                        "extension with a non-default value.".format(extra_product)
                    )

                # Build a list of targets to check. This list will consist of the current target, plus the
                # entire derived_from chain.
                target_list = [tgt]
                target = tgt
                while target.derived_from != target:
                    target_list.append(target.derived_from)
                    target = target.derived_from
                for cur_tgt in target_list:
                    if self.context.products.get(extra_product).has(cur_tgt):
                        self._copy_artifact(
                            cur_tgt,
                            jar,
                            version,
                            typename=extra_product,
                            suffix=suffix,
                            extension=extension,
                            override_name=override_name,
                        )
                        publications.add(extra_pub)

            pom_path = self.artifact_path(jar, version, extension="pom")
            PomWriter(get_pushdb, tag).write(tgt, path=pom_path)
            return publications