Example #1
 def set_distribution(self, minimum_version=None, maximum_version=None, jdk=False):
   try:
     self._dist = Distribution.cached(minimum_version=minimum_version,
                                      maximum_version=maximum_version, jdk=jdk)
   except Distribution.Error as e:
     raise TaskError(e)
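A note on this wrap-and-reraise pattern: in Python 3, exception chaining preserves the original traceback. A minimal standalone sketch of the same idiom, with stand-in exception types and a hypothetical lookup function rather than the Pants Distribution/TaskError classes:

    class DistributionError(Exception):
        pass

    class TaskError(Exception):
        pass

    def locate_distribution(minimum_version, maximum_version, jdk):
        # Hypothetical lookup that fails when no matching JVM is found.
        raise DistributionError('no distribution matching {}..{}'.format(
            minimum_version, maximum_version))

    def set_distribution(minimum_version=None, maximum_version=None, jdk=False):
        try:
            return locate_distribution(minimum_version, maximum_version, jdk)
        except DistributionError as e:
            # `from e` chains the cause so the original traceback is preserved.
            raise TaskError(e) from e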
Example #2
    def context(self,
                for_task_types=None,
                for_subsystems=None,
                options=None,
                target_roots=None,
                console_outstream=None,
                workspace=None,
                scheduler=None,
                address_mapper=None,
                **kwargs):
        """
    :API: public

    :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
    """
        # Many tests use source root functionality via SourceRootConfig.global_instance()
        # (typically accessed via Target.target_base), so we always set it up for convenience.
        for_subsystems = set(for_subsystems or ())
        for subsystem in for_subsystems:
            if subsystem.options_scope is None:
                raise TaskError(
                    'You must set a scope on your subsystem type before using it in tests.'
                )

        optionables = {
            SourceRootConfig
        } | self._build_configuration.optionables() | for_subsystems

        for_task_types = for_task_types or ()
        for task_type in for_task_types:
            scope = task_type.options_scope
            if scope is None:
                raise TaskError(
                    'You must set a scope on your task type before using it in tests.'
                )
            optionables.add(task_type)
            # If the task is expected to inherit goal-level options, register those directly on the
            # task by subclassing the goal options registrar and setting its scope to the task scope.
            if issubclass(task_type, GoalOptionsMixin):
                subclass_name = 'test_{}_{}_{}'.format(
                    task_type.__name__,
                    task_type.goal_options_registrar_cls.options_scope,
                    task_type.options_scope)
                if PY2:
                    subclass_name = subclass_name.encode('utf-8')
                optionables.add(
                    type(subclass_name,
                         (task_type.goal_options_registrar_cls, ),
                         {'options_scope': task_type.options_scope}))

        # Now expand to all deps.
        all_optionables = set()
        for optionable in optionables:
            all_optionables.update(si.optionable_cls
                                   for si in optionable.known_scope_infos())

        # Now default the option values and override with any caller-specified values.
        # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
        options = options.copy() if options else {}
        for s, opts in self.options.items():
            scoped_opts = options.setdefault(s, {})
            scoped_opts.update(opts)

        fake_options = create_options_for_optionables(all_optionables,
                                                      options=options,
                                                      **kwargs)

        Subsystem.reset(reset_options=True)
        Subsystem.set_options(fake_options)

        scheduler = scheduler or self.scheduler

        address_mapper = address_mapper or self.address_mapper

        context = create_context_from_options(
            fake_options,
            target_roots=target_roots,
            build_graph=self.build_graph,
            build_file_parser=self._build_file_parser,
            address_mapper=address_mapper,
            console_outstream=console_outstream,
            workspace=workspace,
            scheduler=scheduler)
        return context
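The goal-options inheritance above hinges on building a registrar subclass at runtime with the three-argument form of type(name, bases, namespace). A self-contained sketch of that mechanism, with illustrative names rather than the Pants classes:

    class GoalOptionsRegistrar:
        options_scope = 'fmt'

    # Re-scope the registrar to a task by creating a subclass on the fly.
    task_scope = 'fmt.isort'
    subclass_name = 'test_registrar_{}'.format(task_scope.replace('.', '_'))
    registrar_cls = type(subclass_name, (GoalOptionsRegistrar,),
                         {'options_scope': task_scope})

    assert issubclass(registrar_cls, GoalOptionsRegistrar)
    assert registrar_cls.options_scope == 'fmt.isort'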
Example #3
    def __init__(self, *args, **kwargs):
        super(JUnitRun, self).__init__(*args, **kwargs)

        options = self.get_options()
        self._coverage = None
        if options.coverage or options.is_flagged('coverage_open'):
            coverage_processor = options.coverage_processor
            if coverage_processor == 'cobertura':
                settings = CoberturaTaskSettings.from_task(self)
                self._coverage = Cobertura(settings)
            else:
                raise TaskError('unknown coverage processor {0}'.format(
                    coverage_processor))

        self._tests_to_run = options.test
        self._batch_size = options.batch_size
        self._fail_fast = options.fail_fast
        self._working_dir = options.cwd or get_buildroot()
        self._strict_jvm_version = options.strict_jvm_version
        self._args = copy.copy(self.args)
        self._failure_summary = options.failure_summary
        self._open = options.open
        self._html_report = self._open or options.html_report

        if options.output_mode == 'ALL':
            self._args.append('-output-mode=ALL')
        elif options.output_mode == 'FAILURE_ONLY':
            self._args.append('-output-mode=FAILURE_ONLY')
        else:
            self._args.append('-output-mode=NONE')

        if self._fail_fast:
            self._args.append('-fail-fast')
        self._args.append('-outdir')
        self._args.append(self.workdir)
        if options.per_test_timer:
            self._args.append('-per-test-timer')

        if options.default_parallel:
            # TODO(zundel): Remove when --default_parallel finishes deprecation
            if options.default_concurrency != junit_tests.CONCURRENCY_SERIAL:
                self.context.log.warn(
                    '--default-parallel overrides --default-concurrency')
            self._args.append('-default-concurrency')
            self._args.append('PARALLEL_CLASSES')
        else:
            if options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES_AND_METHODS:
                if not options.use_experimental_runner:
                    self.context.log.warn(
                        '--default-concurrency=PARALLEL_CLASSES_AND_METHODS is experimental, use --use-experimental-runner.'
                    )
                self._args.append('-default-concurrency')
                self._args.append('PARALLEL_CLASSES_AND_METHODS')
            elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_METHODS:
                if not options.use_experimental_runner:
                    self.context.log.warn(
                        '--default-concurrency=PARALLEL_METHODS is experimental, use --use-experimental-runner.'
                    )
                if options.test_shard:
                    # NB(zundel): The experimental junit runner doesn't support test sharding natively.  The
                    # legacy junit runner allows both methods and classes to run in parallel with this option.
                    self.context.log.warn(
                        '--default-concurrency=PARALLEL_METHODS with test sharding will run classes in parallel too.'
                    )
                self._args.append('-default-concurrency')
                self._args.append('PARALLEL_METHODS')
            elif options.default_concurrency == junit_tests.CONCURRENCY_PARALLEL_CLASSES:
                self._args.append('-default-concurrency')
                self._args.append('PARALLEL_CLASSES')
            elif options.default_concurrency == junit_tests.CONCURRENCY_SERIAL:
                self._args.append('-default-concurrency')
                self._args.append('SERIAL')

        self._args.append('-parallel-threads')
        self._args.append(str(options.parallel_threads))

        if options.test_shard:
            self._args.append('-test-shard')
            self._args.append(options.test_shard)

        if options.use_experimental_runner:
            self.context.log.info('Using experimental junit-runner logic.')
            self._args.append('-use-experimental-runner')
Example #4
    def execute(self):
        self.check_clean_master(commit=(not self.dryrun and self.commit))

        exported_targets = self.exported_targets()
        self.check_targets(exported_targets)

        pushdbs = {}

        def get_db(tgt):
            # TODO(tdesai) Handle resource type in get_db.
            if tgt.provides is None:
                raise TaskError(
                    f"trying to publish target {tgt!r} which does not provide an artifact"
                )
            dbfile = tgt.provides.repo.push_db(tgt)
            result = pushdbs.get(dbfile)
            if not result:
                # Create an empty pushdb if no dbfile exists.
                if os.path.exists(dbfile):
                    db = PushDb.load(dbfile)
                else:
                    safe_mkdir(os.path.dirname(dbfile))
                    db = PushDb()
                try:
                    repo = self.repos[tgt.provides.repo.name]
                except KeyError:
                    raise TaskError(
                        "Repository {0} has no entry in the --repos option.".
                        format(tgt.provides.repo.name))
                result = (db, dbfile, repo)
                pushdbs[dbfile] = result
            return result

        def get_pushdb(tgt):
            return get_db(tgt)[0]

        def fingerprint_internal(tgt):
            pushdb = get_pushdb(tgt)
            entry = pushdb.get_entry(tgt)
            return entry.fingerprint or "0.0.0"

        def stage_artifacts(tgt, jar, version, tag, changelog):
            publications = OrderedSet()

            # TODO Remove this once we fix https://github.com/pantsbuild/pants/issues/1229
            if (not self.context.products.get("jars").has(tgt)
                    and not self.get_options().individual_plugins):
                raise TaskError(
                    "Expected to find a primary artifact for {} but there was no jar for it."
                    .format(tgt.address.reference()))

            # TODO Remove this guard once we fix https://github.com/pantsbuild/pants/issues/1229, there
            # should always be a primary artifact.
            if self.context.products.get("jars").has(tgt):
                self._copy_artifact(tgt, jar, version, typename="jars")
                publications.add(
                    self.Publication(name=jar.name, classifier=None,
                                     ext="jar"))

                self.create_source_jar(tgt, jar, version)
                publications.add(
                    self.Publication(name=jar.name,
                                     classifier="sources",
                                     ext="jar"))

                # don't request docs unless they are available for all transitive targets
                # TODO: doc products should be checked by an independent jar'ing task, and
                # conditionally enabled; see https://github.com/pantsbuild/pants/issues/568
                doc_jar = self.create_doc_jar(tgt, jar, version)
                if doc_jar:
                    publications.add(
                        self.Publication(name=jar.name,
                                         classifier="javadoc",
                                         ext="jar"))

                if self.publish_changelog:
                    changelog_path = self.artifact_path(jar,
                                                        version,
                                                        suffix="-CHANGELOG",
                                                        extension="txt")
                    with safe_open(changelog_path, "w") as changelog_file:
                        changelog_file.write(changelog)
                    publications.add(
                        self.Publication(name=jar.name,
                                         classifier="CHANGELOG",
                                         ext="txt"))

            # Process any extra jars that might have been previously generated for this target, or a
            # target that it was derived from.
            for extra_product, extra_config in (
                    self.get_options().publish_extras or {}).items():
                override_name = jar.name
                if "override_name" in extra_config:
                    # If the supplied string has a '{target_provides_name}' in it, replace it with the
                    # current jar name. If not, the string will be taken verbatim.
                    override_name = extra_config["override_name"].format(
                        target_provides_name=jar.name)

                classifier = None
                suffix = ""
                if "classifier" in extra_config:
                    classifier = extra_config["classifier"]
                    suffix = f"-{classifier}"

                extension = extra_config.get("extension", "jar")

                extra_pub = self.Publication(name=override_name,
                                             classifier=classifier,
                                             ext=extension)

                # A lot of flexibility is allowed in parameterizing the extra artifact, so ensure
                # those parameters lead to a unique publication.
                # TODO(John Sirois): Check this much earlier.
                if extra_pub in publications:
                    raise TaskError(
                        "publish_extra for '{0}' must override one of name, classifier or "
                        "extension with a non-default value.".format(
                            extra_product))

                # Build a list of targets to check. This list will consist of the current target, plus the
                # entire derived_from chain.
                target_list = [tgt]
                target = tgt
                while target.derived_from != target:
                    target_list.append(target.derived_from)
                    target = target.derived_from
                for cur_tgt in target_list:
                    if self.context.products.get(extra_product).has(cur_tgt):
                        self._copy_artifact(
                            cur_tgt,
                            jar,
                            version,
                            typename=extra_product,
                            suffix=suffix,
                            extension=extension,
                            override_name=override_name,
                        )
                        publications.add(extra_pub)

            pom_path = self.artifact_path(jar, version, extension="pom")
            PomWriter(get_pushdb, tag).write(tgt, path=pom_path)
            return publications

        if self.overrides:
            print("\nPublishing with revision overrides:")
            for (org, name), rev in self.overrides.items():
                print(f"{coordinate(org, name)}={rev}")

        head_sha = self.scm.commit_id if self.scm else None

        safe_rmtree(self.workdir)
        published = []
        skip = self.restart_at is not None
        for target in exported_targets:
            pushdb, dbfile, repo = get_db(target)
            oldentry = pushdb.get_entry(target)

            # the jar version is ignored here, since it is overridden below with the new entry
            jar, _ = target.get_artifact_info()
            published.append(jar)

            if skip and (jar.org, jar.name) == self.restart_at:
                skip = False
            # select the next version: either a named version, or semver via the pushdb/overrides
            if self.named_snapshot:
                newentry = oldentry.with_named_ver(self.named_snapshot)
            else:
                override = self.overrides.get((jar.org, jar.name))
                sem_ver = override if override else oldentry.sem_ver.bump()
                if self.local_snapshot:
                    sem_ver = sem_ver.make_snapshot()

                if sem_ver <= oldentry.sem_ver:
                    raise TaskError(
                        "Requested version {} must be greater than the current version {}"
                        .format(sem_ver, oldentry.sem_ver))
                newentry = oldentry.with_sem_ver(sem_ver)

            newfingerprint = self.entry_fingerprint(target,
                                                    fingerprint_internal)
            newentry = newentry.with_sha_and_fingerprint(
                head_sha, newfingerprint)
            no_changes = newentry.fingerprint == oldentry.fingerprint

            changelog = ""
            if self.publish_changelog:
                if no_changes:
                    changelog = (
                        f"No changes for {pushdb_coordinate(jar, oldentry)} - forced push.\n"
                    )
                else:
                    changelog = (self.changelog(target, oldentry.sha)
                                 or "Direct dependencies changed.\n")

            org = jar.org
            name = jar.name
            rev = newentry.version().version()
            tag_name = f"{org}-{name}-{rev}" if self.commit else None

            if no_changes and not self.force:
                print(f"No changes for {pushdb_coordinate(jar, oldentry)}")
                stage_artifacts(target, jar,
                                oldentry.version().version(), tag_name,
                                changelog)
            elif skip:
                print("Skipping {} to resume at {}".format(
                    jar_coordinate(
                        jar,
                        (newentry.version()
                         if self.force else oldentry.version()).version(),
                    ),
                    coordinate(self.restart_at[0], self.restart_at[1]),
                ))
                stage_artifacts(target, jar,
                                oldentry.version().version(), tag_name,
                                changelog)
            else:
                if not self.dryrun:
                    # Confirm push looks good
                    if self.publish_changelog:
                        if no_changes:
                            print(changelog)
                        else:
                            # print() can mis-detect the output encoding and choke on non-ascii
                            # changelog text, so write the message directly to stdout instead.
                            # TODO(John Sirois): Consider introducing a pants/util `print_safe` helper for this.
                            message = "\nChanges for {} since {} @ {}:\n\n{}\n".format(
                                coordinate(jar.org, jar.name),
                                oldentry.version(),
                                oldentry.sha,
                                changelog,
                            )
                            sys.stdout.write(message)
                    if not self.confirm_push(coordinate(jar.org, jar.name),
                                             newentry.version()):
                        raise TaskError("User aborted push")

                pushdb.set_entry(target, newentry)
                publications = stage_artifacts(target, jar, rev, tag_name,
                                               changelog)

                if self.dryrun:
                    print(
                        f"Skipping publish of {pushdb_coordinate(jar, newentry)} in test mode."
                    )
                else:
                    self.publish(publications,
                                 jar=jar,
                                 entry=newentry,
                                 repo=repo,
                                 published=published)

                    if self.commit:
                        coord = coordinate(org, name, rev)

                        pushdb.dump(dbfile)

                        self.publish_pushdb_changes_to_remote_scm(
                            pushdb_file=dbfile,
                            coordinate=coord,
                            tag_name=tag_name,
                            tag_message="Publish of {coordinate} initiated by {user} {cause}".format(
                                coordinate=coord,
                                user=getpass.getuser(),
                                cause="with forced revision"
                                if (org, name) in self.overrides else "(autoinc)",
                            ),
                            postscript=self.push_postscript,
                        )
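Worth isolating from stage_artifacts above: the derived_from walk terminates because original targets are their own derived_from. A standalone sketch with a stand-in Target class, not the Pants one:

    class Target:
        def __init__(self, name, derived_from=None):
            self.name = name
            # Original targets point back at themselves.
            self.derived_from = derived_from or self

    original = Target('lib')
    codegen = Target('lib.codegen', derived_from=original)

    target_list = [codegen]
    target = codegen
    while target.derived_from != target:
        target_list.append(target.derived_from)
        target = target.derived_from

    assert [t.name for t in target_list] == ['lib.codegen', 'lib']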
Example #5
    def _runtool_hermetic(self, main, tool_name, distribution, input_digest,
                          ctx):
        use_youtline = tool_name == 'scalac-outliner'

        tool_classpath_abs = self._scalac_classpath if use_youtline else self._rsc_classpath
        tool_classpath = fast_relpath_collection(tool_classpath_abs)

        rsc_jvm_options = Rsc.global_instance().get_options().jvm_options

        if not use_youtline and self._rsc.use_native_image:
            if rsc_jvm_options:
                raise ValueError(
                    "`{}` got non-empty jvm_options when running with a graal native-image, but this is "
                    "unsupported. jvm_options received: {}".format(
                        self.options_scope, safe_shlex_join(rsc_jvm_options)))
            native_image_path, native_image_snapshot = self._rsc.native_image(
                self.context)
            additional_snapshots = [native_image_snapshot]
            initial_args = [native_image_path]
        else:
            additional_snapshots = []
            initial_args = [
                distribution.java,
            ] + rsc_jvm_options + [
                '-cp',
                os.pathsep.join(tool_classpath),
                main,
            ]

        argfile_snapshot, = self.context._scheduler.capture_snapshots([
            PathGlobsAndRoot(
                PathGlobs([fast_relpath(ctx.args_file, get_buildroot())]),
                get_buildroot(),
            ),
        ])

        cmd = initial_args + ['@{}'.format(argfile_snapshot.files[0])]

        pathglobs = list(tool_classpath)

        # Don't capture a snapshot if pathglobs is empty; default the digest to None
        # so the merge below can safely skip it.
        path_globs_input_digest = None
        if pathglobs:
            root = PathGlobsAndRoot(PathGlobs(tuple(pathglobs)),
                                    get_buildroot())
            path_globs_input_digest = self.context._scheduler.capture_snapshots(
                (root, ))[0].directory_digest

        epr_input_files = self.context._scheduler.merge_directories(
            ((path_globs_input_digest, ) if path_globs_input_digest else ()) +
            ((input_digest, ) if input_digest else ()) +
            tuple(s.directory_digest for s in additional_snapshots) +
            (argfile_snapshot.directory_digest, ))

        epr = ExecuteProcessRequest(
            argv=tuple(cmd),
            input_files=epr_input_files,
            output_files=(fast_relpath(ctx.rsc_jar_file.path,
                                       get_buildroot()), ),
            output_directories=tuple(),
            timeout_seconds=15 * 60,
            description='run {} for {}'.format(tool_name, ctx.target),
            # TODO: These should always be unicodes
            # Since this is always hermetic, we need to use `underlying.home` because
            # ExecuteProcessRequest requires an existing, local jdk location.
            jdk_home=distribution.underlying_home,
            is_nailgunnable=True,
        )
        res = self.context.execute_process_synchronously_without_raising(
            epr, self.name(), [WorkUnitLabel.COMPILER])

        if res.exit_code != 0:
            raise TaskError(res.stderr, exit_code=res.exit_code)

        # TODO: parse the output of -Xprint:timings for rsc and write it to self._record_target_stats()!

        res.output_directory_digest.dump(ctx.rsc_jar_file.path)
        self.context._scheduler.materialize_directories((
            DirectoryToMaterialize(
                # NB the first element here is the root to materialize into, not the dir to snapshot
                get_buildroot(),
                res.output_directory_digest), ))
        ctx.rsc_jar_file.hydrate_missing_directory_digest(
            res.output_directory_digest)

        return res
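The merge_directories input above is assembled by concatenating optional singleton tuples, which keeps absent digests out of the merge without extra branching. A tiny standalone sketch of that conditional-tuple idiom:

    path_globs_digest, input_digest = 'digest-a', None
    extra_digests = ('digest-b',)

    merged_inputs = (((path_globs_digest,) if path_globs_digest else ()) +
                     ((input_digest,) if input_digest else ()) +
                     extra_digests)
    assert merged_inputs == ('digest-a', 'digest-b')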
Example #6
    def execute(self):
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return (t.is_original and self.has_provides(t)
                    and not is_local_python_dist(t))

        exported_python_targets = OrderedSet(t
                                             for t in self.context.target_roots
                                             if is_exported_python_target(t))

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created = {}

        def create(exported_python_target):
            if exported_python_target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(
                        exported_python_target))
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir, interpreter=interpreter)
                    tgz_name = os.path.basename(setup_runner.sdist())
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(setup_runner.sdist(), sdist_path)
                    safe_rmtree(setup_dir)
                    python_dists[exported_python_target] = sdist_path
                else:
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    setup_runner = SetupPyRunner(setup_dir,
                                                 split_command,
                                                 interpreter=interpreter)
                    installed = setup_runner.run()
                    if not installed:
                        raise TaskError('Install failed.')
                    python_dists[exported_python_target] = setup_dir
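The create/run split above relies on a memo dict so each exported target's setup.py project is created exactly once, even when recursion revisits shared dependencies. A minimal sketch with a toy dependency map in place of Pants targets:

    deps = {'app': ['libA', 'libB'], 'libA': ['libB'], 'libB': []}
    created = {}

    def create(name):
        if name not in created:
            created[name] = '/tmp/setup/{}'.format(name)  # stand-in for create_setup_py
            for dep in deps[name]:
                create(dep)

    create('app')
    # libB is visited twice but created once.
    assert set(created) == {'app', 'libA', 'libB'}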
Example #7
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cachedir = os.path.join(self.workdir, "cache")

        self._jvm_options = self.get_options().jvm_options

        self.log = self.context.log

        if self.get_options().local:
            local_repo = dict(
                resolver="publish_local",
                path=os.path.abspath(
                    os.path.expanduser(self.get_options().local)),
                confs=["default"],
                auth=None,
            )
            self.repos = defaultdict(lambda: local_repo)
            self.commit = False
            self.local_snapshot = self.get_options().local_snapshot
        else:
            self.repos = self.get_options().repos
            if not self.repos:
                raise TaskError(
                    "This repo is not configured to publish externally! Please configure per\n"
                    "http://pantsbuild.org/publish.html#authenticating-to-the-artifact-repository,\n"
                    "by setting --publish-jar-repos=<dict> or re-run with '--publish-jar-local=<dir>'."
                )
            for repo, data in self.repos.items():
                auth = data.get("auth")
                if auth:
                    credentials = next(iter(self.context.resolve(auth)))
                    user = credentials.username(data["resolver"])
                    password = credentials.password(data["resolver"])
                    self.context.log.debug(
                        f"Found auth for repo={repo} user={user}")
                    self.repos[repo]["username"] = user
                    self.repos[repo]["password"] = password
            self.commit = self.get_options().commit
            self.push_postscript = self.get_options().push_postscript or ""
            self.local_snapshot = False

        self.scm = get_scm() if self.commit else None

        self.named_snapshot = self.get_options().named_snapshot
        if self.named_snapshot:
            self.named_snapshot = Namedver.parse(self.named_snapshot)

        self.dryrun = self.get_options().dryrun
        self.force = self.get_options().force
        self.publish_changelog = self.get_options().changelog and self.scm

        def parse_jarcoordinate(coordinate):
            components = coordinate.split("#", 1)
            if len(components) == 2:
                org, name = components
                return org, name
            else:
                spec = components[0]
                address = Address.parse(spec)
                try:
                    self.context.build_graph.inject_address_closure(address)
                    target = self.context.build_graph.get_target(address)
                    if not target:
                        siblings = self.context.address_mapper.addresses_in_spec_path(
                            address.spec_path)
                        prompt = ("did you mean" if len(siblings) == 1 else
                                  "maybe you meant one of these")
                        raise TaskError("{} => {}?:\n    {}".format(
                            address, prompt,
                            "\n    ".join(str(a) for a in siblings)))
                    if not self._is_exported(target):
                        raise TaskError(
                            f"{coordinate} is not an exported target")
                    return target.provides.org, target.provides.name
                except AddressLookupError as e:
                    raise TaskError(
                        f"{e!r}\n  Problem identifying target at {spec}")

        self.overrides = {}
        if self.get_options().override:
            if self.named_snapshot:
                raise TaskError(
                    "Options --named-snapshot and --override are mutually exclusive!"
                )

            def parse_override(override):
                try:
                    coordinate, rev = override.split("=", 1)
                    try:
                        # overrides imply semantic versioning
                        rev = Semver.parse(rev)
                    except ValueError as e:
                        raise TaskError(f"Invalid version {rev}: {e!r}")
                    return parse_jarcoordinate(coordinate), rev
                except ValueError:
                    raise TaskError(f"Invalid override: {override}")

            self.overrides.update(
                parse_override(o) for o in self.get_options().override)

        self.restart_at = None
        if self.get_options().restart_at:
            self.restart_at = parse_jarcoordinate(
                self.get_options().restart_at)
Example #8
    def resolve_version_conflict(self,
                                 managed_coord,
                                 direct_coord,
                                 force=False):
        """Resolves an artifact version conflict between directly specified and managed jars.

    This uses the user-defined --conflict-strategy to pick the appropriate artifact version (or to
    raise an error).

    This assumes the two conflict coordinates differ only by their version.

    :param M2Coordinate managed_coord: the artifact coordinate as defined by a
      managed_jar_dependencies object.
    :param M2Coordinate direct_coord: the artifact coordinate as defined by a jar_library target.
    :param bool force: Whether the artifact defined by the jar_library() was marked with force=True.
      This is checked only if one of the *_IF_FORCED conflict strategies is being used.
    :return: the coordinate of the artifact that should be resolved.
    :rtype: M2Coordinate
    :raises: JarDependencyManagement.DirectManagedVersionConflict if the versions are different and
      the --conflict-strategy is 'FAIL' (which is the default).
    """
        if M2Coordinate.unversioned(managed_coord) != M2Coordinate.unversioned(
                direct_coord):
            raise ValueError(
                'Illegal arguments passed to resolve_version_conflict: managed_coord and '
                'direct_coord must only differ by their version!\n'
                '  Managed: {}\n  Direct:  {}\n'.format(
                    M2Coordinate.unversioned(managed_coord),
                    M2Coordinate.unversioned(direct_coord),
                ))

        if direct_coord.rev is None or direct_coord.rev == managed_coord.rev:
            return managed_coord

        strategy = self.get_options().conflict_strategy
        message = dedent("""
      An artifact directly specified by a jar_library target has a different version than what
      is specified by managed_jar_dependencies.

        Artifact: jar(org={org}, name={name}, classifier={classifier}, ext={ext})
        Direct version:  {direct}
        Managed version: {managed}
    """).format(
            org=direct_coord.org,
            name=direct_coord.name,
            classifier=direct_coord.classifier,
            ext=direct_coord.ext,
            direct=direct_coord.rev,
            managed=managed_coord.rev,
        )

        if strategy == 'FAIL':
            raise self.DirectManagedVersionConflict(
                '{}\nThis raises an error due to the current --jar-dependency-management-conflict-strategy.'
                .format(message))

        is_silent = self.get_options().suppress_conflict_warnings
        log = logger.debug if is_silent else logger.warn

        if strategy == 'USE_DIRECT':
            log(message)
            log('[{}] Using direct version: {}'.format(strategy, direct_coord))
            return direct_coord

        if strategy == 'USE_DIRECT_IF_FORCED':
            log(message)
            if force:
                log('[{}] Using direct version, because force=True: {}'.format(
                    strategy, direct_coord))
                return direct_coord
            else:
                log('[{}] Using managed version, because force=False: {}'.format(
                    strategy, managed_coord))
                return managed_coord

        if strategy == 'USE_MANAGED':
            log(message)
            log('[{}] Using managed version: {}'.format(
                strategy, managed_coord))
            return managed_coord

        if strategy == 'USE_NEWER':
            newer = max([managed_coord, direct_coord],
                        key=lambda coord: Revision.lenient(coord.rev))
            log(message)
            log('[{}] Using newer version: {}'.format(strategy, newer))
            return newer

        raise TaskError(
            'Unknown value for --conflict-strategy: {}'.format(strategy))
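USE_NEWER defers to Revision.lenient for ordering version strings. As a rough standalone approximation (not the Pants implementation), a lenient key can compare dotted components numerically so that '1.10.0' correctly sorts above '1.9.2':

    def lenient_key(rev):
        # Non-numeric components sort as 0; real lenient parsing is more forgiving.
        return tuple(int(p) if p.isdigit() else 0 for p in rev.split('.'))

    newer = max(['1.9.2', '1.10.0'], key=lenient_key)
    assert newer == '1.10.0'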
Example #9
        def add_target(self, target, recursive=False):
            """Adds the classes and resources for a target to an open jar.

      :param target: The target to add generated classes and resources for.
      :param bool recursive: `True` to add classes and resources for the target's transitive
        internal dependency closure.
      :returns: `True` if the target contributed any files - manifest entries, classfiles or
        resource files - to this jar.
      :rtype: bool
      """
            products_added = False

            classpath_products = self._context.products.get_data(
                'runtime_classpath')

            # TODO(John Sirois): Manifest handling is broken.  We should be tracking state and failing
            # fast if any duplicate entries are added; ie: if we get a second binary or a second agent.

            if isinstance(target, JvmBinary):
                self._add_manifest_entries(target, self._manifest)
                products_added = True
            elif isinstance(target, JavaAgent):
                self._add_agent_manifest(target, self._manifest)
                products_added = True
            elif recursive:
                agents = [
                    t for t in target.closure() if isinstance(t, JavaAgent)
                ]
                if len(agents) > 1:
                    raise TaskError(
                        'Only 1 agent can be added to a jar, found {} for {}:\n\t{}'
                        .format(
                            len(agents), target.address.reference(),
                            '\n\t'.join(agent.address.reference()
                                        for agent in agents)))
                elif agents:
                    self._add_agent_manifest(agents[0], self._manifest)
                    products_added = True

            # In the transitive case we'll gather internal resources naturally as dependencies, but in the
            # non-transitive case we need to manually add these special (in the context of jarring)
            # dependencies.
            targets = target.closure(bfs=True) if recursive else [target]
            if not recursive and target.has_resources:
                targets += target.resources
            # We only gather internal classpath elements per our contract.
            target_classpath = ClasspathUtil.internal_classpath(
                targets, classpath_products)
            for entry in target_classpath:
                if ClasspathUtil.is_jar(entry):
                    self._jar.writejar(entry)
                    products_added = True
                elif ClasspathUtil.is_dir(entry):
                    for rel_file in ClasspathUtil.classpath_entries_contents(
                        [entry]):
                        self._jar.write(os.path.join(entry, rel_file),
                                        rel_file)
                        products_added = True
                else:
                    # non-jar and non-directory classpath entries should be ignored
                    pass

            return products_added
Example #10
    def _run_pytest(self, fail_fast, targets, workdirs):
        if not targets:
            return PytestResult.rc(0)

        if self.run_tests_in_chroot:
            path_func = lambda rel_src: rel_src
        else:
            source_chroot = os.path.relpath(self._source_chroot_path(targets),
                                            get_buildroot())
            path_func = lambda rel_src: os.path.join(source_chroot, rel_src)

        sources_map = OrderedDict()  # Path from chroot -> Path from buildroot.
        for t in targets:
            for p in t.sources_relative_to_source_root():
                sources_map[path_func(p)] = os.path.join(t.target_base, p)

        if not sources_map:
            return PytestResult.rc(0)

        with self._test_runner(workdirs, targets,
                               sources_map) as (pex, test_args):
            # Validate that the user didn't provide any passthru args that conflict
            # with those we must set ourselves.
            for arg in self.get_passthru_args():
                if arg.startswith('--junitxml') or arg.startswith(
                        '--confcutdir'):
                    raise TaskError(
                        'Cannot pass this arg through to pytest: {}'.format(
                            arg))

            junitxml_path = workdirs.junitxml_path(*targets)

            # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
            # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
            # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
            args = [
                '--junitxml', junitxml_path,
                '--confcutdir', get_buildroot(),
                '--continue-on-collection-errors',
            ]
            if fail_fast:
                args.extend(['-x'])
            if self._debug:
                args.extend(['-s'])
            if self.get_options().colors:
                args.extend(['--color', 'yes'])
            for options in (self.get_options().options +
                            self.get_passthru_args()):
                args.extend(safe_shlex_split(options))
            args.extend(test_args)
            args.extend(sources_map.keys())

            # We want to ensure our reporting based off junit xml is from this run so kill results from
            # prior runs.
            if os.path.exists(junitxml_path):
                os.unlink(junitxml_path)

            with self._maybe_run_in_chroot(targets):
                result = self._do_run_tests_with_args(pex, args)

            # There was a problem prior to test execution preventing junit xml file creation so just let
            # the failure result bubble.
            if not os.path.exists(junitxml_path):
                return result

            failed_targets = self._get_failed_targets_from_junitxml(
                junitxml_path, targets)

            def parse_error_handler(parse_error):
                # Simple error handler to pass to xml parsing function.
                raise TaskError('Error parsing xml file at {}: {}'.format(
                    parse_error.xml_path, parse_error.cause))

            all_tests_info = self.parse_test_info(
                junitxml_path, parse_error_handler,
                ['file', 'name', 'classname'])
            for test_name, test_info in all_tests_info.items():
                test_target = self._get_target_from_test(test_info, targets)
                self.report_all_info_for_single_test(self.options_scope,
                                                     test_target, test_name,
                                                     test_info)

            return result.with_failed_targets(failed_targets)
Example #11
 def parse_error_handler(parse_error):
     # Simple error handler to pass to xml parsing function.
     raise TaskError('Error parsing xml file at {}: {}'.format(
         parse_error.xml_path, parse_error.cause))
Example #12
 def validate_package_manager(cls, package_manager):
   if package_manager not in cls.VALID_PACKAGE_MANAGER_LIST:
     raise TaskError('Unknown package manager: %s' % package_manager)
   package_manager = cls.VALID_PACKAGE_MANAGER_LIST[package_manager]
   return package_manager
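A usage sketch of this validate-and-normalize pattern, with a hypothetical mapping standing in for cls.VALID_PACKAGE_MANAGER_LIST (the real keys and values live on the task class):

    VALID_PACKAGE_MANAGER_LIST = {'npm': 'npm', 'yarn': 'yarnpkg'}

    def validate_package_manager(package_manager):
        if package_manager not in VALID_PACKAGE_MANAGER_LIST:
            raise ValueError('Unknown package manager: %s' % package_manager)
        return VALID_PACKAGE_MANAGER_LIST[package_manager]

    assert validate_package_manager('yarn') == 'yarnpkg'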
Example #13
    def execute(self):
        if self.goal not in PrepCommand.goals():
            raise AssertionError(
                'Got goal "{}". Expected goal to be one of {}'.format(
                    self.goal, PrepCommand.goals()))

        targets = self.context.targets(postorder=True,
                                       predicate=self.runnable_prep_cmd)
        Cmdline = namedtuple('Cmdline', ['cmdline', 'environ'])

        def make_cmdline(target):
            executable = target.payload.get_field_value(
                'prep_command_executable')
            args = target.payload.get_field_value('prep_command_args', [])
            prep_environ = target.payload.get_field_value('prep_environ')
            cmdline = [executable]
            cmdline.extend(args)
            return Cmdline(cmdline=tuple(cmdline), environ=prep_environ)

        def has_prep(target):
            return target.payload.get_field_value('prep_command_executable')

        cmdlines = [
            make_cmdline(target) for target in targets if has_prep(target)
        ]

        if not cmdlines:
            return

        with self.context.new_workunit(name='prep_command',
                                       labels=[WorkUnitLabel.PREP]) as workunit:
            completed_cmdlines = set()
            for item in cmdlines:
                cmdline = item.cmdline
                environ = item.environ
                if cmdline not in completed_cmdlines:
                    completed_cmdlines.add(cmdline)
                    stderr = workunit.output('stderr') if workunit else None
                    try:
                        process = subprocess.Popen(cmdline,
                                                   stdout=subprocess.PIPE,
                                                   stderr=stderr)
                    except OSError as e:
                        workunit.set_outcome(WorkUnit.FAILURE)
                        raise TaskError(
                            'RunPrepCommand failed to execute {cmdline}: {error}'
                            .format(cmdline=cmdline, error=e))
                    stdout, _ = process.communicate()

                    if environ:
                        if not process.returncode:
                            environment_vars = stdout.split('\0')
                            for kvpair in environment_vars:
                                var, value = kvpair.split('=', 1)
                                os.environ[var] = value
                    else:
                        if workunit:
                            workunit.output('stdout').write(stdout)

                    workunit.set_outcome(WorkUnit.FAILURE if process.returncode
                                         else WorkUnit.SUCCESS)
                    if process.returncode:
                        raise TaskError(
                            'RunPrepCommand failed to run {cmdline}'.format(
                                cmdline=cmdline))
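The prep_environ branch assumes the command prints NUL-separated KEY=VALUE pairs on stdout. A standalone sketch of that parsing step (decoding from bytes, which a real Popen pipe would require, is elided):

    import os

    stdout = 'JAVA_HOME=/opt/jdk\0EXTRA_FLAGS=-Xmx1g'
    for kvpair in stdout.split('\0'):
        var, value = kvpair.split('=', 1)  # split once so values may contain '='
        os.environ[var] = value

    assert os.environ['EXTRA_FLAGS'] == '-Xmx1g'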
Example #14
    def _construct_cmd_args(jars, common_args, global_excludes, pinned_coords,
                            coursier_workdir, json_output_path):

        # Make a copy so there is no side effect on other users of `common_args`.
        cmd_args = list(common_args)

        cmd_args.extend(['--json-output-file', json_output_path])

        # Dealing with intransitivity and forced versions.
        for j in jars:
            if not j.rev:
                raise TaskError(
                    'Jars with undefined revs are not supported by Coursier: "{}"'.format(
                        repr(j.coordinate).replace('M2Coordinate', 'jar')))

            module = j.coordinate.simple_coord
            if j.coordinate.classifier:
                module += ',classifier={}'.format(j.coordinate.classifier)

            if j.get_url():
                jar_url = j.get_url()
                module += ',url={}'.format(parse.quote_plus(jar_url))

            if j.intransitive:
                cmd_args.append('--intransitive')

            cmd_args.append(module)

            # Force requires specifying the coord again with -V
            if j.force:
                cmd_args.append('-V')
                cmd_args.append(j.coordinate.simple_coord)

        # Force pinned coordinates
        for m2coord in pinned_coords:
            cmd_args.append('-V')
            cmd_args.append(m2coord.simple_coord)

        # Local exclusions
        local_exclude_args = []
        for jar in jars:
            for ex in jar.excludes:
                # `--` means exclude. See --local-exclude-file in `coursier fetch --help`
                # If ex.name does not exist, that means the whole org needs to be excluded.
                ex_arg = "{}:{}--{}:{}".format(jar.org, jar.name, ex.org,
                                               ex.name or '*')
                local_exclude_args.append(ex_arg)

        if local_exclude_args:
            with temporary_file(coursier_workdir, cleanup=False) as f:
                exclude_file = f.name
                with open(exclude_file, 'w') as ex_f:
                    ex_f.write('\n'.join(local_exclude_args))

                cmd_args.append('--local-exclude-file')
                cmd_args.append(exclude_file)

        for ex in global_excludes:
            cmd_args.append('-E')
            cmd_args.append('{}:{}'.format(ex.org, ex.name or '*'))

        return cmd_args
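The local-exclude entries follow coursier's `org:name--excluded_org:excluded_name` form, with `*` standing in when a whole org is excluded. A quick sketch of the string construction with made-up coordinates:

    def exclude_arg(org, name, ex_org, ex_name=None):
        # '--' separates the owning jar from the coordinate to exclude.
        return '{}:{}--{}:{}'.format(org, name, ex_org, ex_name or '*')

    assert exclude_arg('com.example', 'app', 'org.slf4j') == 'com.example:app--org.slf4j:*'
    assert exclude_arg('com.example', 'app', 'com.google.guava',
                       'guava') == 'com.example:app--com.google.guava:guava'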
Example #15
    def _bootstrap_shaded_jvm_tool(self, jvm_tool, targets):
        fingerprint_strategy = ShadedToolFingerprintStrategy(
            jvm_tool.main, custom_rules=jvm_tool.custom_rules)

        with self.invalidated(
                targets,
                # We're the only dependent in reality since we shade.
                invalidate_dependents=False,
                fingerprint_strategy=fingerprint_strategy
        ) as invalidation_check:

            # If there are no vts, then there are no resolvable targets, so we exit early with an empty
            # classpath.  This supports the optional tool classpath case.
            if not invalidation_check.all_vts:
                return []

            tool_vts = self.tool_vts(invalidation_check)
            jar_name = '{main}-{hash}.jar'.format(main=jvm_tool.main,
                                                  hash=tool_vts.cache_key.hash)
            shaded_jar = os.path.join(self._tool_cache_path, 'shaded_jars',
                                      jar_name)

            if not invalidation_check.invalid_vts and os.path.exists(
                    shaded_jar):
                return [shaded_jar]

            # Ensure we have a single binary jar we can shade.
            binary_jar = os.path.join(self._tool_cache_path, 'binary_jars',
                                      jar_name)
            safe_mkdir_for(binary_jar)

            classpath = self._bootstrap_classpath(jvm_tool, targets)
            if len(classpath) == 1:
                shutil.copy(classpath[0], binary_jar)
            else:
                with self.open_jar(binary_jar) as jar:
                    for classpath_jar in classpath:
                        jar.writejar(classpath_jar)
                    jar.main(jvm_tool.main)

            # Now shade the binary jar and return that single jar as the safe tool classpath.
            safe_mkdir_for(shaded_jar)
            with self.shader.binary_shader(
                    shaded_jar,
                    jvm_tool.main,
                    binary_jar,
                    custom_rules=jvm_tool.custom_rules,
                    jvm_options=self.get_options().jvm_options) as shader:
                try:
                    result = util.execute_runner(
                        shader,
                        workunit_factory=self.context.new_workunit,
                        workunit_name='shade-{}'.format(jvm_tool.key))
                    if result != 0:
                        raise TaskError(
                            "Shading of tool '{key}' with main class {main} for {scope} failed "
                            "with exit code {result}, command run was:\n\t{cmd}"
                            .format(key=jvm_tool.key,
                                    main=jvm_tool.main,
                                    scope=jvm_tool.scope,
                                    result=result,
                                    cmd=shader.cmd))
                except Executor.Error as e:
                    raise TaskError(
                        "Shading of tool '{key}' with main class {main} for {scope} failed "
                        "with: {exception}".format(key=jvm_tool.key,
                                                   main=jvm_tool.main,
                                                   scope=jvm_tool.scope,
                                                   exception=e))

            if self.artifact_cache_writes_enabled():
                self.update_artifact_cache([(tool_vts, [shaded_jar])])

            return [shaded_jar]
Example #16
    def _compute_missing_deps(self, srcs, actual_deps):
        """Computes deps that are used by the compiler but not specified in a BUILD file.

    These deps are bugs waiting to happen: the code may happen to compile because the dep was
    brought in some other way (e.g., by some other root target), but that is obviously fragile.

    Note that in practice we're OK with reliance on indirect deps that are only brought in
    transitively. E.g., in Scala type inference can bring in such a dep subtly. Fortunately these
    cases aren't as fragile as a completely missing dependency. It's still a good idea to have
    explicit direct deps where relevant, so we optionally warn about indirect deps, to make them
    easy to find and reason about.

    - actual_deps: a map src -> list of actual deps (source, class or jar file) as noted by the
      compiler.

    Returns a triple (missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps) where:

    - missing_file_deps: a list of pairs (src_tgt, dep_file) where src_tgt requires dep_file, and
      we're unable to map to a target (because its target isn't in the total set of targets in play,
      and we don't want to parse every BUILD file in the workspace just to find it).

    - missing_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a necessary
                        transitive dependency on dep_tgt.

    - missing_direct_tgt_deps: a list of pairs (src_tgt, dep_tgt) where src_tgt is missing a direct
                               dependency on dep_tgt but has a transitive dep on it.

    All paths in the input and output are absolute.
    """
        def must_be_explicit_dep(dep):
            # We don't require explicit deps on the java runtime, so we shouldn't consider that
            # a missing dep.
            return not dep.startswith(self._context.java_home)

        def target_or_java_dep_in_targets(target, targets):
            # We want to check if the target is in the targets collection
            #
            # However, for the special case of scala_library that has a java_sources
            # reference we're ok if that exists in targets even if the scala_library does not.

            if target in targets:
                return True
            elif target.is_scala:
                return any(t in targets for t in target.java_sources)
            else:
                return False

        # TODO: If recomputing these every time becomes a performance issue, memoize for
        # already-seen targets and incrementally compute for new targets not seen in a previous
        # partition, in this or a previous chunk.
        targets_by_file = self._compute_targets_by_file()
        transitive_deps_by_target = self._compute_transitive_deps_by_target()

        # Find deps that are actual but not specified.
        with self._context.new_workunit(name='scan_deps'):
            missing_file_deps = OrderedSet()  # (src, src).
            missing_tgt_deps_map = defaultdict(
                list)  # (tgt, tgt) -> a list of (src, src) as evidence.
            missing_direct_tgt_deps_map = defaultdict(
                list)  # The same, but for direct deps.

            buildroot = get_buildroot()
            abs_srcs = [os.path.join(buildroot, src) for src in srcs]
            for src in abs_srcs:
                src_tgts = targets_by_file.get(src)
                # `get` returns None for unknown sources; guard before taking the first target.
                src_tgt = next(iter(src_tgts)) if src_tgts else None
                if src_tgt is not None:
                    for actual_dep in filter(must_be_explicit_dep,
                                             actual_deps.get(src, [])):
                        actual_dep_tgts = targets_by_file.get(actual_dep)
                        # actual_dep_tgts is usually a singleton. If it's not, we only need one of these
                        # to be in our declared deps to be OK.
                        if actual_dep_tgts is None:
                            missing_file_deps.add((src_tgt, actual_dep))
                        elif not target_or_java_dep_in_targets(
                                src_tgt, actual_dep_tgts):
                            # Obviously intra-target deps are fine.
                            canonical_actual_dep_tgt = next(
                                iter(actual_dep_tgts))
                            if actual_dep_tgts.isdisjoint(
                                    transitive_deps_by_target.get(src_tgt,
                                                                  [])):
                                missing_tgt_deps_map[(
                                    src_tgt, canonical_actual_dep_tgt)].append(
                                        (src, actual_dep))
                            elif canonical_actual_dep_tgt not in src_tgt.dependencies:
                                # The canonical dep is the only one a direct dependency makes sense on.
                                missing_direct_tgt_deps_map[(
                                    src_tgt, canonical_actual_dep_tgt)].append(
                                        (src, actual_dep))
                else:
                    raise TaskError(
                        'Requested dep info for unknown source file: %s' % src)

        return (list(missing_file_deps), missing_tgt_deps_map.items(),
                missing_direct_tgt_deps_map.items())
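A minimal sketch of consuming the returned triple, with a hypothetical `log` argument; none of this is from the Pants source:

def report_missing_deps(results, log):
  # Hypothetical consumer of _compute_missing_deps' return value.
  missing_file_deps, missing_tgt_deps, missing_direct_tgt_deps = results
  for src_tgt, dep_file in missing_file_deps:
    log.error('{} uses {}, which no known target owns'.format(src_tgt, dep_file))
  for (src_tgt, dep_tgt), evidence in missing_tgt_deps:
    src, dep = evidence[0]
    log.error('{} is missing a dep on {} (e.g. {} uses {})'.format(src_tgt, dep_tgt, src, dep))
  for (src_tgt, dep_tgt), _ in missing_direct_tgt_deps:
    log.warn('{} depends on {} only transitively; consider a direct dep'.format(src_tgt, dep_tgt))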
Example #17
def _cautious_rmtree(root):
  real_buildroot = os.path.realpath(os.path.abspath(get_buildroot()))
  real_root = os.path.realpath(os.path.abspath(root))
  if not real_root.startswith(real_buildroot):
    raise TaskError('DANGER: Attempting to delete %s, which is not under the build root!' % real_root)
  safe_rmtree(real_root)
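Note that a raw startswith prefix check can be fooled by sibling directories (e.g. /buildroot-tmp starts with /buildroot). A stricter containment check compares whole path components; this sketch with os.path.commonpath is an alternative, not the Pants implementation:

import os

def is_under(root, path):
  # Resolve symlinks, then compare whole path components rather than a raw
  # string prefix, so '/buildroot-tmp' is not treated as under '/buildroot'.
  real_root = os.path.realpath(os.path.abspath(root))
  real_path = os.path.realpath(os.path.abspath(path))
  return os.path.commonpath([real_root, real_path]) == real_root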
Example #18
 def process_results(self, result):
     if result != 0:
         raise TaskError('Scalafmt failed with exit code {}; to fix run: '
                         '`./pants fmt <targets>`'.format(result),
                         exit_code=result)
Example #19
 def process_info_file(cp_elem, info_file):
   plugin_info = ElementTree.parse(info_file).getroot()
   if plugin_info.tag != 'plugin':
     raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
         _SCALAC_PLUGIN_INFO_FILE, cp_elem))
   return plugin_info.find('name').text
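For reference, a descriptor of the shape this code expects can be parsed as below. The <classname> element is conventional in scalac plugin descriptors but isn't read above; the plugin name is invented:

from xml.etree import ElementTree

descriptor = """
<plugin>
  <name>example-plugin</name>
  <classname>com.example.ExamplePlugin</classname>
</plugin>
"""

plugin_info = ElementTree.fromstring(descriptor.strip())
assert plugin_info.tag == 'plugin'
print(plugin_info.find('name').text)  # example-plugin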
Example #20
 def process_results(self, result):
     # Processes the results of running the scalafmt command.
     if result != 0:
         raise TaskError('Scalafmt failed to format files',
                         exit_code=result)
Example #21
    def publish(self, publications, jar, entry, repo, published):
        """Run ivy to publish a jar.

        ivyxml_path is the path to the ivy file; published is a list of jars published so far
        (including this one). entry is a pushdb entry.
        """

        try:
            ivy = Bootstrapper.default_ivy()
        except Bootstrapper.Error as e:
            raise TaskError(
                f"Failed to push {pushdb_coordinate(jar, entry)}! {e!r}")

        path = repo.get("path")
        ivysettings = self.generate_ivysettings(ivy,
                                                published,
                                                publish_local=path)

        version = entry.version().version()
        ivyxml = self.generate_ivy(jar, version, publications)

        resolver = repo["resolver"]
        args = [
            "-settings",
            ivysettings,
            "-ivy",
            ivyxml,
            # Without this setting, the ivy.xml is delivered to the CWD, littering the workspace.  We
            # don't need the ivy.xml, so just give it a path under the workdir that we won't use.
            "-deliverto",
            ivyxml + ".unused",
            "-publish",
            resolver,
            "-publishpattern",
            "{}/[organisation]/[module]/"
            "[artifact]-[revision](-[classifier]).[ext]".format(self.workdir),
            "-revision",
            version,
            "-m2compatible",
        ]

        # TODO(John Sirois): global logging options should be hidden behind some sort of log manager
        # that we can:
        # a.) obtain a handle to (dependency injection or manual plumbing)
        # b.) query for log detail, ie: `if log_manager.is_verbose:`
        if self.debug:
            args.append("-verbose")

        if self.local_snapshot:
            args.append("-overwrite")

        try:
            jvm_options = self._ivy_jvm_options(repo)
            ivy.execute(
                jvm_options=jvm_options,
                args=args,
                workunit_factory=self.context.new_workunit,
                workunit_name="ivy-publish",
            )
        except Ivy.Error as e:
            raise TaskError(
                f"Failed to push {pushdb_coordinate(jar, entry)}! {e!r}")
Example #22
 def validate_target_roots(self):
   if len(self.target_roots) != 2:
     raise TaskError('Specify two targets please (found {})'.format(len(self.target_roots)))
Example #23
        def stage_artifacts(tgt, jar, version, tag, changelog):
            publications = OrderedSet()

            # TODO Remove this once we fix https://github.com/pantsbuild/pants/issues/1229
            if (not self.context.products.get("jars").has(tgt)
                    and not self.get_options().individual_plugins):
                raise TaskError(
                    "Expected to find a primary artifact for {} but there was no jar for it."
                    .format(tgt.address.reference()))

            # TODO Remove this guard once we fix https://github.com/pantsbuild/pants/issues/1229, there
            # should always be a primary artifact.
            if self.context.products.get("jars").has(tgt):
                self._copy_artifact(tgt, jar, version, typename="jars")
                publications.add(
                    self.Publication(name=jar.name, classifier=None,
                                     ext="jar"))

                self.create_source_jar(tgt, jar, version)
                publications.add(
                    self.Publication(name=jar.name,
                                     classifier="sources",
                                     ext="jar"))

                # don't request docs unless they are available for all transitive targets
                # TODO: doc products should be checked by an independent jar'ing task, and
                # conditionally enabled; see https://github.com/pantsbuild/pants/issues/568
                doc_jar = self.create_doc_jar(tgt, jar, version)
                if doc_jar:
                    publications.add(
                        self.Publication(name=jar.name,
                                         classifier="javadoc",
                                         ext="jar"))

                if self.publish_changelog:
                    changelog_path = self.artifact_path(jar,
                                                        version,
                                                        suffix="-CHANGELOG",
                                                        extension="txt")
                    with safe_open(changelog_path, "w") as changelog_file:
                        changelog_file.write(changelog)
                    publications.add(
                        self.Publication(name=jar.name,
                                         classifier="CHANGELOG",
                                         ext="txt"))

            # Process any extra jars that might have been previously generated for this target, or a
            # target that it was derived from.
            for extra_product, extra_config in (
                    self.get_options().publish_extras or {}).items():
                override_name = jar.name
                if "override_name" in extra_config:
                    # If the supplied string has a '{target_provides_name}' in it, replace it with the
                    # current jar name. If not, the string will be taken verbatim.
                    override_name = extra_config["override_name"].format(
                        target_provides_name=jar.name)

                classifier = None
                suffix = ""
                if "classifier" in extra_config:
                    classifier = extra_config["classifier"]
                    suffix = f"-{classifier}"

                extension = extra_config.get("extension", "jar")

                extra_pub = self.Publication(name=override_name,
                                             classifier=classifier,
                                             ext=extension)

                # A lot of flexibility is allowed in parameterizing the extra artifact; ensure those
                # parameters lead to a unique publication.
                # TODO(John Sirois): Check this much earlier.
                if extra_pub in publications:
                    raise TaskError(
                        "publish_extra for '{0}' must override one of name, classifier or "
                        "extension with a non-default value.".format(
                            extra_product))

                # Build a list of targets to check. This list will consist of the current target, plus the
                # entire derived_from chain.
                target_list = [tgt]
                target = tgt
                while target.derived_from != target:
                    target_list.append(target.derived_from)
                    target = target.derived_from
                for cur_tgt in target_list:
                    if self.context.products.get(extra_product).has(cur_tgt):
                        self._copy_artifact(
                            cur_tgt,
                            jar,
                            version,
                            typename=extra_product,
                            suffix=suffix,
                            extension=extension,
                            override_name=override_name,
                        )
                        publications.add(extra_pub)

            pom_path = self.artifact_path(jar, version, extension="pom")
            PomWriter(get_pushdb, tag).write(tgt, path=pom_path)
            return publications
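A hypothetical publish_extras mapping that the loop above would accept; the product name and all values are invented:

publish_extras = {
  'shaded_jars': {
    # '{target_provides_name}' is substituted with the primary jar's name.
    'override_name': '{target_provides_name}-shaded',
    # The classifier also becomes a '-shaded' suffix on the copied artifact.
    'classifier': 'shaded',
    'extension': 'jar',
  },
}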
Example #24
 def _get_go_namespace(self, source):
   with open(source) as thrift:
     namespace = self.NAMESPACE_PARSER.search(thrift.read())
     if not namespace:
       raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
     return namespace.group(1)
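The snippet assumes a NAMESPACE_PARSER with one capture group holding the Go namespace; a plausible definition (an assumption, not the original regex):

import re

# Assumed shape of NAMESPACE_PARSER: one capture group for the Go package name.
NAMESPACE_PARSER = re.compile(r'^\s*namespace\s+go\s+(\S+)', re.MULTILINE)

match = NAMESPACE_PARSER.search('namespace go mypkg\n\nstruct Foo {\n}\n')
print(match.group(1))  # mypkg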
Example #25
    def gen(self, partial_cmd, targets):
        with self.invalidated(
                targets, invalidate_dependents=True) as invalidation_check:
            invalid_targets = []
            for vt in invalidation_check.invalid_vts:
                invalid_targets.extend(vt.targets)

            compiler = partial_cmd.compiler
            import_paths, changed_srcs = compiler.calc_srcs(
                invalid_targets, self.is_gentarget)
            outdir = self._outdir(partial_cmd)
            if changed_srcs:
                args = []

                for import_path in import_paths:
                    args.extend(['--import-path', import_path])

                args.extend(['--language', partial_cmd.language])

                for lhs, rhs in partial_cmd.namespace_map:
                    args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])

                if partial_cmd.rpc_style == 'ostrich':
                    args.append('--finagle')
                    args.append('--ostrich')
                elif partial_cmd.rpc_style == 'finagle':
                    args.append('--finagle')

                args.extend(['--dest', outdir])
                safe_mkdir(outdir)

                if not compiler.strict:
                    args.append('--disable-strict')

                if compiler.verbose:
                    args.append('--verbose')

                gen_file_map_path = os.path.relpath(self._tempname())
                args.extend(['--gen-file-map', gen_file_map_path])

                args.extend(changed_srcs)

                classpath = self.tool_classpath(compiler.name)
                returncode = self.runjava(classpath=classpath,
                                          main=compiler.main,
                                          jvm_options=compiler.jvm_args,
                                          args=args,
                                          workunit_name=compiler.name)
                try:
                    if 0 == returncode:
                        gen_files_for_source = self.parse_gen_file_map(
                            gen_file_map_path, outdir)
                    else:
                        gen_files_for_source = None
                finally:
                    os.remove(gen_file_map_path)

                if 0 != returncode:
                    raise TaskError('java %s ... exited non-zero (%i)' %
                                    (compiler.main, returncode))
                self.write_gen_file_map(gen_files_for_source, invalid_targets,
                                        outdir)

        return self.gen_file_map(targets, outdir)
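Concretely, the assembled args for one hypothetical invocation (language scala, finagle rpc_style, one import path, one namespace mapping; every value invented) would look roughly like:

# Invented example of the final args list built above.
args = [
  '--import-path', '3rdparty/thrift',
  '--language', 'scala',
  '--namespace-map', 'com.foo=com.bar',
  '--finagle',
  '--dest', '.pants.d/gen/thrift',
  '--disable-strict',
  '--gen-file-map', 'gen-file-map.tmp',
  'src/thrift/example.thrift',
]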
Example #26
    def compile(self, ctx, args, dependency_classpath, upstream_analysis,
                settings, compiler_option_sets, zinc_file_manager,
                javac_plugin_map, scalac_plugin_map):
        classpath = (ctx.classes_dir.path, ) + tuple(
            ce.path for ce in dependency_classpath)

        if self.get_options().capture_classpath:
            self._record_compile_classpath(classpath, ctx.target,
                                           ctx.classes_dir.path)

        try:
            distribution = JvmPlatform.preferred_jvm_distribution([settings],
                                                                  strict=True)
        except DistributionLocator.Error:
            distribution = JvmPlatform.preferred_jvm_distribution([settings],
                                                                  strict=False)

        javac_args = []

        if settings.args:
            settings_args = settings.args
            if any('$JAVA_HOME' in a for a in settings.args):
                logger.debug(
                    'Substituting "$JAVA_HOME" with "{}" in jvm-platform args.'
                    .format(distribution.home))
                settings_args = (a.replace('$JAVA_HOME', distribution.home)
                                 for a in settings.args)
            javac_args.extend(settings_args)

        # -source/-target apply regardless of whether extra platform args are set.
        javac_args.extend([
            # TODO: support -release
            '-source',
            str(settings.source_level),
            '-target',
            str(settings.target_level),
        ])

        if self.execution_strategy == self.ExecutionStrategy.hermetic:
            javac_args.extend([
                # We need to strip the source root from our output files. Outputting to a directory, and
                # capturing that directory, does the job.
                # Unfortunately, javac errors if the directory you pass to -d doesn't exist, and we don't
                # have a convenient way of making a directory in the output tree, so let's just use the
                # working directory as our output dir.
                # This also has the benefit of not needing to strip leading directories from the returned
                # snapshot.
                '-d',
                '.',
            ])
        else:
            javac_args.extend([
                '-d',
                ctx.classes_dir.path,
            ])

        javac_args.extend(self._javac_plugin_args(javac_plugin_map))

        javac_args.extend(args)

        compiler_option_sets_args = self.get_merged_args_for_compiler_option_sets(
            compiler_option_sets)
        javac_args.extend(compiler_option_sets_args)

        javac_args.extend([
            '-classpath',
            ':'.join(classpath),
        ])
        javac_args.extend(ctx.sources)

        # From https://docs.oracle.com/javase/8/docs/technotes/tools/windows/javac.html#BHCJEIBB
        # Wildcards (*) aren’t allowed in these lists (such as for specifying *.java).
        # Use of the at sign (@) to recursively interpret files isn’t supported.
        # The -J options aren’t supported because they’re passed to the launcher,
        # which doesn’t support argument files.
        j_args = [j_arg for j_arg in javac_args if j_arg.startswith('-J')]
        safe_javac_args = list(filter(lambda x: x not in j_args, javac_args))

        with argfile.safe_args(safe_javac_args,
                               self.get_options()) as batched_args:
            javac_cmd = [f'{distribution.real_home}/bin/javac']
            javac_cmd.extend(j_args)
            javac_cmd.extend(batched_args)

            if self.execution_strategy == self.ExecutionStrategy.hermetic:
                self._execute_hermetic_compile(javac_cmd, ctx)
            else:
                with self.context.new_workunit(name='javac',
                                               cmd=' '.join(javac_cmd),
                                               labels=[WorkUnitLabel.COMPILER
                                                       ]) as workunit:
                    self.context.log.debug(f"Executing {' '.join(javac_cmd)}")
                    p = subprocess.Popen(javac_cmd,
                                         stdout=workunit.output('stdout'),
                                         stderr=workunit.output('stderr'))
                    return_code = p.wait()
                    workunit.set_outcome(
                        WorkUnit.FAILURE if return_code else WorkUnit.SUCCESS)
                    if return_code:
                        raise TaskError(
                            f'javac exited with return code {return_code}')
                classes_directory = Path(ctx.classes_dir.path).relative_to(
                    get_buildroot())
                self.context._scheduler.materialize_directory(
                    DirectoryToMaterialize(
                        self.post_compile_extra_resources_digest(
                            ctx, prepend_post_merge_relative_path=False),
                        path_prefix=str(classes_directory),
                    ), )

        self._create_context_jar(ctx)
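A self-contained illustration of the -J split above: -J options must stay on the command line for the launcher, while everything else can be batched into an @argfile (argument values invented):

javac_args = ['-J-Xmx2g', '-source', '8', '-target', '8', '-d', '.', 'Foo.java']
j_args = [a for a in javac_args if a.startswith('-J')]
safe_javac_args = [a for a in javac_args if a not in j_args]
# j_args go directly on the javac command line; safe_javac_args may be written
# to an @argfile, since javac argument files cannot contain -J options.
print(j_args)           # ['-J-Xmx2g']
print(safe_javac_args)  # ['-source', '8', '-target', '8', '-d', '.', 'Foo.java']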
Example #27
  def execute(self):
    # We operate on the target roots, except that we replace codegen targets with their
    # corresponding synthetic targets, since those have the generated sources that actually
    # get published. Note that the "provides" attribute is copied from the original target
    # to the synthetic target, so that the latter can be used as a direct stand-in for the
    # former here.
    preliminary_targets = set(t for t in self.context.target_roots if self.has_provides(t))
    targets = set(preliminary_targets)
    for t in self.context.targets():
      # A non-codegen target has derived_from equal to itself, so we check is_original
      # to ensure that the synthetic targets take precedence.
      # We check that the synthetic target has the same "provides" as the original, because
      # there are other synthetic targets in play (e.g., resources targets) to which this
      # substitution logic must not apply.
      if (t.derived_from in preliminary_targets and not t.is_original and
          self.has_provides(t) and t.provides == t.derived_from.provides):
        targets.discard(t.derived_from)
        targets.add(t)
    if not targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(target):
      if target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(target))
        setup_dir, dependencies = self.create_setup_py(target, dist_dir)
        created[target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if self.has_provides(dep):
              create(dep)

    for target in targets:
      create(target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    for target in reversed(sort_targets(created.keys())):
      setup_dir = created.get(target)
      if setup_dir:
        if not self._run:
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[target] = sdist_path
        else:
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          setup_runner = SetupPyRunner(setup_dir, self._run, interpreter=interpreter)
          setup_runner.run()
          python_dists[target] = setup_dir
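A toy illustration of the ordering contract the comment above relies on (names invented; the dependents-first output of sort_targets is an assumption inferred from that comment):

created = {'app': 'dist/app', 'lib': 'dist/lib'}  # app depends on lib
dependents_first = ['app', 'lib']  # what a sort_targets-style tsort yields
for name in reversed(dependents_first):
  # Dependencies are processed first, so an `sdist upload` of lib completes
  # before app, whose setup.py requires it, is built and uploaded.
  print('processing', created[name])  # dist/lib, then dist/app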
Example #28
 def _get_touched_files(self):
     try:
         return self._workspace.touched_files(self._parent)
     except Workspace.WorkspaceError as e:
         raise TaskError(e)
Example #29
        def process_target(current_target):
            """
      :type current_target:pants.build_graph.target.Target
      """
            def get_target_type(tgt):
                def is_test(t):
                    return isinstance(t, JUnitTests) or isinstance(
                        t, PythonTests)

                if is_test(tgt):
                    return SourceRootTypes.TEST
                else:
                    if (isinstance(tgt, Resources)
                            and tgt in resource_target_map
                            and is_test(resource_target_map[tgt])):
                        return SourceRootTypes.TEST_RESOURCE
                    elif isinstance(tgt, Resources):
                        return SourceRootTypes.RESOURCE
                    else:
                        return SourceRootTypes.SOURCE

            info = {
                'targets': [],
                'libraries': [],
                'roots': [],
                'id': current_target.id,
                'target_type': get_target_type(current_target),
                # NB: is_code_gen should be removed when export format advances to 1.1.0 or higher
                'is_code_gen': current_target.is_synthetic,
                'is_synthetic': current_target.is_synthetic,
                'pants_target_type': self._get_pants_target_alias(type(current_target)),
            }

            if not current_target.is_synthetic:
                info['globs'] = current_target.globs_relative_to_buildroot()
                if self.get_options().sources:
                    info['sources'] = list(
                        current_target.sources_relative_to_buildroot())

            info['transitive'] = current_target.transitive
            info['scope'] = str(current_target.scope)
            info['is_target_root'] = current_target in target_roots_set

            if isinstance(current_target, PythonRequirementLibrary):
                reqs = current_target.payload.get_field_value(
                    'requirements', set())
                """:type : set[pants.backend.python.python_requirement.PythonRequirement]"""
                info['requirements'] = [req.key for req in reqs]

            if isinstance(current_target, PythonTarget):
                interpreter_for_target = self._interpreter_cache.select_interpreter_for_targets(
                    [current_target])
                if interpreter_for_target is None:
                    raise TaskError(
                        'Unable to find suitable interpreter for {}'.format(
                            current_target.address))
                python_interpreter_targets_mapping[
                    interpreter_for_target].append(current_target)
                info['python_interpreter'] = str(
                    interpreter_for_target.identity)

            def iter_transitive_jars(jar_lib):
                """
        :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
        :rtype: :class:`collections.Iterator` of
                :class:`pants.java.jar.M2Coordinate`
        """
                if classpath_products:
                    jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
                        (jar_lib, ))
                    for _, jar_entry in jar_products:
                        coordinate = jar_entry.coordinate
                        # We drop classifier and type_ since those fields are represented in the global
                        # libraries dict and here we just want the key into that dict (see `_jar_id`).
                        yield M2Coordinate(org=coordinate.org,
                                           name=coordinate.name,
                                           rev=coordinate.rev)

            target_libraries = OrderedSet()
            if isinstance(current_target, JarLibrary):
                target_libraries = OrderedSet(
                    iter_transitive_jars(current_target))
            for dep in current_target.dependencies:
                info['targets'].append(dep.address.spec)
                if isinstance(dep, JarLibrary):
                    for jar in dep.jar_dependencies:
                        target_libraries.add(
                            M2Coordinate(jar.org, jar.name, jar.rev))
                    # Add all the jars pulled in by this jar_library
                    target_libraries.update(iter_transitive_jars(dep))
                if isinstance(dep, Resources):
                    resource_target_map[dep] = current_target

            if isinstance(current_target, ScalaLibrary):
                for dep in current_target.java_sources:
                    info['targets'].append(dep.address.spec)
                    process_target(dep)

            if isinstance(current_target, JvmTarget):
                info['excludes'] = [
                    self._exclude_id(exclude)
                    for exclude in current_target.excludes
                ]
                info['platform'] = current_target.platform.name
                if hasattr(current_target, 'test_platform'):
                    info['test_platform'] = current_target.test_platform.name

            info['roots'] = [{
                'source_root': source_root_package_prefix[0],
                'package_prefix': source_root_package_prefix[1]
            } for source_root_package_prefix in self._source_roots_for_target(
                current_target)]

            if classpath_products:
                info['libraries'] = [
                    self._jar_id(lib) for lib in target_libraries
                ]
            targets_map[current_target.address.spec] = info
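For one hypothetical scala_library, the resulting targets_map entry might look like this (every value invented for illustration):

# Invented example of one targets_map entry for a jvm target.
targets_map['src/scala/com/example:lib'] = {
  'targets': ['3rdparty/jvm:guava'],
  'libraries': ['com.google.guava:guava:23.0'],
  'roots': [{'source_root': 'src/scala', 'package_prefix': 'com.example'}],
  'id': 'src.scala.com.example.lib',
  'target_type': 'SOURCE',
  'is_code_gen': False,
  'is_synthetic': False,
  'pants_target_type': 'scala_library',
  'transitive': True,
  'scope': 'default',
  'is_target_root': True,
  'excludes': [],
  'platform': 'java8',
}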
Example #30
 def version(self):
     if not self.get_options().version:
         raise TaskError("--version is required")
     return self.get_options().version