Example #1
    def _construct_cmd_args(jars, common_args, global_excludes, pinned_coords,
                            coursier_workdir, json_output_path):

        # Make a copy so there are no side effects for others using `common_args`.
        cmd_args = list(common_args)

        cmd_args.extend(['--json-output-file', json_output_path])

        # Dealing with intransitivity and forced versions.
        for j in jars:
            if not j.rev:
                raise TaskError(
                    'Undefined revs for jars unsupported by Coursier. "{}"'.
                    format(repr(j.coordinate).replace('M2Coordinate', 'jar')))

            module = j.coordinate.simple_coord
            if j.coordinate.classifier:
                module += ',classifier={}'.format(j.coordinate.classifier)

            if j.get_url():
                jar_url = j.get_url()
                module += ',url={}'.format(parse.quote_plus(jar_url))

            if j.intransitive:
                cmd_args.append('--intransitive')

            cmd_args.append(module)

            # Force requires specifying the coord again with -V
            if j.force:
                cmd_args.append('-V')
                cmd_args.append(j.coordinate.simple_coord)

        # Force pinned coordinates
        for m2coord in pinned_coords:
            cmd_args.append('-V')
            cmd_args.append(m2coord.simple_coord)

        # Local exclusions
        local_exclude_args = []
        for jar in jars:
            for ex in jar.excludes:
                # `--` means exclude. See --local-exclude-file in `coursier fetch --help`
                # If ex.name does not exist, that means the whole org needs to be excluded.
                ex_arg = "{}:{}--{}:{}".format(jar.org, jar.name, ex.org,
                                               ex.name or '*')
                local_exclude_args.append(ex_arg)

        if local_exclude_args:
            with temporary_file(coursier_workdir, cleanup=False) as f:
                exclude_file = f.name
                with open(exclude_file, 'w') as ex_f:
                    ex_f.write('\n'.join(local_exclude_args).encode('utf8'))

                cmd_args.append('--local-exclude-file')
                cmd_args.append(exclude_file)

        for ex in global_excludes:
            cmd_args.append('-E')
            cmd_args.append('{}:{}'.format(ex.org, ex.name or '*'))

        return cmd_args
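
For orientation, here is a minimal sketch (hypothetical coordinates and paths, not from the source, and ignoring whatever is already in `common_args`) of the list this helper returns for one forced, intransitive jar with a local exclusion plus one global exclusion:

    expected_cmd_args = [
        '--json-output-file', '/tmp/coursier-report.json',
        '--intransitive', 'org.example:lib:1.0',            # the jar's module coordinate
        '-V', 'org.example:lib:1.0',                         # repeated with -V because force=True
        '-V', 'org.pinned:dep:2.3',                          # pinned coordinate
        '--local-exclude-file', '/tmp/coursier-excludes',    # file contains "org.example:lib--com.bad:*"
        '-E', 'com.global:*',                                # global exclude with no name
    ]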
Example #2
    def build_rpm(self, platform, vt, build_dir):
        # Copy the spec file to the build directory.
        target = vt.target
        rpm_spec_path = os.path.join(get_buildroot(), target.rpm_spec)
        shutil.copy(rpm_spec_path, build_dir)
        spec_basename = os.path.basename(target.rpm_spec)

        # Resolve the build requirements.
        build_reqs = self.extract_build_reqs(rpm_spec_path)

        # TODO(mateo): There is a bit of an API conflation now that we have remote_source urls and targets.
        # Especially when you consider that there is also sources/dependencies.
        # The distinction between these things is going to be confusing, they should be unified or at least streamlined.

        # Copy sources to the buildroot. (TODO - unify these stanzas, they differ only in being relative vs absolute paths)
        local_sources = []
        for source in self._remote_source_targets(target):

            remote_source = RemoteSourceFetcher.Factory.scoped_instance(
                self).create(source)
            source_path = remote_source.path
            shutil.copy(source_path, build_dir)
            local_sources.append({
                'basename':
                os.path.basename(os.path.relpath(source_path,
                                                 get_buildroot())),
            })
        for source_rel_path in target.sources_relative_to_buildroot():
            shutil.copy(os.path.join(get_buildroot(), source_rel_path),
                        build_dir)
            local_sources.append({
                'basename': os.path.basename(source_rel_path),
            })

        # Setup information on remote sources.
        def convert_remote_source(remote_source):
            if isinstance(remote_source, string_types):
                return {
                    'url': remote_source,
                    'basename': os.path.basename(remote_source)
                }
            elif isinstance(remote_source, tuple):
                return {'url': remote_source[0], 'basename': remote_source[1]}
            else:
                raise ValueError(
                    'invalid remote_source entry: {}'.format(remote_source))

        remote_sources = [
            convert_remote_source(rs) for rs in target.remote_sources
        ]

        # Put together rpmbuild options for defines.
        rpmbuild_options = ''
        for key in sorted(target.defines.keys()):
            quoted_value = str(target.defines[key]).replace("\\",
                                                            "\\\\").replace(
                                                                "\"", "\\\"")
            rpmbuild_options += ' --define="%{} {}"'.format(key, quoted_value)

        # Write the entry point script.
        entrypoint_generator = Generator(
            resource_string(__name__, 'build_rpm.sh.mustache'),
            spec_basename=spec_basename,
            pre_commands=[{
                'command': '/bin/bash -i'
            }] if self.get_options().shell_before else [],
            post_commands=[{
                'command': '/bin/bash -i'
            }] if self.get_options().shell_after else [],
            rpmbuild_options=rpmbuild_options,
        )
        entrypoint_path = os.path.join(build_dir, 'build_rpm.sh')
        with open(entrypoint_path, 'wb') as f:
            f.write(entrypoint_generator.render())
        os.chmod(entrypoint_path, 0555)

        # Copy globally-configured files into build directory.
        for context_file_path_template in self.get_options(
        ).docker_build_context_files:
            context_file_path = context_file_path_template.format(
                platform_id=platform['id'])
            shutil.copy(context_file_path, build_dir)

        # Determine setup commands.
        setup_commands = [{
            'command': command.format(platform_id=platform['id'])
        } for command in self.get_options().docker_build_setup_commands]

        # Get the RPMs created by the target's RpmSpecTarget dependencies.
        rpm_products = []
        for dep in target.dependencies:
            if isinstance(dep, RpmSpecTarget):
                specs = self.context.products.get('rpms')[dep]
                if specs:
                    for dirname, relpath in specs.items():
                        for rpmpath in relpath:
                            local_rpm = os.path.join(dirname, rpmpath)
                            shutil.copy(local_rpm, build_dir)
                            rpm_products.append({
                                'local_rpm':
                                os.path.basename(rpmpath),
                            })

        # Write the Dockerfile for this build.
        dockerfile_generator = Generator(
            resource_string(__name__, 'dockerfile_template.mustache'),
            image=platform['base'],
            setup_commands=setup_commands,
            spec_basename=spec_basename,
            rpm_dependencies=rpm_products,
            build_reqs={'reqs': ' '.join(build_reqs)} if build_reqs else None,
            local_sources=local_sources,
            remote_sources=remote_sources,
        )
        dockerfile_path = os.path.join(build_dir, 'Dockerfile')
        with open(dockerfile_path, 'wb') as f:
            f.write(dockerfile_generator.render())

        # Generate a UUID to identify the image.
        uuid_identifier = uuid.uuid4()
        image_base_name = 'rpm-image-{}'.format(uuid_identifier)
        image_name = '{}:latest'.format(image_base_name)
        container_name = None

        try:
            # Build the Docker image that will build the RPMS.
            build_image_cmd = [
                self.get_options().docker,
                'build',
            ]
            if self.get_options().docker_build_no_cache:
                build_image_cmd.append('--no-cache')
            build_image_cmd.extend([
                '-t',
                image_name,
                build_dir,
            ])
            with self.docker_workunit(name='build-image',
                                      cmd=build_image_cmd) as workunit:
                self.context.log.debug('Executing: {}'.format(
                    ' '.join(build_image_cmd)))
                proc = subprocess.Popen(build_image_cmd,
                                        stdout=workunit.output('stdout'),
                                        stderr=subprocess.STDOUT)
                returncode = proc.wait()
                if returncode != 0:
                    raise TaskError(
                        'Failed to build image, returncode={0}'.format(
                            returncode))

            # Run the image in a container to actually build the RPMs.
            container_name = 'rpm-container-{}'.format(uuid_identifier)
            run_container_cmd = [
                self.get_options().docker,
                'run',
                '--attach=stderr',
                '--attach=stdout',
                '--name={}'.format(container_name),
            ]
            if self.get_options().shell_before or self.get_options(
            ).shell_after:
                run_container_cmd.extend(['-i', '-t'])
            run_container_cmd.extend([
                image_name,
            ])
            with self.docker_workunit(name='run-container',
                                      cmd=run_container_cmd) as workunit:
                proc = subprocess.Popen(run_container_cmd,
                                        stdout=workunit.output('stdout'),
                                        stderr=subprocess.STDOUT)
                returncode = proc.wait()
                if returncode != 0:
                    raise TaskError(
                        'Failed to build RPM, returncode={0}'.format(
                            returncode))

            # TODO(mateo): Convert this to output to a per-platform namespace to make it easy to upload all RPMs to the
            # correct platform (something like: `dist/rpmbuilder/centos7/x86_64/foo.rpm`).
            # Extract the built RPMs from the container.
            extract_rpms_cmd = [
                self.get_options().docker,
                'export',
                container_name,
            ]
            with self.docker_workunit(name='extract-rpms',
                                      cmd=extract_rpms_cmd) as workunit:
                proc = subprocess.Popen(extract_rpms_cmd,
                                        stdout=subprocess.PIPE,
                                        stderr=None)
                with tarfile.open(fileobj=proc.stdout, mode='r|*') as tar:
                    for entry in tar:
                        name = entry.name
                        if (name.startswith('home/rpmuser/rpmbuild/RPMS/') or
                                name.startswith('home/rpmuser/rpmbuild/SRPMS/')
                            ) and name.endswith('.rpm'):
                            # Strip the known prefix with a slice: str.lstrip() removes a set of
                            # characters, not a prefix, and only works here by coincidence.
                            rel_rpm_path = name[len('home/rpmuser/rpmbuild/'):]
                            if rel_rpm_path:

                                rpmdir = os.path.dirname(rel_rpm_path)
                                safe_mkdir(os.path.join(
                                    vt.results_dir, rpmdir))
                                rpmfile = os.path.join(vt.results_dir,
                                                       rel_rpm_path)

                                self.context.log.info(
                                    'Extracting {}'.format(rel_rpm_path))
                                fileobj = tar.extractfile(entry)
                                # NOTE(mateo): I believe it has free streaming w/ context manager/stream mode. But this doesn't hurt!
                                with open(rpmfile, 'wb') as f:
                                    self.write_stream(fileobj, f)
                                output_dir = os.path.join(
                                    self.get_options().pants_distdir,
                                    'rpmbuild', rpmdir)
                                safe_mkdir(output_dir)
                                shutil.copy(rpmfile, output_dir)
                                if name.startswith(
                                        'home/rpmuser/rpmbuild/RPMS/'):
                                    self.context.products.get('rpms').add(
                                        vt.target,
                                        vt.results_dir).append(rel_rpm_path)
                                else:
                                    self.context.products.get('srpms').add(
                                        vt.target,
                                        vt.results_dir).append(rel_rpm_path)

                retcode = proc.wait()
                if retcode != 0:
                    raise TaskError('Failed to extract RPMS')
                else:
                    # Save the resulting image if asked. Eventually this image should be pushed to the registry every build,
                    # and subsequent invocations on the published RPM should simply pull and extract.
                    if self.get_options().commit_container_image:
                        commited_name = 'rpm-commited-image-{}'.format(
                            uuid_identifier)
                        self.context.log.info(
                            'Saving container state as image...')
                        docker_commit_cmd = [
                            self.get_options().docker, 'commit', container_name
                        ]
                        with self.docker_workunit(
                                name='commit-to-image',
                                cmd=docker_commit_cmd) as workunit:
                            subprocess.call(docker_commit_cmd,
                                            stdout=workunit.output('stdout'),
                                            stderr=subprocess.STDOUT)
                            self.context.log.info(
                                'Saved container as image: {}\n'.format(
                                    commited_name))

        finally:
            # Remove the build container.
            if container_name and not self.get_options().keep_build_products:
                remove_container_cmd = [
                    self.get_options().docker, 'rm', container_name
                ]
                with self.docker_workunit(
                        name='remove-build-container',
                        cmd=remove_container_cmd) as workunit:
                    subprocess.call(remove_container_cmd,
                                    stdout=workunit.output('stdout'),
                                    stderr=subprocess.STDOUT)

            # Remove the build image.
            if not self.get_options().keep_build_products:
                remove_image_cmd = [
                    self.get_options().docker, 'rmi', image_name
                ]
                with self.docker_workunit(name='remove-build-image',
                                          cmd=remove_image_cmd) as workunit:
                    subprocess.call(remove_image_cmd,
                                    stdout=workunit.output('stdout'),
                                    stderr=subprocess.STDOUT)
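
As a small illustration (assumed values, not from the source) of how the escaping above renders a `defines` dict into `rpmbuild --define` options:

    defines = {'dist': '.el7', 'version': '1.0"beta"'}
    rpmbuild_options = ''
    for key in sorted(defines):
        quoted_value = str(defines[key]).replace("\\", "\\\\").replace("\"", "\\\"")
        rpmbuild_options += ' --define="%{} {}"'.format(key, quoted_value)
    # rpmbuild_options now holds (as literal characters):
    #  --define="%dist .el7" --define="%version 1.0\"beta\""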
Example #3
    def generate_doc(self, language_predicate, create_jvmdoc_command):
        """
    Generate an execute method given a language predicate and command to create documentation

    language_predicate: a function that accepts a target and returns True if the target is of that
                        language
    create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate
                           documentation for the targets
    """
        catalog = self.context.products.isrequired(self.jvmdoc().product_type)
        if catalog and self.combined:
            raise TaskError(
                'Cannot provide {} target mappings for combined output'.format(
                    self.jvmdoc().product_type))

        def docable(target):
            if not language_predicate(target):
                self.context.log.debug(
                    'Skipping [{}] because it does not pass the language predicate'
                    .format(target.address.spec))
                return False
            if not self._include_codegen and target.is_synthetic:
                self.context.log.debug(
                    'Skipping [{}] because it is a synthetic target'.format(
                        target.address.spec))
                return False
            for pattern in self._exclude_patterns:
                if pattern.search(target.address.spec):
                    self.context.log.debug(
                        "Skipping [{}] because it matches exclude pattern '{}'"
                        .format(target.address.spec, pattern.pattern))
                    return False
            return True

        targets = self.get_targets(predicate=docable)
        if not targets:
            return

        with self.invalidated(targets) as invalidation_check:

            def find_jvmdoc_targets():
                invalid_targets = set()
                for vt in invalidation_check.invalid_vts:
                    invalid_targets.update(vt.targets)
                return invalid_targets

            jvmdoc_targets = list(find_jvmdoc_targets())
            if self.combined:
                self._generate_combined(jvmdoc_targets, create_jvmdoc_command)
            else:
                self._generate_individual(jvmdoc_targets,
                                          create_jvmdoc_command)

        if catalog:
            for target in targets:
                gendir = self._gendir(target)
                jvmdocs = []
                for root, dirs, files in safe_walk(gendir):
                    jvmdocs.extend(
                        os.path.relpath(os.path.join(root, f), gendir)
                        for f in files)
                self.context.products.get(self.jvmdoc().product_type).add(
                    target, gendir, jvmdocs)
Example #4
  def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file,
              log_file, zinc_args_file, settings, fatal_warnings, zinc_file_manager,
              javac_plugin_map, scalac_plugin_map):
    self._verify_zinc_classpath(classpath)
    self._verify_zinc_classpath(upstream_analysis.keys())

    zinc_args = []

    zinc_args.extend([
      '-log-level', self.get_options().level,
      '-analysis-cache', analysis_file,
      '-classpath', ':'.join(classpath),
      '-d', classes_output_dir
    ])
    if not self.get_options().colors:
      zinc_args.append('-no-color')
    if log_file:
      zinc_args.extend(['-capture-log', log_file])

    zinc_args.extend(['-compiler-interface', self.tool_jar('compiler-interface')])
    zinc_args.extend(['-compiler-bridge', self.tool_jar('compiler-bridge')])
    zinc_args.extend(['-zinc-cache-dir', self._zinc_cache_dir])
    zinc_args.extend(['-scala-path', ':'.join(self.scalac_classpath())])

    zinc_args.extend(self._javac_plugin_args(javac_plugin_map))
    # Search for scalac plugins on the entire classpath, which will allow use of
    # in-repo plugins for scalac (which works naturally for javac).
    # Note that:
    # - At this point the classpath will already have the extra_compile_time_classpath_elements()
    #   appended to it, so those will also get searched here.
    # - In scala 2.11 and up, the plugin's classpath element can be a dir, but for 2.10 it must be
    #   a jar.  So in-repo plugins will only work with 2.10 if --use-classpath-jars is true.
    # - We exclude our own classes_output_dir, because if we're a plugin ourselves, then our
    #   classes_output_dir doesn't have scalac-plugin.xml yet, and we don't want that fact to get
    #   memoized (which in practice will only happen if this plugin uses some other plugin, thus
    #   triggering the plugin search mechanism, which does the memoizing).
    scalac_plugin_search_classpath = set(classpath) - {classes_output_dir}
    zinc_args.extend(self._scalac_plugin_args(scalac_plugin_map, scalac_plugin_search_classpath))
    if upstream_analysis:
      zinc_args.extend(['-analysis-map',
                        ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())])

    zinc_args.extend(args)
    zinc_args.extend(self._get_zinc_arguments(settings))
    zinc_args.append('-transactional')

    if fatal_warnings:
      zinc_args.extend(self.get_options().fatal_warnings_enabled_args)
    else:
      zinc_args.extend(self.get_options().fatal_warnings_disabled_args)

    if not zinc_file_manager:
      zinc_args.append('-no-zinc-file-manager')

    jvm_options = []

    if self.javac_classpath():
      # Make the custom javac classpath the first thing on the bootclasspath, to ensure that
      # it's the one javax.tools.ToolProvider.getSystemJavaCompiler() loads.
      # It will probably be loaded even on the regular classpath: If not found on the bootclasspath,
      # getSystemJavaCompiler() constructs a classloader that loads from the JDK's tools.jar.
      # That classloader will first delegate to its parent classloader, which will search the
      # regular classpath.  However it's harder to guarantee that our javac will precede any others
      # on the classpath, so it's safer to prefix it to the bootclasspath.
      jvm_options.extend(['-Xbootclasspath/p:{}'.format(':'.join(self.javac_classpath()))])

    jvm_options.extend(self._jvm_options)

    zinc_args.extend(sources)

    self.log_zinc_file(analysis_file)
    with open(zinc_args_file, 'w') as fp:
      for arg in zinc_args:
        fp.write(arg)
        fp.write(b'\n')

    if self.runjava(classpath=self.zinc_classpath(),
                    main=self._ZINC_MAIN,
                    jvm_options=jvm_options,
                    args=zinc_args,
                    workunit_name=self.name(),
                    workunit_labels=[WorkUnitLabel.COMPILER]):
      raise TaskError('Zinc compile failed.')
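
As a small illustration (made-up paths, not from the source), the `-analysis-map` value built above joins each upstream classpath entry with its analysis file:

    upstream_analysis = {'/cp/dep-classes': '/cache/dep.analysis',
                         '/cp/other-classes': '/cache/other.analysis'}
    analysis_map = ','.join('{}:{}'.format(*kv) for kv in upstream_analysis.items())
    # e.g. '/cp/dep-classes:/cache/dep.analysis,/cp/other-classes:/cache/other.analysis'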
Example #5
        def add_target(self,
                       target,
                       recursive=False,
                       canonical_classpath_base_dir=None):
            """Adds the classes and resources for a target to an open jar.

      :param target: The target to add generated classes and resources for.
      :param bool recursive: `True` to add classes and resources for the target's transitive
        internal dependency closure.
      :param string canonical_classpath_base_dir: If set, instead of adding targets to the jar
        bundle, create canonical symlinks to the original classpath and save canonical symlinks
        to Manifest's Class-Path.
      :returns: `True` if the target contributed any files - manifest entries, classfiles or
        resource files - to this jar.
      :rtype: bool
      """
            products_added = False

            classpath_products = self._context.products.get_data(
                'runtime_classpath')

            # TODO(John Sirois): Manifest handling is broken.  We should be tracking state and failing
            # fast if any duplicate entries are added; ie: if we get a second binary or a second agent.

            if isinstance(target, JvmBinary):
                self._add_manifest_entries(target, self._manifest)
                products_added = True
            elif isinstance(target, JavaAgent):
                self._add_agent_manifest(target, self._manifest)
                products_added = True
            elif recursive:
                agents = [
                    t for t in target.closure() if isinstance(t, JavaAgent)
                ]
                if len(agents) > 1:
                    raise TaskError(
                        'Only 1 agent can be added to a jar, found {} for {}:\n\t{}'
                        .format(
                            len(agents), target.address.reference(),
                            '\n\t'.join(agent.address.reference()
                                        for agent in agents)))
                elif agents:
                    self._add_agent_manifest(agents[0], self._manifest)
                    products_added = True

            # In the transitive case we'll gather internal resources naturally as dependencies, but in the
            # non-transitive case we need to manually add these special (in the context of jarring)
            # dependencies.
            targets = target.closure(bfs=True) if recursive else [target]
            if not recursive and target.has_resources:
                targets += target.resources
            # We only gather internal classpath elements per our contract.
            if canonical_classpath_base_dir:
                canonical_classpath = ClasspathUtil.create_canonical_classpath(
                    classpath_products, targets, canonical_classpath_base_dir)
                self._jar.append_classpath(canonical_classpath)
                products_added = True
            else:
                target_classpath = ClasspathUtil.internal_classpath(
                    targets, classpath_products)
                for entry in target_classpath:
                    if ClasspathUtil.is_jar(entry):
                        self._jar.writejar(entry)
                        products_added = True
                    elif ClasspathUtil.is_dir(entry):
                        for rel_file in ClasspathUtil.classpath_entries_contents(
                            [entry]):
                            self._jar.write(os.path.join(entry, rel_file),
                                            rel_file)
                            products_added = True
                    else:
                        # non-jar and non-directory classpath entries should be ignored
                        pass

            return products_added
Example #6
 def _sources_for_targets(self, targets):
     """Returns a map target->sources for the specified targets."""
     if self._sources_by_target is None:
         raise TaskError('self._sources_by_target not computed yet.')
     return dict((t, self._sources_by_target.get(t, [])) for t in targets)
Example #7
    def __init__(self, context, workdir, scm=None):
        super(JarPublish, self).__init__(context, workdir)
        ScmPublish.__init__(
            self, scm or get_scm(),
            self.context.config.getlist(JarPublish._CONFIG_SECTION,
                                        'restrict_push_branches'))
        self.cachedir = os.path.join(self.workdir, 'cache')

        self._jvmargs = context.config.getlist(JarPublish._CONFIG_SECTION,
                                               'ivy_jvmargs',
                                               default=[])

        if context.options.jar_publish_local:
            local_repo = dict(resolver='publish_local',
                              path=os.path.abspath(
                                  os.path.expanduser(
                                      context.options.jar_publish_local)),
                              confs=['default'],
                              auth=None)
            self.repos = defaultdict(lambda: local_repo)
            self.commit = False
            self.snapshot = context.options.jar_publish_local_snapshot
        else:
            self.repos = context.config.getdict(JarPublish._CONFIG_SECTION,
                                                'repos')
            if not self.repos:
                raise TaskError(
                    "This repo is not yet set for publishing to the world!"
                    "Please re-run with --publish-local")
            for repo, data in self.repos.items():
                auth = data.get('auth')
                if auth:
                    credentials = context.resolve(auth).next()
                    user = credentials.username(data['resolver'])
                    password = credentials.password(data['resolver'])
                    self.context.log.debug('Found auth for repo=%s user=%s' %
                                           (repo, user))
                    self.repos[repo]['username'] = user
                    self.repos[repo]['password'] = password
            self.commit = context.options.jar_publish_commit
            self.snapshot = False

        self.ivycp = context.config.getlist('ivy', 'classpath')
        self.ivysettings = context.config.get('jar-publish', 'ivy_settings')

        self.dryrun = context.options.jar_publish_dryrun
        self.transitive = context.options.jar_publish_transitive
        self.force = context.options.jar_publish_force

        def parse_jarcoordinate(coordinate):
            components = coordinate.split('#', 1)
            if len(components) == 2:
                org, name = components
                return org, name
            else:
                try:
                    address = Address.parse(get_buildroot(), coordinate)
                    try:
                        target = Target.get(address)
                        if not target:
                            siblings = Target.get_all_addresses(
                                address.buildfile)
                            prompt = 'did you mean' if len(
                                siblings
                            ) == 1 else 'maybe you meant one of these'
                            raise TaskError('%s => %s?:\n    %s' %
                                            (address, prompt, '\n    '.join(
                                                str(a) for a in siblings)))
                        if not target.is_exported:
                            raise TaskError('%s is not an exported target' %
                                            coordinate)
                        return target.provides.org, target.provides.name
                    except (ImportError, SyntaxError, TypeError):
                        raise TaskError('Failed to parse %s' %
                                        address.buildfile.relpath)
                except IOError:
                    raise TaskError('No BUILD file could be found at %s' %
                                    coordinate)

        self.overrides = {}
        if context.options.jar_publish_override:

            def parse_override(override):
                try:
                    coordinate, rev = override.split('=', 1)
                    try:
                        rev = Semver.parse(rev)
                    except ValueError as e:
                        raise TaskError('Invalid version %s: %s' % (rev, e))
                    return parse_jarcoordinate(coordinate), rev
                except ValueError:
                    raise TaskError('Invalid override: %s' % override)

            self.overrides.update(
                parse_override(o)
                for o in context.options.jar_publish_override)

        self.restart_at = None
        if context.options.jar_publish_restart_at:
            self.restart_at = parse_jarcoordinate(
                context.options.jar_publish_restart_at)

        context.products.require('jars')
        context.products.require('source_jars')
Example #8
        def process_target(current_target):
            """
      :type current_target: pants.build_graph.target.Target
      """
            def get_target_type(target):
                if target.is_test:
                    return ExportTask.SourceRootTypes.TEST
                else:
                    if (isinstance(target, Resources)
                            and target in resource_target_map
                            and resource_target_map[target].is_test):
                        return ExportTask.SourceRootTypes.TEST_RESOURCE
                    elif isinstance(target, Resources):
                        return ExportTask.SourceRootTypes.RESOURCE
                    else:
                        return ExportTask.SourceRootTypes.SOURCE

            info = {
                'targets': [],
                'libraries': [],
                'roots': [],
                'id':
                current_target.id,
                'target_type':
                get_target_type(current_target),
                # NB: is_code_gen should be removed when export format advances to 1.1.0 or higher
                'is_code_gen':
                current_target.is_synthetic,
                'is_synthetic':
                current_target.is_synthetic,
                'pants_target_type':
                self._get_pants_target_alias(type(current_target)),
            }

            if not current_target.is_synthetic:
                info['globs'] = current_target.globs_relative_to_buildroot()
                if self.get_options().sources:
                    info['sources'] = list(
                        current_target.sources_relative_to_buildroot())

            info['transitive'] = current_target.transitive
            info['scope'] = str(current_target.scope)
            info['is_target_root'] = current_target in target_roots_set

            if isinstance(current_target, PythonRequirementLibrary):
                reqs = current_target.payload.get_field_value(
                    'requirements', set())
                """:type : set[pants.backend.python.python_requirement.PythonRequirement]"""
                info['requirements'] = [req.key for req in reqs]

            if isinstance(current_target, PythonTarget):
                interpreter_for_target = self.select_interpreter_for_targets(
                    [current_target])
                if interpreter_for_target is None:
                    raise TaskError(
                        'Unable to find suitable interpreter for {}'.format(
                            current_target.address))
                python_interpreter_targets_mapping[
                    interpreter_for_target].append(current_target)
                info['python_interpreter'] = str(
                    interpreter_for_target.identity)

            def iter_transitive_jars(jar_lib):
                """
        :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
        :rtype: :class:`collections.Iterator` of
                :class:`pants.backend.jvm.jar_dependency_utils.M2Coordinate`
        """
                if classpath_products:
                    jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
                        (jar_lib, ))
                    for _, jar_entry in jar_products:
                        coordinate = jar_entry.coordinate
                        # We drop classifier and type_ since those fields are represented in the global
                        # libraries dict and here we just want the key into that dict (see `_jar_id`).
                        yield M2Coordinate(org=coordinate.org,
                                           name=coordinate.name,
                                           rev=coordinate.rev)

            target_libraries = OrderedSet()
            if isinstance(current_target, JarLibrary):
                target_libraries = OrderedSet(
                    iter_transitive_jars(current_target))
            for dep in current_target.dependencies:
                info['targets'].append(dep.address.spec)
                if isinstance(dep, JarLibrary):
                    for jar in dep.jar_dependencies:
                        target_libraries.add(
                            M2Coordinate(jar.org, jar.name, jar.rev))
                    # Add all the jars pulled in by this jar_library
                    target_libraries.update(iter_transitive_jars(dep))
                if isinstance(dep, Resources):
                    resource_target_map[dep] = current_target

            if isinstance(current_target, ScalaLibrary):
                for dep in current_target.java_sources:
                    info['targets'].append(dep.address.spec)
                    process_target(dep)

            if isinstance(current_target, JvmTarget):
                info['excludes'] = [
                    self._exclude_id(exclude)
                    for exclude in current_target.excludes
                ]
                info['platform'] = current_target.platform.name
                if hasattr(current_target, 'test_platform'):
                    info['test_platform'] = current_target.test_platform.name

            info['roots'] = map(
                lambda (source_root, package_prefix): {
                    'source_root': source_root,
                    'package_prefix': package_prefix
                }, self._source_roots_for_target(current_target))

            if classpath_products:
                info['libraries'] = [
                    self._jar_id(lib) for lib in target_libraries
                ]
            targets_map[current_target.address.spec] = info
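
To make the structure concrete, a minimal sketch (hypothetical values, not produced by the source) of the `info` entry that ends up in `targets_map` for a simple JVM target:

    info_example = {
        'targets': ['3rdparty/jvm:guava'],                 # direct dependency specs
        'libraries': ['com.google.guava:guava:20.0'],      # keys into the global libraries dict
        'roots': [{'source_root': 'src/scala', 'package_prefix': 'com.example'}],
        'id': 'src.scala.com.example.lib',
        'target_type': 'SOURCE',       # assumed value of ExportTask.SourceRootTypes.SOURCE
        'is_synthetic': False,
        'pants_target_type': 'scala_library',
        'scope': 'default',
        'is_target_root': True,
    }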
Example #9
    def gen(self, partial_cmd, targets):
        with self.invalidated(
                targets, invalidate_dependents=True) as invalidation_check:
            invalid_targets = []
            for vt in invalidation_check.invalid_vts:
                invalid_targets.extend(vt.targets)

            compiler = partial_cmd.compiler
            import_paths, changed_srcs = compiler.calc_srcs(
                invalid_targets, self.is_gentarget)
            outdir = self._outdir(partial_cmd)
            if changed_srcs:
                args = []

                for import_path in import_paths:
                    args.extend(['--import-path', import_path])

                args.extend(['--language', partial_cmd.language])

                for lhs, rhs in partial_cmd.namespace_map:
                    args.extend(['--namespace-map', '%s=%s' % (lhs, rhs)])

                if partial_cmd.rpc_style == 'ostrich':
                    args.append('--finagle')
                    args.append('--ostrich')
                elif partial_cmd.rpc_style == 'finagle':
                    args.append('--finagle')

                args.extend(['--dest', outdir])
                safe_mkdir(outdir)

                if not compiler.strict:
                    args.append('--disable-strict')

                if self.get_options().verbose:
                    args.append('--verbose')

                gen_file_map_path = os.path.relpath(self._tempname())
                args.extend(['--gen-file-map', gen_file_map_path])

                args.extend(changed_srcs)

                classpath = self.tool_classpath(compiler.name)
                returncode = self.runjava(classpath=classpath,
                                          main=compiler.main,
                                          jvm_options=compiler.jvm_args,
                                          args=args,
                                          workunit_name=compiler.name)
                try:
                    if 0 == returncode:
                        gen_files_for_source = self.parse_gen_file_map(
                            gen_file_map_path, outdir)
                    else:
                        gen_files_for_source = None
                finally:
                    os.remove(gen_file_map_path)

                if 0 != returncode:
                    raise TaskError('java %s ... exited non-zero (%i)' %
                                    (compiler.main, returncode))
                self.write_gen_file_map(gen_files_for_source, invalid_targets,
                                        outdir)

        return self.gen_file_map(targets, outdir)
Example #10
 def _maybe_open_report(self, report_file_path):
   if report_file_path:
     try:
       desktop.ui_open(report_file_path)
     except desktop.OpenError as e:
       raise TaskError(e)
Example #11
    def compile(self, args, classpath, sources, classes_output_dir,
                upstream_analysis, analysis_file, log_file, settings):
        # We add compiler_classpath to ensure the scala-library jar is on the classpath.
        # TODO: This also adds the compiler jar to the classpath, which compiled code shouldn't
        # usually need. Be more selective?
        # TODO(John Sirois): Do we need to do this at all?  If adding scala-library to the classpath is
        # only intended to allow target authors to omit a scala-library dependency, then ScalaLibrary
        # already overrides traversable_dependency_specs to achieve the same end; arguably at a more
        # appropriate level and certainly at a more appropriate granularity.
        relativized_classpath = relativize_paths(
            self.compiler_classpath() + classpath, get_buildroot())

        zinc_args = []

        zinc_args.extend([
            '-log-level',
            self.get_options().level, '-analysis-cache', analysis_file,
            '-classpath', ':'.join(relativized_classpath), '-d',
            classes_output_dir
        ])
        if not self.get_options().colors:
            zinc_args.append('-no-color')
        if not self.get_options().name_hashing:
            zinc_args.append('-no-name-hashing')
        if log_file:
            zinc_args.extend(['-capture-log', log_file])

        zinc_args.extend(
            ['-compiler-interface',
             self.tool_jar('compiler-interface')])
        zinc_args.extend(['-sbt-interface', self.tool_jar('sbt-interface')])
        zinc_args.extend(['-scala-path', ':'.join(self.compiler_classpath())])

        zinc_args += self.plugin_args()
        if upstream_analysis:
            zinc_args.extend([
                '-analysis-map', ','.join('{}:{}'.format(*kv)
                                          for kv in upstream_analysis.items())
            ])

        zinc_args += args

        zinc_args.extend([
            '-C-source',
            '-C{}'.format(settings.source_level),
            '-C-target',
            '-C{}'.format(settings.target_level),
        ])
        zinc_args.extend(settings.args)

        jvm_options = list(self._jvm_options)

        zinc_args.extend(sources)

        self.log_zinc_file(analysis_file)
        if self.runjava(classpath=self.zinc_classpath(),
                        main=self._ZINC_MAIN,
                        jvm_options=jvm_options,
                        args=zinc_args,
                        workunit_name='zinc',
                        workunit_labels=[WorkUnitLabel.COMPILER]):
            raise TaskError('Zinc compile failed.')
Example #12
  def context(self, for_task_types=None, for_subsystems=None, options=None,
              target_roots=None, console_outstream=None, workspace=None,
              scheduler=None, **kwargs):
    """
    :API: public

    :param dict **kwargs: keyword arguments passed in to `create_options_for_optionables`.
    """
    # Many tests use source root functionality via the SourceRootConfig.global_instance().
    # (typically accessed via Target.target_base), so we always set it up, for convenience.
    for_subsystems = set(for_subsystems or ())
    for subsystem in for_subsystems:
      if subsystem.options_scope is None:
        raise TaskError('You must set a scope on your subsystem type before using it in tests.')

    optionables = {SourceRootConfig} | self._build_configuration.optionables() | for_subsystems

    for_task_types = for_task_types or ()
    for task_type in for_task_types:
      scope = task_type.options_scope
      if scope is None:
        raise TaskError('You must set a scope on your task type before using it in tests.')
      optionables.add(task_type)
      # If task is expected to inherit goal-level options, register those directly on the task,
      # by subclassing the goal options registrar and setting its scope to the task scope.
      if issubclass(task_type, GoalOptionsMixin):
        subclass_name = 'test_{}_{}_{}'.format(
          task_type.__name__, task_type.goal_options_registrar_cls.options_scope,
          task_type.options_scope)
        if PY2:
          subclass_name = subclass_name.encode('utf-8')
        optionables.add(type(subclass_name, (task_type.goal_options_registrar_cls, ),
                             {'options_scope': task_type.options_scope}))

    # Now expand to all deps.
    all_optionables = set()
    for optionable in optionables:
      all_optionables.update(si.optionable_cls for si in optionable.known_scope_infos())

    # Now default the option values and override with any caller-specified values.
    # TODO(benjy): Get rid of the options arg, and require tests to call set_options.
    options = options.copy() if options else {}
    for s, opts in self.options.items():
      scoped_opts = options.setdefault(s, {})
      scoped_opts.update(opts)

    fake_options = create_options_for_optionables(
      all_optionables, options=options, **kwargs)

    Subsystem.reset(reset_options=True)
    Subsystem.set_options(fake_options)

    context = create_context_from_options(fake_options,
                                          target_roots=target_roots,
                                          build_graph=self.build_graph,
                                          build_file_parser=self.build_file_parser,
                                          address_mapper=self.address_mapper,
                                          console_outstream=console_outstream,
                                          workspace=workspace,
                                          scheduler=scheduler)
    return context
Example #13
    def __init__(self, config, options, log):
        self._log = log

        # TODO(pl): This is super awful, but options doesn't have a nice way to get out
        # attributes that might not be there, and even then the attribute value might be
        # None, which we still want to override
        # Benjy thinks we should probably hoist these options to the global set of options,
        # rather than just keeping them within IvyResolve.setup_parser
        self._mutable_pattern = (
            getattr(options, 'ivy_mutable_pattern', None)
            or config.get('ivy-resolve', 'mutable_pattern', default=None))

        self._transitive = config.getbool('ivy-resolve',
                                          'transitive',
                                          default=True)
        self._args = config.getlist('ivy-resolve', 'args', default=[])
        self._jvm_options = config.getlist('ivy-resolve',
                                           'jvm_args',
                                           default=[])
        # Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
        self._jvm_options.append('-Dsun.io.useCanonCaches=false')
        self._workdir = os.path.join(config.getdefault('pants_workdir'), 'ivy')
        self._template_path = self.IVY_TEMPLATE_PATH

        if self._mutable_pattern:
            try:
                self._mutable_pattern = re.compile(self._mutable_pattern)
            except re.error as e:
                raise TaskError('Invalid mutable pattern specified: %s %s' %
                                (self._mutable_pattern, e))

        def parse_override(override):
            match = re.match(r'^([^#]+)#([^=]+)=([^\s]+)$', override)
            if not match:
                raise TaskError('Invalid dependency override: %s' % override)

            org, name, rev_or_url = match.groups()

            def fmt_message(message, template):
                return message % dict(
                    overridden='%s#%s;%s' %
                    (template.org, template.module, template.version),
                    rev=rev_or_url,
                    url=rev_or_url)

            def replace_rev(template):
                self._log.info(
                    fmt_message('Overrode %(overridden)s with rev %(rev)s',
                                template))
                return template.extend(version=rev_or_url,
                                       url=None,
                                       force=True)

            def replace_url(template):
                self._log.info(
                    fmt_message(
                        'Overrode %(overridden)s with snapshot at %(url)s',
                        template))
                return template.extend(version='SNAPSHOT',
                                       url=rev_or_url,
                                       force=True)

            replace = replace_url if re.match(r'^\w+://.+',
                                              rev_or_url) else replace_rev
            return (org, name), replace

        self._overrides = {}
        # TODO(pl): See above comment wrt options
        if hasattr(options,
                   'ivy_resolve_overrides') and options.ivy_resolve_overrides:
            self._overrides.update(
                parse_override(o) for o in options.ivy_resolve_overrides)
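
A tiny sketch (hypothetical override string, not from the source) of what the override pattern above accepts and how it splits:

    import re

    match = re.match(r'^([^#]+)#([^=]+)=([^\s]+)$', 'com.example#mylib=1.2.3')
    org, name, rev_or_url = match.groups()   # ('com.example', 'mylib', '1.2.3')
    # A URL on the right-hand side selects replace_url instead of replace_rev:
    re.match(r'^\w+://.+', 'https://repo.example.com/mylib.jar')  # truthy -> replace_url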
Example #14
    def _bootstrap_shaded_jvm_tool(self, jvm_tool, targets):
        fingerprint_strategy = ShadedToolFingerprintStrategy(
            jvm_tool.main, custom_rules=jvm_tool.custom_rules)

        with self.invalidated(
                targets,
                # We're the only dependent in reality since we shade.
                invalidate_dependents=False,
                fingerprint_strategy=fingerprint_strategy
        ) as invalidation_check:

            # If there are no vts, then there are no resolvable targets, so we exit early with an empty
            # classpath.  This supports the optional tool classpath case.
            if not invalidation_check.all_vts:
                return []

            tool_vts = self.tool_vts(invalidation_check)
            jar_name = '{main}-{hash}.jar'.format(main=jvm_tool.main,
                                                  hash=tool_vts.cache_key.hash)
            shaded_jar = os.path.join(self._tool_cache_path, 'shaded_jars',
                                      jar_name)

            if not invalidation_check.invalid_vts and os.path.exists(
                    shaded_jar):
                return [shaded_jar]

            # Ensure we have a single binary jar we can shade.
            binary_jar = os.path.join(self._tool_cache_path, 'binary_jars',
                                      jar_name)
            safe_mkdir_for(binary_jar)

            classpath = self._bootstrap_classpath(jvm_tool, targets)
            if len(classpath) == 1:
                shutil.copy(classpath[0], binary_jar)
            else:
                with self.open_jar(binary_jar) as jar:
                    for classpath_jar in classpath:
                        jar.writejar(classpath_jar)
                    jar.main(jvm_tool.main)

            # Now shade the binary jar and return that single jar as the safe tool classpath.
            safe_mkdir_for(shaded_jar)
            with self.shader.binary_shader(
                    shaded_jar,
                    jvm_tool.main,
                    binary_jar,
                    custom_rules=jvm_tool.custom_rules,
                    jvm_options=self.get_options().jvm_options) as shader:
                try:
                    result = util.execute_runner(
                        shader,
                        workunit_factory=self.context.new_workunit,
                        workunit_name='shade-{}'.format(jvm_tool.key))
                    if result != 0:
                        raise TaskError(
                            "Shading of tool '{key}' with main class {main} for {scope} failed "
                            "with exit code {result}, command run was:\n\t{cmd}"
                            .format(key=jvm_tool.key,
                                    main=jvm_tool.main,
                                    scope=jvm_tool.scope,
                                    result=result,
                                    cmd=shader.cmd))
                except Executor.Error as e:
                    raise TaskError(
                        "Shading of tool '{key}' with main class {main} for {scope} failed "
                        "with: {exception}".format(key=jvm_tool.key,
                                                   main=jvm_tool.main,
                                                   scope=jvm_tool.scope,
                                                   exception=e))

            if self.artifact_cache_writes_enabled():
                self.update_artifact_cache([(tool_vts, [shaded_jar])])

            return [shaded_jar]
Example #15
 def process_result(self, result):
     if result != 0:
         raise TaskError(f"Targets failed scalafix checks.")
Example #16
 def abort(error):
     raise TaskError('Failed to copy from %s to %s: %s' %
                     (from_base, to_base, error))
Example #17
 def ivy_cache_dir(self):
     ret = self.context.products.get_data('ivy_cache_dir')
     if ret is None:
         raise TaskError(
             'ivy_cache_dir product accessed before it was created.')
     return ret
Example #18
    def _run_pytest(self, workdirs, targets):
        if not targets:
            return PytestResult.rc(0)

        if self._run_in_chroot:
            path_func = lambda rel_src: rel_src
        else:
            source_chroot = os.path.relpath(self._source_chroot_path(targets),
                                            get_buildroot())
            path_func = lambda rel_src: os.path.join(source_chroot, rel_src)

        sources_map = OrderedDict()  # Path from chroot -> Path from buildroot.
        for t in targets:
            for p in t.sources_relative_to_source_root():
                sources_map[path_func(p)] = os.path.join(t.target_base, p)

        if not sources_map:
            return PytestResult.rc(0)

        with self._test_runner(workdirs, targets,
                               sources_map) as (pex, test_args):
            # Validate that the user didn't provide any passthru args that conflict
            # with those we must set ourselves.
            for arg in self.get_passthru_args():
                if arg.startswith('--junitxml') or arg.startswith(
                        '--confcutdir'):
                    raise TaskError(
                        'Cannot pass this arg through to pytest: {}'.format(
                            arg))

            junitxml_path = workdirs.junitxml_path(*targets)

            # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
            # top of the buildroot. This prevents conftest.py files from outside (e.g. in users home dirs)
            # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
            args = [
                '--junitxml', junitxml_path, '--confcutdir',
                get_buildroot(), '--continue-on-collection-errors'
            ]
            if self.get_options().fail_fast:
                args.extend(['-x'])
            if self._debug:
                args.extend(['-s'])
            if self.get_options().colors:
                args.extend(['--color', 'yes'])
            for options in self.get_options().options + self.get_passthru_args(
            ):
                args.extend(safe_shlex_split(options))
            args.extend(test_args)
            args.extend(sources_map.keys())

            # We want to ensure our reporting based off junit xml is from this run so kill results from
            # prior runs.
            if os.path.exists(junitxml_path):
                os.unlink(junitxml_path)

            with self._maybe_run_in_chroot(targets):
                result = self._do_run_tests_with_args(pex, args)

            # There was a problem prior to test execution preventing junit xml file creation so just let
            # the failure result bubble.
            if not os.path.exists(junitxml_path):
                return result

            failed_targets = self._get_failed_targets_from_junitxml(
                junitxml_path, targets)

            def parse_error_handler(parse_error):
                # Simple error handler to pass to xml parsing function.
                raise TaskError('Error parsing xml file at {}: {}'.format(
                    parse_error.xml_path, parse_error.cause))

            all_tests_info = self.parse_test_info(
                junitxml_path, parse_error_handler,
                ['file', 'name', 'classname'])
            for test_name, test_info in all_tests_info.items():
                test_target = self._get_target_from_test(test_info, targets)
                self.report_all_info_for_single_test(self.options_scope,
                                                     test_target, test_name,
                                                     test_info)

            return result.with_failed_targets(failed_targets)
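
The example above layers its pytest command line: flags the task must own (--junitxml, --confcutdir), task options (-x, -s, --color), then the user's passthru args, and it rejects any passthru arg that collides with the reserved flags. A minimal standalone sketch of that layering using only the standard library (the function and constant names are illustrative, and shlex.split stands in for Pants' safe_shlex_split):

import shlex

RESERVED_PREFIXES = ('--junitxml', '--confcutdir')  # flags the runner must control itself


def build_pytest_args(junitxml_path, buildroot, passthru_args, fail_fast=False, debug=False, colors=False):
    """Assemble a pytest argv, refusing passthru args that clash with reserved flags."""
    for arg in passthru_args:
        if arg.startswith(RESERVED_PREFIXES):
            raise ValueError('Cannot pass this arg through to pytest: {}'.format(arg))

    args = ['--junitxml', junitxml_path,
            '--confcutdir', buildroot,
            '--continue-on-collection-errors']
    if fail_fast:
        args.append('-x')
    if debug:
        args.append('-s')
    if colors:
        args.extend(['--color', 'yes'])
    # Passthru options may arrive as quoted shell strings, so split them the way the task does.
    for opt in passthru_args:
        args.extend(shlex.split(opt))
    return args
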
Example #19
0
    def execute(self):
        # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
        # sources when needed. We ignore PythonDistribution targets.
        def is_exported_python_target(t):
            return t.is_original and self.has_provides(
                t) and not is_local_python_dist(t)

        exported_python_targets = OrderedSet(t
                                             for t in self.context.target_roots
                                             if is_exported_python_target(t))
        if not exported_python_targets:
            raise TaskError('setup-py target(s) must provide an artifact.')

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created = {}

        def create(exported_python_target):
            if exported_python_target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(
                        exported_python_target))
                subject = self.derived_by_original.get(exported_python_target,
                                                       exported_python_target)
                setup_dir, dependencies = self.create_setup_py(
                    subject, dist_dir)
                created[exported_python_target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if is_exported_python_target(dep):
                            create(dep)

        for exported_python_target in exported_python_targets:
            create(exported_python_target)

        interpreter = self.context.products.get_data(PythonInterpreter)
        python_dists = self.context.products.register_data(
            self.PYTHON_DISTS_PRODUCT, {})
        for exported_python_target in reversed(
                sort_targets(list(created.keys()))):
            setup_dir = created.get(exported_python_target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir, interpreter=interpreter)
                    tgz_name = os.path.basename(setup_runner.sdist())
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(setup_runner.sdist(), sdist_path)
                    safe_rmtree(setup_dir)
                    python_dists[exported_python_target] = sdist_path
                else:
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    split_command = safe_shlex_split(self._run)
                    setup_runner = SetupPyRunner(setup_dir,
                                                 split_command,
                                                 interpreter=interpreter)
                    setup_runner.run()
                    python_dists[exported_python_target] = setup_dir
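
The example above creates a setup.py project for every exported target first and only then runs them, using reversed(sort_targets(...)) so that each exported target is processed before anything that depends on it. A self-contained sketch of that dependency-first ordering using Kahn's algorithm (the dict-of-dependencies input shape is assumed for illustration; it is not the Pants API):

from collections import defaultdict, deque


def dependency_order(deps_by_target):
    """Yield targets so that every dependency is emitted before the targets that depend on it."""
    dependents = defaultdict(list)
    indegree = {tgt: 0 for tgt in deps_by_target}
    for tgt, deps in deps_by_target.items():
        for dep in deps:
            indegree.setdefault(dep, 0)
            dependents[dep].append(tgt)
            indegree[tgt] += 1
    ready = deque(tgt for tgt, missing in indegree.items() if missing == 0)
    while ready:
        tgt = ready.popleft()
        yield tgt
        for dependent in dependents[tgt]:
            indegree[dependent] -= 1
            if indegree[dependent] == 0:
                ready.append(dependent)


# list(dependency_order({'app': ['lib'], 'lib': []})) -> ['lib', 'app'], so an
# "sdist upload" of lib completes before anything that depends on it is uploaded.
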
Example #20
0
    def _ivy_resolve(self,
                     targets,
                     executor=None,
                     silent=False,
                     workunit_name=None,
                     confs=None,
                     extra_args=None,
                     invalidate_dependents=False,
                     pinned_artifacts=None):
        """Resolves external dependencies for the given targets."""
        # If there are no targets, we don't need to do a resolve.
        if not targets:
            return NO_RESOLVE_RUN_RESULT
        confs = confs or ('default', )
        fingerprint_strategy = IvyResolveFingerprintStrategy(confs)
        with self.invalidated(targets,
                              invalidate_dependents=invalidate_dependents,
                              silent=silent,
                              fingerprint_strategy=fingerprint_strategy
                              ) as invalidation_check:
            # In case all the targets were filtered out because they didn't participate in fingerprinting.
            if not invalidation_check.all_vts:
                return NO_RESOLVE_RUN_RESULT
            resolve_vts = VersionedTargetSet.from_versioned_targets(
                invalidation_check.all_vts)
            resolve_hash_name = resolve_vts.cache_key.hash
            global_ivy_workdir = os.path.join(
                self.context.options.for_global_scope().pants_workdir, 'ivy')
            fetch = self._create_ivy_fetch_step(
                confs, resolve_hash_name, pinned_artifacts,
                self.get_options().soft_excludes, self.ivy_cache_dir,
                global_ivy_workdir)

            resolve = self._create_ivy_resolve_step(
                confs, resolve_hash_name, pinned_artifacts,
                self.get_options().soft_excludes, self.ivy_cache_dir,
                global_ivy_workdir, self.global_excludes)
            result = self._perform_resolution(
                fetch,
                resolve,
                executor,
                extra_args,
                invalidation_check,
                resolve_vts,
                resolve_vts.targets,
                workunit_name,
            )

            # NOTE(mateo): Wiring up our own reports; the full ivy report is too heavyweight for our purposes.
            if (result.resolved_artifact_paths and self.resolution_report_outdir
                    and not self.get_options().disable_reports):
                # This is added to get a reasonable handle for managed_dependencies target sets.
                # If there is more than one VT it defaults to the VTS.id, which is a non-human-readable cache key.
                # If we wanted to be more performant than rigorous, we could bail after the first query.
                managed_dependencies = set(
                    j.target.managed_dependencies
                    for j in invalidation_check.all_vts
                    if isinstance(j.target, JarLibrary)
                    and j.target.managed_dependencies is not None)

                if managed_dependencies and len(managed_dependencies) > 1:
                    raise TaskError(
                        'Each partition should be mapped to a single managed_dependencies target: (was: {})\n Targets: {}'
                        .format(managed_dependencies, resolve_vts.targets))
                default_target_name = JarDependencyManagement.global_instance()._default_target.name
                partition_name = (list(managed_dependencies)[0].name
                                  if managed_dependencies else default_target_name)
                self.write_resolve_report(resolve.frozen_resolve_file,
                                          partition_name)
            return result
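
The report wiring above reduces the managed_dependencies targets found across the partition's JarLibrary targets to a single name, failing if there is more than one and falling back to a global default otherwise. A small self-contained sketch of that selection (ManagedDeps is a hypothetical stand-in for the Pants target object):

from collections import namedtuple

# Hypothetical stand-in for a managed_dependencies target.
ManagedDeps = namedtuple('ManagedDeps', ['name'])


def partition_report_name(managed_deps_per_target, default_name):
    """Pick the single managed_dependencies name for the partition, or fall back to the default."""
    managed = {m for m in managed_deps_per_target if m is not None}
    if len(managed) > 1:
        raise ValueError(
            'Each partition should be mapped to a single managed_dependencies target: {}'.format(managed))
    return next(iter(managed)).name if managed else default_name


# partition_report_name([ManagedDeps('3rdparty-jvm'), None], 'default') -> '3rdparty-jvm'
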
Example #21
0
    def execute(self, targets):
        self.check_clean_master(commit=(not self.dryrun and self.commit))

        exported_targets = self.exported_targets()
        self.check_targets(exported_targets)

        pushdbs = {}

        def get_db(tgt):
            # TODO(tdesai) Handle resource type in get_db.
            if tgt.provides is None:
                raise TaskError(
                    'trying to publish target %r which does not provide an artifact'
                    % tgt)
            dbfile = tgt.provides.repo.push_db
            result = pushdbs.get(dbfile)
            if not result:
                db = PushDb.load(dbfile)
                repo = self.repos[tgt.provides.repo.name]
                result = (db, dbfile, repo)
                pushdbs[dbfile] = result
            return result

        def get_pushdb(tgt):
            return get_db(tgt)[0]

        def fingerprint_internal(tgt):
            pushdb, _, _ = get_db(tgt)
            _, _, _, fingerprint = pushdb.as_jar_with_version(tgt)
            return fingerprint or '0.0.0'

        def artifact_path(jar,
                          version,
                          name=None,
                          suffix='',
                          extension='jar',
                          artifact_ext=''):
            return os.path.join(
                self.workdir, jar.org, jar.name + artifact_ext,
                '%s%s-%s%s.%s' %
                ((name or jar.name), artifact_ext if name != 'ivy' else '',
                 version, suffix, extension))

        def stage_artifact(tgt,
                           jar,
                           version,
                           changelog,
                           confs=None,
                           artifact_ext=''):
            def path(name=None, suffix='', extension='jar'):
                return artifact_path(jar,
                                     version,
                                     name=name,
                                     suffix=suffix,
                                     extension=extension,
                                     artifact_ext=artifact_ext)

            with safe_open(path(suffix='-CHANGELOG', extension='txt'),
                           'w') as changelog_file:
                changelog_file.write(changelog)
            ivyxml = path(name='ivy', extension='xml')

            IvyWriter(get_pushdb).write(tgt, ivyxml, confs=confs)
            PomWriter(get_pushdb).write(tgt, path(extension='pom'))

            return ivyxml

        def copy_artifact(tgt, version, typename, suffix='', artifact_ext=''):
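            # NB: `jar` below is not a parameter; it is a free variable bound to the jar of the
            # target currently being published in the enclosing loop, so this helper is only safe
            # to call (via stage_artifacts) after that loop variable has been assigned.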
            genmap = self.context.products.get(typename)
            for basedir, jars in genmap.get(tgt).items():
                for artifact in jars:
                    path = artifact_path(jar,
                                         version,
                                         suffix=suffix,
                                         artifact_ext=artifact_ext)
                    shutil.copy(os.path.join(basedir, artifact), path)

        def stage_artifacts(tgt, jar, version, changelog, confs=None):
            ivyxml_path = stage_artifact(tgt, jar, version, changelog, confs)
            copy_artifact(tgt, version, typename='jars')
            copy_artifact(tgt,
                          version,
                          typename='source_jars',
                          suffix='-sources')

            jarmap = self.context.products.get('javadoc_jars')
            if not jarmap.empty() and (tgt.is_java or tgt.is_scala):
                copy_artifact(tgt,
                              version,
                              typename='javadoc_jars',
                              suffix='-javadoc')

            return ivyxml_path

        if self.overrides:
            print('Publishing with revision overrides:\n  %s' %
                  '\n  '.join('%s=%s' % (coordinate(org, name), rev)
                              for (org, name), rev in self.overrides.items()))

        head_sha = self.scm.commit_id

        safe_rmtree(self.workdir)
        published = []
        skip = (self.restart_at is not None)
        for target in exported_targets:
            pushdb, dbfile, repo = get_db(target)
            jar, semver, sha, fingerprint = pushdb.as_jar_with_version(target)

            published.append(jar)

            if skip and (jar.org, jar.name) == self.restart_at:
                skip = False

            newver = self.overrides.get((jar.org, jar.name)) or semver.bump()
            if self.snapshot:
                newver = newver.make_snapshot()

            if newver <= semver:
                raise TaskError(
                    'Requested version %s must be greater than the current version %s'
                    % (newver.version(), semver.version()))

            newfingerprint = self.fingerprint(target, fingerprint_internal)
            no_changes = newfingerprint == fingerprint

            if no_changes:
                changelog = 'No changes for %s - forced push.\n' % jar_coordinate(
                    jar, semver.version())
            else:
                changelog = self.changelog(
                    target, sha) or 'Direct dependencies changed.\n'

            if no_changes and not self.force:
                print('No changes for %s' %
                      jar_coordinate(jar, semver.version()))
                stage_artifacts(target, jar,
                                (newver if self.force else semver).version(),
                                changelog)
            elif skip:
                print('Skipping %s to resume at %s' %
                      (jar_coordinate(
                          jar, (newver if self.force else semver).version()),
                       coordinate(self.restart_at[0], self.restart_at[1])))
                stage_artifacts(target, jar, semver.version(), changelog)
            else:
                if not self.dryrun:
                    # Confirm push looks good
                    if no_changes:
                        print(changelog)
                    else:
                        print('\nChanges for %s since %s @ %s:\n\n%s' %
                              (coordinate(jar.org, jar.name), semver.version(),
                               sha, changelog))
                    if os.isatty(sys.stdin.fileno()):
                        push = raw_input(
                            'Publish %s with revision %s ? [y|N] ' %
                            (coordinate(jar.org, jar.name), newver.version()))
                        print('\n')
                        if push.strip().lower() != 'y':
                            raise TaskError('User aborted push')

                pushdb.set_version(target, newver, head_sha, newfingerprint)

                confs = set(repo['confs'])
                if self.context.options.jar_create_sources:
                    confs.add('sources')
                if self.context.options.jar_create_javadoc:
                    confs.add('docs')
                ivyxml = stage_artifacts(target,
                                         jar,
                                         newver.version(),
                                         changelog,
                                         confs=list(confs))

                if self.dryrun:
                    print('Skipping publish of %s in test mode.' %
                          jar_coordinate(jar, newver.version()))
                else:
                    resolver = repo['resolver']
                    path = repo.get('path')

                    # Get authentication for the publish repo if needed
                    jvm_args = self._jvmargs
                    if repo.get('auth'):
                        user = repo.get('username')
                        password = repo.get('password')
                        if user and password:
                            jvm_args.append('-Dlogin=%s' % user)
                            jvm_args.append('-Dpassword=%s' % password)
                        else:
                            raise TaskError(
                                'Unable to publish to %s. %s' %
                                (repo['resolver'], repo.get('help', '')))

                    # Do the publish
                    def publish(ivyxml_path):
                        ivysettings = self.generate_ivysettings(
                            published, publish_local=path)
                        args = [
                            '-settings',
                            ivysettings,
                            '-ivy',
                            ivyxml_path,
                            '-deliverto',
                            '%s/[organisation]/[module]/ivy-[revision].xml' %
                            self.workdir,
                            '-publish',
                            resolver,
                            '-publishpattern',
                            '%s/[organisation]/[module]/'
                            '[artifact]-[revision](-[classifier]).[ext]' %
                            self.workdir,
                            '-revision',
                            newver.version(),
                            '-m2compatible',
                        ]

                        if LogOptions.stderr_log_level() == logging.DEBUG:
                            args.append('-verbose')

                        if self.snapshot:
                            args.append('-overwrite')

                        try:
                            ivy = Bootstrapper.default_ivy()
                            ivy.execute(
                                jvm_options=jvm_args,
                                args=args,
                                workunit_factory=self.context.new_workunit,
                                workunit_name='jar-publish')
                        except (Bootstrapper.Error, Ivy.Error) as e:
                            raise TaskError(
                                'Failed to push %s! %s' %
                                (jar_coordinate(jar, newver.version()), e))

                    publish(ivyxml)

                    if self.commit:
                        org = jar.org
                        name = jar.name
                        rev = newver.version()
                        args = dict(org=org,
                                    name=name,
                                    rev=rev,
                                    coordinate=coordinate(org, name, rev),
                                    user=getpass.getuser(),
                                    cause='with forced revision' if
                                    (org,
                                     name) in self.overrides else '(autoinc)')

                        pushdb.dump(dbfile)
                        self.commit_push(coordinate(org, name, rev))
                        self.scm.refresh()
                        self.scm.tag(
                            '%(org)s-%(name)s-%(rev)s' % args,
                            message=
                            'Publish of %(coordinate)s initiated by %(user)s %(cause)s'
                            % args)
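
Every file staged by the example above is named by the single artifact_path() template: <workdir>/<org>/<name><artifact_ext>/<name>-<version><suffix>.<ext>, with the special case that the ivy descriptor drops the artifact_ext from its file name. A standalone sketch of that layout (the directory and module names in the usage note are purely illustrative):

import os


def artifact_path(workdir, org, name, version, file_name=None, suffix='', extension='jar', artifact_ext=''):
    """Mirror the staging layout: one directory per module, versioned and suffixed file names."""
    base = file_name or name
    return os.path.join(
        workdir, org, name + artifact_ext,
        '%s%s-%s%s.%s' % (base, artifact_ext if file_name != 'ivy' else '', version, suffix, extension))


# artifact_path('/tmp/publish', 'com.example', 'util', '1.2.3', suffix='-sources')
#   -> '/tmp/publish/com.example/util/util-1.2.3-sources.jar'
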
Example #22
0
    def write_resolve_report(self, frozen_resolve_file, partition_name):
        safe_mkdir(self.resolution_report_outdir)
        out_file = os.path.join(self.resolution_report_outdir,
                                partition_name + '.json')

        def lines_from_json(json_str):
            # type: (str) -> List[str]
            return [s.strip() for s in json_str.splitlines()]

        def _diff(json1, json2, fromfile='committed', tofile='generated'):
            # type: (str, str, str, str) -> List[str]
            return list(
                unified_diff(lines_from_json(json1),
                             lines_from_json(json2),
                             fromfile=fromfile,
                             tofile=tofile))

        try:
            with open(frozen_resolve_file) as fp:
                # We alphabetize the 3rdparty names and their resolved coordinates to get a stable diff in the SCM.
                parsed = json.load(fp, object_pairs_hook=OrderedDict)

                for target, coords in parsed['default'][
                        'target_to_coords'].items():
                    parsed['default']['target_to_coords'][target] = sorted(
                        coords)

                parsed = OrderedDict(
                    sorted((key, val) for key, val in parsed['default']
                           ['target_to_coords'].items()
                           if not key.startswith('//')))

                # By default `json.dumps` uses the separators ', ' and ': '. The second one is fine, but the first,
                # used in conjunction with `indent`, produces trailing whitespace on every line. Many devs have IDEs
                # that strip trailing whitespace, which would create giant unwanted diffs. Because trailing whitespace
                # is generally frowned upon, we override that setting so as to produce trailing-whitespace-free json.
                # -- Mathieu
                new_report = json.dumps(parsed,
                                        indent=JSON_INDENT,
                                        separators=(',', ': '))

                if self.get_options().fail_on_diff:
                    with open(out_file, 'r') as old_report_fd:
                        old_report = old_report_fd.read()

                    diff = _diff(old_report, new_report)
                    if diff:
                        pretty_diff = "\n".join(diff)
                        raise TaskError(
                            '\n{pretty_diff}\n\n'
                            'Committed dependency file and resolved dependencies are different, '
                            'please make sure you committed the latest dependency file (@ {path}). '
                            'Check 3rdparty/reports/jvm/README.md for more help.'
                            .format(
                                pretty_diff=pretty_diff,
                                path=out_file,
                            ))

                with safe_concurrent_creation(out_file) as tmp_filename:
                    with open(tmp_filename, 'wb') as f:
                        f.write(new_report)
        except IOError as e:
            raise TaskError(
                'Failed to dump resolution report to {}: {}'.format(
                    out_file, e))
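
The report writer above does three separable things that a stdlib-only sketch can show: sort the target-to-coordinates mapping for a stable SCM diff, serialize it without trailing whitespace, and compare it against the committed copy with a unified diff (the helper names here are illustrative, not part of the task):

import difflib
import json
from collections import OrderedDict


def render_report(target_to_coords, indent=2):
    """Render a stable JSON report: sorted target keys, sorted coordinate lists, no trailing spaces."""
    stable = OrderedDict((target, sorted(coords))
                         for target, coords in sorted(target_to_coords.items()))
    # (',', ': ') avoids the trailing space that json.dumps (notably on Python 2) emits after
    # commas when `indent` is set, which IDEs would otherwise strip and churn the diff.
    return json.dumps(stable, indent=indent, separators=(',', ': '))


def diff_reports(committed, generated):
    """Return unified-diff lines between the committed report and a freshly generated one."""
    return list(difflib.unified_diff(committed.splitlines(), generated.splitlines(),
                                     fromfile='committed', tofile='generated'))
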
Example #23
0
 def process_info_file(cp_elem, info_file):
   plugin_info = ElementTree.parse(info_file).getroot()
   if plugin_info.tag != 'plugin':
     raise TaskError('File {} in {} is not a valid scalac plugin descriptor'.format(
         _SCALAC_PLUGIN_INFO_FILE, cp_elem))
   return plugin_info.find('name').text
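
The descriptor check above expects a <plugin> root element with a <name> child. A self-contained sketch of the same parse, reading from an in-memory string instead of a classpath entry (the sample descriptor is only an illustration):

from xml.etree import ElementTree


def plugin_name_from_descriptor(xml_text):
    """Return the plugin name declared in a scalac plugin descriptor, or raise if it is malformed."""
    root = ElementTree.fromstring(xml_text)
    if root.tag != 'plugin':
        raise ValueError('not a valid scalac plugin descriptor')
    name = root.find('name')
    if name is None or not name.text:
        raise ValueError('scalac plugin descriptor is missing a <name> element')
    return name.text


# plugin_name_from_descriptor('<plugin><name>my-plugin</name></plugin>') -> 'my-plugin'
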
Example #24
0
    def _run_pytest(self, fail_fast, test_targets, workdirs):
        if not test_targets:
            return PytestResult.rc(0)

        # Absolute path to chrooted test file -> Path to original test file relative to the buildroot.
        sources_map = OrderedDict()
        for t in test_targets:
            for p in t.sources_relative_to_source_root():
                sources_map[os.path.join(self._source_chroot_path,
                                         p)] = os.path.join(t.target_base, p)

        if not sources_map:
            return PytestResult.rc(0)

        with self._test_runner(workdirs, test_targets,
                               sources_map) as (pytest_binary, test_args,
                                                pytest_rootdir):
            # Validate that the user didn't provide any passthru args that conflict
            # with those we must set ourselves.
            for arg in (*self.get_passthru_args(),
                        *PyTest.global_instance().get_args()):
                if arg.startswith('--junitxml') or arg.startswith(
                        '--confcutdir'):
                    raise TaskError(
                        f'Cannot pass this arg through to pytest: {arg}')

            junitxml_path = workdirs.junitxml_path(*test_targets)

            # N.B. the `--confcutdir` here instructs pytest to stop scanning for conftest.py files at the
            # top of the buildroot. This prevents conftest.py files from outside (e.g. in users' home dirs)
            # from leaking into pants test runs. See: https://github.com/pantsbuild/pants/issues/2726
            args = [
                '-c',
                os.devnull,  # Force an empty pytest.ini
                '-o',
                'cache_dir={}'.format(
                    os.path.join(self.workdir, '.pytest_cache')),
                '--junitxml',
                junitxml_path,
                '--confcutdir',
                get_buildroot(),
                '--continue-on-collection-errors'
            ]
            if fail_fast:
                args.extend(['-x'])
            if self._debug:
                args.extend(['-s'])
            if self.get_options().colors:
                args.extend(['--color', 'yes'])

            args.extend([
                *self.get_passthru_args(),
                *PyTest.global_instance().get_args()
            ])

            args.extend(test_args)
            args.extend(sources_map.keys())

            # We want to ensure our reporting based off junit xml is from this run so kill results from
            # prior runs.
            if os.path.exists(junitxml_path):
                os.unlink(junitxml_path)

            with self._maybe_run_in_chroot():
                result = self._do_run_tests_with_args(test_targets,
                                                      pytest_binary.pex, args)

            # There was a problem prior to test execution preventing junit xml file creation so just let
            # the failure result bubble.
            if not os.path.exists(junitxml_path):
                return result

            failed_targets = self._get_failed_targets_from_junitxml(
                junitxml_path, test_targets, pytest_rootdir)

            def parse_error_handler(parse_error):
                # Simple error handler to pass to xml parsing function.
                raise TaskError('Error parsing xml file at {}: {}'.format(
                    parse_error.xml_path, parse_error.cause))

            all_tests_info = self.parse_test_info(
                junitxml_path, parse_error_handler,
                ['file', 'name', 'classname'])
            for test_name, test_info in all_tests_info.items():
                test_target = self._get_target_from_test(
                    test_info, test_targets, pytest_rootdir)
                self.report_all_info_for_single_test(self.options_scope,
                                                     test_target, test_name,
                                                     test_info)

            return result.with_failed_targets(failed_targets)
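
Both pytest examples start by building sources_map, which maps the path pytest will see for each test file back to its buildroot-relative path; that mapping is what later lets the junit-xml results be attributed to the original targets. A minimal sketch of that step (Target is a hypothetical stand-in for the Pants target type):

import os
from collections import OrderedDict, namedtuple

# Hypothetical stand-in: a target with a base directory and source-root-relative sources.
Target = namedtuple('Target', ['target_base', 'sources'])


def build_sources_map(targets, chroot_path):
    """Map each test file's path under the chroot to its buildroot-relative path."""
    sources_map = OrderedDict()
    for t in targets:
        for rel_src in t.sources:
            sources_map[os.path.join(chroot_path, rel_src)] = os.path.join(t.target_base, rel_src)
    return sources_map


# build_sources_map([Target('tests/python', ['foo/test_foo.py'])], '/tmp/chroot')
#   -> OrderedDict([('/tmp/chroot/foo/test_foo.py', 'tests/python/foo/test_foo.py')])
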
Example #25
0
    def execute(self):
        # The called binary may block for a while, allow concurrent pants activity during this pants
        # idle period.
        #
        # TODO(John Sirois): refactor lock so that I can do:
        # with self.context.lock.yield():
        #   - blocking code
        #
        # Currently re-acquiring the lock requires a path argument that was set up by the goal
        # execution engine.  I do not want task code to learn the lock location.
        # http://jira.local.twitter.com/browse/AWESOME-1317
        target = self.require_single_root_target()

        working_dir = None
        cwd_opt = self.get_options().cwd
        if cwd_opt != _CWD_NOT_PRESENT:
            working_dir = self.get_options().cwd
            if not working_dir:
                working_dir = target.address.spec_path
        logger.debug(f"Working dir is {working_dir}")

        if isinstance(target, JvmApp):
            binary = target.binary
        else:
            binary = target

        # This task is installed in the "run" goal.
        # This means that, when invoked with ./pants run, it will run regardless of whether
        # the target is a jvm target.
        # As a result, not all targets passed here will have good defaults for extra_jvm_options
        extra_jvm_options = binary.payload.get_field_value(
            "extra_jvm_options", [])

        # We can't throw if binary isn't a JvmBinary, because perhaps we were called on a
        # python_binary, in which case we have to no-op and let python_run do its thing.
        # TODO(benjy): Some more elegant way to coordinate how tasks claim targets.
        if isinstance(binary, JvmBinary):
            jvm = self.preferred_jvm_distribution_for_targets([binary])
            executor = CommandLineGrabber(
                jvm) if self.only_write_cmd_line else None
            self.context.release_lock()
            with self.context.new_workunit(name='run',
                                           labels=[WorkUnitLabel.RUN]):
                result = jvm.execute_java(
                    classpath=self.classpath([target]),
                    main=self.get_options().main or binary.main,
                    executor=executor,
                    jvm_options=self.jvm_options + extra_jvm_options,
                    args=self.args,
                    cwd=working_dir,
                    synthetic_jar_dir=self.workdir,
                    create_synthetic_jar=self.synthetic_classpath)

            if self.only_write_cmd_line:
                with safe_open(expand_path(self.only_write_cmd_line),
                               'w') as outfile:
                    outfile.write(' '.join(executor.cmd))
            elif result != 0:
                raise TaskError(
                    f'java {binary.main} ... exited non-zero ({result})',
                    exit_code=result)
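
The --cwd handling above distinguishes three cases: the option was never passed, it was passed but empty, or it was passed with a value. A tiny sketch of that fallback, with a stand-in object for the task's _CWD_NOT_PRESENT sentinel:

_CWD_NOT_PRESENT = object()  # stand-in for the task's sentinel value


def resolve_working_dir(cwd_opt, target_spec_path):
    """Option never passed -> None; passed but empty -> the target's directory; otherwise use the value."""
    if cwd_opt is _CWD_NOT_PRESENT:
        return None
    return cwd_opt or target_spec_path


# resolve_working_dir(_CWD_NOT_PRESENT, 'src/jvm/app') -> None
# resolve_working_dir('', 'src/jvm/app') -> 'src/jvm/app'
# resolve_working_dir('/tmp/run-here', 'src/jvm/app') -> '/tmp/run-here'
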
Example #26
0
 def parse_error_handler(parse_error):
     # Simple error handler to pass to xml parsing function.
     raise TaskError('Error parsing xml file at {}: {}'.format(
         parse_error.xml_path, parse_error.cause))
Example #27
0
 def get_passthru_args(self):
   if not self.supports_passthru_args():
     raise TaskError('{0} does not support passthru args.'.format(self.__class__.__name__))
   else:
     return self.context.new_options.passthru_args_for_scope(self.options_scope)
Example #28
0
 def process_result(self, result):
     if result != 0:
         raise TaskError(
             f"{self._SCALAFIX_MAIN} ... failed to fix ({result}) targets.")
Example #29
0
 def _prepare(self, context, goal_infos):
     if len(goal_infos) == 0:
         raise TaskError('No goals to prepare')
     for goal_info in goal_infos:
         yield GoalExecutor(context, goal_info.goal,
                            goal_info.tasktypes_by_name)
Example #30
0
    def _transitive_download_remote_libs(self, go_remote_libs):
        """Recursively attempt to resolve / download all remote transitive deps of go_remote_libs.

        Returns a dict<str, set<tuple<str, str>>>, which maps a global import id of a remote dep to a
        set of unresolved remote dependencies, each dependency expressed as a tuple containing the
        global import id of the dependency and the location of the expected BUILD file. If all
        transitive dependencies were successfully resolved, returns an empty dict.

        Downloads as many invalidated transitive dependencies as possible, and returns as many
        undeclared dependencies as possible. However, because the dependencies of a remote library
        can only be determined _after_ it has been downloaded, a transitive dependency of an undeclared
        remote library will never be detected.

        Because go_remote_libraries do not declare dependencies (rather, they are inferred), this method
        injects all successfully resolved transitive dependencies into the build graph.
        """
        if not go_remote_libs:
            return {}

        # TODO(cgibb): If performance is an issue (unlikely), use dict instead of set,
        # mapping import_id to resolved target to avoid resolving the same import_id's
        # multiple times.
        resolved_remote_libs = set()
        undeclared_deps = defaultdict(set)

        # Remove duplicate remote libraries.
        with self.invalidated(go_remote_libs) as invalidation_check:
            for vt in invalidation_check.all_vts:
                import_id = self.global_import_id(vt.target)
                dest_dir = os.path.join(vt.results_dir, import_id)

                if not vt.valid:
                    # Only download invalidated remote libraries.
                    rev = vt.target.payload.get_field_value('rev')
                    zip_url = vt.target.payload.get_field_value(
                        'zip_url').format(
                            id=import_id,
                            rev=rev,
                            host=self.get_options().remote_lib_host)
                    if not zip_url:
                        raise TaskError(
                            'No zip url specified for go_remote_library {id}'.
                            format(id=import_id))
                    self._download_zip(zip_url, dest_dir)

                self.context.products.get_data('go_remote_lib_src')[
                    vt.target] = dest_dir

                for remote_import_id in self._get_remote_import_ids(dest_dir):
                    try:
                        remote_lib = self._resolve_and_inject(
                            vt.target, remote_import_id)
                        resolved_remote_libs.add(remote_lib)
                    except self.UndeclaredRemoteLibError as e:
                        undeclared_deps[import_id].add(
                            (remote_import_id, e.spec_path))

        # Recurse after the invalidated block, so the libraries we downloaded are now "valid"
        # and thus we don't try to download a library twice.
        trans_undeclared_deps = self._transitive_download_remote_libs(
            resolved_remote_libs)
        undeclared_deps.update(trans_undeclared_deps)

        return undeclared_deps
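
The recursion above amounts to a fixed-point walk: download whatever was newly discovered, record imports that were never declared, and recurse only on the libraries resolved in this pass so nothing is downloaded twice. A simplified, self-contained sketch of that walk over an in-memory dependency table (deps_of, declared, and the import ids in the usage note are hypothetical):

from collections import defaultdict


def transitive_resolve(roots, deps_of, declared):
    """Walk remote deps breadth-wise; return undeclared deps keyed by the importing library."""
    undeclared = defaultdict(set)
    resolved = set()
    frontier = set(roots)
    while frontier:
        next_frontier = set()
        for lib in frontier:
            resolved.add(lib)  # stands in for "downloaded and injected into the build graph"
            for dep in deps_of.get(lib, ()):
                if dep not in declared:
                    undeclared[lib].add(dep)
                elif dep not in resolved:
                    next_frontier.add(dep)
        frontier = next_frontier
    return dict(undeclared)


# transitive_resolve(['github.com/a'],
#                    {'github.com/a': ['github.com/b', 'github.com/x']},
#                    declared={'github.com/a', 'github.com/b'})
#   -> {'github.com/a': {'github.com/x'}}
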