def test_no_record_classpath(self):
   """With --no-compile-zinc-capture-classpath, no per-target classpath file is emitted."""
   target_spec = 'testprojects/src/java/org/pantsbuild/testproject/printversion:printversion'
   target_id = Target.compute_target_id(Address.parse(target_spec))
   classpath_filename = '{}.txt'.format(target_id)
   with self.do_test_compile(target_spec,
                             expected_files=['PrintVersion.class'],
                             extra_args=['--no-compile-zinc-capture-classpath']) as found:
     # assertNotIn gives a far more informative failure message than assertFalse(x in y).
     self.assertNotIn(classpath_filename, found)
 def add(self, targets, cache_key, valid, phase):
   """Record one cache-check entry for the given targets."""
   # Derive a stable, combined id by hashing the ids of the targets involved.
   combined_hash = Target.identify(targets)
   entry = self.TaskEntry(targets_hash=combined_hash,
                          target_ids=[target.id for target in targets],
                          cache_key_id=cache_key.id,
                          cache_key_hash=cache_key.hash,
                          valid=valid,
                          phase=phase)
   self._entries.append(entry)
Exemple #3
0
 def _maybe_emit_junit_xml(self, targets):
   """Yield the pytest args requesting a junit xml report, when configured and applicable."""
   junitxml_args = []
   base_dir = self.get_options().junit_xml_dir
   if base_dir and targets:
     base_dir = os.path.realpath(base_dir)
     report_path = os.path.join(base_dir, Target.maybe_readable_identify(targets) + '.xml')
     safe_mkdir(os.path.dirname(report_path))
     junitxml_args.append('--junitxml={}'.format(report_path))
   yield junitxml_args
Exemple #4
0
  def extra_products(self, target):
   """Override extra_products to produce an annotation processor information file."""
   ret = []
   # Only annotation-processor targets that actually declare processors get an info file.
   if isinstance(target, AnnotationProcessor) and target.processors:
     root = os.path.join(self._processor_info_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, self._PROCESSOR_INFO_FILE)
     # Side effect: writes the processor list to disk for downstream consumers.
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return super(AptCompile, self).extra_products(target) + ret
Exemple #5
0
  def subsystems(cls):
    """Initialize these subsystems when running your test.

    If your test instantiates a target type that depends on any subsystems, those subsystems need to
    be initialized in your test. You can override this property to return the necessary subsystem
    classes.

    :rtype: list of type objects, all subclasses of Subsystem
    """
    # Delegate to Target so tests get every subsystem the Target type itself requires.
    return Target.subsystems()
Exemple #6
0
 def assert_closure(self, expected_targets, roots, include_scopes=None, exclude_scopes=None,
                    respect_intransitive=True, ordered=False):
   """Assert the closure of `roots` under the given scope filters equals `expected_targets`.

   When `ordered` is true the comparison is order-sensitive (via OrderedSet).
   """
   set_type = OrderedSet if ordered else set
   result = set_type(Target.closure_for_targets(
     target_roots=roots,
     include_scopes=include_scopes,
     exclude_scopes=exclude_scopes,
     respect_intransitive=respect_intransitive,
   ))
   # assertEquals is a deprecated unittest alias; assertEqual is the supported spelling.
   self.assertEqual(set_type(expected_targets), result)
Exemple #7
0
  def _collect_internal_deps(self, targets):
    """Collect one level of dependencies from the given targets, and then transitively walk.

    This is different from directly executing `Target.closure_for_targets`, because the
    resulting set will not include the roots unless the roots depend on one another.
    """
    direct_deps = {dep for root in targets for dep in root.dependencies}
    return Target.closure_for_targets(direct_deps)
 def add(self, targets, cache_key, valid, phase=None):
   """Record one cache-check entry; a descriptive `phase` is required despite the default."""
   if not phase:
     # Fixed: the example list in this message was missing its closing parenthesis.
     raise ValueError('Must specify a descriptive phase= value (e.g. "init", "pre-check", ...)')
   # Manufacture an id from a hash of the target ids
   targets_hash = Target.identify(targets)
   self._entries.append(self.TaskEntry(targets_hash=targets_hash,
                                       target_ids=[t.id for t in targets],
                                       cache_key_id=cache_key.id,
                                       cache_key_hash=cache_key.hash,
                                       valid=valid,
                                       phase=phase))
  def find_all_relevant_resources_targets(self):
    # NB: Ordering isn't relevant here, because it is applied during the dep walk to
    # consume from the runtime_classpath.
    jvm_targets = self.context.targets(predicate=lambda t: isinstance(t, JvmTarget))

    resources_targets = OrderedSet()
    for candidate in Target.closure_for_targets(jvm_targets, bfs=True):
      if isinstance(candidate, Resources):
        resources_targets.add(candidate)
    return resources_targets
 def test_record_classpath(self):
   """With --compile-zinc-capture-classpath, a per-target classpath file must be captured."""
   target_spec = 'testprojects/src/java/org/pantsbuild/testproject/printversion:printversion'
   target_id = Target.compute_target_id(Address.parse(target_spec))
   classpath_filename = '{}.txt'.format(target_id)
   with self.do_test_compile(target_spec,
                             expected_files=[classpath_filename, 'PrintVersion.class'],
                             extra_args=['--compile-zinc-capture-classpath']) as found:
     classpath_file = self.get_only(found, classpath_filename)
     expected_suffix = os.path.join('compile_classpath', classpath_filename)
     self.assertTrue(classpath_file.endswith(expected_suffix))
     with open(classpath_file, 'r') as f:
       self.assertIn(target_id, f.read())
Exemple #11
0
  def _isolation(self, all_targets):
    """Yield (output_dir, do_report, coverage) for an isolated junit run.

    Creates a per-run output dir keyed by the identity of `all_targets`, optionally wires up
    Cobertura coverage instrumentation, and on exit republishes this run's outputs at the
    stable workdir root. NOTE(review): a generator that yields once — presumably decorated
    with @contextmanager at the definition site; confirm in the full file.
    """
    run_dir = '_runs'
    output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
    safe_mkdir(output_dir, clean=True)

    coverage = None
    options = self.get_options()
    if options.coverage or options.is_flagged('coverage_open'):
      coverage_processor = options.coverage_processor
      if coverage_processor == 'cobertura':
        settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
        coverage = Cobertura(settings)
      else:
        raise TaskError('unknown coverage processor {0}'.format(coverage_processor))

    self.context.release_lock()
    if coverage:
      coverage.instrument(targets=all_targets,
                          compute_junit_classpath=lambda: self.classpath(all_targets),
                          execute_java_for_targets=self.execute_java_for_coverage)

    def do_report(exc=None):
      # Emit the coverage report (recording any test failure) and, if configured, HTML output.
      if coverage:
        coverage.report(all_targets, self.execute_java_for_coverage, tests_failed_exception=exc)
      if self._html_report:
        html_file_path = JUnitHtmlReport().report(output_dir, os.path.join(output_dir, 'reports'))
        if self._open:
          desktop.ui_open(html_file_path)

    try:
      yield output_dir, do_report, coverage
    finally:
      # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
      # defacto public API and so we implement that behavior here to maintain backwards
      # compatibility for non-pants report file consumers.
      # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
      # output: https://github.com/pantsbuild/pants/issues/3879
      lock_file = '.file_lock'
      with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
        # Kill everything except the isolated runs/ dir.
        for name in os.listdir(self.workdir):
          path = os.path.join(self.workdir, name)
          if name not in (run_dir, lock_file):
            if os.path.isdir(path):
              safe_rmtree(path)
            else:
              os.unlink(path)

        # Link all the isolated run/ dir contents back up to the stable workdir
        for name in os.listdir(output_dir):
          path = os.path.join(output_dir, name)
          os.symlink(path, os.path.join(self.workdir, name))
Exemple #12
0
  def combine_cache_keys(cache_keys):
    """Returns a cache key for a list of target sets that already have cache keys.

    This operation is 'idempotent' in the sense that if cache_keys contains a single key
    then that key is returned.

    Note that this operation is commutative but not associative.  We use the term 'combine' rather
    than 'merge' or 'union' to remind the user of this. Associativity is not a necessary property,
    in practice.
    """
    if len(cache_keys) == 1:
      return cache_keys[0]
    # Combine the ids, and hash the (order-insensitive) sorted hashes.
    combined_id = Target.maybe_readable_combine_ids(key.id for key in cache_keys)
    combined_hash = hash_all(sorted(key.hash for key in cache_keys))
    return CacheKey(combined_id, combined_hash)
Exemple #13
0
 def assert_closure_dfs(self,
                        expected_targets,
                        roots,
                        include_scopes=None,
                        exclude_scopes=None,
                        respect_intransitive=True,
                        ordered=False,
                        postorder=None):
     """Assert the DFS closure of `roots` equals `expected_targets`.

     `postorder` selects traversal order; `ordered` makes the comparison order-sensitive.
     """
     set_type = OrderedSet if ordered else set
     result = set_type(
         Target.closure_for_targets(
             target_roots=roots,
             include_scopes=include_scopes,
             exclude_scopes=exclude_scopes,
             respect_intransitive=respect_intransitive,
             postorder=postorder))
     # assertEquals is a deprecated unittest alias; assertEqual is the supported spelling.
     self.assertEqual(set_type(expected_targets), result)
Exemple #14
0
 def _check_for_untagged_dependencies(
     self, *, tagged_target_roots: Iterable[Target], tag_name: str
 ) -> None:
     """Warn about non-synthetic python dependencies of the tagged roots that lack `tag_name`."""
     untagged_dependencies = {
         tgt
         for tgt in Target.closure_for_targets(target_roots=tagged_target_roots)
         if tag_name not in tgt.tags and self.is_non_synthetic_python_target(tgt)
     }
     if not untagged_dependencies:
         return
     # Sort the spec strings rather than the Target objects: strings are always orderable,
     # whereas Target ordering is not a guaranteed part of its interface.
     formatted_targets = "\n".join(sorted(tgt.address.spec for tgt in untagged_dependencies))
     self.context.log.warn(
         f"[WARNING]: The following targets are not marked with the tag name `{tag_name}`, "
         f"but are dependencies of targets that are type checked. MyPy will check these dependencies, "
         f"inferring `Any` where possible. You are encouraged to properly type check "
         f"these dependencies.\n{formatted_targets}"
     )
Exemple #15
0
  def combine_cache_keys(cache_keys):
    """Returns a cache key for a list of target sets that already have cache keys.

    This operation is 'idempotent' in the sense that if cache_keys contains a single key
    then that key is returned.

    Note that this operation is commutative but not associative.  We use the term 'combine' rather
    than 'merge' or 'union' to remind the user of this. Associativity is not a necessary property,
    in practice.
    """
    if len(cache_keys) == 1:
      return cache_keys[0]
    combined_id = Target.maybe_readable_combine_ids(key.id for key in cache_keys)
    combined_hash = hash_all(sorted(key.hash for key in cache_keys))
    total_chunking_units = sum(key.num_chunking_units for key in cache_keys)
    return CacheKey(combined_id, combined_hash, total_chunking_units)
Exemple #16
0
    def add_sources_from(self, tgt: Target) -> None:
        """Copy each of `tgt`'s sources into the builder, failing loudly on a copy error."""
        dump_source = _create_source_dumper(self._builder, tgt)
        self._log.debug(f"  Dumping sources: {tgt}")
        for relpath in tgt.sources_relative_to_buildroot():
            try:
                dump_source(relpath)
            except OSError:
                self._log.error(
                    f"Failed to copy {relpath} for target {tgt.address.spec}")
                raise

        uses_old_style_resources = (
            getattr(tgt, "_resource_target_specs", None)
            or getattr(tgt, "_synthetic_resources_target", None))
        if uses_old_style_resources:
            # No one should be on old-style resources any more.  And if they are,
            # switching to the new python pipeline will be a great opportunity to fix that.
            raise TaskError(
                f"Old-style resources not supported for target {tgt.address.spec}. Depend on resources() "
                "targets instead.")
Exemple #17
0
  def expose_results(self, invalid_tgts, partition, workdirs):
    """Copy this partition's junit xml and coverage outputs to their external locations."""
    xml_out_dir = self.get_options().junit_xml_dir
    if xml_out_dir:
      # Either we just ran pytest for a set of invalid targets and generated a junit xml file
      # specific to that (sub)set or else we hit the cache for the whole partition and skipped
      # running pytest, simply retrieving the partition's full junit xml file.
      junitxml_path = workdirs.junitxml_path(*(invalid_tgts or partition))

      safe_mkdir(xml_out_dir)
      shutil.copy2(junitxml_path, xml_out_dir)
    if self.get_options().coverage:
      configured_dir = self.get_options().coverage_output_dir
      if configured_dir:
        coverage_dir = configured_dir
      else:
        partition_relpath = Target.maybe_readable_identify(partition)
        pants_distdir = self.context.options.for_global_scope().pants_distdir
        coverage_dir = os.path.join(pants_distdir, 'coverage', partition_relpath)
      mergetree(workdirs.coverage_path, coverage_dir)
Exemple #18
0
  def expose_results(self, invalid_tgts, partition, workdirs):
    """Publish junit xml and coverage artifacts for the partition to external dirs."""
    junit_dir = self.get_options().junit_xml_dir
    if junit_dir:
      # Either we just ran pytest for a set of invalid targets and generated a junit xml file
      # specific to that (sub)set or else we hit the cache for the whole partition and skipped
      # running pytest, simply retrieving the partition's full junit xml file.
      source_xml = workdirs.junitxml_path(*(invalid_tgts or partition))

      safe_mkdir(junit_dir)
      shutil.copy2(source_xml, junit_dir)
    if self.get_options().coverage:
      explicit_dir = self.get_options().coverage_output_dir
      if not explicit_dir:
        ident = Target.maybe_readable_identify(partition)
        pants_distdir = self.context.options.for_global_scope().pants_distdir
        destination = os.path.join(pants_distdir, 'coverage', ident)
      else:
        destination = explicit_dir
      mergetree(workdirs.coverage_path, destination)
Exemple #19
0
  def _isolation(self, all_targets):
    """Yield (output_dir, reports, coverage) for an isolated junit run.

    Sets up a per-run output dir keyed by the identity of `all_targets`, selects html/coverage
    reporters (null-object fallbacks otherwise), and on exit republishes this run's outputs at
    the stable workdir root. NOTE(review): yields once — presumably wrapped by @contextmanager
    at the definition site; confirm in the full file.
    """
    run_dir = '_runs'
    output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
    safe_mkdir(output_dir, clean=False)

    if self._html_report:
      junit_html_report = JUnitHtmlReport.create(output_dir, self.context.log)
    else:
      junit_html_report = NoJunitHtmlReport()

    if self.get_options().coverage or self.get_options().is_flagged('coverage_open'):
      settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
      coverage = Cobertura(settings, all_targets, self.execute_java_for_coverage)
    else:
      coverage = NoCoverage()

    reports = self.Reports(junit_html_report, coverage)

    self.context.release_lock()
    try:
      yield output_dir, reports, coverage
    finally:
      # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
      # defacto public API and so we implement that behavior here to maintain backwards
      # compatibility for non-pants report file consumers.
      # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
      # output: https://github.com/pantsbuild/pants/issues/3879
      lock_file = '.file_lock'
      with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
        # Kill everything except the isolated `_runs/` dir.
        for name in os.listdir(self.workdir):
          path = os.path.join(self.workdir, name)
          if name not in (run_dir, lock_file):
            if os.path.isdir(path):
              safe_rmtree(path)
            else:
              os.unlink(path)

        # Link all the isolated run/ dir contents back up to the stable workdir
        for name in os.listdir(output_dir):
          path = os.path.join(output_dir, name)
          os.symlink(path, os.path.join(self.workdir, name))
Exemple #20
0
  def _isolation(self, all_targets):
    """Yield (output_dir, reports, coverage) for an isolated junit run (duplicate variant).

    Per-run output dir is keyed by the identity of `all_targets`; html/coverage reporters fall
    back to null objects; on exit the run's outputs are republished at the stable workdir root.
    NOTE(review): yields once — presumably @contextmanager-decorated; confirm in the full file.
    """
    run_dir = '_runs'
    output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
    safe_mkdir(output_dir, clean=False)

    if self._html_report:
      junit_html_report = JUnitHtmlReport.create(output_dir, self.context.log)
    else:
      junit_html_report = NoJunitHtmlReport()

    if self.get_options().coverage or self.get_options().is_flagged('coverage_open'):
      settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
      coverage = Cobertura(settings, all_targets, self.execute_java_for_coverage)
    else:
      coverage = NoCoverage()

    reports = self.Reports(junit_html_report, coverage)

    self.context.release_lock()
    try:
      yield output_dir, reports, coverage
    finally:
      # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
      # defacto public API and so we implement that behavior here to maintain backwards
      # compatibility for non-pants report file consumers.
      # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
      # output: https://github.com/pantsbuild/pants/issues/3879
      lock_file = '.file_lock'
      with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
        # Kill everything except the isolated `_runs/` dir.
        for name in os.listdir(self.workdir):
          path = os.path.join(self.workdir, name)
          if name not in (run_dir, lock_file):
            if os.path.isdir(path):
              safe_rmtree(path)
            else:
              os.unlink(path)

        # Link all the isolated run/ dir contents back up to the stable workdir
        for name in os.listdir(output_dir):
          path = os.path.join(output_dir, name)
          os.symlink(path, os.path.join(self.workdir, name))
Exemple #21
0
    def _isolation(self, per_target, all_targets):
        """Yield (output_dir, reports, coverage) for an isolated junit run.

        The output dir is keyed by target identity plus isolation mode and batch size; on exit
        the current reports are linked into the dist dir under an inter-process file lock.
        NOTE(review): yields once — presumably @contextmanager-decorated; confirm in the file.
        """
        run_dir = '_runs'
        mode_dir = 'isolated' if per_target else 'combined'
        batch_dir = str(self._batch_size) if self._batched else 'all'
        output_dir = os.path.join(self.workdir, run_dir,
                                  Target.identify(all_targets), mode_dir,
                                  batch_dir)
        safe_mkdir(output_dir, clean=False)

        if self._html_report:
            junit_html_report = JUnitHtmlReport.create(
                xml_dir=output_dir,
                open_report=self.get_options().open,
                logger=self.context.log,
                error_on_conflict=self.get_options(
                ).html_report_error_on_conflict)
        else:
            junit_html_report = NoJunitHtmlReport()

        coverage = CodeCoverage.global_instance().get_coverage_engine(
            self, output_dir, all_targets, self.execute_java_for_coverage)

        reports = self.Reports(junit_html_report, coverage)

        self.context.release_lock()
        try:
            yield output_dir, reports, coverage
        finally:
            lock_file = '.file_lock'
            preserve = (run_dir, lock_file)
            dist_dir = os.path.join(
                self.get_options().pants_distdir,
                os.path.relpath(self.workdir,
                                self.get_options().pants_workdir))

            # Serialize report linking across concurrent pants processes.
            with OwnerPrintingInterProcessFileLock(
                    os.path.join(dist_dir, lock_file)):
                self._link_current_reports(report_dir=output_dir,
                                           link_dir=dist_dir,
                                           preserve=preserve)
Exemple #22
0
  def _isolation(self, per_target, all_targets):
    """Yield (output_dir, reports, coverage) for an isolated junit run.

    Output dir is keyed by target identity, isolation mode, and batch size; on exit the
    current reports are linked into the dist dir under an inter-process file lock.
    NOTE(review): yields once — presumably @contextmanager-decorated; confirm in the file.
    """
    run_dir = '_runs'
    mode_dir = 'isolated' if per_target else 'combined'
    batch_dir = str(self._batch_size) if self._batched else 'all'
    output_dir = os.path.join(self.workdir,
                              run_dir,
                              Target.identify(all_targets),
                              mode_dir,
                              batch_dir)
    safe_mkdir(output_dir, clean=False)

    if self._html_report:
      junit_html_report = JUnitHtmlReport.create(xml_dir=output_dir,
                                                 open_report=self.get_options().open,
                                                 logger=self.context.log,
                                                 error_on_conflict=True)
    else:
      junit_html_report = NoJunitHtmlReport()

    coverage = CodeCoverage.global_instance().get_coverage_engine(
      self,
      output_dir,
      all_targets,
      self.execute_java_for_coverage)

    reports = self.Reports(junit_html_report, coverage)

    self.context.release_lock()
    try:
      yield output_dir, reports, coverage
    finally:
      lock_file = '.file_lock'
      preserve = (run_dir, lock_file)
      dist_dir = os.path.join(self.get_options().pants_distdir,
                              os.path.relpath(self.workdir, self.get_options().pants_workdir))

      # Serialize report linking across concurrent pants processes.
      with OwnerPrintingInterProcessFileLock(os.path.join(dist_dir, lock_file)):
        self._link_current_reports(report_dir=output_dir, link_dir=dist_dir,
                                   preserve=preserve)
  def test_classpath_does_not_include_extra_classes_dirs(self):
    """Each captured classpath file must mention only its own target's id."""
    target_rel_spec = 'testprojects/src/java/org/pantsbuild/testproject/phrases:'
    target_names = ['there-was-a-duck', 'lesser-of-two', 'once-upon-a-time', 'ten-thousand']
    classpath_file_by_target_id = {}
    for target_name in target_names:
      address = Address.parse('{}{}'.format(target_rel_spec, target_name))
      target_id = Target.compute_target_id(address)
      classpath_file_by_target_id[target_id] = '{}.txt'.format(target_id)

    with self.do_test_compile(target_rel_spec,
                              expected_files=list(classpath_file_by_target_id.values()),
                              extra_args=['--compile-zinc-capture-classpath']) as found:
      for target_id, filename in classpath_file_by_target_id.items():
        with open(self.get_only(found, filename), 'r') as f:
          contents = f.read()

        self.assertIn(target_id, contents)

        for other_id in set(classpath_file_by_target_id) - {target_id}:
          self.assertNotIn(other_id, contents)
  def test_classpath_does_not_include_extra_classes_dirs(self):
    """Each captured classpath file must mention only its own target's id."""
    target_rel_spec = 'testprojects/src/java/org/pantsbuild/testproject/phrases:'
    classpath_file_by_target_id = {}
    for target_name in ['there-was-a-duck',
      'lesser-of-two',
      'once-upon-a-time',
      'ten-thousand']:
      target_id = Target.compute_target_id(Address.parse('{}{}'
        .format(target_rel_spec, target_name)))
      classpath_file_by_target_id[target_id] = '{}.txt'.format(target_id)

    # Materialize the dict view as a list: matches the sibling test and avoids handing a lazy
    # dict_values view to do_test_compile (safe if it indexes, re-iterates, or mutates the dict).
    with self.do_test_compile(target_rel_spec,
      expected_files=list(classpath_file_by_target_id.values()),
      extra_args=['--compile-zinc-capture-classpath']) as found:
      for target_id, filename in classpath_file_by_target_id.items():
        found_classpath_file = self.get_only(found, filename)
        with open(found_classpath_file, 'r') as f:
          contents = f.read()

          self.assertIn(target_id, contents)

          other_target_ids = set(classpath_file_by_target_id.keys()) - {target_id}
          for other_id in other_target_ids:
            self.assertNotIn(other_id, contents)
Exemple #25
0
    def update_dependee_references(self):
        """Rewrite dependees' BUILD files, replacing references to the old address with the new.

        Runs Buildozer once per dependee for each of the two spec spellings (full spec and
        local `:name` shorthand).
        """
        # A throwaway Target is built purely as a lookup key into the dependency graph.
        # NOTE(review): build_graph=[] and **{} look like placeholder arguments — confirm the
        # graph keys only on name/address.
        dependee_targets = self.dependency_graph()[Target(
            name=self._from_address.target_name,
            address=self._from_address,
            build_graph=[],
            **{})]

        # Suppress warning-level logging while Buildozer runs; restored below.
        logging.disable(logging.WARNING)

        for concrete_target in dependee_targets:
            # Try both spellings a BUILD file may use: the full spec and the local :name form.
            for formats in [{
                    'from': self._from_address.spec,
                    'to': self._to_address.spec
            }, {
                    'from':
                    ':{}'.format(self._from_address.target_name),
                    'to':
                    ':{}'.format(self._to_address.target_name)
            }]:
                Buildozer.execute_binary('replace dependencies {} {}'.format(
                    formats['from'], formats['to']),
                                         spec=concrete_target.address.spec)

        logging.disable(logging.NOTSET)
Exemple #26
0
 def for_partition(cls, work_dir, partition):
   """Create an instance whose root dir under `work_dir` is keyed by the partition identity."""
   partition_dir = os.path.join(work_dir, Target.maybe_readable_identify(partition))
   safe_mkdir(partition_dir, clean=False)
   return cls(root_dir=partition_dir, partition=partition)
Exemple #27
0
    def test_validate_target_representation_args_invalid_exactly_one(self):
        """Exactly one of kwargs/payload must be given; neither and both should assert."""
        for kwargs, payload in ((None, None), ({}, Payload())):
            with self.assertRaises(AssertionError):
                Target._validate_target_representation_args(kwargs, payload)
Exemple #28
0
 def _get_junit_xml_path(self, targets):
   """Return (and create the parent dir for) the junit xml path for these targets."""
   report_name = 'TEST-{}.xml'.format(Target.maybe_readable_identify(targets))
   xml_path = os.path.join(self.workdir, 'junitxml', report_name)
   safe_mkdir_for(xml_path)
   return xml_path
Exemple #29
0
 def _get_junit_xml_path(self, targets):
     xml_path = os.path.join(
         self.workdir, 'junitxml',
         'TEST-{}.xml'.format(Target.maybe_readable_identify(targets)))
     safe_mkdir_for(xml_path)
     return xml_path
Exemple #30
0
 def _collect_targets(self, root_targets, **kwargs):
     """Return the transitive closure of the given root targets."""
     closure = Target.closure_for_targets(target_roots=root_targets, **kwargs)
     return closure
 def _library_targets(self, managed_jar_dependencies):
   """Yield the closure of the targets named by the managed library specs."""
   resolved = [t
               for spec in managed_jar_dependencies.library_specs
               for t in self.context.resolve(spec)]
   for member in Target.closure_for_targets(resolved):
     yield member
Exemple #32
0
  def test_validate_target_representation_args_invalid_exactly_one(self):
    """Neither and both of kwargs/payload are invalid combinations; each should assert."""
    for bad_kwargs, bad_payload in ((None, None), ({}, Payload())):
      with self.assertRaises(AssertionError):
        Target._validate_target_representation_args(bad_kwargs, bad_payload)
Exemple #33
0
 def skip_instrumentation(**kwargs):
     """True for synthetic (.pants.d.gen) targets and for blacklisted specs."""
     address = kwargs["address"]
     is_synthetic = Target.compute_target_id(address).startswith(".pants.d.gen")
     return is_synthetic or ScoveragePlatform.global_instance().is_blacklisted(address.spec)
    def _alternate_target_roots(cls, options, address_mapper, build_graph):
        """Inject synthetic classpath targets for JVM tools whose dep specs don't resolve.

        For each registered JVM tool, if the user defined a real target at the tool's dep spec
        we leave it alone; otherwise we fill the hole with a JarLibrary built from the tool's
        default classpath (or a bare Target when the default classpath is empty). Always
        returns None — the build graph is mutated in place rather than returning roots.
        """
        processed = set()
        for jvm_tool in JvmToolMixin.get_registered_tools():
            dep_spec = jvm_tool.dep_spec(options)
            dep_address = Address.parse(dep_spec)
            # Some JVM tools are requested multiple times, we only need to handle them once.
            if dep_address not in processed:
                processed.add(dep_address)
                try:
                    if build_graph.contains_address(
                            dep_address) or address_mapper.resolve(
                                dep_address):
                        # The user has defined a tool classpath override - we let that stand.
                        continue
                except AddressLookupError as e:
                    # The spec resolves to nothing: either error out or synthesize a target.
                    if jvm_tool.classpath is None:
                        raise cls._tool_resolve_error(e, dep_spec, jvm_tool)
                    else:
                        if not jvm_tool.is_default(options):
                            # The user specified a target spec for this jvm tool that doesn't actually exist.
                            # We want to error out here instead of just silently using the default option while
                            # appearing to respect their config.
                            raise cls.ToolResolveError(
                                dedent("""
                  Failed to resolve target for tool: {tool}. This target was obtained from
                  option {option} in scope {scope}.

                  Make sure you didn't make a typo in the tool's address. You specified that the
                  tool should use the target found at "{tool}".

                  This target has a default classpath configured, so you can simply remove:
                    [{scope}]
                    {option}: {tool}
                  from pants.ini (or any other config file) to use the default tool.

                  The default classpath is: {default_classpath}

                  Note that tool target addresses in pants.ini should be specified *without* quotes.
                """).strip().format(tool=dep_spec,
                                    option=jvm_tool.key,
                                    scope=jvm_tool.scope,
                                    default_classpath=':'.join(
                                        map(str, jvm_tool.classpath or ()))))
                        if jvm_tool.classpath:
                            tool_classpath_target = JarLibrary(
                                name=dep_address.target_name,
                                address=dep_address,
                                build_graph=build_graph,
                                jars=jvm_tool.classpath)
                        else:
                            # The tool classpath is empty by default, so we just inject a dummy target that
                            # ivy resolves as the empty list classpath.  JarLibrary won't do since it requires
                            # one or more jars, so we just pick a target type ivy has no resolve work to do for.
                            tool_classpath_target = Target(
                                name=dep_address.target_name,
                                address=dep_address,
                                build_graph=build_graph)
                        build_graph.inject_target(tool_classpath_target,
                                                  synthetic=True)

        # We use the trick of not returning alternate roots, but instead just filling the dep_spec
        # holes with a JarLibrary built from a tool's default classpath JarDependency list if there is
        # no over-riding targets present. This means we do modify the build_graph, but we at least do
        # it at a time in the engine lifecycle cut out for handling that.
        return None
 def _library_targets(self, managed_jar_dependencies):
   """Generate every target in the closure of the resolved library specs."""
   roots = []
   for spec in managed_jar_dependencies.library_specs:
     roots.extend(self.context.resolve(spec))
   for resolved_target in Target.closure_for_targets(roots):
     yield resolved_target
Exemple #36
0
 def for_partition(cls, work_dir, partition):
     """Build an instance rooted at a dir keyed by the partition's identity."""
     ident = Target.maybe_readable_identify(partition)
     root_dir = os.path.join(work_dir, ident)
     safe_mkdir(root_dir, clean=False)
     return cls(root_dir=root_dir, partition=partition)
Exemple #37
0
 def identify(targets):
   """Return an (org, name) pair identifying the given targets.

   A single published jvm target uses its own provides coordinates; anything else falls back
   to the internal org plus a combined identity.
   """
   targets = list(targets)
   sole = targets[0] if len(targets) == 1 else None
   if sole is not None and sole.is_jvm and getattr(sole, 'provides', None):
     return sole.provides.org, sole.provides.name
   return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(targets)
Exemple #38
0
 def target_set_id(self, *targets):
   """Identify the given targets, defaulting to this instance's partition when none are given."""
   chosen = targets or self.partition
   return Target.maybe_readable_identify(chosen)
Exemple #39
0
 def test_validate_target_representation_args_valid(self):
   """Supplying exactly one of kwargs/payload must not raise."""
   Target._validate_target_representation_args(kwargs={}, payload=None)
   Target._validate_target_representation_args(payload=Payload(), kwargs=None)
Exemple #40
0
 def __str__(self):
   """Human-readable summary: the context's target-set identity and its targets."""
   return 'Context(id:{}, targets:{})'.format(Target.identify(self.targets()), self.targets())
Exemple #41
0
  def pre_compile_jobs(self, counter):
    """Return the once-per-run Job that metacp's the JDK jars into semanticdb form.

    The heavy lifting happens lazily inside the returned Job's closure, not here.
    """
    # Create a target for the jdk outlining so that it'll only be done once per run.
    target = Target('jdk', Address('', 'jdk'), self.context.build_graph)
    index_dir = os.path.join(self.versioned_workdir, '--jdk--', 'index')

    def work_for_vts_rsc_jdk():
      # Runs metacp over the JVM's own lib jars, then metai-indexes the results, caching the
      # resulting classpath on self for later compile jobs.
      distribution = self._get_jvm_distribution()
      jvm_lib_jars_abs = distribution.find_libs(['rt.jar', 'dt.jar', 'jce.jar', 'tools.jar'])
      self._jvm_lib_jars_abs = jvm_lib_jars_abs

      metacp_inputs = tuple(jvm_lib_jars_abs)

      counter_val = str(counter()).rjust(counter.format_length(), ' ' if PY3 else b' ')
      counter_str = '[{}/{}] '.format(counter_val, counter.size)
      self.context.log.info(
        counter_str,
        'Metacp-ing ',
        items_to_report_element(metacp_inputs, 'jar'),
        ' in the jdk')

      # NB: Metacp doesn't handle the existence of possibly stale semanticdb jars,
      # so we explicitly clean the directory to keep it happy.
      safe_mkdir(index_dir, clean=True)

      with Timer() as timer:
        # Step 1: Convert classpath to SemanticDB
        # ---------------------------------------
        rsc_index_dir = fast_relpath(index_dir, get_buildroot())
        args = [
          '--verbose',
          # NB: The directory to dump the semanticdb jars generated by metacp.
          '--out', rsc_index_dir,
          os.pathsep.join(metacp_inputs),
        ]
        metacp_wu = self._runtool(
          'scala.meta.cli.Metacp',
          'metacp',
          args,
          distribution,
          tgt=target,
          input_files=tuple(
            # NB: no input files because the jdk is expected to exist on the system in a known
            #     location.
            #     Related: https://github.com/pantsbuild/pants/issues/6416
          ),
          output_dir=rsc_index_dir)
        metacp_stdout = stdout_contents(metacp_wu)
        metacp_result = json.loads(metacp_stdout)

        metai_classpath = self._collect_metai_classpath(metacp_result, jvm_lib_jars_abs)

        # Step 1.5: metai Index the semanticdbs
        # -------------------------------------
        self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt=target)

        self._jvm_lib_metacp_classpath = [os.path.join(get_buildroot(), x) for x in metai_classpath]

      self._record_target_stats(target,
        len(self._jvm_lib_metacp_classpath),
        len([]),
        timer.elapsed,
        False,
        'metacp'
      )

    return [
      Job(
        'metacp(jdk)',
        functools.partial(
          work_for_vts_rsc_jdk
        ),
        [],
        self._size_estimator([]),
      ),
    ]
Exemple #42
0
  def test_validate_target_representation_args_invalid_type(self):
    """Wrongly-typed kwargs or payload must trip the validation assertions."""
    for kwargs, payload in ((Payload(), None), (None, {})):
      with self.assertRaises(AssertionError):
        Target._validate_target_representation_args(kwargs=kwargs, payload=payload)
Exemple #43
0
 def generate_ivy(cls, targets, jars, excludes, ivyxml, confs, resolve_hash_name=None,
                  pinned_artifacts=None, jar_dep_manager=None):
   """Generate an ivy resolve file for the given jars and excludes.

   When ``resolve_hash_name`` is not supplied, one is synthesized from the
   targets being resolved.
   """
   hash_name = resolve_hash_name or Target.maybe_readable_identify(targets)
   return cls._generate_resolve_ivy(jars, excludes, ivyxml, confs, hash_name,
                                    pinned_artifacts, jar_dep_manager)
Exemple #44
0
    def _maybe_emit_coverage_data(self, targets, pex, workunit):
        """Context manager yielding extra pytest args to enable coverage.

        Yields ``[]`` when the coverage option is unset. Otherwise yields the
        pytest-cov args and, on exit, combines the raw coverage data and emits
        console, html and xml reports via the `coverage` tool run in the pex.

        :param targets: Test targets; their closures define source mappings.
        :param pex: The test pex, reused to run `coverage.cmdline:main`.
        :param workunit: Workunit under which the coverage invocations run.
        """
        coverage = self.get_options().coverage
        if coverage is None:
            yield []
            return

        pex_src_root = os.path.relpath(
            self.context.products.get_data(
                GatherSources.PYTHON_SOURCES).path(), get_buildroot())

        # Map each library source root to the pex chroot root so coverage paths resolve.
        source_mappings = {}
        for target in targets:
            libs = (
                tgt for tgt in target.closure()
                if tgt.has_sources('.py') and not isinstance(tgt, PythonTests))
            for lib in libs:
                source_mappings[lib.target_base] = [pex_src_root]

        def ensure_trailing_sep(path):
            return path if path.endswith(os.path.sep) else path + os.path.sep

        if coverage == 'auto':

            def compute_coverage_sources(tgt):
                if tgt.coverage:
                    return tgt.coverage
                else:
                    # This makes the assumption that tests/python/<tgt> will be testing src/python/<tgt>.
                    # Note in particular that this doesn't work for pants' own tests, as those are under
                    # the top level package 'pants_tests', rather than just 'pants'.
                    # TODO(John Sirois): consider failing fast if there is no explicit coverage scheme;
                    # but also  consider supporting configuration of a global scheme whether that be parallel
                    # dirs/packages or some arbitrary function that can be registered that takes a test target
                    # and hands back the source packages or paths under test.
                    return set(
                        os.path.dirname(s).replace(os.sep, '.')
                        for s in tgt.sources_relative_to_source_root())

            coverage_sources = set(
                itertools.chain(
                    *[compute_coverage_sources(t) for t in targets]))
        else:
            # Explicit coverage spec: a comma-separated list of dirs or package names.
            coverage_sources = []
            for source in coverage.split(','):
                if os.path.isdir(source):
                    # The source is a dir, so correct its prefix for the chroot.
                    # E.g. if source is /path/to/src/python/foo/bar or src/python/foo/bar then
                    # rel_source is src/python/foo/bar, and ...
                    rel_source = os.path.relpath(source, get_buildroot())
                    rel_source = ensure_trailing_sep(rel_source)
                    found_target_base = False
                    for target_base in source_mappings:
                        prefix = ensure_trailing_sep(target_base)
                        if rel_source.startswith(prefix):
                            # ... rel_source will match on prefix=src/python/ ...
                            suffix = rel_source[len(prefix):]
                            # ... suffix will equal foo/bar ...
                            coverage_sources.append(
                                os.path.join(pex_src_root, suffix))
                            found_target_base = True
                            # ... and we end up appending <pex_src_root>/foo/bar to the coverage_sources.
                            break
                    if not found_target_base:
                        self.context.log.warn(
                            'Coverage path {} is not in any target. Skipping.'.
                            format(source))
                else:
                    # The source is to be interpreted as a package name.
                    coverage_sources.append(source)

        with self._cov_setup(
                source_mappings,
                coverage_sources=coverage_sources) as (args, coverage_rc):
            try:
                yield args
            finally:
                env = {'PEX_MODULE': 'coverage.cmdline:main'}

                def pex_run(args):
                    return self._pex_run(pex, workunit, args=args, env=env)

                # On failures or timeouts, the .coverage file won't be written.
                if not os.path.exists('.coverage'):
                    self.context.log.warn(
                        'No .coverage file was found! Skipping coverage reporting.'
                    )
                else:
                    # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
                    # This swaps the /tmp pex chroot source paths for the local original source paths
                    # the pex was generated from and which the user understands.
                    shutil.move('.coverage', '.coverage.raw')
                    pex_run(args=['combine', '--rcfile', coverage_rc])
                    pex_run(args=['report', '-i', '--rcfile', coverage_rc])

                    # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
                    # intermediate .html that points to each of the coverage reports generated and
                    # webbrowser.open to that page.
                    # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
                    # consider combining coverage files from all runs in this Tasks's execute and then
                    # producing just 1 console and 1 html report whether or not the tests are run in fast
                    # mode.
                    if self.get_options().coverage_output_dir:
                        target_dir = self.get_options().coverage_output_dir
                    else:
                        relpath = Target.maybe_readable_identify(targets)
                        pants_distdir = self.context.options.for_global_scope(
                        ).pants_distdir
                        target_dir = os.path.join(pants_distdir, 'coverage',
                                                  relpath)
                    safe_mkdir(target_dir)
                    pex_run(args=[
                        'html', '-i', '--rcfile', coverage_rc, '-d', target_dir
                    ])
                    coverage_xml = os.path.join(target_dir, 'coverage.xml')
                    pex_run(args=[
                        'xml', '-i', '--rcfile', coverage_rc, '-o',
                        coverage_xml
                    ])
Exemple #45
0
 def _init_target_subsystem(self):
   """Lazily initialize the Target subsystems, doing the work at most once."""
   if self._inited_target:
     return
   subsystem_util.init_subsystems(Target.subsystems())
   self._inited_target = True
Exemple #46
0
 def __str__(self):
     """Render as 'Context(id:<identity>, targets:<targets>)'."""
     return 'Context(id:{}, targets:{})'.format(
         Target.identify(self.targets()), self.targets())
 def test_contains_address(self):
     """contains_address flips from False to True once a target is injected."""
     address = Address.parse('a')
     self.assertFalse(self.build_graph.contains_address(address))
     injected = Target(name='a', address=address, build_graph=self.build_graph)
     self.build_graph.inject_target(injected)
     self.assertTrue(self.build_graph.contains_address(address))
Exemple #48
0
    def _process_target(
        self,
        current_target: Target,
        modulizable_target_set: FrozenOrderedSet[Target],
        resource_target_map,
        runtime_classpath,
        zinc_args_for_target,
        flat_non_modulizable_deps_for_modulizable_targets,
    ):
        """Build the export-info dict for a single target.

        Collects the target's dependencies, libraries, source roots, compiler
        args and platform info into a JSON-serializable dict.

        :param current_target: The target to describe.
        :param modulizable_target_set: Targets exported as modules; membership
          decides `is_target_root` and which deps land in `targets`.
        :param resource_target_map: Mapping used to classify resource targets.
        :param runtime_classpath: Classpath products used to resolve jar
          coordinates; library info is empty-ish when falsy.
        :param zinc_args_for_target: Zinc args from which scalac/javac flags
          are extracted.
        :param flat_non_modulizable_deps_for_modulizable_targets: Map from a
          modulizable target to its flattened non-modulizable deps.
        :returns: The info dict for ``current_target``.
        """
        info = {
            # this means 'dependencies'
            "targets": [],
            "source_dependencies_in_classpath": [],
            "libraries": [],
            "roots": [],
            "id": current_target.id,
            "target_type": ExportDepAsJar._get_target_type(
                current_target, resource_target_map, runtime_classpath
            ),
            "is_synthetic": current_target.is_synthetic,
            "pants_target_type": self._get_pants_target_alias(type(current_target)),
            "is_target_root": current_target in modulizable_target_set,
            "transitive": current_target.transitive,
            "scope": str(current_target.scope),
            "scalac_args": ExportDepAsJar._extract_arguments_with_prefix_from_zinc_args(
                zinc_args_for_target, "-S"
            ),
            "javac_args": ExportDepAsJar._extract_arguments_with_prefix_from_zinc_args(
                zinc_args_for_target, "-C"
            ),
            "extra_jvm_options": current_target.payload.get_field_value("extra_jvm_options", []),
        }

        def iter_transitive_jars(jar_lib):
            """Yield M2Coordinate keys for the jars a jar_library pulls in.

            Yields nothing when ``runtime_classpath`` is falsy.

            :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
            :rtype: :class:`collections.Iterator` of
                    :class:`pants.java.jar.M2Coordinate`
            """
            if runtime_classpath:
                jar_products = runtime_classpath.get_artifact_classpath_entries_for_targets(
                    (jar_lib,)
                )
                for _, jar_entry in jar_products:
                    coordinate = jar_entry.coordinate
                    # We drop classifier and type_ since those fields are represented in the global
                    # libraries dict and here we just want the key into that dict (see `_jar_id`).
                    yield M2Coordinate(org=coordinate.org, name=coordinate.name, rev=coordinate.rev)

        def _full_library_set_for_target(target):
            """Get the full library set for a target, including jar dependencies and jars of the
            library itself."""
            libraries = set([])
            if isinstance(target, JarLibrary):
                jars = set([])
                for jar in target.jar_dependencies:
                    jars.add(M2Coordinate(jar.org, jar.name, jar.rev))
                # Add all the jars pulled in by this jar_library
                jars.update(iter_transitive_jars(target))
                libraries = [self._jar_id(jar) for jar in jars]
            else:
                # Non-jar deps are referenced by their target id.
                libraries.add(target.id)
            return libraries

        if not current_target.is_synthetic:
            info["globs"] = current_target.globs_relative_to_buildroot()

        # Own jars plus the libraries of every flattened non-modulizable dep.
        libraries_for_target = set(
            [self._jar_id(jar) for jar in iter_transitive_jars(current_target)]
        )
        for dep in sorted(flat_non_modulizable_deps_for_modulizable_targets[current_target]):
            libraries_for_target.update(_full_library_set_for_target(dep))
        info["libraries"].extend(libraries_for_target)

        info["roots"] = [
            {
                "source_root": os.path.realpath(source_root_package_prefix[0]),
                "package_prefix": source_root_package_prefix[1],
            }
            for source_root_package_prefix in self._source_roots_for_target(current_target)
        ]

        # Only deps that are themselves exported modules show up in `targets`.
        for dep in current_target.dependencies:
            if dep in modulizable_target_set:
                info["targets"].append(dep.address.spec)

        if isinstance(current_target, ScalaLibrary):
            for dep in current_target.java_sources:
                info["targets"].append(dep.address.spec)

        if isinstance(current_target, JvmTarget):
            info["excludes"] = [self._exclude_id(exclude) for exclude in current_target.excludes]
            info["platform"] = current_target.platform.name
            if isinstance(current_target, RuntimePlatformMixin):
                # We ignore typing here because mypy doesn't behave well with multiple inheritance:
                # ref: https://github.com/python/mypy/issues/3603
                info["runtime_platform"] = current_target.runtime_platform.name  # type: ignore[misc]

        info["source_dependencies_in_classpath"] = self._compute_transitive_source_dependencies(
            current_target, info["targets"], modulizable_target_set
        )

        return info
Exemple #49
0
 def test_validate_target_representation_args_valid(self):
     """Valid kwargs/payload combinations pass validation without raising."""
     for kwargs, payload in (({}, None), (None, Payload())):
         Target._validate_target_representation_args(kwargs=kwargs,
                                                     payload=payload)
Exemple #50
0
    def _maybe_emit_coverage_data(self, targets, pex, workunit):
        """Context manager yielding extra pytest args to enable coverage.

        Yields ``[]`` when the coverage option is unset. The option may be
        `modules:<list>` or `paths:<list>`; either is translated into coverage
        modules for the rc file. On exit, raw coverage data is combined and
        console/html/xml reports are produced via the `coverage` tool.

        :param targets: Test targets, used for the default output dir name.
        :param pex: The test pex, reused to run `coverage.cmdline:main`.
        :param workunit: Workunit under which the coverage invocations run.
        """
        coverage = self.get_options().coverage
        if coverage is None:
            yield []
            return

        def read_coverage_list(prefix):
            # Strip the 'modules:'/'paths:' prefix and split the CSV payload.
            return coverage[len(prefix):].split(',')

        coverage_modules = None
        if coverage.startswith('modules:'):
            # NB: pytest-cov maps these modules to the `[run] sources` config.  So for
            # `modules:pants.base,pants.util` the config emitted has:
            # [run]
            # source =
            #   pants.base
            #   pants.util
            #
            # Now even though these are not paths, coverage sees the dots and switches to a module
            # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
            # like `pants.` serve to engage this module prefix-matching as one might hope.  It
            # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
            # module prefix name.
            coverage_modules = read_coverage_list('modules:')
        elif coverage.startswith('paths:'):
            coverage_modules = []
            pex_src_root = os.path.relpath(
                self.context.products.get_data(
                    GatherSources.PYTHON_SOURCES).path(), get_buildroot())
            for path in read_coverage_list('paths:'):
                # Re-root each user-supplied path under the pex chroot.
                coverage_modules.append(os.path.join(pex_src_root, path))

        with self._cov_setup(
                targets, pex.path(),
                coverage_modules=coverage_modules) as (args, coverage_rc):
            try:
                yield args
            finally:
                env = {'PEX_MODULE': 'coverage.cmdline:main'}

                def pex_run(args):
                    return self._pex_run(pex, workunit, args=args, env=env)

                # On failures or timeouts, the .coverage file won't be written.
                if not os.path.exists('.coverage'):
                    self.context.log.warn(
                        'No .coverage file was found! Skipping coverage reporting.'
                    )
                else:
                    # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
                    # This swaps the /tmp pex chroot source paths for the local original source paths
                    # the pex was generated from and which the user understands.
                    shutil.move('.coverage', '.coverage.raw')
                    pex_run(args=['combine', '--rcfile', coverage_rc])
                    pex_run(args=['report', '-i', '--rcfile', coverage_rc])

                    # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
                    # intermediate .html that points to each of the coverage reports generated and
                    # webbrowser.open to that page.
                    # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
                    # consider combining coverage files from all runs in this Tasks's execute and then
                    # producing just 1 console and 1 html report whether or not the tests are run in fast
                    # mode.
                    if self.get_options().coverage_output_dir:
                        target_dir = self.get_options().coverage_output_dir
                    else:
                        relpath = Target.maybe_readable_identify(targets)
                        pants_distdir = self.context.options.for_global_scope(
                        ).pants_distdir
                        target_dir = os.path.join(pants_distdir, 'coverage',
                                                  relpath)
                    safe_mkdir(target_dir)
                    pex_run(args=[
                        'html', '-i', '--rcfile', coverage_rc, '-d', target_dir
                    ])
                    coverage_xml = os.path.join(target_dir, 'coverage.xml')
                    pex_run(args=[
                        'xml', '-i', '--rcfile', coverage_rc, '-o',
                        coverage_xml
                    ])
Exemple #51
0
  def _maybe_emit_coverage_data(self, targets, pex):
    """Context manager yielding extra pytest args to enable coverage.

    Yields `[]` when the coverage option is unset. Otherwise maps each library
    source root into the pex chroot, yields the pytest-cov args, and on exit
    combines raw data and emits console/html/xml reports via `coverage`.

    :param targets: Test targets; their closures define the source mappings.
    :param pex: The test pex, reused to run `coverage.cmdline:main`.
    """
    coverage = self.get_options().coverage
    if coverage is None:
      yield []
      return

    pex_src_root = os.path.relpath(
      self.context.products.get_data(GatherSources.PYTHON_SOURCES).path(), get_buildroot())

    # Map each library source root to the pex chroot root so coverage paths resolve.
    source_mappings = {}
    for target in targets:
      libs = (tgt for tgt in target.closure()
              if tgt.has_sources('.py') and not isinstance(tgt, PythonTests))
      for lib in libs:
        source_mappings[lib.target_base] = [pex_src_root]

    def ensure_trailing_sep(path):
      return path if path.endswith(os.path.sep) else path + os.path.sep

    if coverage == 'auto':
      def compute_coverage_sources(tgt):
        if tgt.coverage:
          return tgt.coverage
        else:
          # This makes the assumption that tests/python/<tgt> will be testing src/python/<tgt>.
          # Note in particular that this doesn't work for pants' own tests, as those are under
          # the top level package 'pants_tests', rather than just 'pants'.
          # TODO(John Sirois): consider failing fast if there is no explicit coverage scheme;
          # but also  consider supporting configuration of a global scheme whether that be parallel
          # dirs/packages or some arbitrary function that can be registered that takes a test target
          # and hands back the source packages or paths under test.
          return set(os.path.dirname(s).replace(os.sep, '.')
                     for s in tgt.sources_relative_to_source_root())
      coverage_sources = set(itertools.chain(*[compute_coverage_sources(t) for t in targets]))
    else:
      # Explicit coverage spec: a comma-separated list of dirs or package names.
      coverage_sources = []
      for source in coverage.split(','):
        if os.path.isdir(source):
          # The source is a dir, so correct its prefix for the chroot.
          # E.g. if source is /path/to/src/python/foo/bar or src/python/foo/bar then
          # rel_source is src/python/foo/bar, and ...
          rel_source = os.path.relpath(source, get_buildroot())
          rel_source = ensure_trailing_sep(rel_source)
          found_target_base = False
          for target_base in source_mappings:
            prefix = ensure_trailing_sep(target_base)
            if rel_source.startswith(prefix):
              # ... rel_source will match on prefix=src/python/ ...
              suffix = rel_source[len(prefix):]
              # ... suffix will equal foo/bar ...
              coverage_sources.append(os.path.join(pex_src_root, suffix))
              found_target_base = True
              # ... and we end up appending <pex_src_root>/foo/bar to the coverage_sources.
              break
          if not found_target_base:
            self.context.log.warn('Coverage path {} is not in any target. Skipping.'.format(source))
        else:
          # The source is to be interpreted as a package name.
          coverage_sources.append(source)

    with self._cov_setup(source_mappings,
                         coverage_sources=coverage_sources) as (args, coverage_rc):
      try:
        yield args
      finally:
        env = {
          'PEX_MODULE': 'coverage.cmdline:main'
        }
        def pex_run(arguments):
          return self._pex_run(pex, workunit_name='coverage', args=arguments, env=env)

        # On failures or timeouts, the .coverage file won't be written.
        if not os.path.exists('.coverage'):
          self.context.log.warn('No .coverage file was found! Skipping coverage reporting.')
        else:
          # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
          # This swaps the /tmp pex chroot source paths for the local original source paths
          # the pex was generated from and which the user understands.
          shutil.move('.coverage', '.coverage.raw')
          pex_run(['combine', '--rcfile', coverage_rc])
          pex_run(['report', '-i', '--rcfile', coverage_rc])
          if self.get_options().coverage_output_dir:
            target_dir = self.get_options().coverage_output_dir
          else:
            relpath = Target.maybe_readable_identify(targets)
            pants_distdir = self.context.options.for_global_scope().pants_distdir
            target_dir = os.path.join(pants_distdir, 'coverage', relpath)
          safe_mkdir(target_dir)
          pex_run(['html', '-i', '--rcfile', coverage_rc, '-d', target_dir])
          coverage_xml = os.path.join(target_dir, 'coverage.xml')
          pex_run(['xml', '-i', '--rcfile', coverage_rc, '-o', coverage_xml])
Exemple #52
0
 def identify(targets):
   """Return an (org, name) pair identifying ``targets``.

   A lone exported JVM target is identified by its `provides` coordinates;
   otherwise the internal org plus a synthesized id is returned.
   """
   materialized = list(targets)
   if len(materialized) != 1:
     return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(materialized)
   candidate = materialized[0]
   if candidate.is_jvm and getattr(candidate, 'provides', None):
     return candidate.provides.org, candidate.provides.name
   return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(materialized)
Exemple #53
0
    def generate_targets_map(self, targets, classpath_products=None):
        """Generates a dictionary containing all pertinent information about the target graph.

    The return dictionary is suitable for serialization by json.dumps.
    :param targets: The list of targets to generate the map for.
    :param classpath_products: Optional classpath_products. If not provided when the --libraries
      option is `True`, this task will perform its own jar resolution.
    """
        targets_map = {}
        resource_target_map = {}
        python_interpreter_targets_mapping = defaultdict(list)

        if self.get_options().libraries:
            # NB(gmalmquist): This supports mocking the classpath_products in tests.
            if classpath_products is None:
                classpath_products = self.resolve_jars(targets)
        else:
            classpath_products = None

        target_roots_set = set(self.context.target_roots)

        def process_target(current_target):
            """
      :type current_target:pants.build_graph.target.Target
      """
            def get_target_type(tgt):
                def is_test(t):
                    return isinstance(t, JUnitTests) or isinstance(
                        t, PythonTests)

                if is_test(tgt):
                    return SourceRootTypes.TEST
                else:
                    if (isinstance(tgt, Resources)
                            and tgt in resource_target_map
                            and is_test(resource_target_map[tgt])):
                        return SourceRootTypes.TEST_RESOURCE
                    elif isinstance(tgt, Resources):
                        return SourceRootTypes.RESOURCE
                    else:
                        return SourceRootTypes.SOURCE

            info = {
                'targets': [],
                'libraries': [],
                'roots': [],
                'id':
                current_target.id,
                'target_type':
                get_target_type(current_target),
                # NB: is_code_gen should be removed when export format advances to 1.1.0 or higher
                'is_code_gen':
                current_target.is_synthetic,
                'is_synthetic':
                current_target.is_synthetic,
                'pants_target_type':
                self._get_pants_target_alias(type(current_target)),
            }

            if not current_target.is_synthetic:
                info['globs'] = current_target.globs_relative_to_buildroot()
                if self.get_options().sources:
                    info['sources'] = list(
                        current_target.sources_relative_to_buildroot())

            info['transitive'] = current_target.transitive
            info['scope'] = str(current_target.scope)
            info['is_target_root'] = current_target in target_roots_set

            if isinstance(current_target, PythonRequirementLibrary):
                reqs = current_target.payload.get_field_value(
                    'requirements', set())
                """:type : set[pants.backend.python.python_requirement.PythonRequirement]"""
                info['requirements'] = [req.key for req in reqs]

            if isinstance(current_target, PythonTarget):
                interpreter_for_target = self._interpreter_cache.select_interpreter_for_targets(
                    [current_target])
                if interpreter_for_target is None:
                    raise TaskError(
                        'Unable to find suitable interpreter for {}'.format(
                            current_target.address))
                python_interpreter_targets_mapping[
                    interpreter_for_target].append(current_target)
                info['python_interpreter'] = str(
                    interpreter_for_target.identity)

            def iter_transitive_jars(jar_lib):
                """
        :type jar_lib: :class:`pants.backend.jvm.targets.jar_library.JarLibrary`
        :rtype: :class:`collections.Iterator` of
                :class:`pants.java.jar.M2Coordinate`
        """
                if classpath_products:
                    jar_products = classpath_products.get_artifact_classpath_entries_for_targets(
                        (jar_lib, ))
                    for _, jar_entry in jar_products:
                        coordinate = jar_entry.coordinate
                        # We drop classifier and type_ since those fields are represented in the global
                        # libraries dict and here we just want the key into that dict (see `_jar_id`).
                        yield M2Coordinate(org=coordinate.org,
                                           name=coordinate.name,
                                           rev=coordinate.rev)

            target_libraries = OrderedSet()
            if isinstance(current_target, JarLibrary):
                target_libraries = OrderedSet(
                    iter_transitive_jars(current_target))
            for dep in current_target.dependencies:
                info['targets'].append(dep.address.spec)
                if isinstance(dep, JarLibrary):
                    for jar in dep.jar_dependencies:
                        target_libraries.add(
                            M2Coordinate(jar.org, jar.name, jar.rev))
                    # Add all the jars pulled in by this jar_library
                    target_libraries.update(iter_transitive_jars(dep))
                if isinstance(dep, Resources):
                    resource_target_map[dep] = current_target

            if isinstance(current_target, ScalaLibrary):
                for dep in current_target.java_sources:
                    info['targets'].append(dep.address.spec)
                    process_target(dep)

            if isinstance(current_target, JvmTarget):
                info['excludes'] = [
                    self._exclude_id(exclude)
                    for exclude in current_target.excludes
                ]
                info['platform'] = current_target.platform.name
                if hasattr(current_target, 'test_platform'):
                    info['test_platform'] = current_target.test_platform.name

            info['roots'] = [{
                'source_root': source_root_package_prefix[0],
                'package_prefix': source_root_package_prefix[1]
            } for source_root_package_prefix in self._source_roots_for_target(
                current_target)]

            if classpath_products:
                info['libraries'] = [
                    self._jar_id(lib) for lib in target_libraries
                ]
            targets_map[current_target.address.spec] = info

        for target in targets:
            process_target(target)

        scala_platform = ScalaPlatform.global_instance()
        scala_platform_map = {
            'scala_version':
            scala_platform.version,
            'compiler_classpath': [
                cp_entry.path
                for cp_entry in scala_platform.compiler_classpath_entries(
                    self.context.products)
            ],
        }

        jvm_platforms_map = {
            'default_platform':
            JvmPlatform.global_instance().default_platform.name,
            'platforms': {
                str(platform_name): {
                    'target_level': str(platform.target_level),
                    'source_level': str(platform.source_level),
                    'args': platform.args,
                }
                for platform_name, platform in
                JvmPlatform.global_instance().platforms_by_name.items()
            },
        }

        graph_info = {
            'version': DEFAULT_EXPORT_VERSION,
            'targets': targets_map,
            'jvm_platforms': jvm_platforms_map,
            'scala_platform': scala_platform_map,
            # `jvm_distributions` are static distribution settings from config,
            # `preferred_jvm_distributions` are distributions that pants actually uses for the
            # given platform setting.
            'preferred_jvm_distributions': {}
        }

        for platform_name, platform in JvmPlatform.global_instance(
        ).platforms_by_name.items():
            preferred_distributions = {}
            for strict, strict_key in [(True, 'strict'),
                                       (False, 'non_strict')]:
                try:
                    dist = JvmPlatform.preferred_jvm_distribution(
                        [platform], strict=strict)
                    preferred_distributions[strict_key] = dist.home
                except DistributionLocator.Error:
                    pass

            if preferred_distributions:
                graph_info['preferred_jvm_distributions'][
                    platform_name] = preferred_distributions

        if classpath_products:
            graph_info['libraries'] = self._resolve_jars_info(
                targets, classpath_products)

        if python_interpreter_targets_mapping:
            # NB: We've selected a python interpreter compatible with each python target individually into
            # the `python_interpreter_targets_mapping`. These python targets may not be compatible, ie: we
            # could have a python target requiring 'CPython>=2.7<3' (ie: CPython-2.7.x) and another
            # requiring 'CPython>=3.6'. To pick a default interpreter then from among these two choices
            # is arbitrary and not to be relied on to work as a default interpreter if ever needed by the
            # export consumer.
            #
            # TODO(John Sirois): consider either eliminating the 'default_interpreter' field and pressing
            # export consumers to make their own choice of a default (if needed) or else use
            # `select.select_interpreter_for_targets` and fail fast if there is no interpreter compatible
            # across all the python targets in-play.
            #
            # For now, make our arbitrary historical choice of a default interpreter explicit and use the
            # lowest version.
            default_interpreter = min(
                python_interpreter_targets_mapping.keys())

            interpreters_info = {}
            for interpreter, targets in python_interpreter_targets_mapping.items(
            ):
                req_libs = [
                    target for target in Target.closure_for_targets(targets)
                    if has_python_requirements(target)
                ]
                chroot = self.resolve_requirements(interpreter, req_libs)
                interpreters_info[str(interpreter.identity)] = {
                    'binary': interpreter.binary,
                    'chroot': chroot.path()
                }

            graph_info['python_setup'] = {
                'default_interpreter': str(default_interpreter.identity),
                'interpreters': interpreters_info
            }

        if self.get_options().available_target_types:
            graph_info['available_target_types'] = self._target_types()

        return graph_info
Exemple #54
0
 def target_set_id(self, *targets):
     """Return a stable, possibly human-readable id for a set of targets.

     Falls back to identifying ``self.partition`` when no targets are given.
     """
     chosen = targets if targets else self.partition
     return Target.maybe_readable_identify(chosen)
Exemple #55
0
 def _collect_targets(self, root_targets, **kwargs):
   """Return the transitive closure of ``root_targets``.

   Extra keyword arguments are forwarded to `Target.closure_for_targets`.
   """
   return Target.closure_for_targets(target_roots=root_targets, **kwargs)
Exemple #56
0
  def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
    """Context manager yielding extra pytest args that enable coverage collection.

    Yields a list of additional pytest arguments (empty when the ``coverage``
    option is unset).  On exit, post-processes the raw ``.coverage`` data file
    by re-running the pex as the ``coverage`` CLI to combine data and emit
    console, html and xml reports.

    :param targets: Test targets in play; used to name the report output dir.
    :param chroot: Path to the PEX chroot the test sources were built into.
    :param pex: The pytest PEX; re-invoked with PEX_MODULE=coverage.cmdline:main.
    :param workunit: Workunit the coverage subprocess runs are attributed to.
    """
    coverage = self.get_options().coverage
    if coverage is None:
      # Coverage was not requested: contribute no extra pytest args.
      yield []
      return

    def read_coverage_list(prefix):
      # Option value looks like '<prefix>a,b,c'; strip the prefix and split.
      return coverage[len(prefix):].split(',')

    coverage_modules = None
    if coverage.startswith('modules:'):
      # NB: pytest-cov maps these modules to the `[run] sources` config.  So for
      # `modules:pants.base,pants.util` the config emitted has:
      # [run]
      # source =
      #   pants.base
      #   pants.util
      #
      # Now even though these are not paths, coverage sees the dots and switches to a module
      # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
      # like `pants.` serve to engage this module prefix-matching as one might hope.  It
      # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
      # module prefix name.
      coverage_modules = read_coverage_list('modules:')
    elif coverage.startswith('paths:'):
      coverage_modules = []
      for path in read_coverage_list('paths:'):
        if not os.path.exists(path) and not os.path.isabs(path):
          # Look for the source in the PEX chroot since its not available from CWD.
          path = os.path.join(chroot, path)
        coverage_modules.append(path)

    with self._cov_setup(targets,
                         chroot,
                         coverage_modules=coverage_modules) as (args, coverage_rc):
      try:
        yield args
      finally:
        # PEX_MODULE redirects the pex's entry point so the same pex can run the
        # coverage CLI for report generation.
        with environment_as(PEX_MODULE='coverage.cmdline:main'):
          def pex_run(args):
            return self._pex_run(pex, workunit, args=args)

          # On failures or timeouts, the .coverage file won't be written.
          if not os.path.exists('.coverage'):
            logger.warning('No .coverage file was found! Skipping coverage reporting.')
          else:
            # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
            # This swaps the /tmp pex chroot source paths for the local original source paths
            # the pex was generated from and which the user understands.
            shutil.move('.coverage', '.coverage.raw')
            pex_run(args=['combine', '--rcfile', coverage_rc])
            pex_run(args=['report', '-i', '--rcfile', coverage_rc])

            # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
            # intermediate .html that points to each of the coverage reports generated and
            # webbrowser.open to that page.
            # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
            # consider combining coverage files from all runs in this Tasks's execute and then
            # producing just 1 console and 1 html report whether or not the tests are run in fast
            # mode.
            if self.get_options().coverage_output_dir:
              target_dir = self.get_options().coverage_output_dir
            else:
              # Default: dist/coverage/<readable-target-set-id>.
              relpath = Target.maybe_readable_identify(targets)
              pants_distdir = self.context.options.for_global_scope().pants_distdir
              target_dir = os.path.join(pants_distdir, 'coverage', relpath)
            safe_mkdir(target_dir)
            pex_run(args=['html', '-i', '--rcfile', coverage_rc, '-d', target_dir])
            coverage_xml = os.path.join(target_dir, 'coverage.xml')
            pex_run(args=['xml', '-i', '--rcfile', coverage_rc, '-o', coverage_xml])
Exemple #57
0
def has_python_sources(tgt: Target) -> bool:
    """Return True iff ``tgt`` is a python target that owns source files."""
    if not is_python_target(tgt):
        return False
    return tgt.has_sources()
Exemple #58
0
  def closure(*vargs, **kwargs):
    """Compute a transitive target closure.

    All arguments are forwarded to `Target.closure_for_targets`; see it for details.

    :API: public
    """
    closure_fn = Target.closure_for_targets
    return closure_fn(*vargs, **kwargs)
Exemple #59
0
def has_resources(tgt: Target) -> bool:
    """Return True iff ``tgt`` is a `Files` target that owns source files."""
    if not isinstance(tgt, Files):
        return False
    return tgt.has_sources()