Example #1
  def _maybe_emit_coverage_data(self, targets, chroot, pex, stdout, stderr):
    coverage = os.environ.get('PANTS_PY_COVERAGE')
    if coverage is None:
      yield []
      return

    def read_coverage_list(prefix):
      return coverage[len(prefix):].split(',')

    coverage_modules = None
    if coverage.startswith('modules:'):
      # NB: pytest-cov maps these modules to the `[run] sources` config.  So for
      # `modules:pants.base,pants.util` the config emitted has:
      # [run]
      # source =
      #   pants.base
      #   pants.util
      #
      # Now even though these are not paths, coverage sees the dots and switches to a module
      # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
      # like `pants.` serve to engage this module prefix-matching as one might hope.  It
      # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
      # module prefix name.
      coverage_modules = read_coverage_list('modules:')
    elif coverage.startswith('paths:'):
      coverage_modules = []
      for path in read_coverage_list('paths:'):
        if not os.path.exists(path) and not os.path.isabs(path):
          # Look for the source in the PEX chroot since it's not available from CWD.
          path = os.path.join(chroot, path)
        coverage_modules.append(path)

    with self._cov_setup(targets,
                         chroot,
                         coverage_modules=coverage_modules) as (args, coverage_rc):
      try:
        yield args
      finally:
        with environment_as(PEX_MODULE='coverage.cmdline:main'):
          # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
          # This swaps the /tmp pex chroot source paths for the local original source paths
          # the pex was generated from and which the user understands.
          shutil.move('.coverage', '.coverage.raw')
          pex.run(args=['combine', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          pex.run(args=['report', '-i', '--rcfile', coverage_rc], stdout=stdout, stderr=stderr)

          # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
          # intermediate .html that points to each of the coverage reports generated and
          # webbrowser.open to that page.
          # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
          # consider combining coverage files from all runs in this Task's execute and then
          # producing just 1 console and 1 html report whether or not the tests are run in fast
          # mode.
          relpath = Target.maybe_readable_identify(targets)
          pants_distdir = Config.from_cache().getdefault('pants_distdir')
          target_dir = os.path.join(pants_distdir, 'coverage', relpath)
          safe_mkdir(target_dir)
          pex.run(args=['html', '-i', '--rcfile', coverage_rc, '-d', target_dir],
                  stdout=stdout, stderr=stderr)
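
The finally block above drives coverage.py's own command line inside the PEX by setting PEX_MODULE='coverage.cmdline:main'. Outside of pants, the same normalize-then-report sequence can be sketched with a plain `coverage` executable, as below. This is an illustration of the comments' intent, not the code pants runs; it assumes `coverage` is on PATH and that the rc file carries a `[paths]` section mapping the /tmp chroot prefix back to the local source tree, which is what lets `combine` rewrite the recorded paths.

import shutil
import subprocess

def combine_and_report(coverage_rc, html_dir):
  # Move the raw data aside so `combine` treats it as an input data file and
  # writes a fresh, path-normalized .coverage as its output.
  shutil.move('.coverage', '.coverage.raw')
  subprocess.check_call(['coverage', 'combine', '--rcfile', coverage_rc])
  subprocess.check_call(['coverage', 'report', '-i', '--rcfile', coverage_rc])
  subprocess.check_call(['coverage', 'html', '-i', '--rcfile', coverage_rc, '-d', html_dir])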
Example #2
 def identify(self, targets):
     targets = list(targets)
     if len(targets) == 1 and hasattr(targets[0],
                                      'provides') and targets[0].provides:
         return targets[0].provides.org, targets[0].provides.name
     else:
         return 'internal', Target.maybe_readable_identify(targets)
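
Every example in this collection funnels a set of targets through Target.maybe_readable_identify to name a directory, report file, or artifact. Judging purely from these call sites (one target yields a readable name, several targets yield a stable synthetic id), its contract can be paraphrased as in the sketch below. This is an orientation aid, not the pants implementation; _hash_all is an invented stand-in for whatever digest helper the real code uses, and target.id is assumed to be each target's readable identifier.

import hashlib

def _hash_all(ids):
  # Invented stand-in: a stable digest over the sorted ids.
  digest = hashlib.sha1()
  for id_ in sorted(ids):
    digest.update(id_.encode('utf-8'))
  return digest.hexdigest()

def maybe_readable_identify(targets):
  # Presumed contract: one target -> its own readable id; several targets ->
  # a single combined hash that is stable regardless of input order.
  ids = [target.id for target in targets]
  return ids[0] if len(ids) == 1 else _hash_all(ids)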
Example #3
 def identify(targets):
     targets = list(targets)
     if len(targets) == 1 and targets[0].is_jvm and getattr(
             targets[0], 'provides', None):
         return targets[0].provides.org, targets[0].provides.name
     else:
         return 'internal', Target.maybe_readable_identify(targets)
Example #4
 def identify(targets):
     targets = list(targets)
     if len(targets) == 1 and targets[0].is_jvm and getattr(
             targets[0], 'provides', None):
         return targets[0].provides.org, targets[0].provides.name
     else:
         return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(
             targets)
Example #5
 def extra_products(self, target):
   ret = []
   if target.is_apt and target.processors:
     root = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, JavaCompile._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
Example #6
 def extra_products(self, target):
   ret = []
   if isinstance(target, AnnotationProcessor) and target.processors:
     root = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, JavaCompile._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
Example #7
 def _maybe_emit_junit_xml(self, targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.realpath(xml_base)
     xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml=%s' % xml_path)
   yield args
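
_maybe_emit_junit_xml here (and in its later variants) is a generator that yields exactly once, which strongly suggests it is decorated with contextlib.contextmanager and consumed in a with block so the extra pytest arguments stay scoped to a single run. The standalone paraphrase below shows that shape; identify stands in for Target.maybe_readable_identify, os.makedirs for safe_mkdir, and the commented-out caller is hypothetical.

import os
from contextlib import contextmanager

@contextmanager
def maybe_emit_junit_xml(targets, xml_base, identify):
  args = []
  if xml_base and targets:
    xml_base = os.path.realpath(xml_base)
    xml_path = os.path.join(xml_base, identify(targets) + '.xml')
    os.makedirs(os.path.dirname(xml_path), exist_ok=True)
    args.append('--junitxml={}'.format(xml_path))
  yield args

# Hypothetical consumption: the junit args apply only to this one invocation.
# with maybe_emit_junit_xml(targets, '/tmp/junit', identify) as junit_args:
#   run_pytest(base_args + junit_args)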
Example #8
 def _maybe_emit_junit_xml(self, targets):
     args = []
     xml_base = self.get_options().junit_xml_dir
     if xml_base and targets:
         xml_base = os.path.realpath(xml_base)
         xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + ".xml")
         safe_mkdir(os.path.dirname(xml_path))
         args.append("--junitxml={}".format(xml_path))
     yield args
Example #9
 def extra_products(self, target):
   """Override extra_products to produce an annotation processor information file."""
   ret = []
   if isinstance(target, AnnotationProcessor) and target.processors:
     root = os.path.join(self._processor_info_dir, Target.maybe_readable_identify([target]))
     processor_info_file = os.path.join(root, self._PROCESSOR_INFO_FILE)
     self._write_processor_info(processor_info_file, target.processors)
     ret.append((root, [processor_info_file]))
   return ret
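
The processor info file produced above is what later javac runs consult to discover annotation processors, so _write_processor_info most plausibly writes one fully-qualified processor class name per line, mirroring the META-INF/services/javax.annotation.processing.Processor format. The helper below is a guess at that behavior, not the pants implementation, with os.makedirs standing in for safe_mkdir/safe_open.

import os

def write_processor_info(processor_info_file, processors):
  # Assumed format: one fully-qualified processor class per line.
  os.makedirs(os.path.dirname(processor_info_file), exist_ok=True)
  with open(processor_info_file, 'w') as f:
    for processor in processors:
      f.write('{}\n'.format(processor.strip()))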
Example #10
 def _maybe_emit_junit_xml(self, targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.realpath(xml_base)
     xml_path = os.path.join(xml_base, Target.maybe_readable_identify(targets) + '.xml')
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml={}'.format(xml_path))
   yield args
Example #11
  def execute(self):
    if self.old_options.pex and self.old_options.ipython:
      self.error('Cannot specify both --pex and --ipython!')

    if self.old_options.entry_point and self.old_options.ipython:
      self.error('Cannot specify both --entry_point and --ipython!')

    if self.old_options.verbose:
      print('Build operating on targets: %s' % ' '.join(str(target) for target in self.targets))


    builder = PEXBuilder(tempfile.mkdtemp(), interpreter=self.interpreter,
                         pex_info=self.binary.pexinfo if self.binary else None)

    if self.old_options.entry_point:
      builder.set_entry_point(self.old_options.entry_point)

    if self.old_options.ipython:
      if not self.config.has_section('python-ipython'):
        self.error('No python-ipython sections defined in your pants.ini!')

      builder.info.entry_point = self.config.get('python-ipython', 'entry_point')
      if builder.info.entry_point is None:
        self.error('Must specify entry_point for IPython in the python-ipython section '
                   'of your pants.ini!')

      requirements = self.config.getlist('python-ipython', 'requirements', default=[])

      for requirement in requirements:
        self.extra_requirements.append(PythonRequirement(requirement))

    executor = PythonChroot(
        targets=self.targets,
        extra_requirements=self.extra_requirements,
        builder=builder,
        platforms=self.binary.platforms if self.binary else None,
        interpreter=self.interpreter,
        conn_timeout=self.old_options.conn_timeout)

    executor.dump()

    if self.old_options.pex:
      pex_name = self.binary.name if self.binary else Target.maybe_readable_identify(self.targets)
      pex_path = os.path.join(self.root_dir, 'dist', '%s.pex' % pex_name)
      builder.build(pex_path)
      print('Wrote %s' % pex_path)
      return 0
    else:
      builder.freeze()
      pex = PEX(builder.path(), interpreter=self.interpreter)
      po = pex.run(args=list(self.args), blocking=False)
      try:
        return po.wait()
      except KeyboardInterrupt:
        po.send_signal(signal.SIGINT)
        raise
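
When no binary target supplies a name, the built PEX falls back to Target.maybe_readable_identify(self.targets), so a single-target build lands at a readable dist/<target-id>.pex while a multi-target build gets a hashed file name. Below is a small self-contained illustration of just that naming decision; the function and the example ids are made up:

import os

def pex_output_path(root_dir, binary_name, targets, identify):
  # `identify` stands in for Target.maybe_readable_identify.
  pex_name = binary_name if binary_name else identify(targets)
  return os.path.join(root_dir, 'dist', '{}.pex'.format(pex_name))

# A readable name for one target, a hash for a whole target set:
#   pex_output_path('/repo', None, [one_target], identify)  -> '/repo/dist/<one_target_id>.pex'
#   pex_output_path('/repo', None, many_targets, identify)  -> '/repo/dist/<combined_hash>.pex'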
Example #12
 def extra_products(self, target):
     """Override extra_products to produce an annotation processor information file."""
     ret = []
     if isinstance(target, AnnotationProcessor) and target.processors:
         root = os.path.join(self._processor_info_dir,
                             Target.maybe_readable_identify([target]))
         processor_info_file = os.path.join(root, self._PROCESSOR_INFO_FILE)
         self._write_processor_info(processor_info_file, target.processors)
         ret.append((root, [processor_info_file]))
     return ret
Example #13
 def _maybe_emit_junit_xml(self, targets):
     args = []
     xml_base = self.get_options().junit_xml_dir
     if xml_base and targets:
         xml_base = os.path.realpath(xml_base)
         xml_path = os.path.join(
             xml_base,
             Target.maybe_readable_identify(targets) + '.xml')
         safe_mkdir(os.path.dirname(xml_path))
         args.append('--junitxml={}'.format(xml_path))
     yield args
Example #14
def generate_coverage_config(targets):
  cp = configparser.ConfigParser()
  cp.readfp(Compatibility.StringIO(DEFAULT_COVERAGE_CONFIG))
  cp.add_section('html')
  if len(targets) == 1:
    target = targets[0]
    relpath = os.path.join(os.path.dirname(target.address.buildfile.relpath), target.name)
  else:
    relpath = Target.maybe_readable_identify(targets)
  target_dir = os.path.join(Config.load().getdefault('pants_distdir'), 'coverage', relpath)
  safe_mkdir(target_dir)
  cp.set('html', 'directory', target_dir)
  return cp
Example #15
def generate_coverage_config(targets):
  cp = configparser.ConfigParser()
  cp.readfp(Compatibility.StringIO(DEFAULT_COVERAGE_CONFIG))
  cp.add_section('html')
  if len(targets) == 1:
    target = targets[0]
    relpath = os.path.join(os.path.dirname(target.address.build_file.relpath), target.name)
  else:
    relpath = Target.maybe_readable_identify(targets)
  target_dir = os.path.join(Config.load().getdefault('pants_distdir'), 'coverage', relpath)
  safe_mkdir(target_dir)
  cp.set('html', 'directory', target_dir)
  return cp
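
Both generate_coverage_config variants return a ConfigParser rather than a file, leaving serialization to the caller; once written out, the section added here is an ordinary coverage.py [html] block with a directory option. A minimal sketch of that final step follows, with purely illustrative temp-file handling:

import tempfile

def write_coverage_rc(cp):
  # Serialize the parser built by generate_coverage_config so it can be passed
  # to coverage/pytest-cov via --rcfile.  The emitted file ends with:
  #   [html]
  #   directory = <pants_distdir>/coverage/<relpath>
  with tempfile.NamedTemporaryFile('w', suffix='.coveragerc', delete=False) as fp:
    cp.write(fp)
    return fp.name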
Example #16
 def generate_junit_args(targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.abspath(os.path.normpath(xml_base))
     if len(targets) == 1:
       target = targets[0]
       relpath = os.path.join(os.path.dirname(target.address.build_file.relpath),
                              target.name + '.xml')
     else:
       relpath = Target.maybe_readable_identify(targets) + '.xml'
     xml_path = os.path.join(xml_base, relpath)
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml=%s' % xml_path)
   return args
Example #17
 def generate_junit_args(targets):
   args = []
   xml_base = os.getenv('JUNIT_XML_BASE')
   if xml_base and targets:
     xml_base = os.path.abspath(os.path.normpath(xml_base))
     if len(targets) == 1:
       target = targets[0]
       relpath = os.path.join(os.path.dirname(target.address.buildfile.relpath),
                              target.name + '.xml')
     else:
       relpath = Target.maybe_readable_identify(targets) + '.xml'
     xml_path = os.path.join(xml_base, relpath)
     safe_mkdir(os.path.dirname(xml_path))
     args.append('--junitxml=%s' % xml_path)
   return args
Example #18
    def execute(self, targets):
        # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so e.g.,
        # we can inject a dep on the scala runtime library and still have it ivy-resolve.

        # In case we have no relevant targets and return early.
        self._create_empty_products()

        relevant_targets = [
            t for t in targets if t.has_sources(self._file_suffix)
        ]

        if not relevant_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(relevant_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        classpath = egroups.get_classpath_for_group(group_id)

        # Add any extra compile-time classpath elements.
        for conf in self._confs:
            for jar in self.extra_compile_time_classpath_elements():
                classpath.insert(0, (conf, jar))

        # TODO(benjy): Should sources_by_target and locally_changed_targets be on all Tasks?

        # Target -> sources (relative to buildroot).
        sources_by_target = self._compute_current_sources_by_target(
            relevant_targets)

        # If needed, find targets that we've changed locally (as opposed to
        # changes synced in from the SCM).
        locally_changed_targets = None
        if self._locally_changed_targets_heuristic_limit:
            locally_changed_targets = self._find_locally_changed_targets(
                sources_by_target)
            if locally_changed_targets and \
                    len(locally_changed_targets) > self._locally_changed_targets_heuristic_limit:
                locally_changed_targets = None

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(relevant_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint,
                              locally_changed_targets=locally_changed_targets
                              ) as invalidation_check:
            if invalidation_check.invalid_vts:
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                invalid_sources_by_target = {}
                for tgt in invalid_targets:
                    invalid_sources_by_target[tgt] = sources_by_target[tgt]
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if self._analysis_parser.is_nonempty_analysis(
                        self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        self._analysis_tools.split_to_paths(
                            self._analysis_file,
                            [(invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp)], valid_analysis_tmp)
                        if self._analysis_parser.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            self._analysis_tools.merge_from_paths([
                                self._invalid_analysis_file,
                                newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp)
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        self.move(valid_analysis_tmp, self._analysis_file)
                        self.move(invalid_analysis_tmp,
                                  self._invalid_analysis_file)

                # Register products for all the valid targets.
                # We register as we go, so dependency checking code can use this data.
                valid_targets = list(
                    set(relevant_targets) - set(invalid_targets))
                self._register_products(valid_targets, sources_by_target,
                                        self._analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = [
                ]  # Each element is a triple (vts, sources_by_target, analysis).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    de_duped_sources = list(OrderedSet(sources))
                    if len(sources) != len(de_duped_sources):
                        counts = [(src, len(list(srcs)))
                                  for src, srcs in groupby(sorted(sources))]
                        self.context.log.warn(
                            'De-duped the following sources:\n\t%s' %
                            '\n\t'.join(
                                sorted('%d %s' % (cnt, src)
                                       for src, cnt in counts if cnt > 1)))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, de_duped_sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if self._analysis_parser.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        # We have to pass the analysis for any deleted files through zinc, to give it
                        # a chance to delete the relevant class files.
                        if splits:
                            splits[0] = (splits[0][0] + deleted_sources,
                                         splits[0][1])
                        self._analysis_tools.split_to_paths(
                            self._invalid_analysis_file, splits)

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    cp_entries = [
                        entry for conf, entry in classpath
                        if conf in self._confs
                    ]
                    self._process_target_partition(partition, cp_entries)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
                    if os.path.exists(
                            analysis_file
                    ):  # The compilation created an analysis.
                        # Merge the newly-valid analysis with our global valid analysis.
                        new_valid_analysis = analysis_file + '.valid.new'
                        if self._analysis_parser.is_nonempty_analysis(
                                self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                self._analysis_tools.merge_from_paths(
                                    [self._analysis_file, analysis_file],
                                    new_valid_analysis)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            shutil.copy(analysis_file, new_valid_analysis)

                        # Move the merged valid analysis to its proper location.
                        # We do this before checking for missing dependencies, so that we can still
                        # enjoy an incremental compile after fixing missing deps.
                        self.move(new_valid_analysis, self._analysis_file)

                        # Update the products with the latest classes. Must happen before the
                        # missing dependencies check.
                        self._register_products(vts.targets, sources_by_target,
                                                analysis_file)
                        if self._dep_analyzer:
                            # Check for missing dependencies.
                            actual_deps = self._analysis_parser.parse_deps_from_path(
                                analysis_file, lambda: self.
                                _compute_classpath_elements_by_class(cp_entries
                                                                     ))
                            with self.context.new_workunit(
                                    name='find-missing-dependencies'):
                                self._dep_analyzer.check(sources, actual_deps)

                        # Kick off the background artifact cache write.
                        if self.artifact_cache_writes_enabled():
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                    if self._analysis_parser.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            self._analysis_tools.split_to_paths(
                                self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis)],
                                new_invalid_analysis)
                            self.move(new_invalid_analysis,
                                      self._invalid_analysis_file)

                    # Record the built target -> sources mapping for future use.
                    for target in vts.targets:
                        self._record_sources_by_target(
                            target, sources_by_target.get(target, []))

                    # Now that all the analysis accounting is complete, and we have no missing deps,
                    # we can safely mark the targets as valid.
                    vts.update()
            else:
                # Nothing to build. Register products for all the targets in one go.
                self._register_products(relevant_targets, sources_by_target,
                                        self._analysis_file)

        # Update the classpath for downstream tasks.
        runtime_deps = self.tool_classpath(self._runtime_deps_key) \
          if self._runtime_deps_key else []
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])
            for dep in runtime_deps:
                # TODO(benjy): Make compile-time vs. runtime classpaths more explicit.
                egroups.update_compatible_classpaths(group_id, [(conf, dep)])

        self.post_process(relevant_targets)
Example #19
  def execute(self, targets):
    # TODO(benjy): Add a pre-execute phase for injecting deps into targets, so e.g.,
    # we can inject a dep on the scala runtime library and still have it ivy-resolve.

    # In case we have no relevant targets and return early.
    self._create_empty_products()

    relevant_targets = [t for t in targets if t.has_sources(self._file_suffix)]

    if not relevant_targets:
      return

    # Get the exclusives group for the targets to compile.
    # Group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(relevant_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    classpath = egroups.get_classpath_for_group(group_id)

    # Add any extra compile-time classpath elements.
    for conf in self._confs:
      for jar in self.extra_compile_time_classpath_elements():
        classpath.insert(0, (conf, jar))

    # Target -> sources (relative to buildroot).
    sources_by_target = self._compute_sources_by_target(relevant_targets)

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(relevant_targets,
                          invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        invalid_sources_by_target = {}
        for tgt in invalid_targets:
          invalid_sources_by_target[tgt] = sources_by_target[tgt]
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            self._analysis_tools.split_to_paths(self._analysis_file,
              [(invalid_sources + deleted_sources, newly_invalid_analysis_tmp)], valid_analysis_tmp)
            if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
              self._analysis_tools.merge_from_paths(
                [self._invalid_analysis_file, newly_invalid_analysis_tmp], invalid_analysis_tmp)
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            self.move(valid_analysis_tmp, self._analysis_file)
            self.move(invalid_analysis_tmp, self._invalid_analysis_file)

        # Register products for all the valid targets.
        # We register as we go, so dependency checking code can use this data.
        valid_targets = list(set(relevant_targets) - set(invalid_targets))
        self._register_products(valid_targets, sources_by_target, self._analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources_by_target, analysis).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
              [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          de_duped_sources = list(OrderedSet(sources))
          if len(sources) != len(de_duped_sources):
            counts = [(src, len(list(srcs))) for src, srcs in groupby(sorted(sources))]
            self.context.log.warn(
                'De-duped the following sources:\n\t%s' %
                '\n\t'.join(sorted('%d %s' % (cnt, src) for src, cnt in counts if cnt > 1)))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, de_duped_sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            # We have to pass the analysis for any deleted files through zinc, to give it
            # a chance to delete the relevant class files.
            if splits:
              splits[0] = (splits[0][0] + deleted_sources, splits[0][1])
            self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          cp_entries = [entry for conf, entry in classpath if conf in self._confs]
          self._process_target_partition(partition, cp_entries)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Merge the newly-valid analysis with our global valid analysis.
            new_valid_analysis = analysis_file + '.valid.new'
            if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                self._analysis_tools.merge_from_paths([self._analysis_file, analysis_file],
                                                      new_valid_analysis)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              shutil.copy(analysis_file, new_valid_analysis)

            # Move the merged valid analysis to its proper location.
            # We do this before checking for missing dependencies, so that we can still
            # enjoy an incremental compile after fixing missing deps.
            self.move(new_valid_analysis, self._analysis_file)

            # Update the products with the latest classes. Must happen before the
            # missing dependencies check.
            self._register_products(vts.targets, sources_by_target, analysis_file)
            if self._dep_analyzer:
              # Check for missing dependencies.
              actual_deps = self._analysis_parser.parse_deps_from_path(analysis_file,
                  lambda: self._compute_classpath_elements_by_class(cp_entries))
              with self.context.new_workunit(name='find-missing-dependencies'):
                self._dep_analyzer.check(sources, actual_deps)

            # Kick off the background artifact cache write.
            if self.artifact_cache_writes_enabled():
              self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

          if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              self._analysis_tools.split_to_paths(self._invalid_analysis_file,
                [(sources, discarded_invalid_analysis)], new_invalid_analysis)
              self.move(new_invalid_analysis, self._invalid_analysis_file)

          # Now that all the analysis accounting is complete, and we have no missing deps,
          # we can safely mark the targets as valid.
          vts.update()
      else:
        # Nothing to build. Register products for all the targets in one go.
        self._register_products(relevant_targets, sources_by_target, self._analysis_file)

    # Update the classpath for downstream tasks.
    runtime_deps = self._jvm_tool_bootstrapper.get_jvm_tool_classpath(self._runtime_deps_key) \
      if self._runtime_deps_key else []
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
      for dep in runtime_deps:
        # TODO(benjy): Make compile-time vs. runtime classpaths more explicit.
        egroups.update_compatible_classpaths(group_id, [(conf, dep)])

    self.post_process(relevant_targets)
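
The de-dup block inside the partition loop (here and in the surrounding variants) collapses sources claimed by more than one target and warns with a per-source count. Stripped of task plumbing it reduces to the pattern below; a plain dict (relying on Python 3.7+ insertion order) stands in for OrderedSet, and print stands in for context.log.warn.

from itertools import groupby

def dedupe_sources(sources):
  # Keep first-seen order while dropping duplicates, then report how many
  # times each duplicated source appeared.
  de_duped = list(dict.fromkeys(sources))
  if len(sources) != len(de_duped):
    counts = [(src, len(list(grp))) for src, grp in groupby(sorted(sources))]
    dupes = sorted('{} {}'.format(cnt, src) for src, cnt in counts if cnt > 1)
    print('De-duped the following sources:\n\t{}'.format('\n\t'.join(dupes)))
  return de_duped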
Example #20
    def execute(self):
        if self.options.pex and self.options.ipython:
            self.error('Cannot specify both --pex and --ipython!')

        if self.options.entry_point and self.options.ipython:
            self.error('Cannot specify both --entry_point and --ipython!')

        if self.options.verbose:
            print('Build operating on targets: %s' %
                  ' '.join(str(target) for target in self.targets))

        builder = PEXBuilder(
            tempfile.mkdtemp(),
            interpreter=self.interpreter,
            pex_info=self.binary.pexinfo if self.binary else None)

        if self.options.entry_point:
            builder.set_entry_point(self.options.entry_point)

        if self.options.ipython:
            if not self.config.has_section('python-ipython'):
                self.error(
                    'No python-ipython sections defined in your pants.ini!')

            builder.info.entry_point = self.config.get('python-ipython',
                                                       'entry_point')
            if builder.info.entry_point is None:
                self.error(
                    'Must specify entry_point for IPython in the python-ipython section '
                    'of your pants.ini!')

            requirements = self.config.getlist('python-ipython',
                                               'requirements',
                                               default=[])

            for requirement in requirements:
                self.extra_requirements.append(PythonRequirement(requirement))

        executor = PythonChroot(
            targets=self.targets,
            extra_requirements=self.extra_requirements,
            builder=builder,
            platforms=self.binary.platforms if self.binary else None,
            interpreter=self.interpreter,
            conn_timeout=self.options.conn_timeout)

        executor.dump()

        if self.options.pex:
            pex_name = self.binary.name if self.binary else Target.maybe_readable_identify(
                self.targets)
            pex_path = os.path.join(self.root_dir, 'dist', '%s.pex' % pex_name)
            builder.build(pex_path)
            print('Wrote %s' % pex_path)
            return 0
        else:
            builder.freeze()
            pex = PEX(builder.path(), interpreter=self.interpreter)
            po = pex.run(args=list(self.args), blocking=False)
            try:
                return po.wait()
            except KeyboardInterrupt:
                po.send_signal(signal.SIGINT)
                raise
Example #21
  def compile_chunk(self,
                    invalidation_check,
                    all_targets,
                    relevant_targets,
                    invalid_targets,
                    extra_compile_time_classpath_elements,
                    compile_vts,
                    register_vts,
                    update_artifact_cache_vts_work):
    """Executes compilations for the invalid targets contained in a single chunk.

    Has the side effects of populating:
    # valid/invalid analysis files
    # classes_by_source product
    # classes_by_target product
    # resources_by_target product
    """
    assert invalid_targets, "compile_chunk should only be invoked if there are invalid targets."

    extra_classpath_tuples = self._compute_extra_classpath(extra_compile_time_classpath_elements)

    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    # NB: The global strategy uses the aggregated classpath (for all targets) to compile each
    # chunk, which avoids needing to introduce compile-time dependencies between annotation
    # processors and the classes they annotate.
    compile_classpath = ClasspathUtil.compute_classpath(all_targets, self.context.products.get_data(
      'compile_classpath'), extra_classpath_tuples, self._confs)

    # Find the invalid sources for this chunk.
    invalid_sources_by_target = {t: self._sources_for_target(t) for t in invalid_targets}

    tmpdir = os.path.join(self.analysis_tmpdir, str(uuid.uuid4()))
    os.mkdir(tmpdir)

    # Figure out the sources and analysis belonging to each partition.
    partitions = []  # Each element is a triple (vts, sources_by_target, analysis).
    for vts in invalidation_check.invalid_vts_partitioned:
      partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
      os.mkdir(partition_tmpdir)
      sources = list(itertools.chain.from_iterable(
          [invalid_sources_by_target.get(t, []) for t in vts.targets]))
      de_duped_sources = list(OrderedSet(sources))
      if len(sources) != len(de_duped_sources):
        counts = [(src, len(list(srcs))) for src, srcs in itertools.groupby(sorted(sources))]
        self.context.log.warn(
            'De-duped the following sources:\n\t{}'
            .format('\n\t'.join(sorted('{} {}'.format(cnt, src) for src, cnt in counts if cnt > 1))))
      analysis_file = os.path.join(partition_tmpdir, 'analysis')
      partitions.append((vts, de_duped_sources, analysis_file))

    # Split per-partition files out of the global invalid analysis.
    if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
      with self.context.new_workunit(name='partition-analysis'):
        splits = [(x[1], x[2]) for x in partitions]
        # We have to pass the analysis for any deleted files through zinc, to give it
        # a chance to delete the relevant class files.
        if splits:
          splits[0] = (splits[0][0] + self._deleted_sources, splits[0][1])
        self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

    # Now compile partitions one by one.
    for partition_index, partition in enumerate(partitions):
      (vts, sources, analysis_file) = partition

      progress_message = 'partition {} of {}'.format(partition_index + 1, len(partitions))
      # We have to treat the global output dir as an upstream element, so compilers can
      # find valid analysis for previous partitions. We use the global valid analysis
      # for the upstream.
      upstream_analysis = ({self._classes_dir: self._analysis_file}
                           if os.path.exists(self._analysis_file) else {})
      compile_vts(vts,
                  sources,
                  analysis_file,
                  upstream_analysis,
                  compile_classpath,
                  self._classes_dir,
                  None,
                  progress_message)

      # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
      if os.path.exists(analysis_file):  # The compilation created an analysis.
        # Merge the newly-valid analysis with our global valid analysis.
        new_valid_analysis = analysis_file + '.valid.new'
        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='update-upstream-analysis'):
            self._analysis_tools.merge_from_paths([self._analysis_file, analysis_file],
                                                  new_valid_analysis)
        else:  # We need to keep analysis_file around. Background tasks may need it.
          shutil.copy(analysis_file, new_valid_analysis)

        # Move the merged valid analysis to its proper location.
        # We do this before checking for missing dependencies, so that we can still
        # enjoy an incremental compile after fixing missing deps.
        self.move(new_valid_analysis, self._analysis_file)

        # Update the products with the latest classes. Must happen before the
        # missing dependencies check.
        register_vts([self.compile_context(t) for t in vts.targets])
        if self._dep_analyzer:
          # Check for missing dependencies.
          actual_deps = self._analysis_parser.parse_deps_from_path(analysis_file,
              lambda: self._compute_classpath_elements_by_class(compile_classpath), self._classes_dir)
          with self.context.new_workunit(name='find-missing-dependencies'):
            self._dep_analyzer.check(sources, actual_deps)

        # Kick off the background artifact cache write.
        if update_artifact_cache_vts_work:
          self._write_to_artifact_cache(analysis_file,
                                        vts,
                                        update_artifact_cache_vts_work)

      if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
        with self.context.new_workunit(name='trim-downstream-analysis'):
          # Trim out the newly-valid sources from our global invalid analysis.
          new_invalid_analysis = analysis_file + '.invalid.new'
          discarded_invalid_analysis = analysis_file + '.invalid.discard'
          self._analysis_tools.split_to_paths(self._invalid_analysis_file,
            [(sources, discarded_invalid_analysis)], new_invalid_analysis)
          self.move(new_invalid_analysis, self._invalid_analysis_file)

      # Record the built target -> sources mapping for future use.
      for target, sources in self._sources_for_targets(vts.targets).items():
        self._record_previous_sources_by_target(target, sources)

      # Now that all the analysis accounting is complete, and we have no missing deps,
      # we can safely mark the targets as valid.
      vts.update()
Example #22
    def execute_chunk(self, relevant_targets):
        # TODO(benjy): Add a pre-execute goal for injecting deps into targets, so e.g.,
        # we can inject a dep on the scala runtime library and still have it ivy-resolve.

        if not relevant_targets:
            return

        # Get the classpath generated by upstream JVM tasks and our own prepare_execute().
        compile_classpath = self.context.products.get_data('compile_classpath')

        # Add any extra compile-time-only classpath elements.
        # TODO(benjy): Model compile-time vs. runtime classpaths more explicitly.
        def extra_compile_classpath_iter():
            for conf in self._confs:
                for jar in self.extra_compile_time_classpath_elements():
                    yield (conf, jar)

        compile_classpath = OrderedSet(
            list(extra_compile_classpath_iter()) + list(compile_classpath))

        # Target -> sources (relative to buildroot), for just this chunk's targets.
        sources_by_target = self._sources_for_targets(relevant_targets)

        # If needed, find targets that we've changed locally (as opposed to
        # changes synced in from the SCM).
        # TODO(benjy): Should locally_changed_targets be available in all Tasks?
        locally_changed_targets = None
        if self._changed_targets_heuristic_limit:
            locally_changed_targets = self._find_locally_changed_targets(
                sources_by_target)
            if (locally_changed_targets and len(locally_changed_targets) >
                    self._changed_targets_heuristic_limit):
                locally_changed_targets = None

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(
                relevant_targets,
                invalidate_dependents=True,
                partition_size_hint=self._partition_size_hint,
                locally_changed_targets=locally_changed_targets,
                fingerprint_strategy=self._jvm_fingerprint_strategy(),
                topological_order=True) as invalidation_check:
            if invalidation_check.invalid_vts:
                # Find the invalid sources for this chunk.
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                invalid_sources_by_target = self._sources_for_targets(
                    invalid_targets)

                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)

                # Register products for all the valid targets.
                # We register as we go, so dependency checking code can use this data.
                valid_targets = list(
                    set(relevant_targets) - set(invalid_targets))
                self._register_products(valid_targets, self._analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = [
                ]  # Each element is a triple (vts, sources_by_target, analysis).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    de_duped_sources = list(OrderedSet(sources))
                    if len(sources) != len(de_duped_sources):
                        counts = [
                            (src, len(list(srcs)))
                            for src, srcs in itertools.groupby(sorted(sources))
                        ]
                        self.context.log.warn(
                            'De-duped the following sources:\n\t%s' %
                            '\n\t'.join(
                                sorted('%d %s' % (cnt, src)
                                       for src, cnt in counts if cnt > 1)))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, de_duped_sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if self._analysis_parser.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        # We have to pass the analysis for any deleted files through zinc, to give it
                        # a chance to delete the relevant class files.
                        if splits:
                            splits[0] = (splits[0][0] + self._deleted_sources,
                                         splits[0][1])
                        self._analysis_tools.split_to_paths(
                            self._invalid_analysis_file, splits)

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    cp_entries = [
                        entry for conf, entry in compile_classpath
                        if conf in self._confs
                    ]
                    self._process_target_partition(partition, cp_entries)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
                    if os.path.exists(
                            analysis_file
                    ):  # The compilation created an analysis.
                        # Merge the newly-valid analysis with our global valid analysis.
                        new_valid_analysis = analysis_file + '.valid.new'
                        if self._analysis_parser.is_nonempty_analysis(
                                self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                self._analysis_tools.merge_from_paths(
                                    [self._analysis_file, analysis_file],
                                    new_valid_analysis)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            shutil.copy(analysis_file, new_valid_analysis)

                        # Move the merged valid analysis to its proper location.
                        # We do this before checking for missing dependencies, so that we can still
                        # enjoy an incremental compile after fixing missing deps.
                        self.move(new_valid_analysis, self._analysis_file)

                        # Update the products with the latest classes. Must happen before the
                        # missing dependencies check.
                        self._register_products(vts.targets, analysis_file)
                        if self._dep_analyzer:
                            # Check for missing dependencies.
                            actual_deps = self._analysis_parser.parse_deps_from_path(
                                analysis_file, lambda: self.
                                _compute_classpath_elements_by_class(cp_entries
                                                                     ))
                            with self.context.new_workunit(
                                    name='find-missing-dependencies'):
                                self._dep_analyzer.check(
                                    sources, actual_deps, self.ivy_cache_dir)

                        # Kick off the background artifact cache write.
                        if self.artifact_cache_writes_enabled():
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                    if self._analysis_parser.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            self._analysis_tools.split_to_paths(
                                self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis)],
                                new_invalid_analysis)
                            self.move(new_invalid_analysis,
                                      self._invalid_analysis_file)

                    # Record the built target -> sources mapping for future use.
                    for target in vts.targets:
                        self._record_sources_by_target(
                            target, sources_by_target.get(target, []))

                    # Now that all the analysis accounting is complete, and we have no missing deps,
                    # we can safely mark the targets as valid.
                    vts.update()
            else:
                # Nothing to build. Register products for all the targets in one go.
                self._register_products(relevant_targets, self._analysis_file)

        self.post_process(relevant_targets)
Example #23
    def _maybe_emit_coverage_data(self, targets, chroot, pex, workunit):
        coverage = self.get_options().coverage
        if coverage is None:
            yield []
            return

        def read_coverage_list(prefix):
            return coverage[len(prefix) :].split(",")

        coverage_modules = None
        if coverage.startswith("modules:"):
            # NB: pytest-cov maps these modules to the `[run] sources` config.  So for
            # `modules:pants.base,pants.util` the config emitted has:
            # [run]
            # source =
            #   pants.base
            #   pants.util
            #
            # Now even though these are not paths, coverage sees the dots and switches to a module
            # prefix-matching mode.  Unfortunately, neither wildcards nor top-level module prefixes
            # like `pants.` serve to engage this module prefix-matching as one might hope.  It
            # appears that `pants.` is treated as a path and `pants.*` is treated as a literal
            # module prefix name.
            coverage_modules = read_coverage_list("modules:")
        elif coverage.startswith("paths:"):
            coverage_modules = []
            for path in read_coverage_list("paths:"):
                if not os.path.exists(path) and not os.path.isabs(path):
                    # Look for the source in the PEX chroot since it's not available from CWD.
                    path = os.path.join(chroot, path)
                coverage_modules.append(path)

        with self._cov_setup(targets, chroot, coverage_modules=coverage_modules) as (args, coverage_rc):
            try:
                yield args
            finally:
                with environment_as(PEX_MODULE="coverage.cmdline:main"):

                    def pex_run(args):
                        return self._pex_run(pex, workunit, args=args)

                    # Normalize .coverage.raw paths using combine and `paths` config in the rc file.
                    # This swaps the /tmp pex chroot source paths for the local original source paths
                    # the pex was generated from and which the user understands.
                    shutil.move(".coverage", ".coverage.raw")
                    pex_run(args=["combine", "--rcfile", coverage_rc])
                    pex_run(args=["report", "-i", "--rcfile", coverage_rc])

                    # TODO(wickman): If coverage is enabled and we are not using fast mode, write an
                    # intermediate .html that points to each of the coverage reports generated and
                    # webbrowser.open to that page.
                    # TODO(John Sirois): Possibly apply the same logic to the console report.  In fact,
                    # consider combining coverage files from all runs in this Task's execute and then
                    # producing just 1 console and 1 html report whether or not the tests are run in fast
                    # mode.
                    relpath = Target.maybe_readable_identify(targets)
                    pants_distdir = self.context.options.for_global_scope().pants_distdir
                    target_dir = os.path.join(pants_distdir, "coverage", relpath)
                    safe_mkdir(target_dir)
                    pex_run(args=["html", "-i", "--rcfile", coverage_rc, "-d", target_dir])
                    coverage_xml = os.path.join(target_dir, "coverage.xml")
                    pex_run(args=["xml", "-i", "--rcfile", coverage_rc, "-o", coverage_xml])
Example #25
0
  def compile_sub_chunk(self,
                        invalidation_check,
                        all_targets,
                        invalid_targets,
                        extra_compile_time_classpath_elements,
                        compile_vts,
                        register_vts,
                        update_artifact_cache_vts_work,
                        settings):
    """Executes compilations for the invalid targets contained in a single chunk.

    Has the side effects of populating:
    # valid/invalid analysis files
    # classes_by_source product
    # classes_by_target product
    # resources_by_target product
    """
    extra_classpath_tuples = self._compute_extra_classpath(extra_compile_time_classpath_elements)

    # Get the classpath generated by upstream JVM tasks and our own prepare_compile().
    # NB: The global strategy uses the aggregated classpath (for all targets) to compile each
    # chunk, which avoids needing to introduce compile-time dependencies between annotation
    # processors and the classes they annotate.
    compile_classpath = ClasspathUtil.compute_classpath(
        all_targets,
        self.context.products.get_data('compile_classpath'),
        extra_classpath_tuples,
        self._confs)

    # Find the invalid sources for this chunk.
    invalid_sources_by_target = {t: self._sources_for_target(t) for t in invalid_targets}

    tmpdir = os.path.join(self.analysis_tmpdir, str(uuid.uuid4()))
    os.mkdir(tmpdir)

    # Figure out the sources and analysis belonging to each partition.
    partitions = []  # Each element is a triple (vts, sources_by_target, analysis).
    for vts in invalidation_check.invalid_vts_partitioned:
      partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
      os.mkdir(partition_tmpdir)
      sources = list(itertools.chain.from_iterable(
          [invalid_sources_by_target.get(t, []) for t in vts.targets]))
      de_duped_sources = list(OrderedSet(sources))
      if len(sources) != len(de_duped_sources):
        counts = [(src, len(list(srcs))) for src, srcs in itertools.groupby(sorted(sources))]
        self.context.log.warn(
            'De-duped the following sources:\n\t{}'
            .format('\n\t'.join(sorted('{} {}'.format(cnt, src) for src, cnt in counts if cnt > 1))))
      analysis_file = os.path.join(partition_tmpdir, 'analysis')
      partitions.append((vts, de_duped_sources, analysis_file))

    # Split per-partition files out of the global invalid analysis.
    if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
      with self.context.new_workunit(name='partition-analysis'):
        splits = [(x[1], x[2]) for x in partitions]
        # We have to pass the analysis for any deleted files through zinc, to give it
        # a chance to delete the relevant class files.
        if splits:
          splits[0] = (splits[0][0] + self._deleted_sources, splits[0][1])
        self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

    # Now compile partitions one by one.
    for partition_index, partition in enumerate(partitions):
      (vts, sources, analysis_file) = partition

      progress_message = 'partition {} of {}'.format(partition_index + 1, len(partitions))
      # We have to treat the global output dir as an upstream element, so compilers can
      # find valid analysis for previous partitions. We use the global valid analysis
      # for the upstream.
      upstream_analysis = ({self._classes_dir: self._analysis_file}
                           if os.path.exists(self._analysis_file) else {})
      compile_vts(vts,
                  sources,
                  analysis_file,
                  upstream_analysis,
                  compile_classpath,
                  self._classes_dir,
                  None,
                  progress_message,
                  settings)

      # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
      if os.path.exists(analysis_file):  # The compilation created an analysis.
        # Merge the newly-valid analysis with our global valid analysis.
        new_valid_analysis = analysis_file + '.valid.new'
        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='update-upstream-analysis'):
            self._analysis_tools.merge_from_paths([self._analysis_file, analysis_file],
                                                  new_valid_analysis)
        else:  # We need to keep analysis_file around. Background tasks may need it.
          shutil.copy(analysis_file, new_valid_analysis)

        # Move the merged valid analysis to its proper location.
        # We do this before checking for missing dependencies, so that we can still
        # enjoy an incremental compile after fixing missing deps.
        self.move(new_valid_analysis, self._analysis_file)

        # Update the products with the latest classes. Must happen before the
        # missing dependencies check.
        register_vts([self.compile_context(t) for t in vts.targets])
        if self._dep_analyzer:
          # Check for missing dependencies.
          actual_deps = self._analysis_parser.parse_deps_from_path(analysis_file,
              lambda: self._compute_classpath_elements_by_class(compile_classpath), self._classes_dir)
          with self.context.new_workunit(name='find-missing-dependencies'):
            self._dep_analyzer.check(sources, actual_deps)

        # Kick off the background artifact cache write.
        if update_artifact_cache_vts_work:
          self._write_to_artifact_cache(analysis_file,
                                        vts,
                                        update_artifact_cache_vts_work)

      if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
        with self.context.new_workunit(name='trim-downstream-analysis'):
          # Trim out the newly-valid sources from our global invalid analysis.
          new_invalid_analysis = analysis_file + '.invalid.new'
          discarded_invalid_analysis = analysis_file + '.invalid.discard'
          self._analysis_tools.split_to_paths(self._invalid_analysis_file,
            [(sources, discarded_invalid_analysis)], new_invalid_analysis)
          self.move(new_invalid_analysis, self._invalid_analysis_file)

      # Record the built target -> sources mapping for future use.
      for target, sources in self._sources_for_targets(vts.targets).items():
        self._record_previous_sources_by_target(target, sources)

      # Now that all the analysis accounting is complete, and we have no missing deps,
      # we can safely mark the targets as valid.
      vts.update()
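The partitioning loop above de-dupes each partition's sources and warns about what it dropped. The same pattern in isolation, using OrderedDict.fromkeys in place of OrderedSet and a plain print in place of the workunit logger:

import itertools
from collections import OrderedDict


def dedupe_sources(sources):
    de_duped = list(OrderedDict.fromkeys(sources))  # order-preserving stand-in for OrderedSet
    if len(sources) != len(de_duped):
        counts = [(src, len(list(grp))) for src, grp in itertools.groupby(sorted(sources))]
        dupes = sorted('{} {}'.format(cnt, src) for src, cnt in counts if cnt > 1)
        print('De-duped the following sources:\n\t{}'.format('\n\t'.join(dupes)))
    return de_duped


# dedupe_sources(['A.java', 'B.java', 'A.java']) prints the duplicate count for A.java
# and returns ['A.java', 'B.java'].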
Example #26
0
 def identify(self, targets):
   targets = list(targets)
   if len(targets) == 1 and hasattr(targets[0], 'provides') and targets[0].provides:
     return targets[0].provides.org, targets[0].provides.name
   else:
     return 'internal', Target.maybe_readable_identify(targets)
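For illustration, a sketch of how this kind of identify logic behaves with and without a provides clause. The stub classes and the fixed fallback string are hypothetical, not Pants types, and Target.maybe_readable_identify is replaced by a placeholder:

class FakeArtifact(object):
    def __init__(self, org, name):
        self.org, self.name = org, name


class FakeTarget(object):
    def __init__(self, provides=None):
        self.provides = provides


def identify(targets):
    targets = list(targets)
    if len(targets) == 1 and getattr(targets[0], 'provides', None):
        # A single published target identifies by its artifact coordinates.
        return targets[0].provides.org, targets[0].provides.name
    # Everything else falls back to a synthetic 'internal' org and a combined id.
    return 'internal', 'combined-id-for-{}-targets'.format(len(targets))


# identify([FakeTarget(FakeArtifact('com.example', 'lib'))]) -> ('com.example', 'lib')
# identify([FakeTarget(), FakeTarget()]) -> ('internal', 'combined-id-for-2-targets')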
Example #27
0
 def identify(targets):
   targets = list(targets)
   if len(targets) == 1 and targets[0].is_jvm and getattr(targets[0], 'provides', None):
     return targets[0].provides.org, targets[0].provides.name
   else:
     return 'internal', Target.maybe_readable_identify(targets)
Example #28
0
    def execute_chunk(self, relevant_targets):
        # TODO(benjy): Add a pre-execute goal for injecting deps into targets, so e.g.,
        # we can inject a dep on the scala runtime library and still have it ivy-resolve.

        if not relevant_targets:
            return

        # Get the exclusives group for the targets to compile.
        # Group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data("exclusives_groups")
        group_id = egroups.get_group_key_for_target(relevant_targets[0])

        # Get the classpath generated by upstream JVM tasks and our own prepare_execute().
        classpath = egroups.get_classpath_for_group(group_id)

        # Add any extra compile-time-only classpath elements.
        # TODO(benjy): Model compile-time vs. runtime classpaths more explicitly.
        for conf in self._confs:
            for jar in self.extra_compile_time_classpath_elements():
                classpath.insert(0, (conf, jar))

        # Target -> sources (relative to buildroot), for just this chunk's targets.
        sources_by_target = self._sources_for_targets(relevant_targets)

        # If needed, find targets that we've changed locally (as opposed to
        # changes synced in from the SCM).
        # TODO(benjy): Should locally_changed_targets be available in all Tasks?
        locally_changed_targets = None
        if self._locally_changed_targets_heuristic_limit:
            locally_changed_targets = self._find_locally_changed_targets(sources_by_target)
            if (locally_changed_targets
                    and len(locally_changed_targets) > self._locally_changed_targets_heuristic_limit):
                locally_changed_targets = None

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(
            relevant_targets,
            invalidate_dependents=True,
            partition_size_hint=self._partition_size_hint,
            locally_changed_targets=locally_changed_targets,
            fingerprint_strategy=self._jvm_fingerprint_strategy(),
        ) as invalidation_check:
            if invalidation_check.invalid_vts:
                # Find the invalid sources for this chunk.
                invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
                invalid_sources_by_target = self._sources_for_targets(invalid_targets)

                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)

                # Register products for all the valid targets.
                # We register as we go, so dependency checking code can use this data.
                valid_targets = list(set(relevant_targets) - set(invalid_targets))
                self._register_products(valid_targets, self._analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = []  # Each element is a triple (vts, sources_by_target, analysis).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([invalid_sources_by_target.get(t, []) for t in vts.targets])
                    )
                    de_duped_sources = list(OrderedSet(sources))
                    if len(sources) != len(de_duped_sources):
                        counts = [(src, len(list(srcs))) for src, srcs in itertools.groupby(sorted(sources))]
                        self.context.log.warn(
                            "De-duped the following sources:\n\t%s"
                            % "\n\t".join(sorted("%d %s" % (cnt, src) for src, cnt in counts if cnt > 1))
                        )
                    analysis_file = os.path.join(partition_tmpdir, "analysis")
                    partitions.append((vts, de_duped_sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name="partition-analysis"):
                        splits = [(x[1], x[2]) for x in partitions]
                        # We have to pass the analysis for any deleted files through zinc, to give it
                        # a chance to delete the relevant class files.
                        if splits:
                            splits[0] = (splits[0][0] + self._deleted_sources, splits[0][1])
                        self._analysis_tools.split_to_paths(self._invalid_analysis_file, splits)

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    cp_entries = [entry for conf, entry in classpath if conf in self._confs]
                    self._process_target_partition(partition, cp_entries)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.
                    if os.path.exists(analysis_file):  # The compilation created an analysis.
                        # Merge the newly-valid analysis with our global valid analysis.
                        new_valid_analysis = analysis_file + ".valid.new"
                        if self._analysis_parser.is_nonempty_analysis(self._analysis_file):
                            with self.context.new_workunit(name="update-upstream-analysis"):
                                self._analysis_tools.merge_from_paths(
                                    [self._analysis_file, analysis_file], new_valid_analysis
                                )
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            shutil.copy(analysis_file, new_valid_analysis)

                        # Move the merged valid analysis to its proper location.
                        # We do this before checking for missing dependencies, so that we can still
                        # enjoy an incremental compile after fixing missing deps.
                        self.move(new_valid_analysis, self._analysis_file)

                        # Update the products with the latest classes. Must happen before the
                        # missing dependencies check.
                        self._register_products(vts.targets, analysis_file)
                        if self._dep_analyzer:
                            # Check for missing dependencies.
                            actual_deps = self._analysis_parser.parse_deps_from_path(
                                analysis_file, lambda: self._compute_classpath_elements_by_class(cp_entries)
                            )
                            with self.context.new_workunit(name="find-missing-dependencies"):
                                self._dep_analyzer.check(sources, actual_deps)

                        # Kick off the background artifact cache write.
                        if self.artifact_cache_writes_enabled():
                            self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

                    if self._analysis_parser.is_nonempty_analysis(self._invalid_analysis_file):
                        with self.context.new_workunit(name="trim-downstream-analysis"):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + ".invalid.new"
                            discarded_invalid_analysis = analysis_file + ".invalid.discard"
                            self._analysis_tools.split_to_paths(
                                self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis)],
                                new_invalid_analysis,
                            )
                            self.move(new_invalid_analysis, self._invalid_analysis_file)

                    # Record the built target -> sources mapping for future use.
                    for target in vts.targets:
                        self._record_sources_by_target(target, sources_by_target.get(target, []))

                    # Now that all the analysis accounting is complete, and we have no missing deps,
                    # we can safely mark the targets as valid.
                    vts.update()
            else:
                # Nothing to build. Register products for all the targets in one go.
                self._register_products(relevant_targets, self._analysis_file)

        self.post_process(relevant_targets)
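The classpath handling above stores the classpath as (conf, entry) tuples: compile-time-only jars are prepended for every configuration, and later only the entries matching the task's confs are handed to the compiler. A small self-contained sketch of that tuple juggling (the conf and jar names are made up):

def prepend_compile_time_entries(classpath, confs, extra_jars):
    # classpath is a list of (conf, entry) tuples, mirroring the structure used above.
    for conf in confs:
        for jar in extra_jars:
            classpath.insert(0, (conf, jar))
    return classpath


def entries_for_confs(classpath, confs):
    # Mirrors `cp_entries = [entry for conf, entry in classpath if conf in self._confs]`.
    return [entry for conf, entry in classpath if conf in confs]


classpath = [('default', '/cache/guava.jar')]
prepend_compile_time_entries(classpath, ['default'], ['/tools/javac-plugin.jar'])
print(entries_for_confs(classpath, ['default']))
# -> ['/tools/javac-plugin.jar', '/cache/guava.jar']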
Example #29
0
 def identify(targets):
   targets = list(targets)
   if len(targets) == 1 and targets[0].is_jvm and getattr(targets[0], 'provides', None):
     return targets[0].provides.org, targets[0].provides.name
   else:
     return IvyUtils.INTERNAL_ORG_NAME, Target.maybe_readable_identify(targets)