Example No. 1
  def _write_to_artifact_cache(self, vts, sources_by_target):
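    """Writes the compilation artifacts for the targets in vts to the artifact cache.

    Splits the global depfile into per-target depfiles, then pairs each versioned target with its
    depfile and the class files compiled from its sources.
    """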
    self._ensure_depfile_tmpdir()
    vt_by_target = dict([(vt.target, vt) for vt in vts.versioned_targets])

    # This work can happen in the background, if there's a measurable benefit to that.

    # Split the depfile into per-target files.
    splits = [(sources, JavaCompile.create_depfile_path(self._depfile_tmpdir, [target]))
              for target, sources in sources_by_target.items()]
    deps = Dependencies(self._classes_dir)
    if os.path.exists(self._depfile):
      deps.load(self._depfile)
    deps.split(splits)

    # Gather up the artifacts.
    vts_artifactfiles_pairs = []
    for target, sources in sources_by_target.items():
      artifacts = [JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])]
      for source in sources:
        for cls in deps.classes_by_source.get(source, []):
          artifacts.append(os.path.join(self._classes_dir, cls))
      vt = vt_by_target.get(target)
      if vt is not None:
        vts_artifactfiles_pairs.append((vt, artifacts))

    # Write to the artifact cache.
    self.update_artifact_cache(vts_artifactfiles_pairs)
Example No. 2
  def execute(self, targets):
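    """Compiles all java targets among the given targets, in invalidated partitions.

    Per-target deps are merged into self._deps, and class and annotation-processor products are
    registered when required by downstream tasks.
    """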
    java_targets = filter(_is_java, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      egroups = self.context.products.get_data('exclusives_groups')
      group_id = egroups.get_group_key_for_target(java_targets[0])
      for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])


      with self.invalidated(java_targets, invalidate_dependents=True,
                            partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          exclusives_classpath = egroups.get_classpath_for_group(group_id)
          self.execute_single_compilation(vt, exclusives_classpath)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Example No. 3
  def execute_single_compilation(self, vt, cp):
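    """Compiles one partition of invalid targets (a VersionedTargetSet).

    Merges previously built per-target depfiles first, compiles the partition's sources, then
    splits the resulting depfile and, if enabled, writes per-target and whole-set artifacts to
    the artifact cache.
    """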
    depfile = self.create_depfile_path(vt.targets)

    self.merge_depfile(vt)  # Get what we can from previous builds.
    sources_by_target, fingerprint = self.calculate_sources(vt.targets)
    if sources_by_target:
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                              '\n  '.join(str(t) for t in sources_by_target.keys()))
      else:
        classpath = [jar for conf, jar in cp if conf in self._confs]
        result = self.compile(classpath, sources, fingerprint, depfile)
        if result != 0:
          default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
          raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
        self.split_depfile(vt)

      all_artifact_files = [depfile]

      if self._artifact_cache and self.context.options.write_to_artifact_cache:
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        vts_artifactfile_pairs = []
        for single_vt in vt.versioned_targets:
          per_target_depfile = self.create_depfile_path([single_vt.target])
          per_target_artifact_files = [per_target_depfile]
          for _, classes_by_source in deps.findclasses([single_vt.target]).items():
            for _, classes in classes_by_source.items():
              classfile_paths = [os.path.join(self._classes_dir, cls) for cls in classes]
              per_target_artifact_files.extend(classfile_paths)
              all_artifact_files.extend(classfile_paths)
            vts_artifactfile_pairs.append((single_vt, per_target_artifact_files))
        vts_artifactfile_pairs.append((vt, all_artifact_files))
        self.update_artifact_cache(vts_artifactfile_pairs)
Example No. 4
  def _post_process(self, target, cp):
    """Must be called on all targets, whether they needed compilation or not."""
    classes_dir, depfile, _ = self._output_paths([target])

    # Update the classpath, for the benefit of tasks downstream from us.
    if os.path.exists(classes_dir):
      for conf in self._confs:
        cp.insert(0, (conf, classes_dir))

    # Make note of the classes generated by this target.
    if os.path.exists(depfile) and self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(classes_dir)
      deps.load(depfile)
      genmap = self.context.products.get('classes')
      for classes_by_source in deps.findclasses([target]).values():
        for source, classes in classes_by_source.items():
          genmap.add(source, classes_dir, classes)
          genmap.add(target, classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way
        # Create and Map scala plugin info files to the owning targets.
        if is_scalac_plugin(target) and target.classname:
          basedir, plugin_info_file = self._zinc_utils.write_plugin_info(self._resources_dir, target)
          genmap.add(target, basedir, [plugin_info_file])
Example No. 5
  def _merge_artifact(self, versioned_target_set):
    """Merges artifacts representing the individual targets in a VersionedTargetSet into one artifact for that set.
    Creates an output classes dir, depfile and analysis file for the VersionedTargetSet.
    Note that the merged artifact may be incomplete (e.g., if we have no previous artifacts for some of the
    individual targets). That's OK: We run this right before we invoke zinc, which will fill in what's missing.
    This method is not required for correctness, only for efficiency: it can prevent zinc from doing superfluous work.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
      return  # Nothing to do.

    with temporary_dir() as tmpdir:
      dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths(versioned_target_set.targets)
      safe_rmtree(dst_classes_dir)
      safe_mkdir(dst_classes_dir)
      src_analysis_files = []

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_classes_dir)

      for target in versioned_target_set.targets:
        src_classes_dir, src_depfile, src_analysis_file = self._output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_classes_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_classes_dir, cls)
              dst = os.path.join(dst_classes_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Rebase a copy of the per-target analysis files to reflect the merged classes dir.
          if os.path.exists(src_analysis_file):
            src_analysis_file_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_file, self._analysis_files_base))
            shutil.copyfile(src_analysis_file, src_analysis_file_tmp)
            src_analysis_files.append(src_analysis_file_tmp)
            if self._zinc_utils.run_zinc_rebase(src_analysis_file_tmp, [(src_classes_dir, dst_classes_dir)]):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '\
                                    'Target may require a full rebuild.' %\
                                    src_analysis_file_tmp)

      dst_deps.save(dst_depfile)

      if self._zinc_utils.run_zinc_merge(src_analysis_files, dst_analysis_file):
        self.context.log.warn('zinc failed to merge analysis files %s to %s. '\
                              'Target may require a full rebuild.' %\
                             (':'.join(src_analysis_files), dst_analysis_file))
Example No. 6
 def post_process(self, versioned_targets):
   depfile = self.create_depfile_path(versioned_targets.targets)
   if not self.dry_run and os.path.exists(depfile):
     # Read in the deps created either just now or by a previous compiler run on these targets.
     deps = Dependencies(self._classes_dir)
     deps.load(depfile)
     self.split_depfile(deps, versioned_targets)
     self._deps.merge(deps)
Example No. 7
 def post_process(self, versioned_targets):
     depfile = self.create_depfile_path(versioned_targets.targets)
     if not self.dry_run and os.path.exists(depfile):
         # Read in the deps created either just now or by a previous compiler run on these targets.
         deps = Dependencies(self._classes_dir)
         deps.load(depfile)
         self.split_depfile(deps, versioned_targets)
         self._deps.merge(deps)
Example No. 8
  def _compute_classes_by_source(self, depfile=None):
    """Compute src->classes."""
    if depfile is None:
      depfile = self._depfile

    if not os.path.exists(depfile):
      return {}
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    return deps.classes_by_source
Example No. 9
  def _compile(self, versioned_target_set, classpath, upstream_analysis_files):
    """Actually compile some targets.

    May be invoked concurrently on independent target sets.

    Postcondition: The individual targets in versioned_target_set are up-to-date, as if each
                   were compiled individually.
    """
    # Note: We actually compile all the targets in the set in a single zinc call, because
    # compiler invocation overhead is high, but this fact is not exposed outside this method.
    classes_dir, depfile, analysis_file = self._output_paths(versioned_target_set.targets)
    safe_mkdir(classes_dir)

    # Get anything we have from previous builds.
    self._merge_artifact(versioned_target_set)

    # Compute the sources we need to compile.
    sources_by_target = ScalaCompile._calculate_sources(versioned_target_set.targets)

    if sources_by_target:
      sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
      if not sources:
        self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                              '\n  '.join(str(t) for t in sources_by_target.keys()))
      else:
        # Invoke the compiler.
        self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
        if self._zinc_utils.compile(classpath, sources, classes_dir, analysis_file,
                                    upstream_analysis_files, depfile):
          raise TaskError('Compile failed.')

        # Read in the deps we just created.
        self.context.log.debug('Reading dependencies from ' + depfile)
        deps = Dependencies(classes_dir)
        deps.load(depfile)

        # Split the artifact into per-target artifacts.
        self._split_artifact(deps, versioned_target_set)

        # Write to artifact cache, if needed.
        for vt in versioned_target_set.versioned_targets:
          vt_classes_dir, vt_depfile, vt_analysis_file = self._output_paths(vt.targets)
          vt_portable_analysis_file = _portable(vt_analysis_file)
          if self._artifact_cache and self.context.options.write_to_artifact_cache:
            # Relativize the analysis.
            # TODO: Relativize before splitting? This will require changes to Zinc, which currently
            # eliminates paths it doesn't recognize (including our placeholders) when splitting.
            if os.path.exists(vt_analysis_file) and \
                self._zinc_utils.relativize_analysis_file(vt_analysis_file, vt_portable_analysis_file):
              raise TaskError('Zinc failed to relativize analysis file: %s' % vt_analysis_file)
            # Write the per-target artifacts to the cache.
            artifacts = [vt_classes_dir, vt_depfile, vt_portable_analysis_file]
            self.update_artifact_cache(vt, artifacts)
          else:
            safe_rmtree(vt_portable_analysis_file)  # Don't leave cruft lying around.
Example No. 10
    def execute(self, targets):
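        """Compiles all java targets, using the classpath of their exclusives group.

        Invalid targets are compiled in partitions and cached if artifact-cache writes are
        enabled; class products and the apt processor info file are then regenerated.
        """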
        java_targets = [t for t in targets if t.has_sources(".java")]

        if not java_targets:
            return

        # Get the exclusives group for the targets to compile.
        # The group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data("exclusives_groups")
        group_id = egroups.get_group_key_for_target(java_targets[0])

        # Add classes and resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
            egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        with self.invalidated(
            java_targets, invalidate_dependents=True, partition_size_hint=self._partition_size_hint
        ) as invalidation_check:
            if not self.dry_run:
                for vts in invalidation_check.invalid_vts_partitioned:
                    # Compile, using partitions for efficiency.
                    sources_by_target = self._process_target_partition(vts, cp)

                    # TODO: Check for missing dependencies.  See ScalaCompile for an example.
                    # Will require figuring out what the actual deps of a class file are.

                    vts.update()
                    if self.artifact_cache_writes_enabled():
                        self._write_to_artifact_cache(vts, sources_by_target)

                # Provide the target->class and source->class mappings to downstream tasks if needed.
                if self.context.products.isrequired("classes"):
                    if os.path.exists(self._depfile):
                        sources_by_target = self._compute_sources_by_target(java_targets)
                        deps = Dependencies(self._classes_dir)
                        deps.load(self._depfile)
                        self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

                # Produce a monolithic apt processor service info file for further compilation rounds
                # and the unit test classpath.
                all_processors = set()
                for target in java_targets:
                    if target.is_apt and target.processors:
                        all_processors.update(target.processors)
                processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                if os.path.exists(processor_info_file):
                    with safe_open(processor_info_file, "r") as f:
                        for processor in f:
                            all_processors.add(processor.strip())
                self.write_processor_info(processor_info_file, all_processors)
Example No. 11
  def execute(self, targets):
    java_targets = filter(lambda t: has_sources(t, '.java'), targets)
    if not java_targets:
      return

    # Get the exclusives group for the targets to compile.
    # The group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(java_targets[0])

    # Add classes and resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    with self.invalidated(java_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if not self.dry_run:
        for vts in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          sources_by_target = self._process_target_partition(vts, cp)

          # TODO: Check for missing dependencies.  See ScalaCompile for an example.
          # Will require figuring out what the actual deps of a class file are.

          vts.update()
          if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
            self._write_to_artifact_cache(vts, sources_by_target)

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
          if os.path.exists(self._depfile):
            sources_by_target = self._compute_sources_by_target(java_targets)
            deps = Dependencies(self._classes_dir)
            deps.load(self._depfile)
            self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

        # Produce a monolithic apt processor service info file for further compilation rounds
        # and the unit test classpath.
        all_processors = set()
        for target in java_targets:
          if is_apt(target) and target.processors:
            all_processors.update(target.processors)
        processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
        if os.path.exists(processor_info_file):
          with safe_open(processor_info_file, 'r') as f:
            for processor in f:
              all_processors.add(processor.strip())
        self.write_processor_info(processor_info_file, all_processors)
Example No. 12
  def merge_artifact(self, versioned_target_set):
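    """Merges per-target output dirs, depfiles and analysis caches into artifacts for the whole set.

    Class files are hard-linked into the merged output dir, and zinc rebases and merges the
    per-target analysis caches so that they refer to that dir.
    """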
    if len(versioned_target_set.targets) <= 1:
      return

    with temporary_dir() as tmpdir:
      dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
      safe_rmtree(dst_output_dir)
      safe_mkdir(dst_output_dir)
      src_analysis_caches = []

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_output_dir)

      for target in versioned_target_set.targets:
        src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_output_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_output_dir, cls)
              dst = os.path.join(dst_output_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Rebase a copy of the per-target analysis files prior to merging.
          if os.path.exists(src_analysis_cache):
            src_analysis_cache_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
            shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
            src_analysis_caches.append(src_analysis_cache_tmp)
            if self._zinc_utils.run_zinc_rebase(cache=src_analysis_cache_tmp, rebasings=[(src_output_dir, dst_output_dir)]):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                                    'Target may require a full rebuild.' % \
                                    src_analysis_cache_tmp)

      dst_deps.save(dst_depfile)

      if self._zinc_utils.run_zinc_merge(src_caches=src_analysis_caches, dst_cache=dst_analysis_cache):
        self.context.log.warn('zinc failed to merge analysis files %s to %s. ' \
                              'Target may require a full rebuild.' % \
                              (':'.join(src_analysis_caches), dst_analysis_cache))
Example No. 13
  def merge_depfile(self, versioned_target_set):
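    """Merges the per-target depfiles of the set's targets into a single depfile for the set."""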
    if len(versioned_target_set.targets) <= 1:
      return

    dst_depfile = self.create_depfile_path(versioned_target_set.targets)
    dst_deps = Dependencies(self._classes_dir)

    for target in versioned_target_set.targets:
      src_depfile = self.create_depfile_path([target])
      if os.path.exists(src_depfile):
        src_deps = Dependencies(self._classes_dir)
        src_deps.load(src_depfile)
        dst_deps.merge(src_deps)

    dst_deps.save(dst_depfile)
Example No. 14
 def post_process_cached_vts(cached_vts):
   # Merge the cached analyses into the existing global one.
   if cached_vts:
     with self.context.new_workunit(name='merge-dependencies'):
       global_deps = Dependencies(self._classes_dir)
       if os.path.exists(self._depfile):
         global_deps.load(self._depfile)
       for vt in cached_vts:
         for target in vt.targets:
           depfile = JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])
           if os.path.exists(depfile):
             deps = Dependencies(self._classes_dir)
             deps.load(depfile)
             global_deps.merge(deps)
       global_deps.save(self._depfile)
Example No. 15
  def execute_single_compilation(self, java_targets, cp):
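    """Compiles the given java targets against the classpath cp.

    The deps created by the compile (or by a previous run) are merged into self._deps.
    """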
    self.context.log.info('Compiling targets %s' % str(java_targets))

    # Compute the id of this compilation. We try to make it human-readable.
    if len(java_targets) == 1:
      compilation_id = java_targets[0].id
    else:
      compilation_id = self.context.identify(java_targets)

    if self._flatten:
      # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
      # allows us to build different targets in different invocations without losing dependency information
      # from any of them.
      depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
      # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
      # compilation will read in the entire depfile, add its stuff to it and write it out again).
      depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    with self.changed(java_targets, invalidate_dependants=True) as changed:
      sources_by_target, processors, fingerprint = self.calculate_sources(changed)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, fingerprint, depfile)
          if result != 0:
            default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
            raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

        if processors:
          # Produce a monolithic apt processor service info file for further compilation rounds
          # and the unit test classpath.
          processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
          if os.path.exists(processor_info_file):
            with safe_open(processor_info_file, 'r') as f:
              for processor in f:
                processors.add(processor.strip())
          self.write_processor_info(processor_info_file, processors)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    self._deps.merge(deps)
Example No. 16
  def split_depfile(self, vt):
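    """Splits the depfile of a multi-target VersionedTargetSet into one depfile per target."""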
    depfile = self.create_depfile_path(vt.targets)
    if len(vt.targets) <= 1 or not os.path.exists(depfile) or self.dry_run:
      return

    deps = Dependencies(self._classes_dir)
    deps.load(depfile)

    classes_by_source_by_target = deps.findclasses(vt.targets)
    for target in vt.targets:
      classes_by_source = classes_by_source_by_target.get(target, {})
      dst_depfile = self.create_depfile_path([target])
      dst_deps = Dependencies(self._classes_dir)
      for source, classes in classes_by_source.items():
        src = os.path.join(target.target_base, source)
        dst_deps.add(src, classes)
      dst_deps.save(dst_depfile)
Example No. 17
  def execute_single_compilation(self, versioned_targets, cp):
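    """Compiles one VersionedTargetSet if invalid, then merges its depfile into self._deps."""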
    compilation_id = Target.maybe_readable_identify(versioned_targets.targets)

    # TODO: Use the artifact cache. In flat mode we may want to look for the artifact for all targets,
    # not just the invalid ones, as it might be more likely to be present. Or we could look for both.

    if self._flatten:
      # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
      # allows us to build different targets in different invocations without losing dependency information
      # from any of them.
      depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
      # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
      # compilation will read in the entire depfile, add its stuff to it and write it out again).
      depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    if not versioned_targets.valid:
      self.context.log.info('Compiling targets %s' % str(versioned_targets.targets))
      sources_by_target, processors, fingerprint = self.calculate_sources(versioned_targets.targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
          self.context.log.warn('Skipping java compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, fingerprint, depfile)
          if result != 0:
            default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
            raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

        if processors:
          # Produce a monolithic apt processor service info file for further compilation rounds
          # and the unit test classpath.
          processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
          if os.path.exists(processor_info_file):
            with safe_open(processor_info_file, 'r') as f:
              for processor in f:
                processors.add(processor.strip())
          self.write_processor_info(processor_info_file, processors)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    self._deps.merge(deps)
Example No. 18
  def execute(self, targets):
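    """Compiles java targets in invalidated partitions, merging deps and registering products."""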
    java_targets = filter(JavaCompile._has_java_sources, targets)
    if java_targets:
      safe_mkdir(self._classes_dir)
      safe_mkdir(self._depfile_dir)

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          cp.insert(0, (conf, self._classes_dir))

      with self.invalidated(java_targets, invalidate_dependents=True,
          partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp)
          if not self.dry_run:
            vt.update()

        for vt in invalidation_check.all_vts:
          depfile = self.create_depfile_path(vt.targets)
          if not self.dry_run and os.path.exists(depfile):
            # Read in the deps created either just now or by a previous run on these targets.
            deps = Dependencies(self._classes_dir)
            deps.load(depfile)
            self._deps.merge(deps)

      if not self.dry_run:
        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')

          # Map generated classes to the owning targets and sources.
          for target, classes_by_source in self._deps.findclasses(java_targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, self._classes_dir, classes)
              genmap.add(target, self._classes_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # 'Map' (rewrite) annotation processor service info files to the owning targets.
          for target in java_targets:
            if is_apt(target) and target.processors:
              basedir = os.path.join(self._resources_dir, target.id)
              processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
              self.write_processor_info(processor_info_file, target.processors)
              genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])
Example No. 19
  def execute_single_compilation(self, scala_targets, cp, upstream_analysis_caches):
    """Execute a single compilation, updating upstream_analysis_caches if needed."""
    self.context.log.info('Compiling targets %s' % str(scala_targets))

    compilation_id = self.context.maybe_readable_identify(scala_targets)

    # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
    # analysis caches of previous compilations. We then copy the results out to the real output dir.
    output_dir = os.path.join(self._incremental_classes_dir, compilation_id)
    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'

    # We must defer dependency analysis to zinc. If we exclude files from a repeat build, zinc will assume
    # the files were deleted and will nuke the corresponding class files.
    invalidate_globally = self._flatten

    with self.changed(scala_targets, invalidate_dependants=True,
                      invalidate_globally=invalidate_globally) as changed_targets:
      sources_by_target = self.calculate_sources(changed_targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
          if result != 0:
            raise TaskError('%s returned %d' % (self._main, result))
          # Link class files emitted in this compilation into the central classes dir.
          self.link_all(output_dir, self._classes_dir)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    self.context.log.debug('Reading dependencies from ' + depfile)
    deps = Dependencies(output_dir)
    deps.load(depfile)
    self._deps.merge(deps)

    analysis_cache_parts = os.path.split(analysis_cache)
    if not upstream_analysis_caches.has(output_dir):
      # A previous chunk might have already updated this. It is certainly possible for a later chunk to
      # independently depend on some target that a previous chunk already built.
      upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])
    return compilation_id
Example No. 20
    def post_process(self, vt, upstream_analysis_caches, split_artifact):
        output_dir, depfile, analysis_cache = self.create_output_paths(
            vt.targets)
        if not self.dry_run:
            # Read in the deps created either just now or by a previous compiler run on these targets.
            if os.path.exists(depfile):
                self.context.log.debug('Reading dependencies from ' + depfile)
                deps = Dependencies(output_dir)
                deps.load(depfile)

                if split_artifact:
                    self.split_artifact(deps, vt)

                if self.context.products.isrequired('classes'):
                    genmap = self.context.products.get('classes')
                    for target, classes_by_source in deps.findclasses(
                            vt.targets).items():
                        for source, classes in classes_by_source.items():
                            genmap.add(source, output_dir, classes)
                            genmap.add(target, output_dir, classes)

                    # TODO(John Sirois): Map target.resources in the same way
                    # Create and Map scala plugin info files to the owning targets.
                    for target in vt.targets:
                        if is_scalac_plugin(target) and target.classname:
                            basedir = self.write_plugin_info(target)
                            genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

        # Update the upstream analysis map.
        if os.path.exists(analysis_cache):
            analysis_cache_parts = os.path.split(analysis_cache)
            if not upstream_analysis_caches.has(output_dir):
                # A previous chunk might have already updated this. It is certainly possible for a later chunk to
                # independently depend on some target that a previous chunk already built.
                upstream_analysis_caches.add(output_dir,
                                             analysis_cache_parts[0],
                                             [analysis_cache_parts[1]])

        # Update the classpath.
        with self.context.state('classpath', []) as cp:
            for conf in self._confs:
                cp.insert(0, (conf, output_dir))
Example No. 21
  def post_process(self, vt, upstream_analysis_caches, split_artifact):
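    """Registers products for vt's targets and records its output dir and analysis cache.

    Optionally splits the merged artifact back into per-target artifacts, then updates the
    upstream analysis map and prepends the output dir to the classpath.
    """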
    output_dir, depfile, analysis_cache = self.create_output_paths(vt.targets)
    if not self.dry_run:
      # Read in the deps created either just now or by a previous compiler run on these targets.
      if os.path.exists(depfile):
        self.context.log.debug('Reading dependencies from ' + depfile)
        deps = Dependencies(output_dir)
        deps.load(depfile)

        if split_artifact:
          self.split_artifact(deps, vt)

        if self.context.products.isrequired('classes'):
          genmap = self.context.products.get('classes')
          for target, classes_by_source in deps.findclasses(vt.targets).items():
            for source, classes in classes_by_source.items():
              genmap.add(source, output_dir, classes)
              genmap.add(target, output_dir, classes)

          # TODO(John Sirois): Map target.resources in the same way
          # Create and Map scala plugin info files to the owning targets.
          for target in vt.targets:
            if is_scalac_plugin(target) and target.classname:
              basedir = self.write_plugin_info(target)
              genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    if os.path.exists(analysis_cache):
      analysis_cache_parts = os.path.split(analysis_cache)
      if not upstream_analysis_caches.has(output_dir):
        # A previous chunk might have already updated this. It is certainly possible for a later chunk to
        # independently depend on some target that a previous chunk already built.
        upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
Example No. 22
    def execute_single_compilation(self, versioned_targets, cp):
        compilation_id = Target.maybe_readable_identify(
            versioned_targets.targets)

        # TODO: Use the artifact cache. In flat mode we may want to look for the artifact for all targets,
        # not just the invalid ones, as it might be more likely to be present. Or we could look for both.

        if self._flatten:
            # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
            # allows us to build different targets in different invocations without losing dependency information
            # from any of them.
            depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
        else:
            # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
            # compilation will read in the entire depfile, add its stuff to it and write it out again).
            depfile = os.path.join(self._depfile_dir,
                                   compilation_id) + '.dependencies'

        if not versioned_targets.valid:
            self.context.log.info('Compiling targets %s' %
                                  str(versioned_targets.targets))
            sources_by_target, processors, fingerprint = self.calculate_sources(
                versioned_targets.targets)
            if sources_by_target:
                sources = reduce(lambda all, sources: all.union(sources),
                                 sources_by_target.values())
                if not sources:
                    touch(
                        depfile
                    )  # Create an empty depfile, since downstream code may assume that one exists.
                    self.context.log.warn(
                        'Skipping java compile for targets with no sources:\n  %s'
                        %
                        '\n  '.join(str(t) for t in sources_by_target.keys()))
                else:
                    classpath = [
                        jar for conf, jar in cp if conf in self._confs
                    ]
                    result = self.compile(classpath, sources, fingerprint,
                                          depfile)
                    if result != 0:
                        default_message = 'Unexpected error - %s returned %d' % (
                            _JMAKE_MAIN, result)
                        raise TaskError(
                            _JMAKE_ERROR_CODES.get(result, default_message))

                if processors:
                    # Produce a monolithic apt processor service info file for further compilation rounds
                    # and the unit test classpath.
                    processor_info_file = os.path.join(self._classes_dir,
                                                       _PROCESSOR_INFO_FILE)
                    if os.path.exists(processor_info_file):
                        with safe_open(processor_info_file, 'r') as f:
                            for processor in f:
                                processors.add(processor.strip())
                    self.write_processor_info(processor_info_file, processors)

        # Read in the deps created either just now or by a previous compiler run on these targets.
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        self._deps.merge(deps)
Example No. 23
  def merge_artifact(self, versioned_target_set):
    if len(versioned_target_set.targets) <= 1:
      return

    with temporary_dir() as tmpdir:
      dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
      safe_rmtree(dst_output_dir)
      safe_mkdir(dst_output_dir)
      src_analysis_caches = []

      analysis_args = []
      analysis_args.extend(self._zinc_jar_args)
      analysis_args.extend([
        '-log-level', self.context.options.log_level or 'info',
        '-analysis',
        ])

      # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
      dst_deps = Dependencies(dst_output_dir)

      for target in versioned_target_set.targets:
        src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
        if os.path.exists(src_depfile):
          src_deps = Dependencies(src_output_dir)
          src_deps.load(src_depfile)
          dst_deps.merge(src_deps)

          classes_by_source = src_deps.findclasses([target]).get(target, {})
          for source, classes in classes_by_source.items():
            for cls in classes:
              src = os.path.join(src_output_dir, cls)
              dst = os.path.join(dst_output_dir, cls)
              # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
              # it's missing and rebuild it.
              # dst may already exist if we have overlapping targets. It's not a good idea
              # to have those, but until we enforce it, we must allow it here.
              if os.path.exists(src) and not os.path.exists(dst):
                # Copy the class file.
                safe_mkdir(os.path.dirname(dst))
                os.link(src, dst)

          # Use zinc to rebase a copy of the per-target analysis files prior to merging.
          if os.path.exists(src_analysis_cache):
            src_analysis_cache_tmp = \
              os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
            shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
            src_analysis_caches.append(src_analysis_cache_tmp)
            rebase_args = analysis_args + [
              '-cache', src_analysis_cache_tmp,
              '-rebase', '%s:%s' % (src_output_dir, dst_output_dir),
              ]
            if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
              self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
              'Target may require a full rebuild.' % src_analysis_cache_tmp)

      dst_deps.save(dst_depfile)

      # Use zinc to merge the analysis files.
      merge_args = analysis_args + [
        '-cache', dst_analysis_cache,
        '-merge', ':'.join(src_analysis_caches),
      ]
      if self.runjava(self._main, classpath=self._zinc_classpath, args=merge_args, jvmargs=self._jvm_args):
        raise TaskError('zinc failed to merge analysis files %s to %s' %
                        (':'.join(src_analysis_caches), dst_analysis_cache))
Example No. 24
  def execute_single_compilation(self, versioned_target_set, cp, upstream_analysis_caches):
    """Execute a single compilation, updating upstream_analysis_caches if needed."""
    if self._flatten:
      compilation_id = 'flat'
      output_dir = self._flat_classes_dir
    else:
      compilation_id = Target.maybe_readable_identify(versioned_target_set.targets)
      # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
      # analysis caches of previous compilations. We then copy the results out to the real output dir.
      output_dir = os.path.join(self._incremental_classes_dir, compilation_id)

    depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'
    analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.analysis_cache'

    safe_mkdir(output_dir)

    if not versioned_target_set.valid:
      with self.check_artifact_cache(versioned_target_set,
                                     build_artifacts=[output_dir, depfile, analysis_cache],
                                     artifact_root=self._workdir) as needs_building:
        if needs_building:
          self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
          sources_by_target = self.calculate_sources(versioned_target_set.targets)
          if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
              touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
              self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                                    '\n  '.join(str(t) for t in sources_by_target.keys()))
            else:
              classpath = [jar for conf, jar in cp if conf in self._confs]
              result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
              if result != 0:
                raise TaskError('%s returned %d' % (self._main, result))

    # Note that the following post-processing steps must happen even for valid targets.

    # Read in the deps created either just now or by a previous compiler run on these targets.
    if self.context.products.isrequired('classes'):
      self.context.log.debug('Reading dependencies from ' + depfile)
      deps = Dependencies(output_dir)
      deps.load(depfile)

      genmap = self.context.products.get('classes')

      for target, classes_by_source in deps.findclasses(versioned_target_set.targets).items():
        for source, classes in classes_by_source.items():
          genmap.add(source, output_dir, classes)
          genmap.add(target, output_dir, classes)

      # TODO(John Sirois): Map target.resources in the same way
      # Create and Map scala plugin info files to the owning targets.
      for target in versioned_target_set.targets:
        if is_scalac_plugin(target) and target.classname:
          basedir = self.write_plugin_info(target)
          genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

    # Update the upstream analysis map.
    analysis_cache_parts = os.path.split(analysis_cache)
    if not upstream_analysis_caches.has(output_dir):
      # A previous chunk might have already updated this. It is certainly possible for a later chunk to
      # independently depend on some target that a previous chunk already built.
      upstream_analysis_caches.add(output_dir, analysis_cache_parts[0], [ analysis_cache_parts[1] ])

    # Update the classpath.
    with self.context.state('classpath', []) as cp:
      for conf in self._confs:
        cp.insert(0, (conf, output_dir))
Example No. 25
    def merge_artifact(self, versioned_target_set):
        if len(versioned_target_set.targets) <= 1:
            return

        with temporary_dir() as tmpdir:
            dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(
                versioned_target_set.targets)
            safe_rmtree(dst_output_dir)
            safe_mkdir(dst_output_dir)
            src_analysis_caches = []

            analysis_args = []
            analysis_args.extend(self._zinc_jar_args)
            analysis_args.extend([
                '-log-level',
                self.context.options.log_level or 'info',
                '-analysis',
            ])

            # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
            dst_deps = Dependencies(dst_output_dir)

            for target in versioned_target_set.targets:
                src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths(
                    [target])
                if os.path.exists(src_depfile):
                    src_deps = Dependencies(src_output_dir)
                    src_deps.load(src_depfile)
                    dst_deps.merge(src_deps)

                    classes_by_source = src_deps.findclasses([target]).get(
                        target, {})
                    for source, classes in classes_by_source.items():
                        for cls in classes:
                            src = os.path.join(src_output_dir, cls)
                            dst = os.path.join(dst_output_dir, cls)
                            # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                            # it's missing and rebuild it.
                            # dst may already exist if we have overlapping targets. It's not a good idea
                            # to have those, but until we enforce it, we must allow it here.
                            if os.path.exists(src) and not os.path.exists(dst):
                                # Copy the class file.
                                safe_mkdir(os.path.dirname(dst))
                                os.link(src, dst)

                    # Use zinc to rebase a copy of the per-target analysis files prior to merging.
                    if os.path.exists(src_analysis_cache):
                        src_analysis_cache_tmp = \
                          os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
                        shutil.copyfile(src_analysis_cache,
                                        src_analysis_cache_tmp)
                        src_analysis_caches.append(src_analysis_cache_tmp)
                        rebase_args = analysis_args + [
                            '-cache',
                            src_analysis_cache_tmp,
                            '-rebase',
                            '%s:%s' % (src_output_dir, dst_output_dir),
                        ]
                        if self.runjava(self._main,
                                        classpath=self._zinc_classpath,
                                        args=rebase_args,
                                        jvmargs=self._jvm_args):
                            self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                            'Target may require a full rebuild.' % src_analysis_cache_tmp)

            dst_deps.save(dst_depfile)

            # Use zinc to merge the analysis files.
            merge_args = analysis_args + [
                '-cache',
                dst_analysis_cache,
                '-merge',
                ':'.join(src_analysis_caches),
            ]
            if self.runjava(self._main,
                            classpath=self._zinc_classpath,
                            args=merge_args,
                            jvmargs=self._jvm_args):
                raise TaskError('zinc failed to merge analysis files %s to %s' %
                                (':'.join(src_analysis_caches), dst_analysis_cache))
Example No. 26
    def execute_single_compilation(self, versioned_target_set, cp,
                                   upstream_analysis_caches):
        """Execute a single compilation, updating upstream_analysis_caches if needed."""
        if self._flatten:
            compilation_id = 'flat'
            output_dir = self._flat_classes_dir
        else:
            compilation_id = Target.maybe_readable_identify(
                versioned_target_set.targets)
            # Each compilation must output to its own directory, so zinc can then associate those with the appropriate
            # analysis caches of previous compilations. We then copy the results out to the real output dir.
            output_dir = os.path.join(self._incremental_classes_dir,
                                      compilation_id)

        depfile = os.path.join(self._depfile_dir,
                               compilation_id) + '.dependencies'
        analysis_cache = os.path.join(self._analysis_cache_dir,
                                      compilation_id) + '.analysis_cache'

        safe_mkdir(output_dir)

        if not versioned_target_set.valid:
            with self.check_artifact_cache(
                    versioned_target_set,
                    build_artifacts=[output_dir, depfile,
                                     analysis_cache]) as in_cache:
                if not in_cache:
                    self.context.log.info('Compiling targets %s' %
                                          versioned_target_set.targets)
                    sources_by_target = self.calculate_sources(
                        versioned_target_set.targets)
                    if sources_by_target:
                        sources = reduce(
                            lambda all, sources: all.union(sources),
                            sources_by_target.values())
                        if not sources:
                            # Create empty files, since downstream code may assume that these exist.
                            touch(depfile)
                            touch(analysis_cache)
                            self.context.log.warn(
                                'Skipping scala compile for targets with no sources:\n  %s'
                                % '\n  '.join(
                                    str(t) for t in sources_by_target.keys()))
                        else:
                            classpath = [
                                jar for conf, jar in cp if conf in self._confs
                            ]
                            result = self.compile(classpath, sources,
                                                  output_dir, analysis_cache,
                                                  upstream_analysis_caches,
                                                  depfile)
                            if result != 0:
                                raise TaskError('%s returned %d' %
                                                (self._main, result))

        # Note that the following post-processing steps must happen even for valid targets.

        # Read in the deps created either just now or by a previous compiler run on these targets.
        if self.context.products.isrequired('classes'):
            self.context.log.debug('Reading dependencies from ' + depfile)
            deps = Dependencies(output_dir)
            deps.load(depfile)

            genmap = self.context.products.get('classes')

            for target, classes_by_source in deps.findclasses(
                    versioned_target_set.targets).items():
                for source, classes in classes_by_source.items():
                    genmap.add(source, output_dir, classes)
                    genmap.add(target, output_dir, classes)

            # TODO(John Sirois): Map target.resources in the same way
            # Create and Map scala plugin info files to the owning targets.
            for target in versioned_target_set.targets:
                if is_scalac_plugin(target) and target.classname:
                    basedir = self.write_plugin_info(target)
                    genmap.add(target, basedir, [_PLUGIN_INFO_FILE])

        # Update the upstream analysis map.
        analysis_cache_parts = os.path.split(analysis_cache)
        if not upstream_analysis_caches.has(output_dir):
            # A previous chunk might have already updated this. It is certainly possible for a later chunk to
            # independently depend on some target that a previous chunk already built.
            upstream_analysis_caches.add(output_dir, analysis_cache_parts[0],
                                         [analysis_cache_parts[1]])

        # Update the classpath.
        with self.context.state('classpath', []) as cp:
            for conf in self._confs:
                cp.insert(0, (conf, output_dir))
Example No. 27
  def execute_single_compilation(self, scala_targets, cp, upstream_analysis_caches):
    """Execute a single compilation, updating upstream_analysis_caches if needed."""
    self.context.log.info('Compiling targets %s' % str(scala_targets))

    compilation_id = self.context.maybe_readable_identify(scala_targets)

    if self._flatten:
      # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
      # allows us to build different targets in different invocations without losing dependency information
      # from any of them.
      depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
      # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
      # compilation will read in the entire depfile, add its stuff to it and write it out again).
      depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    if self._flatten:
      output_dir = self._classes_dir
      analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id) + '.flat'
    else:
      # When compiling with multiple compilations, each compilation must output to its own directory, so zinc
      # can then associate those with the analysis caches of previous compilations.
      # So we compile into a compilation-specific directory and then copy the results out to the real output dir.
      output_dir = os.path.join(self._incremental_classes_dir, compilation_id)
      analysis_cache = os.path.join(self._analysis_cache_dir, compilation_id)

    if self._flatten:
      # We must defer dependency analysis to zinc. If we exclude files from a repeat build, zinc will assume
      # the files were deleted and will nuke the corresponding class files.
      invalidate_globally = True
    else:
      invalidate_globally = False
    with self.changed(scala_targets, invalidate_dependants=True,
                      invalidate_globally=invalidate_globally) as changed_targets:
      sources_by_target = self.calculate_sources(changed_targets)
      if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
          self.context.log.warn('Skipping scala compile for targets with no sources:\n  %s' %
                                '\n  '.join(str(t) for t in sources_by_target.keys()))
        else:
          classpath = [jar for conf, jar in cp if conf in self._confs]
          result = self.compile(classpath, sources, output_dir, analysis_cache, upstream_analysis_caches, depfile)
          if result != 0:
            raise TaskError('%s returned %d' % (self._main, result))
          if output_dir != self._classes_dir:
            # Link class files emitted in this compilation into the central classes dir.
            for (dirpath, dirnames, filenames) in os.walk(output_dir):
              for d in [os.path.join(dirpath, x) for x in dirnames]:
                dir = os.path.join(self._classes_dir, os.path.relpath(d, output_dir))
                if not os.path.isdir(dir):
                  os.mkdir(dir)
              for f in [os.path.join(dirpath, x) for x in filenames]:
                outfile = os.path.join(self._classes_dir, os.path.relpath(f, output_dir))
                if os.path.exists(outfile):
                  os.unlink(outfile)
                os.link(f, outfile)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    self.context.log.debug('Reading dependencies from ' + depfile)
    deps = Dependencies(output_dir)
    deps.load(depfile)
    self._deps.merge(deps)

    if not self._flatten:
      upstream_analysis_caches[output_dir] = analysis_cache