Code example #1
File: scala_compile.py Project: lxwuchang/commons
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if scala_targets:
      safe_mkdir(self._depfile_dir)
      safe_mkdir(self._analysis_cache_dir)

      # Map from output directory to { analysis_cache_dir, [ analysis_cache_file ]}
      upstream_analysis_caches = self.context.products.get('upstream')

      with self.context.state('classpath', []) as cp:
        for conf in self._confs:
          cp.insert(0, (conf, self._resources_dir))
          for jar in self._plugin_jars:
            cp.insert(0, (conf, jar))

      with self.invalidated(scala_targets, invalidate_dependants=True,
          partition_size_hint=self._partition_size_hint) as invalidation_check:
        for vt in invalidation_check.all_vts:
          if vt.valid:  # Don't compile, just post-process.
            self.post_process(vt, upstream_analysis_caches, split_artifact=False)
        for vt in invalidation_check.invalid_vts_partitioned:
          # Compile, using partitions for efficiency.
          self.execute_single_compilation(vt, cp, upstream_analysis_caches)
          if not self.dry_run:
            vt.update()
      deps_cache = JvmDependencyCache(self, scala_targets)
      deps_cache.check_undeclared_dependencies()
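
Code example #1 threads a shared classpath between tasks via self.context.state('classpath', []). A minimal, hypothetical sketch of that pattern (SharedState is an invented name, not the Pants API): a context manager yields a mutable value stored under a key, so entries inserted by one task remain visible to every later use in the same run.

from contextlib import contextmanager

class SharedState(object):
  def __init__(self):
    self._state = {}

  @contextmanager
  def state(self, key, default):
    # Yield the stored value; callers mutate it in place.
    yield self._state.setdefault(key, default)

shared = SharedState()
with shared.state('classpath', []) as cp:
  cp.insert(0, ('default', '/build/resources'))
with shared.state('classpath', []) as cp:
  print(cp)  # [('default', '/build/resources')] -- accumulated across uses
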
Code example #2
File: scala_compile.py Project: samitny/commons
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if not scala_targets:
      return

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)
      with self.context.state('upstream_analysis_map', {}) as upstream_analysis_map:
        with self.invalidated(scala_targets, invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint) as invalidation_check:
          # Process partitions one by one.
          for vts in invalidation_check.all_vts_partitioned:
            if not self.dry_run:
              merged_artifact = self._process_target_partition(vts, cp, upstream_analysis_map)
              vts.update()
              # Note that we add the merged classes_dir to the upstream.
              # This is because zinc doesn't handle many upstream dirs well.
              if os.path.exists(merged_artifact.classes_dir):
                for conf in self._confs:
                  cp.append((conf, merged_artifact.classes_dir))
                if os.path.exists(merged_artifact.analysis_file):
                  upstream_analysis_map[merged_artifact.classes_dir] = merged_artifact.analysis_file

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      analysis_file = self._artifact_factory.analysis_file_for_targets([target])
      if os.path.exists(analysis_file):
        all_analysis_files.add(analysis_file)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
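
Code examples #1 and #2 pass a partition_size_hint to self.invalidated() and then walk the *_partitioned views so that each partition can be handed to the compiler in one invocation. A rough, hypothetical illustration of that kind of grouping (partition_by_size is invented here; the real Pants invalidation logic is considerably more involved):

def partition_by_size(targets, size_hint):
  # Group targets into chunks of roughly size_hint, preserving order.
  partitions, current = [], []
  for target in targets:
    current.append(target)
    if len(current) >= size_hint:
      partitions.append(current)
      current = []
  if current:
    partitions.append(current)
  return partitions

# Each partition would correspond to one compiler run in the examples above.
for partition in partition_by_size(['a', 'b', 'c', 'd', 'e'], 2):
  print(partition)  # ['a', 'b'], then ['c', 'd'], then ['e']
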
Code example #3
  def execute(self, targets):
    scala_targets = filter(ScalaCompile._has_scala_sources, targets)
    if not scala_targets:
      return

    safe_mkdir(self._classes_dir_base)
    safe_mkdir(self._depfiles_base)
    safe_mkdir(self._analysis_files_base)

    # Get the classpath generated by upstream JVM tasks (including previous calls to this execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)

      with self.invalidated_with_artifact_cache_check(
          scala_targets,
          invalidate_dependents=True,
          partition_size_hint=self._partition_size_hint) as (invalidation_check, cached_vts):
        # Localize the analysis files we read from the artifact cache.
        self._localize_portable_artifact_files(cached_vts)
        # Compile partitions one by one.
        self._compile_all(invalidation_check.invalid_vts_partitioned, scala_targets, cp)

      # Post-processing we perform for all targets, whether they needed compilation or not.
      for target in scala_targets:
        self._post_process(target, cp)

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      _, _, analysis_file = self._output_paths([target])
      if os.path.exists(analysis_file):
        all_analysis_files.add(analysis_file)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
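
Code examples #1 and #3 call safe_mkdir for every output directory before compiling. The real helper ships with the surrounding codebase; the behavior these call sites rely on, creating the directory tree if it is missing and tolerating it already existing, amounts to roughly this sketch:

import errno
import os

def safe_mkdir(path):
  # Create the directory tree; ignore the error if it already exists.
  try:
    os.makedirs(path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      raise
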
Code example #4
File: scala_compile.py Project: jalons/commons
  def execute(self, targets):
    scala_targets = filter(_is_scala, targets)
    if not scala_targets:
      return

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    with self.context.state('classpath', []) as cp:
      self._add_globally_required_classpath_entries(cp)
      with self.context.state('upstream_analysis_map', {}) as upstream_analysis_map:
        with self.invalidated(scala_targets, invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint) as invalidation_check:
          # Process partitions one by one.
          for vts in invalidation_check.all_vts_partitioned:
            if not self.dry_run:
              merged_artifact = self._process_target_partition(vts, cp, upstream_analysis_map)
              vts.update()
              # Note that we add the merged classes_dir to the upstream.
              # This is because zinc doesn't handle many upstream dirs well.
              if os.path.exists(merged_artifact.classes_dir):
                for conf in self._confs:
                  cp.append((conf, merged_artifact.classes_dir))
                if os.path.exists(merged_artifact.analysis_file):
                  upstream_analysis_map[merged_artifact.classes_dir] = \
                    AnalysisFileSpec(merged_artifact.analysis_file, merged_artifact.classes_dir)

    # Check for missing dependencies.
    all_analysis_files = set()
    for target in scala_targets:
      analysis_file_spec = self._artifact_factory.analysis_file_for_targets([target])
      if os.path.exists(analysis_file_spec.analysis_file):
        all_analysis_files.add(analysis_file_spec)
    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
    deps_cache.check_undeclared_dependencies()
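
Code example #4 stores AnalysisFileSpec values that pair an analysis file with the classes directory it describes, and collects them into a set. As a hypothetical stand-in (the real AnalysisFileSpec is defined in the project itself), a namedtuple captures the same shape and hashes by value, which is what membership in all_analysis_files requires:

import collections
import os

# Hypothetical stand-in for the project's AnalysisFileSpec.
AnalysisFileSpec = collections.namedtuple('AnalysisFileSpec', ['analysis_file', 'classes_dir'])

spec = AnalysisFileSpec('/build/analysis/foo.analysis', '/build/classes/foo')
all_analysis_files = set()
if os.path.exists(spec.analysis_file):
  all_analysis_files.add(spec)  # namedtuples hash by value, so the set deduplicates
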
Code example #5
File: scala_compile.py Project: wfarner/commons
    def execute(self, targets):
        scala_targets = filter(_is_scala, targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # The group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        exclusives_key = egroups.get_group_key_for_target(targets[0])
        exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)

        with self.context.state('upstream_analysis_map',
                                {}) as upstream_analysis_map:
            with self.invalidated(scala_targets,
                                  invalidate_dependents=True,
                                  partition_size_hint=self._partition_size_hint
                                  ) as invalidation_check:
                # Process partitions one by one.
                for vts in invalidation_check.all_vts_partitioned:
                    # Refresh the classpath, to pick up any changes from update_compatible_classpaths.
                    exclusives_classpath = egroups.get_classpath_for_group(
                        exclusives_key)
                    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
                    # Add the global classpaths here, directly, instead of doing the
                    # add-to-compatible thing.
                    self._add_globally_required_classpath_entries(
                        exclusives_classpath)

                    if not self.dry_run:
                        merged_artifact = self._process_target_partition(
                            vts, exclusives_classpath, upstream_analysis_map)
                        vts.update()
                        # Note that we add the merged classes_dir to the upstream.
                        # This is because zinc doesn't handle many upstream dirs well.
                        if os.path.exists(merged_artifact.classes_dir):
                            for conf in self._confs:  ### CLASSPATH UPDATE
                                # Update the exclusives group classpaths.
                                egroups.update_compatible_classpaths(
                                    exclusives_key,
                                    [(conf, merged_artifact.classes_dir)])
                            if os.path.exists(merged_artifact.analysis_file):
                                upstream_analysis_map[merged_artifact.classes_dir] = \
                                  AnalysisFileSpec(merged_artifact.analysis_file, merged_artifact.classes_dir)
                if invalidation_check.invalid_vts:
                    # Check for missing dependencies.
                    all_analysis_files = set()
                    for target in scala_targets:
                        analysis_file_spec = self._artifact_factory.analysis_file_for_targets(
                            [target])
                        if os.path.exists(analysis_file_spec.analysis_file):
                            all_analysis_files.add(analysis_file_spec)
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    all_analysis_files)
                    deps_cache.check_undeclared_dependencies()
Code example #6
File: scala_compile.py Project: UrbanCompass/commons
    def execute(self, targets):
        scala_targets = filter(_is_scala, targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # The group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data("exclusives_groups")
        exclusives_key = egroups.get_group_key_for_target(targets[0])
        exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)

        with self.context.state("upstream_analysis_map", {}) as upstream_analysis_map:
            with self.invalidated(
                scala_targets, invalidate_dependents=True, partition_size_hint=self._partition_size_hint
            ) as invalidation_check:
                # Process partitions one by one.
                for vts in invalidation_check.all_vts_partitioned:
                    # Refresh the classpath, to pick up any changes from update_compatible_classpaths.
                    exclusives_classpath = egroups.get_classpath_for_group(exclusives_key)
                    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
                    # Add the global classpaths here, directly, instead of doing the
                    # add-to-compatible thing.
                    self._add_globally_required_classpath_entries(exclusives_classpath)

                    if not self.dry_run:
                        merged_artifact = self._process_target_partition(
                            vts, exclusives_classpath, upstream_analysis_map
                        )
                        vts.update()
                        # Note that we add the merged classes_dir to the upstream.
                        # This is because zinc doesn't handle many upstream dirs well.
                        if os.path.exists(merged_artifact.classes_dir):
                            for conf in self._confs:  ### CLASSPATH UPDATE
                                # Update the exclusives group classpaths.
                                egroups.update_compatible_classpaths(
                                    exclusives_key, [(conf, merged_artifact.classes_dir)]
                                )
                            if os.path.exists(merged_artifact.analysis_file):
                                upstream_analysis_map[merged_artifact.classes_dir] = AnalysisFileSpec(
                                    merged_artifact.analysis_file, merged_artifact.classes_dir
                                )
                if invalidation_check.invalid_vts:
                    # Check for missing dependencies.
                    all_analysis_files = set()
                    for target in scala_targets:
                        analysis_file_spec = self._artifact_factory.analysis_file_for_targets([target])
                        if os.path.exists(analysis_file_spec.analysis_file):
                            all_analysis_files.add(analysis_file_spec)
                    deps_cache = JvmDependencyCache(self.context, scala_targets, all_analysis_files)
                    deps_cache.check_undeclared_dependencies()
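
Code examples #5 and #6 stop passing a plain classpath list around and instead route every change through the exclusives-group registry: look up the group key for the targets, read the group's classpath, publish new entries with update_compatible_classpaths, and re-read the classpath after each update. A deliberately simplified, hypothetical sketch of that bookkeeping (ExclusivesGroups here is illustrative only and does not mirror the real Pants implementation):

class ExclusivesGroups(object):
    """Hypothetical stand-in for the exclusives-group registry."""

    def __init__(self):
        self._classpaths = {}

    def get_group_key_for_target(self, target):
        # Simplified: a single global group; the real registry derives a key per target.
        return 'default-group'

    def get_classpath_for_group(self, key):
        return self._classpaths.setdefault(key, [])

    def update_compatible_classpaths(self, key, new_entries):
        self._classpaths.setdefault(key, []).extend(new_entries)

egroups = ExclusivesGroups()
key = egroups.get_group_key_for_target('src/scala/example')
egroups.update_compatible_classpaths(key, [('default', '/build/classes')])
print(egroups.get_classpath_for_group(key))  # [('default', '/build/classes')]
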
Code example #7
  def execute(self, targets):
    scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
    if not scala_targets:
      return

    # Get the exclusives group for the targets to compile.
    # The group guarantees that there'll be a single exclusives key for them.
    egroups = self.context.products.get_data('exclusives_groups')
    group_id = egroups.get_group_key_for_target(scala_targets[0])

    # Add resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    # Add (only to the local copy) classpath entries necessary for our compiler plugins.
    for conf in self._confs:
      for jar in self._zinc_utils.plugin_jars():
        cp.insert(0, (conf, jar))

    # Invalidation check. Everything inside the with block must succeed for the
    # invalid targets to become valid.
    with self.invalidated(scala_targets, invalidate_dependents=True,
                          partition_size_hint=self._partition_size_hint) as invalidation_check:
      if invalidation_check.invalid_vts and not self.dry_run:
        invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
        # The analysis for invalid and deleted sources is no longer valid.
        invalid_sources_by_target = self._compute_sources_by_target(invalid_targets)
        invalid_sources = list(itertools.chain.from_iterable(invalid_sources_by_target.values()))
        deleted_sources = self._get_deleted_sources()

        # Work in a tmpdir so we don't stomp the main analysis files on error.
        # The tmpdir is cleaned up in a shutdown hook, because background work
        # may need to access files we create here even after this method returns.
        self._ensure_analysis_tmpdir()
        tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
        os.mkdir(tmpdir)
        valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
        newly_invalid_analysis_tmp = os.path.join(tmpdir, 'newly_invalid_analysis')
        invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
        if ZincUtils.is_nonempty_analysis(self._analysis_file):
          with self.context.new_workunit(name='prepare-analysis'):
            if self._zinc_utils.run_zinc_split(self._analysis_file,
                                               ((invalid_sources + deleted_sources, newly_invalid_analysis_tmp),
                                                ([], valid_analysis_tmp))):
              raise TaskError('Failed to split off invalid analysis.')
            if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
              if self._zinc_utils.run_zinc_merge([self._invalid_analysis_file, newly_invalid_analysis_tmp],
                                                 invalid_analysis_tmp):
                raise TaskError('Failed to merge prior and current invalid analysis.')
            else:
              invalid_analysis_tmp = newly_invalid_analysis_tmp

            # Now it's OK to overwrite the main analysis files with the new state.
            ZincUtils._move_analysis(valid_analysis_tmp, self._analysis_file)
            ZincUtils._move_analysis(invalid_analysis_tmp, self._invalid_analysis_file)

        # Figure out the sources and analysis belonging to each partition.
        partitions = []  # Each element is a triple (vts, sources, analysis_file).
        for vts in invalidation_check.invalid_vts_partitioned:
          partition_tmpdir = os.path.join(tmpdir, Target.maybe_readable_identify(vts.targets))
          os.mkdir(partition_tmpdir)
          sources = list(itertools.chain.from_iterable(
            [invalid_sources_by_target.get(t, []) for t in vts.targets]))
          analysis_file = os.path.join(partition_tmpdir, 'analysis')
          partitions.append((vts, sources, analysis_file))

        # Split per-partition files out of the global invalid analysis.
        if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file) and partitions:
          with self.context.new_workunit(name='partition-analysis'):
            splits = [(x[1], x[2]) for x in partitions]
            if self._zinc_utils.run_zinc_split(self._invalid_analysis_file, splits):
              raise TaskError('Failed to split invalid analysis into per-partition files.')

        # Now compile partitions one by one.
        for partition in partitions:
          (vts, sources, analysis_file) = partition
          self._process_target_partition(partition, cp)
          # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

          if os.path.exists(analysis_file):  # The compilation created an analysis.
            # Kick off the background artifact cache write.
            if self.get_artifact_cache() and self.context.options.write_to_artifact_cache:
              self._write_to_artifact_cache(analysis_file, vts, invalid_sources_by_target)

            # Merge the newly-valid analysis into our global valid analysis.
            if ZincUtils.is_nonempty_analysis(self._analysis_file):
              with self.context.new_workunit(name='update-upstream-analysis'):
                new_valid_analysis = analysis_file + '.valid.new'
                if self._zinc_utils.run_zinc_merge([self._analysis_file, analysis_file], new_valid_analysis):
                  raise TaskError('Failed to merge new analysis back into valid analysis file.')
              ZincUtils._move_analysis(new_valid_analysis, self._analysis_file)
            else:  # We need to keep analysis_file around. Background tasks may need it.
              ZincUtils._copy_analysis(analysis_file, self._analysis_file)

          if ZincUtils.is_nonempty_analysis(self._invalid_analysis_file):
            with self.context.new_workunit(name='trim-downstream-analysis'):
              # Trim out the newly-valid sources from our global invalid analysis.
              new_invalid_analysis = analysis_file + '.invalid.new'
              discarded_invalid_analysis = analysis_file + '.invalid.discard'
              if self._zinc_utils.run_zinc_split(self._invalid_analysis_file,
                  [(sources, discarded_invalid_analysis), ([], new_invalid_analysis)]):
                raise TaskError('Failed to trim invalid analysis file.')
              ZincUtils._move_analysis(new_invalid_analysis, self._invalid_analysis_file)

          # Now that all the analysis accounting is complete, we can safely mark the
          # targets as valid.
          vts.update()

        # Check for missing dependencies, if needed.
        if invalidation_check.invalid_vts and os.path.exists(self._analysis_file):
          deps_cache = JvmDependencyCache(self.context, scala_targets, self._analysis_file, self._classes_dir)
          deps_cache.check_undeclared_dependencies()

    # Provide the target->class and source->class mappings to downstream tasks if needed.
    if self.context.products.isrequired('classes'):
      sources_by_target = self._compute_sources_by_target(scala_targets)
      classes_by_source = self._compute_classes_by_source()
      self._add_all_products_to_genmap(sources_by_target, classes_by_source)

    # Update the classpath for downstream tasks.
    for conf in self._confs:
      egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
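
Code example #7 never rewrites its analysis files in place: all split and merge work happens on files under a per-run tmpdir, and only after a step succeeds is the result moved over the real file, so a failed compile cannot corrupt self._analysis_file. Reduced to a hedged sketch (update_analysis_atomically and rewrite_analysis are hypothetical names, not part of Pants or ZincUtils):

import os
import shutil
import tempfile

def update_analysis_atomically(analysis_file, rewrite_analysis):
  # Work in a fresh tmpdir so a failure leaves analysis_file untouched.
  tmpdir = tempfile.mkdtemp(prefix='analysis-')
  try:
    tmp_analysis = os.path.join(tmpdir, 'analysis.new')
    rewrite_analysis(analysis_file, tmp_analysis)  # May raise; the original stays valid.
    shutil.move(tmp_analysis, analysis_file)       # Replace only on success.
  finally:
    shutil.rmtree(tmpdir, ignore_errors=True)
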
Code example #8
    def execute(self, targets):
        scala_targets = filter(lambda t: has_sources(t, '.scala'), targets)
        if not scala_targets:
            return

        # Get the exclusives group for the targets to compile.
        # The group guarantees that there'll be a single exclusives key for them.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(scala_targets[0])

        # Add resource dirs to the classpath for us and for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._resources_dir)])

        # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
        cp = egroups.get_classpath_for_group(group_id)

        # Add (only to the local copy) classpath entries necessary for our compiler plugins.
        for conf in self._confs:
            for jar in self._zinc_utils.plugin_jars():
                cp.insert(0, (conf, jar))

        # Invalidation check. Everything inside the with block must succeed for the
        # invalid targets to become valid.
        with self.invalidated(scala_targets,
                              invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint
                              ) as invalidation_check:
            if invalidation_check.invalid_vts and not self.dry_run:
                invalid_targets = [
                    vt.target for vt in invalidation_check.invalid_vts
                ]
                # The analysis for invalid and deleted sources is no longer valid.
                invalid_sources_by_target = self._compute_sources_by_target(
                    invalid_targets)
                invalid_sources = list(
                    itertools.chain.from_iterable(
                        invalid_sources_by_target.values()))
                deleted_sources = self._get_deleted_sources()

                # Work in a tmpdir so we don't stomp the main analysis files on error.
                # The tmpdir is cleaned up in a shutdown hook, because background work
                # may need to access files we create here even after this method returns.
                self._ensure_analysis_tmpdir()
                tmpdir = os.path.join(self._analysis_tmpdir, str(uuid.uuid4()))
                os.mkdir(tmpdir)
                valid_analysis_tmp = os.path.join(tmpdir, 'valid_analysis')
                newly_invalid_analysis_tmp = os.path.join(
                    tmpdir, 'newly_invalid_analysis')
                invalid_analysis_tmp = os.path.join(tmpdir, 'invalid_analysis')
                if ZincUtils.is_nonempty_analysis(self._analysis_file):
                    with self.context.new_workunit(name='prepare-analysis'):
                        if self._zinc_utils.run_zinc_split(
                                self._analysis_file,
                            ((invalid_sources + deleted_sources,
                              newly_invalid_analysis_tmp),
                             ([], valid_analysis_tmp))):
                            raise TaskError(
                                'Failed to split off invalid analysis.')
                        if ZincUtils.is_nonempty_analysis(
                                self._invalid_analysis_file):
                            if self._zinc_utils.run_zinc_merge([
                                    self._invalid_analysis_file,
                                    newly_invalid_analysis_tmp
                            ], invalid_analysis_tmp):
                                raise TaskError(
                                    'Failed to merge prior and current invalid analysis.'
                                )
                        else:
                            invalid_analysis_tmp = newly_invalid_analysis_tmp

                        # Now it's OK to overwrite the main analysis files with the new state.
                        ZincUtils._move_analysis(valid_analysis_tmp,
                                                 self._analysis_file)
                        ZincUtils._move_analysis(invalid_analysis_tmp,
                                                 self._invalid_analysis_file)

                # Figure out the sources and analysis belonging to each partition.
                partitions = []  # Each element is a triple (vts, sources, analysis_file).
                for vts in invalidation_check.invalid_vts_partitioned:
                    partition_tmpdir = os.path.join(
                        tmpdir, Target.maybe_readable_identify(vts.targets))
                    os.mkdir(partition_tmpdir)
                    sources = list(
                        itertools.chain.from_iterable([
                            invalid_sources_by_target.get(t, [])
                            for t in vts.targets
                        ]))
                    analysis_file = os.path.join(partition_tmpdir, 'analysis')
                    partitions.append((vts, sources, analysis_file))

                # Split per-partition files out of the global invalid analysis.
                if ZincUtils.is_nonempty_analysis(
                        self._invalid_analysis_file) and partitions:
                    with self.context.new_workunit(name='partition-analysis'):
                        splits = [(x[1], x[2]) for x in partitions]
                        if self._zinc_utils.run_zinc_split(
                                self._invalid_analysis_file, splits):
                            raise TaskError(
                                'Failed to split invalid analysis into per-partition files.'
                            )

                # Now compile partitions one by one.
                for partition in partitions:
                    (vts, sources, analysis_file) = partition
                    self._process_target_partition(partition, cp)
                    # No exception was thrown, therefore the compile succeeded and analysis_file is now valid.

                    if os.path.exists(
                            analysis_file
                    ):  # The compilation created an analysis.
                        # Kick off the background artifact cache write.
                        if self.get_artifact_cache(
                        ) and self.context.options.write_to_artifact_cache:
                            self._write_to_artifact_cache(
                                analysis_file, vts, invalid_sources_by_target)

                        # Merge the newly-valid analysis into our global valid analysis.
                        if ZincUtils.is_nonempty_analysis(self._analysis_file):
                            with self.context.new_workunit(
                                    name='update-upstream-analysis'):
                                new_valid_analysis = analysis_file + '.valid.new'
                                if self._zinc_utils.run_zinc_merge(
                                    [self._analysis_file, analysis_file],
                                        new_valid_analysis):
                                    raise TaskError(
                                        'Failed to merge new analysis back into valid analysis file.'
                                    )
                            ZincUtils._move_analysis(new_valid_analysis,
                                                     self._analysis_file)
                        else:  # We need to keep analysis_file around. Background tasks may need it.
                            ZincUtils._copy_analysis(analysis_file,
                                                     self._analysis_file)

                    if ZincUtils.is_nonempty_analysis(
                            self._invalid_analysis_file):
                        with self.context.new_workunit(
                                name='trim-downstream-analysis'):
                            # Trim out the newly-valid sources from our global invalid analysis.
                            new_invalid_analysis = analysis_file + '.invalid.new'
                            discarded_invalid_analysis = analysis_file + '.invalid.discard'
                            if self._zinc_utils.run_zinc_split(
                                    self._invalid_analysis_file,
                                [(sources, discarded_invalid_analysis),
                                 ([], new_invalid_analysis)]):
                                raise TaskError(
                                    'Failed to trim invalid analysis file.')
                            ZincUtils._move_analysis(
                                new_invalid_analysis,
                                self._invalid_analysis_file)

                    # Now that all the analysis accounting is complete, we can safely mark the
                    # targets as valid.
                    vts.update()

                # Check for missing dependencies, if needed.
                if invalidation_check.invalid_vts and os.path.exists(
                        self._analysis_file):
                    deps_cache = JvmDependencyCache(self.context,
                                                    scala_targets,
                                                    self._analysis_file,
                                                    self._classes_dir)
                    deps_cache.check_undeclared_dependencies()

        # Provide the target->class and source->class mappings to downstream tasks if needed.
        if self.context.products.isrequired('classes'):
            sources_by_target = self._compute_sources_by_target(scala_targets)
            classes_by_source = self._compute_classes_by_source()
            self._add_all_products_to_genmap(sources_by_target,
                                             classes_by_source)

        # Update the classpath for downstream tasks.
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id,
                                                 [(conf, self._classes_dir)])