Example #1
 def make_zinc_job(target, input_product_key, output_products,
                   dep_keys):
     return Job(
         key=self._zinc_key_for_target(target,
                                       rsc_compile_context.workflow),
         fn=functools.partial(self._default_work_for_vts, ivts,
                              zinc_compile_context, input_product_key,
                              counter, compile_contexts,
                              CompositeProductAdder(*output_products)),
         dependencies=list(dep_keys),
         size=self._size_estimator(zinc_compile_context.sources),
     )
Example #2
 def make_rsc_job(target, dep_targets):
   return Job(
     self._rsc_key_for_target(target),
     functools.partial(
       work_for_vts_rsc,
       ivts,
       rsc_compile_context),
     # The rsc jobs depend on other rsc jobs, and on zinc jobs for targets that are not
     # processed by rsc.
     list(all_zinc_rsc_invalid_dep_keys(dep_targets)),
     self._size_estimator(rsc_compile_context.sources),
   )
Example #3
  def create_compile_jobs(self, compile_target, all_compile_contexts, invalid_dependencies, ivts,
    counter, classpath_product):
    """Return a list of jobs, and a count of those jobs that represent meaningful ("countable") work."""

    context_for_target = all_compile_contexts[compile_target]
    compile_context = self.select_runtime_context(context_for_target)

    compile_deps = [self.exec_graph_key_for_target(target) for target in invalid_dependencies]

    # The cache checking job doesn't technically have any dependencies, but we want to delay it
    # until immediately before we would otherwise try compiling, so we indicate that it depends on
    # all compile dependencies.
    double_check_cache_job = Job(
      key=self.exec_graph_double_check_cache_key_for_target(compile_target),
      fn=functools.partial(self._default_double_check_cache_for_vts, ivts),
      dependencies=compile_deps,
      options_scope=self.options_scope,
      target=compile_target)
    # The compile job depends on the cache check job. This decomposition is necessary in order to
    # support more complex situations where compilation runs multiple jobs in parallel, and wants to
    # double check the cache before starting any of them.
    compile_job = Job(
      key=self.exec_graph_key_for_target(compile_target),
      fn=functools.partial(
        self._default_work_for_vts,
        ivts,
        compile_context,
        'runtime_classpath',
        counter,
        all_compile_contexts,
        classpath_product),
      dependencies=[double_check_cache_job.key] + compile_deps,
      size=self._size_estimator(compile_context.sources),
      # If compilation and analysis work succeeds, validate the vts.
      # Otherwise, fail it.
      on_success=ivts.update,
      on_failure=ivts.force_invalidate,
      options_scope=self.options_scope,
      target=compile_target)
    return ([double_check_cache_job, compile_job], 1)
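The examples above all build `Job` instances from a key, a work function (usually a `functools.partial`), a list of dependency keys, a size estimate, and optional `on_success`/`on_failure` callbacks. As a rough illustration of how such jobs compose into an execution graph, here is a minimal, self-contained sketch; the `Job` container and the serial scheduler below are assumptions inferred from these call sites, not the actual pants `Job`/`ExecutionGraph` implementation.

import functools


class Job:
    """Minimal stand-in for an execution-graph job (illustrative assumption, not the pants class)."""

    def __init__(self, key, fn, dependencies, size=0, on_success=None,
                 on_failure=None, options_scope=None, target=None):
        self.key = key
        self.fn = fn
        self.dependencies = dependencies
        self.size = size
        self.on_success = on_success
        self.on_failure = on_failure
        self.options_scope = options_scope
        self.target = target


def run_jobs(jobs):
    """Toy scheduler: run jobs serially in dependency order (no parallelism, no cycle detection)."""
    by_key = {job.key: job for job in jobs}
    done = set()

    def run(job):
        if job.key in done:
            return
        # Run any dependencies we know about first.
        for dep_key in job.dependencies:
            if dep_key in by_key:
                run(by_key[dep_key])
        try:
            job.fn()
        except Exception:
            if job.on_failure:
                job.on_failure()
            raise
        else:
            if job.on_success:
                job.on_success()
        done.add(job.key)

    for job in jobs:
        run(job)


if __name__ == '__main__':
    # Two jobs: 'compile-b' depends on 'compile-a', mirroring the dep_keys pattern above.
    jobs = [
        Job('compile-b', functools.partial(print, 'compiling b'), ['compile-a']),
        Job('compile-a', functools.partial(print, 'compiling a'), []),
    ]
    run_jobs(jobs)  # prints "compiling a" then "compiling b"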
Example #4
    def job(self,
            name,
            fn,
            dependencies,
            size=0,
            on_success=None,
            on_failure=None):
        def recording_fn():
            self.jobs_run.append(name)
            fn()

        return Job(name, recording_fn, dependencies, size, on_success,
                   on_failure)
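Assuming a test class that initializes `self.jobs_run = []`, the helper above wraps each job's work function so a test can assert the order in which the graph executed its jobs. A hypothetical test using the toy `Job`/`run_jobs` sketch shown after Example #3 (the class, job names, and scheduler are illustrative, not the real test harness) might look like this:

import unittest


class RecordingJobsTest(unittest.TestCase):
    """Illustrative only: the recording wrapper lets a test observe execution order."""

    def job(self, name, fn, dependencies, size=0, on_success=None, on_failure=None):
        # Same recording pattern as Example #4, built on the toy Job class.
        def recording_fn():
            self.jobs_run.append(name)
            fn()

        return Job(name, recording_fn, dependencies, size, on_success, on_failure)

    def test_jobs_run_in_dependency_order(self):
        self.jobs_run = []
        run_jobs([  # Job and run_jobs come from the toy sketch after Example #3.
            self.job('compile-b', lambda: None, ['compile-a']),
            self.job('compile-a', lambda: None, []),
        ])
        self.assertEqual(['compile-a', 'compile-b'], self.jobs_run)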
Example #5
 def make_rsc_job(target, dep_targets):
     return Job(
         key=self._rsc_key_for_target(target),
         fn=functools.partial(
             # NB: This will output to the 'rsc_mixed_compile_classpath' product via
             # self.register_extra_products_from_contexts()!
             work_for_vts_rsc,
             ivts,
             rsc_compile_context,
         ),
         # The rsc jobs depend on other rsc jobs, and on zinc jobs for targets that are not
         # processed by rsc.
         dependencies=list(all_zinc_rsc_invalid_dep_keys(dep_targets)),
         size=self._size_estimator(rsc_compile_context.sources),
     )
Example #6
 def make_zinc_job(target, input_product_key, output_products,
                   dep_keys):
     return Job(
         key=self._zinc_key_for_target(target,
                                       rsc_compile_context.workflow),
         fn=functools.partial(self._default_work_for_vts, ivts,
                              zinc_compile_context, input_product_key,
                              counter, compile_contexts,
                              CompositeProductAdder(*output_products)),
         dependencies=list(dep_keys),
         size=self._size_estimator(zinc_compile_context.sources),
         # If compilation and analysis work succeeds, validate the vts.
         # Otherwise, fail it.
         on_success=ivts.update,
         on_failure=ivts.force_invalidate)
Example #7
 def make_outline_job(target, dep_targets):
     if workflow == self.JvmCompileWorkflowType.outline_and_zinc:
         target_key = self._outline_key_for_target(target)
     else:
         target_key = self._rsc_key_for_target(target)
     return Job(
         key=target_key,
         fn=functools.partial(
             # NB: This will output to the 'rsc_mixed_compile_classpath' product via
             # self.register_extra_products_from_contexts()!
             work_for_vts_rsc,
             ivts,
             rsc_compile_context,
         ),
         # The rsc jobs depend on other rsc jobs, and on zinc jobs for targets that are not
         # processed by rsc.
         dependencies=[cache_doublecheck_key] +
         list(all_zinc_rsc_invalid_dep_keys(dep_targets)),
         size=self._size_estimator(rsc_compile_context.sources),
         options_scope=self.options_scope,
         target=target)
Example #8
    def create_compile_jobs(self, compile_target, all_compile_contexts,
                            invalid_dependencies, ivts, counter,
                            classpath_product):

        context_for_target = all_compile_contexts[compile_target]
        compile_context = self.select_runtime_context(context_for_target)

        job = Job(
            self.exec_graph_key_for_target(compile_target),
            functools.partial(self._default_work_for_vts, ivts,
                              compile_context, 'runtime_classpath', counter,
                              all_compile_contexts, classpath_product),
            [
                self.exec_graph_key_for_target(target)
                for target in invalid_dependencies
            ],
            self._size_estimator(compile_context.sources),
            # If compilation and analysis work succeeds, validate the vts.
            # Otherwise, fail it.
            on_success=ivts.update,
            on_failure=ivts.force_invalidate)
        return [job]
Example #9
    def _create_compile_jobs(self, classpath_products, compile_contexts,
                             extra_compile_time_classpath, invalid_targets,
                             invalid_vts):
        class Counter(object):
            def __init__(self, size, initial=0):
                self.size = size
                self.count = initial

            def __call__(self):
                self.count += 1
                return self.count

            def format_length(self):
                return len(str(self.size))

        counter = Counter(len(invalid_vts))

        def check_cache(vts):
            """Manually checks the artifact cache (usually immediately before compilation.)

      Returns true if the cache was hit successfully, indicating that no compilation is necessary.
      """
            if not self.artifact_cache_reads_enabled():
                return False
            cached_vts, _, _ = self.check_artifact_cache([vts])
            if not cached_vts:
                self.context.log.debug(
                    'Missed cache during double check for {}'.format(
                        vts.target.address.spec))
                return False
            assert cached_vts == [
                vts
            ], ('Cache returned unexpected target: {} vs {}'.format(
                cached_vts, [vts]))
            self.context.log.info(
                'Hit cache during double check for {}'.format(
                    vts.target.address.spec))
            counter()
            return True

        def should_compile_incrementally(vts, ctx):
            """Check to see if the compile should try to re-use the existing analysis.

      Returns true if we should try to compile the target incrementally.
      """
            if not vts.is_incremental:
                return False
            if not self._clear_invalid_analysis:
                return True
            return os.path.exists(ctx.analysis_file)

        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Capture a compilation log if requested.
            log_file = ctx.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = [ctx.classes_dir]
                cp_entries.extend(
                    ClasspathUtil.compute_classpath(
                        ctx.dependencies(self._dep_context),
                        classpath_products, extra_compile_time_classpath,
                        self._confs))
                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                if not should_compile_incrementally(vts, ctx):
                    # Purge existing analysis file in non-incremental mode.
                    safe_delete(ctx.analysis_file)
                    # Work around https://github.com/pantsbuild/pants/issues/3670
                    safe_rmtree(ctx.classes_dir)

                tgt, = vts.targets
                fatal_warnings = self._compute_language_property(
                    tgt, lambda x: x.fatal_warnings)
                self._compile_vts(vts, ctx.sources, ctx.analysis_file,
                                  upstream_analysis, cp_entries,
                                  ctx.classes_dir, log_file, progress_message,
                                  tgt.platform, fatal_warnings, counter)
                self._analysis_tools.relativize(ctx.analysis_file,
                                                ctx.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            self._register_vts([ctx])

            # Once products are registered, check for unused dependencies (if enabled).
            if not hit_cache and self._unused_deps_check_enabled:
                self._check_unused_deps(ctx)

        jobs = []
        invalid_target_set = set(invalid_targets)
        for ivts in invalid_vts:
            # Invalidated targets are a subset of relevant targets: get the context for this one.
            compile_target = ivts.target
            compile_context = compile_contexts[compile_target]
            invalid_dependencies = self._collect_invalid_compile_dependencies(
                compile_target, compile_contexts, invalid_target_set)

            jobs.append(
                Job(
                    self.exec_graph_key_for_target(compile_target),
                    functools.partial(work_for_vts, ivts, compile_context),
                    [
                        self.exec_graph_key_for_target(target)
                        for target in invalid_dependencies
                    ],
                    self._size_estimator(compile_context.sources),
                    # If compilation and analysis work succeeds, validate the vts.
                    # Otherwise, fail it.
                    on_success=ivts.update,
                    on_failure=ivts.force_invalidate))
        return jobs
Example #10
    def _create_compile_jobs(self, compile_classpaths, compile_contexts,
                             extra_compile_time_classpath, invalid_targets,
                             invalid_vts_partitioned, compile_vts,
                             register_vts, update_artifact_cache_vts_work):
        def create_work_for_vts(vts, compile_context, target_closure):
            def work():
                progress_message = compile_context.target.address.spec
                cp_entries = self._compute_classpath_entries(
                    compile_classpaths, target_closure, compile_context,
                    extra_compile_time_classpath)

                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                # Capture a compilation log if requested.
                log_file = self._capture_log_file(compile_context.target)

                # Mutate analysis within a temporary directory, and move it to the final location
                # on success.
                tmpdir = os.path.join(self.analysis_tmpdir,
                                      compile_context.target.id)
                safe_mkdir(tmpdir)
                tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
                    tmpdir, compile_context.target)
                if os.path.exists(compile_context.analysis_file):
                    shutil.copy(compile_context.analysis_file,
                                tmp_analysis_file)
                target, = vts.targets
                compile_vts(vts, compile_context.sources, tmp_analysis_file,
                            upstream_analysis, cp_entries,
                            compile_context.classes_dir, log_file,
                            progress_message, target.platform)
                atomic_copy(tmp_analysis_file, compile_context.analysis_file)

                # Jar the compiled output.
                self._create_context_jar(compile_context)

                # Update the products with the latest classes.
                register_vts([compile_context])

                # Kick off the background artifact cache write.
                if update_artifact_cache_vts_work:
                    self._write_to_artifact_cache(
                        vts, compile_context, update_artifact_cache_vts_work)

            return work

        jobs = []
        invalid_target_set = set(invalid_targets)
        for vts in invalid_vts_partitioned:
            assert len(vts.targets) == 1, (
                "Requested one target per partition, got {}".format(vts))

            # Invalidated targets are a subset of relevant targets: get the context for this one.
            compile_target = vts.targets[0]
            compile_context = compile_contexts[compile_target]
            compile_target_closure = compile_target.closure()

            # dependencies of the current target which are invalid for this chunk
            invalid_dependencies = (compile_target_closure
                                    & invalid_target_set) - [compile_target]

            jobs.append(
                Job(
                    self.exec_graph_key_for_target(compile_target),
                    create_work_for_vts(vts, compile_context,
                                        compile_target_closure),
                    [
                        self.exec_graph_key_for_target(target)
                        for target in invalid_dependencies
                    ],
                    self._size_estimator(compile_context.sources),
                    # If compilation and analysis work succeeds, validate the vts.
                    # Otherwise, fail it.
                    on_success=vts.update,
                    on_failure=vts.force_invalidate))
        return jobs
Example #11
  def create_compile_jobs(self, compile_target, all_compile_contexts, invalid_dependencies, ivts,
    counter):

    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Double check the cache before beginning compilation
      hit_cache = self.check_cache(vts, counter)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = self._cp_entries_for_ctx(ctx, 'runtime_classpath')

        upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, cp_entries))

        is_incremental = self.should_compile_incrementally(vts, ctx)
        if not is_incremental:
          # Purge existing analysis file in non-incremental mode.
          safe_delete(ctx.analysis_file)
          # Work around https://github.com/pantsbuild/pants/issues/3670
          safe_rmtree(ctx.classes_dir)

        dep_context = DependencyContext.global_instance()
        tgt, = vts.targets
        compiler_option_sets = dep_context.defaulted_property(tgt, lambda x: x.compiler_option_sets)
        zinc_file_manager = dep_context.defaulted_property(tgt, lambda x: x.zinc_file_manager)
        with Timer() as timer:
          self._compile_vts(vts,
                            ctx,
                            upstream_analysis,
                            cp_entries,
                            progress_message,
                            tgt.platform,
                            compiler_option_sets,
                            zinc_file_manager,
                            counter)
        self._record_target_stats(tgt,
                                  len(cp_entries),
                                  len(ctx.sources),
                                  timer.elapsed,
                                  is_incremental,
                                  'compile')

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)

    context_for_target = all_compile_contexts[compile_target]
    compile_context = self.select_runtime_context(context_for_target)

    job = Job(self.exec_graph_key_for_target(compile_target),
              functools.partial(work_for_vts, ivts, compile_context),
              [self.exec_graph_key_for_target(target) for target in invalid_dependencies],
              self._size_estimator(compile_context.sources),
              # If compilation and analysis work succeeds, validate the vts.
              # Otherwise, fail it.
              on_success=ivts.update,
              on_failure=ivts.force_invalidate)
    return [job]
Example #12
    def _create_compile_jobs(self, classpath_products, compile_contexts,
                             extra_compile_time_classpath, invalid_targets,
                             invalid_vts):
        class Counter(object):
            def __init__(self, size, initial=0):
                self.size = size
                self.count = initial

            def __call__(self):
                self.count += 1
                return self.count

            def format_length(self):
                return len(str(self.size))

        counter = Counter(len(invalid_vts))

        def check_cache(vts):
            """Manually checks the artifact cache (usually immediately before compilation.)

      Returns true if the cache was hit successfully, indicating that no compilation is necessary.
      """
            if not self.artifact_cache_reads_enabled():
                return False
            cached_vts, _, _ = self.check_artifact_cache([vts])
            if not cached_vts:
                self.context.log.debug(
                    'Missed cache during double check for {}'.format(
                        vts.target.address.spec))
                return False
            assert cached_vts == [
                vts
            ], ('Cache returned unexpected target: {} vs {}'.format(
                cached_vts, [vts]))
            self.context.log.info(
                'Hit cache during double check for {}'.format(
                    vts.target.address.spec))
            counter()
            return True

        def should_compile_incrementally(vts):
            """Check to see if the compile should try to re-use the existing analysis.

      Returns true if we should try to compile the target incrementally.
      """
            if not vts.is_incremental:
                return False
            if not self._clear_invalid_analysis:
                return True
            return os.path.exists(compile_context.analysis_file)

        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Capture a compilation log if requested.
            log_file = ctx.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = self._compute_classpath_entries(
                    classpath_products, ctx, extra_compile_time_classpath)
                # TODO: always provide transitive analysis, but not always all classpath entries?
                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                # Write analysis to a temporary file, and move it to the final location on success.
                tmp_analysis_file = "{}.tmp".format(ctx.analysis_file)
                if should_compile_incrementally(vts):
                    # If this is an incremental compile, rebase the analysis to our new classes directory.
                    self._analysis_tools.rebase_from_path(
                        ctx.analysis_file, tmp_analysis_file,
                        vts.previous_results_dir, vts.results_dir)
                else:
                    # Otherwise, simply ensure that it is empty.
                    safe_delete(tmp_analysis_file)
                tgt, = vts.targets
                fatal_warnings = self._compute_language_property(
                    tgt, lambda x: x.fatal_warnings)
                self._compile_vts(vts, ctx.sources, tmp_analysis_file,
                                  upstream_analysis, cp_entries,
                                  ctx.classes_dir, log_file, progress_message,
                                  tgt.platform, fatal_warnings, counter)
                os.rename(tmp_analysis_file, ctx.analysis_file)
                self._analysis_tools.relativize(ctx.analysis_file,
                                                ctx.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            self._register_vts([ctx])

        jobs = []
        invalid_target_set = set(invalid_targets)
        for ivts in invalid_vts:
            # Invalidated targets are a subset of relevant targets: get the context for this one.
            compile_target = ivts.targets[0]
            compile_context = compile_contexts[compile_target]
            compile_target_closure = compile_target.closure()

            # dependencies of the current target which are invalid for this chunk
            invalid_dependencies = (compile_target_closure
                                    & invalid_target_set) - [compile_target]

            jobs.append(
                Job(
                    self.exec_graph_key_for_target(compile_target),
                    functools.partial(work_for_vts, ivts, compile_context),
                    [
                        self.exec_graph_key_for_target(target)
                        for target in invalid_dependencies
                    ],
                    self._size_estimator(compile_context.sources),
                    # If compilation and analysis work succeeds, validate the vts.
                    # Otherwise, fail it.
                    on_success=ivts.update,
                    on_failure=ivts.force_invalidate))
        return jobs
Example #13
    def _create_compile_jobs(self, classpath_products, compile_contexts,
                             extra_compile_time_classpath, invalid_targets,
                             invalid_vts_partitioned):
        def check_cache(vts):
            """Manually checks the artifact cache (usually immediately before compilation.)

      Returns true if the cache was hit successfully, indicating that no compilation is necessary.
      """
            if not self.artifact_cache_reads_enabled():
                return False
            cached_vts, uncached_vts = self.check_artifact_cache([vts])
            if not cached_vts:
                self.context.log.debug(
                    'Missed cache during double check for {}'.format(
                        vts.target.address.spec))
                return False
            assert cached_vts == [
                vts
            ], ('Cache returned unexpected target: {} vs {}'.format(
                cached_vts, [vts]))
            self.context.log.info(
                'Hit cache during double check for {}'.format(
                    vts.target.address.spec))
            return True

        def work_for_vts(vts, compile_context):
            progress_message = compile_context.target.address.spec
            cp_entries = self._compute_classpath_entries(
                classpath_products, compile_context,
                extra_compile_time_classpath)

            upstream_analysis = dict(
                self._upstream_analysis(compile_contexts, cp_entries))

            # Capture a compilation log if requested.
            log_file = compile_context.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Write analysis to a temporary file, and move it to the final location on success.
                tmp_analysis_file = "{}.tmp".format(
                    compile_context.analysis_file)
                if vts.is_incremental:
                    # If this is an incremental compile, rebase the analysis to our new classes directory.
                    self._analysis_tools.rebase_from_path(
                        compile_context.analysis_file, tmp_analysis_file,
                        vts.previous_results_dir, vts.results_dir)
                else:
                    # Otherwise, simply ensure that it is empty.
                    safe_delete(tmp_analysis_file)
                target, = vts.targets
                self._compile_vts(vts, compile_context.sources,
                                  tmp_analysis_file, upstream_analysis,
                                  cp_entries, compile_context.classes_dir,
                                  log_file, progress_message, target.platform)
                os.rename(tmp_analysis_file, compile_context.analysis_file)
                self._analysis_tools.relativize(
                    compile_context.analysis_file,
                    compile_context.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(compile_context)

                # Jar the compiled output.
                self._create_context_jar(compile_context)

            # Update the products with the latest classes.
            self._register_vts([compile_context])

        jobs = []
        invalid_target_set = set(invalid_targets)
        for vts in invalid_vts_partitioned:
            assert len(vts.targets) == 1, (
                "Requested one target per partition, got {}".format(vts))

            # Invalidated targets are a subset of relevant targets: get the context for this one.
            compile_target = vts.targets[0]
            compile_context = compile_contexts[compile_target]
            compile_target_closure = compile_target.closure()

            # dependencies of the current target which are invalid for this chunk
            invalid_dependencies = (compile_target_closure
                                    & invalid_target_set) - [compile_target]

            jobs.append(
                Job(
                    self.exec_graph_key_for_target(compile_target),
                    functools.partial(work_for_vts, vts, compile_context),
                    [
                        self.exec_graph_key_for_target(target)
                        for target in invalid_dependencies
                    ],
                    self._size_estimator(compile_context.sources),
                    # If compilation and analysis work succeeds, validate the vts.
                    # Otherwise, fail it.
                    on_success=vts.update,
                    on_failure=vts.force_invalidate))
        return jobs
Example #14
    def _create_compile_jobs(self, classpath_products, compile_contexts,
                             extra_compile_time_classpath, invalid_targets,
                             invalid_vts_partitioned, check_vts, compile_vts,
                             register_vts, update_artifact_cache_vts_work):
        def check_cache(vts):
            """Manually checks the artifact cache (usually immediately before compilation.)

      Returns true if the cache was hit successfully, indicating that no compilation is necessary.
      """
            if not check_vts:
                return False
            cached_vts, uncached_vts = check_vts([vts])
            if not cached_vts:
                self.context.log.debug(
                    'Missed cache during double check for {}'.format(
                        vts.target.address.spec))
                return False
            assert cached_vts == [
                vts
            ], ('Cache returned unexpected target: {} vs {}'.format(
                cached_vts, [vts]))
            self.context.log.info(
                'Hit cache during double check for {}'.format(
                    vts.target.address.spec))
            return True

        def work_for_vts(vts, compile_context, target_closure):
            progress_message = compile_context.target.address.spec
            cp_entries = self._compute_classpath_entries(
                classpath_products, target_closure, compile_context,
                extra_compile_time_classpath)

            upstream_analysis = dict(
                self._upstream_analysis(compile_contexts, cp_entries))

            # Capture a compilation log if requested.
            log_file = self._capture_log_file(compile_context.target)

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)
            incremental = False

            if not hit_cache:
                # Mutate analysis within a temporary directory, and move it to the final location
                # on success.
                tmpdir = os.path.join(self.analysis_tmpdir,
                                      compile_context.target.id)
                safe_mkdir(tmpdir)
                tmp_analysis_file = self._analysis_for_target(
                    tmpdir, compile_context.target)
                # If the analysis exists for this context, it is an incremental compile.
                if os.path.exists(compile_context.analysis_file):
                    incremental = True
                    shutil.copy(compile_context.analysis_file,
                                tmp_analysis_file)
                target, = vts.targets
                compile_vts(vts, compile_context.sources, tmp_analysis_file,
                            upstream_analysis, cp_entries,
                            compile_context.classes_dir, log_file,
                            progress_message, target.platform)
                atomic_copy(tmp_analysis_file, compile_context.analysis_file)

                # Jar the compiled output.
                self._create_context_jar(compile_context)

            # Update the products with the latest classes.
            register_vts([compile_context])

            # We write to the cache only if we didn't hit during the double check, and optionally
            # only for clean builds.
            is_cacheable = not hit_cache and (
                self.get_options().incremental_caching or not incremental)
            self.context.log.debug(
                'Completed compile for {}. '
                'Hit cache: {}, was incremental: {}, is cacheable: {}, cache writes enabled: {}.'
                .format(compile_context.target.address.spec, hit_cache,
                        incremental, is_cacheable,
                        update_artifact_cache_vts_work is not None))
            if is_cacheable and update_artifact_cache_vts_work:
                # Kick off the background artifact cache write.
                self._write_to_artifact_cache(vts, compile_context,
                                              update_artifact_cache_vts_work)

        jobs = []
        invalid_target_set = set(invalid_targets)
        for vts in invalid_vts_partitioned:
            assert len(vts.targets) == 1, (
                "Requested one target per partition, got {}".format(vts))

            # Invalidated targets are a subset of relevant targets: get the context for this one.
            compile_target = vts.targets[0]
            compile_context = compile_contexts[compile_target]
            compile_target_closure = compile_target.closure()

            # dependencies of the current target which are invalid for this chunk
            invalid_dependencies = (compile_target_closure
                                    & invalid_target_set) - [compile_target]

            jobs.append(
                Job(
                    self.exec_graph_key_for_target(compile_target),
                    functools.partial(work_for_vts, vts, compile_context,
                                      compile_target_closure),
                    [
                        self.exec_graph_key_for_target(target)
                        for target in invalid_dependencies
                    ],
                    self._size_estimator(compile_context.sources),
                    # If compilation and analysis work succeeds, validate the vts.
                    # Otherwise, fail it.
                    on_success=vts.update,
                    on_failure=vts.force_invalidate))
        return jobs
Example #15
  def create_compile_jobs(self,
                          compile_target,
                          compile_contexts,
                          invalid_dependencies,
                          ivts,
                          counter,
                          runtime_classpath_product):

    def work_for_vts_rsc(vts, ctx):
      # Double check the cache before beginning compilation
      hit_cache = self.check_cache(vts, counter)
      target = ctx.target

      if not hit_cache:
        cp_entries = []

        # Include the current machine's jdk lib jars. This'll blow up remotely.
        # We need a solution for that.
        # Probably something to do with https://github.com/pantsbuild/pants/pull/6346
        distribution = JvmPlatform.preferred_jvm_distribution([ctx.target.platform], strict=True)
        jvm_lib_jars_abs = distribution.find_libs(['rt.jar', 'dt.jar', 'jce.jar', 'tools.jar'])
        cp_entries.extend(jvm_lib_jars_abs)

        classpath_abs = self._zinc.compile_classpath(
          'rsc_classpath',
          ctx.target,
          extra_cp_entries=self._extra_compile_time_classpath)

        jar_deps = [t for t in DependencyContext.global_instance().dependencies_respecting_strict_deps(target)
                    if isinstance(t, JarLibrary)]
        metacp_jar_classpath_abs = [y[1] for y in self._metacp_jars_classpath_product.get_for_targets(
          jar_deps
        )]
        jar_jar_paths = {y[1] for y in self.context.products.get_data('rsc_classpath').get_for_targets(jar_deps)}

        classpath_abs = [c for c in classpath_abs if c not in jar_jar_paths]


        classpath_rel = fast_relpath_collection(classpath_abs)
        metacp_jar_classpath_rel = fast_relpath_collection(metacp_jar_classpath_abs)
        cp_entries.extend(classpath_rel)

        ctx.ensure_output_dirs_exist()

        counter_val = str(counter()).rjust(counter.format_length(), ' ')
        counter_str = '[{}/{}] '.format(counter_val, counter.size)
        self.context.log.info(
          counter_str,
          'Rsc-ing ',
          items_to_report_element(ctx.sources, '{} source'.format(self.name())),
          ' in ',
          items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
          ' (',
          ctx.target.address.spec,
          ').')

        tgt, = vts.targets
        with Timer() as timer:
          # Step 1: Convert classpath to SemanticDB
          # ---------------------------------------
          scalac_classpath_path_entries_abs = self.tool_classpath('workaround-metacp-dependency-classpath')
          scalac_classpath_path_entries = fast_relpath_collection(scalac_classpath_path_entries_abs)
          rsc_index_dir = fast_relpath(ctx.rsc_index_dir, get_buildroot())
          args = [
            '--verbose',
            # NB: Without this setting, rsc will be missing some symbols
            #     from the scala library.
            '--include-scala-library-synthetics', # TODO generate these once and cache them
            # NB: We need to add these extra dependencies in order to be able
            #     to find symbols used by the scalac jars.
            '--dependency-classpath', os.pathsep.join(scalac_classpath_path_entries + list(jar_jar_paths)),
            # NB: The directory to dump the semanticdb jars generated by metacp.
            '--out', rsc_index_dir,
            os.pathsep.join(cp_entries),
          ]
          metacp_wu = self._runtool(
            'scala.meta.cli.Metacp',
            'metacp',
            args,
            distribution,
            tgt=tgt,
            input_files=(scalac_classpath_path_entries + classpath_rel),
            output_dir=rsc_index_dir)
          metacp_stdout = stdout_contents(metacp_wu)
          metacp_result = json.loads(metacp_stdout)


          metai_classpath = self._collect_metai_classpath(
            metacp_result, classpath_rel, jvm_lib_jars_abs)

          # Step 1.5: metai Index the semanticdbs
          # -------------------------------------
          self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt)

          # Step 2: Outline Scala sources into SemanticDB
          # ---------------------------------------------
          rsc_outline_dir = fast_relpath(ctx.rsc_outline_dir, get_buildroot())
          rsc_out = os.path.join(rsc_outline_dir, 'META-INF/semanticdb/out.semanticdb')
          safe_mkdir(os.path.join(rsc_outline_dir, 'META-INF/semanticdb'))
          target_sources = ctx.sources
          args = [
            '-cp', os.pathsep.join(metai_classpath + metacp_jar_classpath_rel),
            '-out', rsc_out,
          ] + target_sources
          self._runtool(
            'rsc.cli.Main',
            'rsc',
            args,
            distribution,
            tgt=tgt,
            # TODO pass the input files from the target snapshot instead of the below
            # input_snapshot = ctx.target.sources_snapshot(scheduler=self.context._scheduler)
            input_files=target_sources + metai_classpath + metacp_jar_classpath_rel,
            output_dir=rsc_outline_dir)
          rsc_classpath = [rsc_outline_dir]

          # Step 2.5: Postprocess the rsc outputs
          # TODO: This is only necessary as a workaround for https://github.com/twitter/rsc/issues/199.
          # Ideally, Rsc would do this on its own.
          self._run_metai_tool(distribution,
            rsc_classpath,
            rsc_outline_dir,
            tgt,
            extra_input_files=(rsc_out,))


          # Step 3: Convert SemanticDB into an mjar
          # ---------------------------------------
          rsc_mjar_file = fast_relpath(ctx.rsc_mjar_file, get_buildroot())
          args = [
            '-out', rsc_mjar_file,
            os.pathsep.join(rsc_classpath),
          ]
          self._runtool(
            'scala.meta.cli.Mjar',
            'mjar',
            args,
            distribution,
            tgt=tgt,
            input_files=(
              rsc_out,
            ),
            output_dir=os.path.dirname(rsc_mjar_file)
            )
          self.context.products.get_data('rsc_classpath').add_for_target(
            ctx.target,
            [(conf, ctx.rsc_mjar_file) for conf in self._confs],
          )

        self._record_target_stats(tgt,
                                  len(cp_entries),
                                  len(target_sources),
                                  timer.elapsed,
                                  False,
                                  'rsc'
                                  )
        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

      # Update the products with the latest classes.
      self.register_extra_products_from_contexts([ctx.target], compile_contexts)

    def work_for_vts_rsc_jar_library(vts, ctx):

      cp_entries = []

      # Include the current machine's jdk lib jars. This'll blow up remotely.
      # We need a solution for that.
      # Probably something to do with https://github.com/pantsbuild/pants/pull/6346
      # TODO perhaps determine the platform of the jar and use that here.
      # https://github.com/pantsbuild/pants/issues/6547
      distribution = JvmPlatform.preferred_jvm_distribution([], strict=True)
      jvm_lib_jars_abs = distribution.find_libs(['rt.jar', 'dt.jar', 'jce.jar', 'tools.jar'])
      cp_entries.extend(jvm_lib_jars_abs)

      # TODO use compile_classpath
      classpath_abs = [
        path for (conf, path) in
        self.context.products.get_data('rsc_classpath').get_for_target(ctx.target)
      ]
      dependency_classpath = self._zinc.compile_classpath(
        'compile_classpath',
        ctx.target,
        extra_cp_entries=self._extra_compile_time_classpath)
      classpath_rel = fast_relpath_collection(classpath_abs)
      cp_entries.extend(classpath_rel)

      counter_val = str(counter()).rjust(counter.format_length(), ' ')
      counter_str = '[{}/{}] '.format(counter_val, counter.size)
      self.context.log.info(
        counter_str,
        'Metacp-ing ',
        items_to_report_element(cp_entries, 'jar'),
        ' in ',
        items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
        ' (',
        ctx.target.address.spec,
        ').')

      ctx.ensure_output_dirs_exist()

      tgt, = vts.targets
      with Timer() as timer:
        # Step 1: Convert classpath to SemanticDB
        # ---------------------------------------
        scalac_classpath_path_entries_abs = self.tool_classpath('workaround-metacp-dependency-classpath')
        scalac_classpath_path_entries = fast_relpath_collection(scalac_classpath_path_entries_abs)
        rsc_index_dir = fast_relpath(ctx.rsc_index_dir, get_buildroot())
        args = [
          '--verbose',
          # NB: Without this setting, rsc will be missing some symbols
          #     from the scala library.
          '--include-scala-library-synthetics', # TODO generate these once and cache them
          # NB: We need to add these extra dependencies in order to be able
          #     to find symbols used by the scalac jars.
          '--dependency-classpath', os.pathsep.join(dependency_classpath + scalac_classpath_path_entries),
          # NB: The directory to dump the semanticdb jars generated by metacp.
          '--out', rsc_index_dir,
          os.pathsep.join(cp_entries),
        ]
        metacp_wu = self._runtool(
          'scala.meta.cli.Metacp',
          'metacp',
          args,
          distribution,
          tgt=tgt,
          input_files=(scalac_classpath_path_entries + classpath_rel),
          output_dir=rsc_index_dir)
        metacp_stdout = stdout_contents(metacp_wu)
        metacp_result = json.loads(metacp_stdout)

        metai_classpath = self._collect_metai_classpath(
          metacp_result, classpath_rel, jvm_lib_jars_abs)

        # Step 1.5: metai Index the semanticdbs
        # -------------------------------------
        self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt)

        abs_output = [(conf, os.path.join(get_buildroot(), x))
                      for conf in self._confs for x in metai_classpath]

        self._metacp_jars_classpath_product.add_for_target(
          ctx.target,
          abs_output,
        )

      self._record_target_stats(tgt,
          len(abs_output),
          len([]),
          timer.elapsed,
          False,
          'metacp'
        )

    rsc_jobs = []
    zinc_jobs = []

    # Invalidated targets are a subset of relevant targets: get the context for this one.
    compile_target = ivts.target
    compile_context_pair = compile_contexts[compile_target]

    # Create the rsc job.
    # Currently, rsc only supports outlining scala.
    if self._only_zinc_compileable(compile_target):
      pass
    elif self._rsc_compilable(compile_target):
      rsc_key = self._rsc_key_for_target(compile_target)
      rsc_jobs.append(
        Job(
          rsc_key,
          functools.partial(
            work_for_vts_rsc,
            ivts,
            compile_context_pair[0]),
          [self._rsc_key_for_target(target) for target in invalid_dependencies],
          self._size_estimator(compile_context_pair[0].sources),
        )
      )
    elif self._metacpable(compile_target):
      rsc_key = self._rsc_key_for_target(compile_target)
      rsc_jobs.append(
        Job(
          rsc_key,
          functools.partial(
            work_for_vts_rsc_jar_library,
            ivts,
            compile_context_pair[0]),
          [self._rsc_key_for_target(target) for target in invalid_dependencies],
          self._size_estimator(compile_context_pair[0].sources),
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )
    else:
      raise TaskError("Unexpected target for rsc compile {} with type {}"
        .format(compile_target, type(compile_target)))

    # Create the zinc compile jobs.
    # - Scala zinc compile jobs depend on the results of running rsc on the scala target.
    # - Java zinc compile jobs depend on the zinc compiles of their dependencies, because we can't
    #   generate mjars that make javac happy at this point.

    invalid_dependencies_without_jar_metacps = [t for t in invalid_dependencies
      if not self._metacpable(t)]
    if self._rsc_compilable(compile_target):
      full_key = self._compile_against_rsc_key_for_target(compile_target)
      zinc_jobs.append(
        Job(
          full_key,
          functools.partial(
            self._default_work_for_vts,
            ivts,
            compile_context_pair[1],
            'rsc_classpath',
            counter,
            compile_contexts,
            runtime_classpath_product),
          [
            self._rsc_key_for_target(compile_target)
          ] + [
            self._rsc_key_for_target(target)
            for target in invalid_dependencies_without_jar_metacps
          ],
          self._size_estimator(compile_context_pair[1].sources),
          # NB: right now, only the last job will write to the cache, because we don't
          #     do multiple cache entries per target-task tuple.
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )
    elif self._only_zinc_compileable(compile_target):
      # write to both rsc classpath and runtime classpath
      class CompositeProductAdder(object):
        def __init__(self, runtime_classpath_product, rsc_classpath_product):
          self.rsc_classpath_product = rsc_classpath_product
          self.runtime_classpath_product = runtime_classpath_product

        def add_for_target(self, *args, **kwargs):
          self.runtime_classpath_product.add_for_target(*args, **kwargs)
          self.rsc_classpath_product.add_for_target(*args, **kwargs)

      full_key = self._compile_against_rsc_key_for_target(compile_target)
      zinc_jobs.append(
        Job(
          full_key,
          functools.partial(
            self._default_work_for_vts,
            ivts,
            compile_context_pair[1],
            'runtime_classpath',
            counter,
            compile_contexts,
            CompositeProductAdder(
              runtime_classpath_product,
              self.context.products.get_data('rsc_classpath'))),
          [
            self._compile_against_rsc_key_for_target(target)
            for target in invalid_dependencies_without_jar_metacps],
          self._size_estimator(compile_context_pair[1].sources),
          # NB: right now, only the last job will write to the cache, because we don't
          #     do multiple cache entries per target-task tuple.
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )

    return rsc_jobs + zinc_jobs
Example #16
    def create_compile_jobs(
        self,
        compile_target,
        compile_contexts,
        invalid_dependencies,
        ivts,
        counter,
        runtime_classpath_product,
    ):
        def work_for_vts_rsc(vts, ctx):
            target = ctx.target
            (tgt, ) = vts.targets

            rsc_cc = compile_contexts[target].rsc_cc

            use_youtline = rsc_cc.workflow == self.JvmCompileWorkflowType.outline_and_zinc
            outliner = "scalac-outliner" if use_youtline else "rsc"

            if use_youtline and Semver.parse(
                    self._scala_library_version) < Semver.parse("2.12.9"):
                raise RuntimeError(
                    f"To use scalac's built-in outlining, scala version must be at least 2.12.9, but got {self._scala_library_version}"
                )

            # If we didn't hit the cache in the cache job, run rsc.
            if not vts.valid:
                counter_val = str(counter()).rjust(counter.format_length(),
                                                   " ")
                counter_str = f"[{counter_val}/{counter.size}] "
                action_str = "Outlining " if use_youtline else "Rsc-ing "

                self.context.log.info(
                    counter_str,
                    action_str,
                    items_to_report_element(ctx.sources,
                                            f"{self.name()} source"),
                    " in ",
                    items_to_report_element(
                        [t.address.reference() for t in vts.targets],
                        "target"),
                    " (",
                    ctx.target.address.spec,
                    ").",
                )
                # This does the following
                # - Collect the rsc classpath elements, including zinc compiles of rsc incompatible targets
                #   and rsc compiles of rsc compatible targets.
                # - Run Rsc on the current target with those as dependencies.

                dependencies_for_target = list(
                    DependencyContext.global_instance(
                    ).dependencies_respecting_strict_deps(target))

                classpath_paths = []
                classpath_digests = []
                classpath_product = self.context.products.get_data(
                    "rsc_mixed_compile_classpath")
                classpath_entries = classpath_product.get_classpath_entries_for_targets(
                    dependencies_for_target)

                hermetic = self.execution_strategy == self.ExecutionStrategy.hermetic
                for _conf, classpath_entry in classpath_entries:
                    classpath_paths.append(
                        fast_relpath(classpath_entry.path, get_buildroot()))
                    if hermetic and not classpath_entry.directory_digest:
                        raise AssertionError(
                            "ClasspathEntry {} didn't have a Digest, so won't be present for hermetic "
                            "execution of {}".format(classpath_entry,
                                                     outliner))
                    classpath_digests.append(classpath_entry.directory_digest)

                ctx.ensure_output_dirs_exist()

                with Timer() as timer:
                    # Outline Scala sources into SemanticDB / scalac compatible header jars.
                    # ---------------------------------------------
                    rsc_jar_file_relative_path = fast_relpath(
                        ctx.rsc_jar_file.path, get_buildroot())

                    sources_snapshot = ctx.target.sources_snapshot(
                        scheduler=self.context._scheduler)

                    distribution = self._get_jvm_distribution()

                    def hermetic_digest_classpath():
                        jdk_libs_rel, jdk_libs_digest = self._jdk_libs_paths_and_digest(
                            distribution)

                        merged_sources_and_jdk_digest = self.context._scheduler.merge_directories(
                            (jdk_libs_digest, sources_snapshot.digest) +
                            tuple(classpath_digests))
                        classpath_rel_jdk = classpath_paths + jdk_libs_rel
                        return (merged_sources_and_jdk_digest,
                                classpath_rel_jdk)

                    def nonhermetic_digest_classpath():
                        classpath_abs_jdk = classpath_paths + self._jdk_libs_abs(
                            distribution)
                        return ((EMPTY_DIGEST), classpath_abs_jdk)

                    (input_digest, classpath_entry_paths) = match(
                        self.execution_strategy,
                        {
                            self.ExecutionStrategy.hermetic:
                            hermetic_digest_classpath,
                            self.ExecutionStrategy.subprocess:
                            nonhermetic_digest_classpath,
                            self.ExecutionStrategy.nailgun:
                            nonhermetic_digest_classpath,
                        },
                    )()

                    youtline_args = []
                    if use_youtline:
                        youtline_args = [
                            "-Youtline",
                            "-Ystop-after:pickler",
                            "-Ypickle-write",
                            rsc_jar_file_relative_path,
                        ]

                    target_sources = ctx.sources

                    # TODO: m.jar digests aren't found, so hermetic will fail.
                    if use_youtline and not hermetic and self.get_options(
                    ).zinc_outline:
                        self._zinc_outline(ctx, classpath_paths,
                                           target_sources, youtline_args)
                    else:
                        args = ([
                            "-cp",
                            os.pathsep.join(classpath_entry_paths),
                            "-d",
                            rsc_jar_file_relative_path,
                        ] + self.get_options().extra_rsc_args + youtline_args +
                                target_sources)

                        self.write_argsfile(ctx, args)

                        self._runtool(distribution, input_digest, ctx,
                                      use_youtline)

                self._record_target_stats(
                    tgt,
                    len(classpath_entry_paths),
                    len(target_sources),
                    timer.elapsed,
                    False,
                    outliner,
                )

            # Update the products with the latest classes.
            self.register_extra_products_from_contexts([ctx.target],
                                                       compile_contexts)

        ### Create Jobs for ExecutionGraph
        cache_doublecheck_jobs = []
        rsc_jobs = []
        zinc_jobs = []

        # Invalidated targets are a subset of relevant targets: get the context for this one.
        compile_target = ivts.target
        merged_compile_context = compile_contexts[compile_target]
        rsc_compile_context = merged_compile_context.rsc_cc
        zinc_compile_context = merged_compile_context.zinc_cc

        workflow = rsc_compile_context.workflow

        cache_doublecheck_key = self.exec_graph_double_check_cache_key_for_target(
            compile_target)

        def all_zinc_rsc_invalid_dep_keys(invalid_deps):
            """Get the rsc key for an rsc-and-zinc target, or the zinc key for a zinc-only
            target."""
            for tgt in invalid_deps:
                # The workflow can be None for e.g. JarLibrary deps, which we don't need to compile
                # here because they are populated in the resolve goal.
                tgt_rsc_cc = compile_contexts[tgt].rsc_cc
                if tgt_rsc_cc.workflow is not None:
                    # Rely on the results of zinc compiles for zinc-compatible targets
                    yield self._key_for_target_as_dep(tgt, tgt_rsc_cc.workflow)

        def make_cache_doublecheck_job(dep_keys):
            # As in JvmCompile.create_compile_jobs, we create a cache-double-check job that all "real" work
            # depends on. It depends on completion of the same dependencies as the rsc job in order to run
            # as late as possible, while still running before rsc or zinc.
            return Job(
                key=cache_doublecheck_key,
                fn=functools.partial(self._double_check_cache_for_vts, ivts,
                                     zinc_compile_context),
                dependencies=list(dep_keys),
                options_scope=self.options_scope,
            )

        def make_outline_job(target, dep_targets):
            if workflow == self.JvmCompileWorkflowType.outline_and_zinc:
                target_key = self._outline_key_for_target(target)
            else:
                target_key = self._rsc_key_for_target(target)
            return Job(
                key=target_key,
                fn=functools.partial(
                    # NB: This will output to the 'rsc_mixed_compile_classpath' product via
                    # self.register_extra_products_from_contexts()!
                    work_for_vts_rsc,
                    ivts,
                    rsc_compile_context,
                ),
                # The rsc jobs depend on other rsc jobs, and on zinc jobs for targets that are not
                # processed by rsc.
                dependencies=[cache_doublecheck_key] +
                list(all_zinc_rsc_invalid_dep_keys(dep_targets)),
                size=self._size_estimator(rsc_compile_context.sources),
                options_scope=self.options_scope,
                target=target,
            )

        def only_zinc_invalid_dep_keys(invalid_deps):
            for tgt in invalid_deps:
                rsc_cc_tgt = compile_contexts[tgt].rsc_cc
                if rsc_cc_tgt.workflow is not None:
                    yield self._zinc_key_for_target(tgt, rsc_cc_tgt.workflow)

        def make_zinc_job(target, input_product_key, output_products,
                          dep_keys):
            return Job(
                key=self._zinc_key_for_target(target,
                                              rsc_compile_context.workflow),
                fn=functools.partial(
                    self._default_work_for_vts,
                    ivts,
                    zinc_compile_context,
                    input_product_key,
                    counter,
                    compile_contexts,
                    CompositeProductAdder(*output_products),
                ),
                dependencies=[cache_doublecheck_key] + list(dep_keys),
                size=self._size_estimator(zinc_compile_context.sources),
                options_scope=self.options_scope,
                target=target,
            )

        # Replica of JvmCompile's _record_target_stats logic
        def record(k, v):
            self.context.run_tracker.report_target_info(
                self.options_scope, compile_target, ["compile", k], v)

        record("workflow", workflow.value)
        record("execution_strategy", self.execution_strategy.value)

        # Create the cache doublecheck job.
        match(
            workflow,
            {
                self.JvmCompileWorkflowType.zinc_only:
                lambda: cache_doublecheck_jobs.append(
                    make_cache_doublecheck_job(
                        list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ))),
                self.JvmCompileWorkflowType.zinc_java:
                lambda: cache_doublecheck_jobs.append(
                    make_cache_doublecheck_job(
                        list(only_zinc_invalid_dep_keys(invalid_dependencies)))
                ),
                self.JvmCompileWorkflowType.rsc_and_zinc:
                lambda: cache_doublecheck_jobs.append(
                    make_cache_doublecheck_job(
                        list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ))),
                self.JvmCompileWorkflowType.outline_and_zinc:
                lambda: cache_doublecheck_jobs.append(
                    make_cache_doublecheck_job(
                        list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ))),
            },
        )()

        # Create the rsc job.
        # Currently, rsc only supports outlining scala.
        match(
            workflow,
            {
                self.JvmCompileWorkflowType.zinc_only:
                lambda: None,
                self.JvmCompileWorkflowType.zinc_java:
                lambda: None,
                self.JvmCompileWorkflowType.rsc_and_zinc:
                lambda: rsc_jobs.append(
                    make_outline_job(compile_target, invalid_dependencies)),
                self.JvmCompileWorkflowType.outline_and_zinc:
                lambda: rsc_jobs.append(
                    make_outline_job(compile_target, invalid_dependencies)),
            },
        )()

        # Create the zinc compile jobs.
        # - Scala zinc compile jobs depend on the results of running rsc on the scala target.
        # - Java zinc compile jobs depend on the zinc compiles of their dependencies, because we can't
        #   generate jars that make javac happy at this point.
        match(
            workflow,
            {
                # NB: zinc-only zinc jobs run zinc and depend on rsc and/or zinc compile outputs.
                self.JvmCompileWorkflowType.zinc_only:
                lambda: zinc_jobs.append(
                    make_zinc_job(
                        compile_target,
                        input_product_key="rsc_mixed_compile_classpath",
                        output_products=[
                            runtime_classpath_product,
                            self.context.products.get_data(
                                "rsc_mixed_compile_classpath"),
                        ],
                        dep_keys=list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ),
                    )),
                # NB: javac can't read rsc output yet, so we need it to depend strictly on zinc
                # compilations of dependencies.
                self.JvmCompileWorkflowType.zinc_java:
                lambda: zinc_jobs.append(
                    make_zinc_job(
                        compile_target,
                        input_product_key="runtime_classpath",
                        output_products=[
                            runtime_classpath_product,
                            self.context.products.get_data(
                                "rsc_mixed_compile_classpath"),
                        ],
                        dep_keys=list(
                            only_zinc_invalid_dep_keys(invalid_dependencies)),
                    )),
                self.JvmCompileWorkflowType.rsc_and_zinc:
                lambda: zinc_jobs.append(
                    # NB: rsc-and-zinc jobs run zinc and depend on both rsc and zinc compile outputs.
                    make_zinc_job(
                        compile_target,
                        input_product_key="rsc_mixed_compile_classpath",
                        # NB: We want to ensure the 'runtime_classpath' product *only* contains the outputs of
                        # zinc compiles, and that the 'rsc_mixed_compile_classpath' entries for rsc-compatible targets
                        # *only* contain the output of an rsc compile for that target.
                        output_products=[runtime_classpath_product],
                        dep_keys=list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ),
                    )),
                # Should be the same as the 'rsc-and-zinc' case.
                self.JvmCompileWorkflowType.outline_and_zinc:
                lambda: zinc_jobs.append(
                    make_zinc_job(
                        compile_target,
                        input_product_key="rsc_mixed_compile_classpath",
                        output_products=[runtime_classpath_product],
                        dep_keys=list(
                            all_zinc_rsc_invalid_dep_keys(invalid_dependencies)
                        ),
                    )),
            },
        )()

        compile_jobs = rsc_jobs + zinc_jobs

        # Create a job that depends on all real work having completed that will eagerly write to the
        # cache by calling `vt.update()`.
        write_to_cache_job = Job(
            key=self._write_to_cache_key_for_target(compile_target),
            fn=ivts.update,
            dependencies=[job.key for job in compile_jobs],
            run_asap=True,
            on_failure=ivts.force_invalidate,
            options_scope=self.options_scope,
            target=compile_target,
        )

        all_jobs = cache_doublecheck_jobs + rsc_jobs + zinc_jobs + [
            write_to_cache_job
        ]
        return (all_jobs, len(compile_jobs))
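The jobs returned above form a small per-target DAG: the cache double-check job shares the rsc job's upstream dependency keys so it runs as late as possible, both the rsc and zinc jobs gate on it, the zinc job waits only on the outlines of the target's dependencies (not the target's own rsc output, as wired above), and the write-to-cache job depends on all of the "real" work. Below is a minimal, self-contained sketch of that ordering for a hypothetical target 'a' in the rsc-and-zinc workflow with a single rsc-and-zinc dependency 'b'; the key names are illustrative, not the real Pants key format.

from graphlib import TopologicalSorter  # stdlib, Python 3.9+

# Edges map each job to the jobs it waits on, mirroring the `dependencies=` lists above.
job_graph = {
    "cache-doublecheck(a)": {"rsc(b)"},                 # same upstream keys as rsc(a)
    "rsc(a)": {"cache-doublecheck(a)", "rsc(b)"},
    "zinc(a)": {"cache-doublecheck(a)", "rsc(b)"},      # needs dep outlines, not rsc(a) itself
    "write-to-cache(a)": {"rsc(a)", "zinc(a)"},         # depends on all "real" work
}
print(list(TopologicalSorter(job_graph).static_order()))
# One valid order: ['rsc(b)', 'cache-doublecheck(a)', 'rsc(a)', 'zinc(a)', 'write-to-cache(a)']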
Example #17
  def pre_compile_jobs(self, counter):

    # Create a target for the jdk outlining so that it'll only be done once per run.
    target = Target('jdk', Address('', 'jdk'), self.context.build_graph)
    index_dir = os.path.join(self.versioned_workdir, '--jdk--', 'index')

    def work_for_vts_rsc_jdk():
      distribution = self._get_jvm_distribution()
      jvm_lib_jars_abs = distribution.find_libs(['rt.jar', 'dt.jar', 'jce.jar', 'tools.jar'])
      self._jvm_lib_jars_abs = jvm_lib_jars_abs

      metacp_inputs = tuple(jvm_lib_jars_abs)

      counter_val = str(counter()).rjust(counter.format_length(), ' ' if PY3 else b' ')
      counter_str = '[{}/{}] '.format(counter_val, counter.size)
      self.context.log.info(
        counter_str,
        'Metacp-ing ',
        items_to_report_element(metacp_inputs, 'jar'),
        ' in the jdk')

      # NB: Metacp doesn't handle the existence of possibly stale semanticdb jars,
      # so we explicitly clean the directory to keep it happy.
      safe_mkdir(index_dir, clean=True)

      with Timer() as timer:
        # Step 1: Convert classpath to SemanticDB
        # ---------------------------------------
        rsc_index_dir = fast_relpath(index_dir, get_buildroot())
        args = [
          '--verbose',
          # NB: The directory to dump the semanticdb jars generated by metacp.
          '--out', rsc_index_dir,
          os.pathsep.join(metacp_inputs),
        ]
        metacp_wu = self._runtool(
          'scala.meta.cli.Metacp',
          'metacp',
          args,
          distribution,
          tgt=target,
          input_files=tuple(
            # NB: no input files because the jdk is expected to exist on the system in a known
            #     location.
            #     Related: https://github.com/pantsbuild/pants/issues/6416
          ),
          output_dir=rsc_index_dir)
        metacp_stdout = stdout_contents(metacp_wu)
        metacp_result = json.loads(metacp_stdout)

        metai_classpath = self._collect_metai_classpath(metacp_result, jvm_lib_jars_abs)

        # Step 1.5: metai Index the semanticdbs
        # -------------------------------------
        self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt=target)

        self._jvm_lib_metacp_classpath = [os.path.join(get_buildroot(), x) for x in metai_classpath]

      self._record_target_stats(target,
        len(self._jvm_lib_metacp_classpath),
        len([]),
        timer.elapsed,
        False,
        'metacp'
      )

    return [
      Job(
        'metacp(jdk)',
        functools.partial(
          work_for_vts_rsc_jdk
        ),
        [],
        self._size_estimator([]),
      ),
    ]
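The '[n/total] ' progress prefix built above (and reused in the later examples) right-justifies the running count to the width of the total job count. A tiny, runnable sketch of just that formatting follows; FakeCounter is an invented stand-in for the task's counter object, not a Pants API.

class FakeCounter:
    """Stand-in for the job counter: calling it returns the next count."""
    size = 12  # total number of jobs

    def __init__(self):
        self._n = 0

    def __call__(self):
        self._n += 1
        return self._n

    def format_length(self):
        return len(str(self.size))

counter = FakeCounter()
for _ in range(3):
    counter_val = str(counter()).rjust(counter.format_length(), ' ')
    print('[{}/{}] '.format(counter_val, counter.size))
# prints '[ 1/12] ', '[ 2/12] ', '[ 3/12] '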
Example #18
    def create_compile_jobs(self, compile_target, compile_contexts,
                            invalid_dependencies, ivts, counter,
                            runtime_classpath_product):
        def work_for_vts_rsc(vts, ctx):
            target = ctx.target
            tgt, = vts.targets

            # If we didn't hit the cache in the cache job, run rsc.
            if not vts.valid:
                counter_val = str(counter()).rjust(counter.format_length(),
                                                   ' ')
                counter_str = '[{}/{}] '.format(counter_val, counter.size)
                self.context.log.info(
                    counter_str, 'Rsc-ing ',
                    items_to_report_element(ctx.sources,
                                            '{} source'.format(self.name())),
                    ' in ',
                    items_to_report_element(
                        [t.address.reference() for t in vts.targets],
                        'target'), ' (', ctx.target.address.spec, ').')
                # This does the following
                # - Collect the rsc classpath elements, including zinc compiles of rsc incompatible targets
                #   and rsc compiles of rsc compatible targets.
                # - Run Rsc on the current target with those as dependencies.

                dependencies_for_target = list(
                    DependencyContext.global_instance(
                    ).dependencies_respecting_strict_deps(target))

                classpath_paths = []
                classpath_directory_digests = []
                classpath_product = self.context.products.get_data(
                    'rsc_mixed_compile_classpath')
                classpath_entries = classpath_product.get_classpath_entries_for_targets(
                    dependencies_for_target)
                for _conf, classpath_entry in classpath_entries:
                    classpath_paths.append(
                        fast_relpath(classpath_entry.path, get_buildroot()))
                    if self.HERMETIC == self.execution_strategy_enum.value and not classpath_entry.directory_digest:
                        raise AssertionError(
                            "ClasspathEntry {} didn't have a Digest, so won't be present for hermetic "
                            "execution of rsc".format(classpath_entry))
                    classpath_directory_digests.append(
                        classpath_entry.directory_digest)

                ctx.ensure_output_dirs_exist()

                with Timer() as timer:
                    # Outline Scala sources into SemanticDB / scalac compatible header jars.
                    # ---------------------------------------------
                    rsc_jar_file_relative_path = fast_relpath(
                        ctx.rsc_jar_file.path, get_buildroot())

                    sources_snapshot = ctx.target.sources_snapshot(
                        scheduler=self.context._scheduler)

                    distribution = self._get_jvm_distribution()

                    def hermetic_digest_classpath():
                        jdk_libs_rel, jdk_libs_digest = self._jdk_libs_paths_and_digest(
                            distribution)

                        merged_sources_and_jdk_digest = self.context._scheduler.merge_directories(
                            (jdk_libs_digest,
                             sources_snapshot.directory_digest) +
                            tuple(classpath_directory_digests))
                        classpath_rel_jdk = classpath_paths + jdk_libs_rel
                        return (merged_sources_and_jdk_digest,
                                classpath_rel_jdk)

                    def nonhermetic_digest_classpath():
                        classpath_abs_jdk = classpath_paths + self._jdk_libs_abs(
                            distribution)
                        return (EMPTY_DIRECTORY_DIGEST, classpath_abs_jdk)

                    (input_digest, classpath_entry_paths
                     ) = self.execution_strategy_enum.resolve_for_enum_variant(
                         {
                             self.HERMETIC: hermetic_digest_classpath,
                             self.SUBPROCESS: nonhermetic_digest_classpath,
                             self.NAILGUN: nonhermetic_digest_classpath,
                         })()

                    target_sources = ctx.sources
                    args = [
                        '-cp',
                        os.pathsep.join(classpath_entry_paths),
                        '-d',
                        rsc_jar_file_relative_path,
                    ] + self.get_options().extra_rsc_args + target_sources

                    self.write_argsfile(ctx, args)

                    self._runtool(distribution, input_digest, ctx)

                self._record_target_stats(tgt, len(classpath_entry_paths),
                                          len(target_sources), timer.elapsed,
                                          False, 'rsc')

            # Update the products with the latest classes.
            self.register_extra_products_from_contexts([ctx.target],
                                                       compile_contexts)

        ### Create Jobs for ExecutionGraph
        cache_doublecheck_jobs = []
        rsc_jobs = []
        zinc_jobs = []

        # Invalidated targets are a subset of relevant targets: get the context for this one.
        compile_target = ivts.target
        merged_compile_context = compile_contexts[compile_target]
        rsc_compile_context = merged_compile_context.rsc_cc
        zinc_compile_context = merged_compile_context.zinc_cc

        cache_doublecheck_key = self.exec_graph_double_check_cache_key_for_target(
            compile_target)

        def all_zinc_rsc_invalid_dep_keys(invalid_deps):
            """Get the rsc key for an rsc-and-zinc target, or the zinc key for a zinc-only target."""
            for tgt in invalid_deps:
                # The workflow can be None for e.g. JarLibrary deps, which we don't need to compile
                # here because they are populated in the resolve goal.
                tgt_rsc_cc = compile_contexts[tgt].rsc_cc
                if tgt_rsc_cc.workflow is not None:
                    # Rely on the results of zinc compiles for zinc-compatible targets
                    yield self._key_for_target_as_dep(tgt, tgt_rsc_cc.workflow)

        def make_cache_doublecheck_job(dep_keys):
            # As in JvmCompile.create_compile_jobs, we create a cache-double-check job that all "real" work
            # depends on. It depends on completion of the same dependencies as the rsc job in order to run
            # as late as possible, while still running before rsc or zinc.
            return Job(cache_doublecheck_key,
                       functools.partial(
                           self._default_double_check_cache_for_vts, ivts),
                       dependencies=list(dep_keys))

        def make_rsc_job(target, dep_targets):
            return Job(
                key=self._rsc_key_for_target(target),
                fn=functools.partial(
                    # NB: This will output to the 'rsc_mixed_compile_classpath' product via
                    # self.register_extra_products_from_contexts()!
                    work_for_vts_rsc,
                    ivts,
                    rsc_compile_context,
                ),
                # The rsc jobs depend on other rsc jobs, and on zinc jobs for targets that are not
                # processed by rsc.
                dependencies=[cache_doublecheck_key] +
                list(all_zinc_rsc_invalid_dep_keys(dep_targets)),
                size=self._size_estimator(rsc_compile_context.sources),
            )

        def only_zinc_invalid_dep_keys(invalid_deps):
            for tgt in invalid_deps:
                rsc_cc_tgt = compile_contexts[tgt].rsc_cc
                if rsc_cc_tgt.workflow is not None:
                    yield self._zinc_key_for_target(tgt, rsc_cc_tgt.workflow)

        def make_zinc_job(target, input_product_key, output_products,
                          dep_keys):
            return Job(
                key=self._zinc_key_for_target(target,
                                              rsc_compile_context.workflow),
                fn=functools.partial(self._default_work_for_vts, ivts,
                                     zinc_compile_context, input_product_key,
                                     counter, compile_contexts,
                                     CompositeProductAdder(*output_products)),
                dependencies=[cache_doublecheck_key] + list(dep_keys),
                size=self._size_estimator(zinc_compile_context.sources),
            )

        workflow = rsc_compile_context.workflow

        # Replica of JvmCompile's _record_target_stats logic
        def record(k, v):
            self.context.run_tracker.report_target_info(
                self.options_scope, compile_target, ['compile', k], v)

        record('workflow', workflow.value)
        record('execution_strategy', self.execution_strategy)

        # Create the cache doublecheck job.
        workflow.resolve_for_enum_variant({
            'zinc-only':
            lambda: cache_doublecheck_jobs.append(
                make_cache_doublecheck_job(
                    list(all_zinc_rsc_invalid_dep_keys(invalid_dependencies)))
            ),
            'zinc-java':
            lambda: cache_doublecheck_jobs.append(
                make_cache_doublecheck_job(
                    list(only_zinc_invalid_dep_keys(invalid_dependencies)))),
            'rsc-and-zinc':
            lambda: cache_doublecheck_jobs.append(
                make_cache_doublecheck_job(
                    list(all_zinc_rsc_invalid_dep_keys(invalid_dependencies)))
            ),
        })()

        # Create the rsc job.
        # Currently, rsc only supports outlining scala.
        workflow.resolve_for_enum_variant({
            'zinc-only':
            lambda: None,
            'zinc-java':
            lambda: None,
            'rsc-and-zinc':
            lambda: rsc_jobs.append(
                make_rsc_job(compile_target, invalid_dependencies)),
        })()

        # Create the zinc compile jobs.
        # - Scala zinc compile jobs depend on the results of running rsc on the scala target.
        # - Java zinc compile jobs depend on the zinc compiles of their dependencies, because we can't
        #   generate jars that make javac happy at this point.
        workflow.resolve_for_enum_variant({
            # NB: zinc-only zinc jobs run zinc and depend on rsc and/or zinc compile outputs.
            'zinc-only':
            lambda: zinc_jobs.append(
                make_zinc_job(compile_target,
                              input_product_key='rsc_mixed_compile_classpath',
                              output_products=[
                                  runtime_classpath_product,
                                  self.context.products.get_data(
                                      'rsc_mixed_compile_classpath'),
                              ],
                              dep_keys=list(
                                  all_zinc_rsc_invalid_dep_keys(
                                      invalid_dependencies)))),
            # NB: javac can't read rsc output yet, so we need it to depend strictly on zinc
            # compilations of dependencies.
            'zinc-java':
            lambda: zinc_jobs.append(
                make_zinc_job(compile_target,
                              input_product_key='runtime_classpath',
                              output_products=[
                                  runtime_classpath_product,
                                  self.context.products.get_data(
                                      'rsc_mixed_compile_classpath'),
                              ],
                              dep_keys=list(
                                  only_zinc_invalid_dep_keys(
                                      invalid_dependencies)))),
            'rsc-and-zinc':
            lambda: zinc_jobs.append(
                # NB: rsc-and-zinc jobs run zinc and depend on both rsc and zinc compile outputs.
                make_zinc_job(
                    compile_target,
                    input_product_key='rsc_mixed_compile_classpath',
                    # NB: We want to ensure the 'runtime_classpath' product *only* contains the outputs of
                    # zinc compiles, and that the 'rsc_mixed_compile_classpath' entries for rsc-compatible targets
                    # *only* contain the output of an rsc compile for that target.
                    output_products=[
                        runtime_classpath_product,
                    ],
                    dep_keys=list(
                        all_zinc_rsc_invalid_dep_keys(invalid_dependencies)),
                )),
        })()

        compile_jobs = rsc_jobs + zinc_jobs

        # Create a job that depends on all real work having completed that will eagerly write to the
        # cache by calling `vt.update()`.
        write_to_cache_job = Job(
            key=self._write_to_cache_key_for_target(compile_target),
            fn=ivts.update,
            dependencies=[job.key for job in compile_jobs],
            run_asap=True,
            on_failure=ivts.force_invalidate)

        all_jobs = cache_doublecheck_jobs + rsc_jobs + zinc_jobs + [
            write_to_cache_job
        ]
        return (all_jobs, len(compile_jobs))
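As a compact restatement of what the three workflow branches above wire up (illustrative only; the tuples simply paraphrase the make_zinc_job calls), each workflow picks an input classpath product, a rule for choosing dependency keys, and the products the zinc output is registered into.

# (input classpath product, dependency keys drawn from, products written by the zinc job)
WORKFLOW_WIRING = {
    'zinc-only':    ('rsc_mixed_compile_classpath', 'rsc-or-zinc keys of invalid deps',
                     ['runtime_classpath', 'rsc_mixed_compile_classpath']),
    'zinc-java':    ('runtime_classpath',           'zinc keys of invalid deps only',
                     ['runtime_classpath', 'rsc_mixed_compile_classpath']),
    'rsc-and-zinc': ('rsc_mixed_compile_classpath', 'rsc-or-zinc keys of invalid deps',
                     ['runtime_classpath']),
}
for workflow, (input_product, dep_keys, outputs) in WORKFLOW_WIRING.items():
    print('{:>12}: reads {}, deps from {}, writes {}'.format(
        workflow, input_product, dep_keys, outputs))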
Example #19
  def create_compile_jobs(self,
                          compile_target,
                          compile_contexts,
                          invalid_dependencies,
                          ivts,
                          counter,
                          runtime_classpath_product):

    def work_for_vts_rsc(vts, ctx):
      # Double check the cache before beginning compilation
      hit_cache = self.check_cache(vts, counter)
      target = ctx.target
      tgt, = vts.targets

      if not hit_cache:
        counter_val = str(counter()).rjust(counter.format_length(), ' ' if PY3 else b' ')
        counter_str = '[{}/{}] '.format(counter_val, counter.size)
        self.context.log.info(
          counter_str,
          'Rsc-ing ',
          items_to_report_element(ctx.sources, '{} source'.format(self.name())),
          ' in ',
          items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
          ' (',
          ctx.target.address.spec,
          ').')

        # This does the following
        # - collect jar dependencies and metacp-classpath entries for them
        # - collect the non-java targets and their classpath entries
        # - break out java targets and their javac'd classpath entries
        # metacp
        # - metacp the java targets
        # rsc
        # - combine the metacp outputs for jars, previous scala targets and the java metacp
        #   classpath
        # - run Rsc on the current target with those as dependencies

        dependencies_for_target = list(
          DependencyContext.global_instance().dependencies_respecting_strict_deps(target))

        jar_deps = [t for t in dependencies_for_target if isinstance(t, JarLibrary)]

        def is_java_compile_target(t):
          return isinstance(t, JavaLibrary) or t.has_sources('.java')
        java_deps = [t for t in dependencies_for_target
                     if is_java_compile_target(t)]
        non_java_deps = [t for t in dependencies_for_target
                         if not (is_java_compile_target(t)) and not isinstance(t, JarLibrary)]

        metacped_jar_classpath_abs = _paths_from_classpath(
          self._metacp_jars_classpath_product.get_for_targets(jar_deps + java_deps)
        )
        metacped_jar_classpath_abs.extend(self._jvm_lib_metacp_classpath)
        metacped_jar_classpath_rel = fast_relpath_collection(metacped_jar_classpath_abs)

        non_java_paths = _paths_from_classpath(
          self.context.products.get_data('rsc_classpath').get_for_targets(non_java_deps),
          collection_type=set)
        non_java_rel = fast_relpath_collection(non_java_paths)

        ctx.ensure_output_dirs_exist()

        distribution = self._get_jvm_distribution()
        with Timer() as timer:
          # Outline Scala sources into SemanticDB
          # ---------------------------------------------
          rsc_mjar_file = fast_relpath(ctx.rsc_mjar_file, get_buildroot())

          # TODO remove non-rsc entries from non_java_rel in a better way
          rsc_semanticdb_classpath = metacped_jar_classpath_rel + \
                                     [j for j in non_java_rel if 'compile/rsc/' in j]
          target_sources = ctx.sources
          args = [
                   '-cp', os.pathsep.join(rsc_semanticdb_classpath),
                   '-d', rsc_mjar_file,
                 ] + target_sources
          sources_snapshot = ctx.target.sources_snapshot(scheduler=self.context._scheduler)
          self._runtool(
            'rsc.cli.Main',
            'rsc',
            args,
            distribution,
            tgt=tgt,
            input_files=tuple(rsc_semanticdb_classpath),
            input_digest=sources_snapshot.directory_digest,
            output_dir=os.path.dirname(rsc_mjar_file))

        self._record_target_stats(tgt,
          len(rsc_semanticdb_classpath),
          len(target_sources),
          timer.elapsed,
          False,
          'rsc'
        )
        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

      # Update the products with the latest classes.
      self.register_extra_products_from_contexts([ctx.target], compile_contexts)

    def work_for_vts_metacp(vts, ctx, classpath_product_key):
      metacp_dependencies_entries = self._zinc.compile_classpath_entries(
        classpath_product_key,
        ctx.target,
        extra_cp_entries=self._extra_compile_time_classpath)

      metacp_dependencies = fast_relpath_collection(c.path for c in metacp_dependencies_entries)


      metacp_dependencies_digests = [c.directory_digest for c in metacp_dependencies_entries
                                     if c.directory_digest]
      metacp_dependencies_paths_without_digests = fast_relpath_collection(
        c.path for c in metacp_dependencies_entries if not c.directory_digest)

      classpath_entries = [
        cp_entry for (conf, cp_entry) in
        self.context.products.get_data(classpath_product_key).get_classpath_entries_for_targets(
          [ctx.target])
      ]
      classpath_digests = [c.directory_digest for c in classpath_entries if c.directory_digest]
      classpath_paths_without_digests = fast_relpath_collection(
        c.path for c in classpath_entries if not c.directory_digest)

      classpath_abs = [c.path for c in classpath_entries]
      classpath_rel = fast_relpath_collection(classpath_abs)

      metacp_inputs = []
      metacp_inputs.extend(classpath_rel)

      counter_val = str(counter()).rjust(counter.format_length(), ' ' if PY3 else b' ')
      counter_str = '[{}/{}] '.format(counter_val, counter.size)
      self.context.log.info(
        counter_str,
        'Metacp-ing ',
        items_to_report_element(metacp_inputs, 'jar'),
        ' in ',
        items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
        ' (',
        ctx.target.address.spec,
        ').')

      ctx.ensure_output_dirs_exist()

      tgt, = vts.targets
      with Timer() as timer:
        # Step 1: Convert classpath to SemanticDB
        # ---------------------------------------
        rsc_index_dir = fast_relpath(ctx.rsc_index_dir, get_buildroot())
        args = [
          '--verbose',
          '--stub-broken-signatures',
          '--dependency-classpath', os.pathsep.join(
            metacp_dependencies +
            fast_relpath_collection(self._jvm_lib_jars_abs)
          ),
          # NB: The directory to dump the semanticdb jars generated by metacp.
          '--out', rsc_index_dir,
          os.pathsep.join(metacp_inputs),
        ]

        # NB: If we're building a scala library jar,
        #     also request that metacp generate the indices
        #     for the scala synthetics.
        if self._is_scala_core_library(tgt):
          args = [
            '--include-scala-library-synthetics',
          ] + args
        distribution = self._get_jvm_distribution()

        input_digest = self.context._scheduler.merge_directories(
          tuple(classpath_digests + metacp_dependencies_digests))

        metacp_wu = self._runtool(
          'scala.meta.cli.Metacp',
          'metacp',
          args,
          distribution,
          tgt=tgt,
          input_digest=input_digest,
          input_files=tuple(classpath_paths_without_digests +
                            metacp_dependencies_paths_without_digests),
          output_dir=rsc_index_dir)
        metacp_result = json.loads(stdout_contents(metacp_wu))

        metai_classpath = self._collect_metai_classpath(metacp_result, classpath_rel)

        # Step 1.5: metai Index the semanticdbs
        # -------------------------------------
        self._run_metai_tool(distribution, metai_classpath, rsc_index_dir, tgt)

        abs_output = [(conf, os.path.join(get_buildroot(), x))
                      for conf in self._confs for x in metai_classpath]

        self._metacp_jars_classpath_product.add_for_target(
          ctx.target,
          abs_output,
        )

      self._record_target_stats(tgt,
          len(abs_output),
          len([]),
          timer.elapsed,
          False,
          'metacp'
        )

    rsc_jobs = []
    zinc_jobs = []

    # Invalidated targets are a subset of relevant targets: get the context for this one.
    compile_target = ivts.target
    compile_context_pair = compile_contexts[compile_target]

    # Create the rsc job.
    # Currently, rsc only supports outlining scala.
    if self._only_zinc_compilable(compile_target):
      pass
    elif self._rsc_compilable(compile_target):
      rsc_key = self._rsc_key_for_target(compile_target)
      rsc_jobs.append(
        Job(
          rsc_key,
          functools.partial(
            work_for_vts_rsc,
            ivts,
            compile_context_pair[0]),
          [self._rsc_key_for_target(target) for target in invalid_dependencies] + ['metacp(jdk)'],
          self._size_estimator(compile_context_pair[0].sources),
        )
      )
    elif self._metacpable(compile_target):
      rsc_key = self._rsc_key_for_target(compile_target)
      rsc_jobs.append(
        Job(
          rsc_key,
          functools.partial(
            work_for_vts_metacp,
            ivts,
            compile_context_pair[0],
            'compile_classpath'),
          [self._rsc_key_for_target(target) for target in invalid_dependencies] + ['metacp(jdk)'],
          self._size_estimator(compile_context_pair[0].sources),
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )
    else:
      raise TaskError("Unexpected target for rsc compile {} with type {}"
        .format(compile_target, type(compile_target)))

    # Create the zinc compile jobs.
    # - Scala zinc compile jobs depend on the results of running rsc on the scala target.
    # - Java zinc compile jobs depend on the zinc compiles of their dependencies, because we can't
    #   generate mjars that make javac happy at this point.

    invalid_dependencies_without_jar_metacps = [t for t in invalid_dependencies
      if not self._metacpable(t)]
    if self._rsc_compilable(compile_target):
      full_key = self._compile_against_rsc_key_for_target(compile_target)
      zinc_jobs.append(
        Job(
          full_key,
          functools.partial(
            self._default_work_for_vts,
            ivts,
            compile_context_pair[1],
            'rsc_classpath',
            counter,
            compile_contexts,
            runtime_classpath_product),
          [
            self._rsc_key_for_target(compile_target)
          ] + [
            self._rsc_key_for_target(target)
            for target in invalid_dependencies_without_jar_metacps
          ] + [
            'metacp(jdk)'
          ],
          self._size_estimator(compile_context_pair[1].sources),
          # NB: right now, only the last job will write to the cache, because we don't
          #     do multiple cache entries per target-task tuple.
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )
    elif self._only_zinc_compilable(compile_target):
      # write to both rsc classpath and runtime classpath
      class CompositeProductAdder(object):
        def __init__(self, runtime_classpath_product, rsc_classpath_product):
          self.rsc_classpath_product = rsc_classpath_product
          self.runtime_classpath_product = runtime_classpath_product

        def add_for_target(self, *args, **kwargs):
          self.runtime_classpath_product.add_for_target(*args, **kwargs)
          self.rsc_classpath_product.add_for_target(*args, **kwargs)

      zinc_key = self._compile_against_rsc_key_for_target(compile_target)
      zinc_jobs.append(
        Job(
          zinc_key,
          functools.partial(
            self._default_work_for_vts,
            ivts,
            compile_context_pair[1],
            'runtime_classpath',
            counter,
            compile_contexts,
            CompositeProductAdder(
              runtime_classpath_product,
              self.context.products.get_data('rsc_classpath'))),
          [
            self._compile_against_rsc_key_for_target(target)
            for target in invalid_dependencies_without_jar_metacps],
          self._size_estimator(compile_context_pair[1].sources),
          on_failure=ivts.force_invalidate,
        )
      )

      metacp_key = self._metacp_key_for_target(compile_target)
      rsc_jobs.append(
        Job(
          metacp_key,
          functools.partial(
            work_for_vts_metacp,
            ivts,
            compile_context_pair[0],
            'runtime_classpath'),
          [self._metacp_dep_key_for_target(target) for target in invalid_dependencies] + [
            'metacp(jdk)',
            zinc_key,
          ],
          self._size_estimator(compile_context_pair[0].sources),
          on_success=ivts.update,
          on_failure=ivts.force_invalidate,
        )
      )

    return rsc_jobs + zinc_jobs
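The CompositeProductAdder defined inline above is the same pattern the newer examples use as a variadic helper: a single add_for_target() call fans out to every wrapped classpath product. Here is a minimal, self-contained sketch of that idea; FakeClasspathProduct and the target/entry values are invented for illustration and are not Pants APIs.

class FakeClasspathProduct:
    """Stand-in for a Pants classpath product: just records what was added."""
    def __init__(self, name):
        self.name = name
        self.entries = []

    def add_for_target(self, target, entries):
        self.entries.append((target, entries))


class CompositeProductAdder:
    """Fan a single add_for_target() call out to any number of products."""
    def __init__(self, *products):
        self.products = products

    def add_for_target(self, *args, **kwargs):
        for product in self.products:
            product.add_for_target(*args, **kwargs)


runtime = FakeClasspathProduct('runtime_classpath')
rsc = FakeClasspathProduct('rsc_classpath')
CompositeProductAdder(runtime, rsc).add_for_target('src/scala/a:a', [('default', 'dist/a.jar')])
assert runtime.entries == rsc.entries == [('src/scala/a:a', [('default', 'dist/a.jar')])]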