Example #1
    def get_update_artifact_cache_work(self,
                                       vts_artifactfiles_pairs,
                                       cache=None):
        """Create a Work instance to update the artifact cache, if we're configured to.

        vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
          - vts is a single VersionedTargetSet.
          - artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
        """
        cache = cache or self.get_artifact_cache()
        if cache:
            if not vts_artifactfiles_pairs:
                return None
            # Do some reporting.
            targets = set()
            for vts, _ in vts_artifactfiles_pairs:
                targets.update(vts.targets)
            self._report_targets('Caching artifacts for ', list(targets), '.')
            # Cache the artifacts.
            args_tuples = []
            for vts, artifactfiles in vts_artifactfiles_pairs:
                if self.context.options.verify_artifact_cache:
                    pass  # TODO: Verify that the artifact we just built is identical to the cached one?
                args_tuples.append((vts.cache_key, artifactfiles))
            return Work(lambda *args: cache.insert(*args), args_tuples,
                        'insert')
        else:
            return None
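
The shape of the returned Work is the interesting part: it pairs a callable with a list of argument tuples, one tuple per cache insert. Below is a minimal, self-contained sketch of that pattern; SketchWork and FakeCache are illustrative stand-ins, not Pants APIs.

class SketchWork:
    def __init__(self, func, args_tuples, name):
        self.func = func
        self.args_tuples = args_tuples
        self.name = name

    def run(self):
        # Apply the callable once per argument tuple, mirroring how an
        # executor consumes a Work.
        for args in self.args_tuples:
            self.func(*args)


class FakeCache:
    def insert(self, cache_key, artifactfiles):
        print('inserting %s under %s' % (artifactfiles, cache_key))


cache = FakeCache()
args_tuples = [('key-1', ['a.class']), ('key-2', ['b.class', 'c.class'])]
SketchWork(cache.insert, args_tuples, 'insert').run()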
Example #2
  def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
    """Checks the artifact cache for the specified list of VersionedTargetSets.

    Returns a pair (cached, uncached) of VersionedTargets that were
    satisfied/unsatisfied from the cache.
    """
    if not vts:
      return [], []

    cached_vts = []
    uncached_vts = OrderedSet(vts)

    with self.context.new_workunit(name='check', labels=[WorkUnit.MULTITOOL]) as parent:
      res = self.context.submit_foreground_work_and_wait(
        Work(lambda vt: bool(self.get_artifact_cache().use_cached_files(vt.cache_key)),
             [(vt, ) for vt in vts], 'check'), workunit_parent=parent)
    for vt, was_in_cache in zip(vts, res):
      if was_in_cache:
        cached_vts.append(vt)
        uncached_vts.discard(vt)
    # Note that while the input vts may represent multiple targets (for tasks that override
    # check_artifact_cache_for), the ones we return must represent single targets.
    def flatten(vts):
      return list(itertools.chain.from_iterable([vt.versioned_targets for vt in vts]))
    all_cached_vts, all_uncached_vts = flatten(cached_vts), flatten(uncached_vts)
    if post_process_cached_vts:
      post_process_cached_vts(all_cached_vts)
    for vt in all_cached_vts:
      vt.update()
    return all_cached_vts, all_uncached_vts
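
The partition idiom above relies on submit_foreground_work_and_wait returning one result per argument tuple, in input order, so zip(vts, res) pairs each VersionedTargetSet with its hit/miss flag. Here is a self-contained sketch of the same idiom, with a hypothetical in_cache predicate standing in for use_cached_files:

def partition_by_cache(vts, in_cache):
    # One boolean per input, in input order, as the zip in the method assumes.
    results = [in_cache(vt) for vt in vts]
    cached = [vt for vt, hit in zip(vts, results) if hit]
    uncached = [vt for vt, hit in zip(vts, results) if not hit]
    return cached, uncached

cached, uncached = partition_by_cache(['vt1', 'vt2', 'vt3'],
                                      lambda vt: vt != 'vt2')
print(cached, uncached)  # ['vt1', 'vt3'] ['vt2']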
Example #3
    def _write_to_artifact_cache(self, analysis_file, vts, sources_by_target):
        vt_by_target = {vt.target: vt for vt in vts.versioned_targets}

        split_analysis_files = [
            JvmCompile._analysis_for_target(self._analysis_tmpdir, t)
            for t in vts.targets
        ]
        portable_split_analysis_files = [
            JvmCompile._portable_analysis_for_target(self._analysis_tmpdir, t)
            for t in vts.targets
        ]

        # Set up args for splitting the analysis into per-target files.
        # Materialize the zips as lists so they survive repeated iteration
        # under Python 3.
        splits = list(zip([sources_by_target.get(t, []) for t in vts.targets],
                          split_analysis_files))
        splits_args_tuples = [(analysis_file, splits)]

        # Set up args for rebasing the splits.
        relativize_args_tuples = list(zip(split_analysis_files,
                                          portable_split_analysis_files))

        # Set up args for artifact cache updating.
        vts_artifactfiles_pairs = []
        classes_by_source = self._compute_classes_by_source(analysis_file)
        for target, sources in sources_by_target.items():
            artifacts = []
            for source in sources:
                artifacts.extend(classes_by_source.get(source, []))
            vt = vt_by_target.get(target)
            if vt is not None:
                # NOTE: analysis_file doesn't exist yet.
                vts_artifactfiles_pairs.append((vt, artifacts + [
                    JvmCompile._portable_analysis_for_target(
                        self._analysis_tmpdir, target)
                ]))

        update_artifact_cache_work = self.get_update_artifact_cache_work(
            vts_artifactfiles_pairs)
        if update_artifact_cache_work:
            work_chain = [
                Work(self._analysis_tools.split_to_paths, splits_args_tuples,
                     'split'),
                Work(self._analysis_tools.relativize, relativize_args_tuples,
                     'relativize'), update_artifact_cache_work
            ]
            self.context.submit_background_work_chain(
                work_chain, parent_workunit_name='cache')
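
The work chain encodes an ordering constraint: the analysis must be split and relativized before the cache insert runs, so the chain's steps execute sequentially in the background. A self-contained sketch of that sequencing follows; the Work namedtuple and run_chain here are illustrative stand-ins for the Pants machinery:

import collections

# Illustrative stand-in for the Pants Work class.
Work = collections.namedtuple('Work', ['func', 'args_tuples', 'name'])

def run_chain(work_chain):
    # Each Work runs to completion before the next starts, so later steps
    # only ever see the outputs of earlier ones.
    for work in work_chain:
        for args in work.args_tuples:
            work.func(*args)

run_chain([
    Work(lambda af, splits: print('split %s into %d files' % (af, len(splits))),
         [('global.analysis', ['a.analysis', 'b.analysis'])], 'split'),
    Work(lambda src, dst: print('relativize %s -> %s' % (src, dst)),
         [('a.analysis', 'a.portable'), ('b.analysis', 'b.portable')],
         'relativize'),
])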
Example #4
    def _write_to_artifact_cache(self, analysis_file, vts, sources_by_target):
        vt_by_target = dict([(vt.target, vt) for vt in vts.versioned_targets])

        # Compute the classes per source up front, before the analysis file changes under us.
        classes_by_source = self._compute_classes_by_source(analysis_file)

        # Set up args for splitting the analysis into per-target files.
        splits = [(sources_by_target.get(t, []),
                   ScalaCompile._analysis_for_target(self._analysis_tmpdir, t))
                  for t in vts.targets]
        splits_args_tuples = [(analysis_file, splits)]

        # Set up args for artifact cache updating.
        vts_artifactfiles_pairs = []
        for target, sources in sources_by_target.items():
            artifacts = []
            for source in sources:
                for cls in classes_by_source.get(source, []):
                    artifacts.append(os.path.join(self._classes_dir, cls))
            vt = vt_by_target.get(target)
            if vt is not None:
                # Use a distinct name for the per-target analysis file, so we
                # don't shadow the method's analysis_file argument.
                target_analysis_file = \
                  ScalaCompile._analysis_for_target(self._analysis_tmpdir, target)
                # NOTE: target_analysis_file doesn't exist yet.
                # We stick the relations file in the artifact as well, for ease of debugging.
                # It's not needed for correctness.
                vts_artifactfiles_pairs.append(
                    (vt, artifacts +
                     [target_analysis_file, target_analysis_file + '.relations']))

        def split(analysis_file, splits):
            if self._zinc_utils.run_zinc_split(analysis_file, splits):
                raise TaskError('Zinc failed to split analysis file: %s' %
                                analysis_file)

        update_artifact_cache_work = \
          self.get_update_artifact_cache_work(vts_artifactfiles_pairs)
        if update_artifact_cache_work:
            work_chain = [
                Work(split, splits_args_tuples, 'split'),
                update_artifact_cache_work
            ]
            background_root = self.context.run_tracker.get_background_root_workunit()
            with self.context.new_workunit(name='cache',
                                           labels=[WorkUnit.MULTITOOL],
                                           parent=background_root) as parent:
                self.context.submit_background_work_chain(
                    work_chain, workunit_parent=parent)
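
To make the artifact-list assembly concrete: every class product of every source becomes a path under the classes dir, and the per-target analysis file plus its .relations sibling are appended for debugging. A small sketch with made-up paths (all names here are hypothetical):

import os

# Hypothetical inputs mirroring the loop above.
classes_dir = '/build/classes'
classes_by_source = {'Foo.scala': ['Foo.class', 'Foo$.class']}
sources = ['Foo.scala']

artifacts = []
for source in sources:
    for cls in classes_by_source.get(source, []):
        artifacts.append(os.path.join(classes_dir, cls))

# The analysis file and its .relations sibling ride along for debugging.
analysis = '/tmp/analysis/foo.analysis'
artifacts += [analysis, analysis + '.relations']
print(artifacts)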