def post_process_cached_vts(self, cached_vts):
    """Special post processing for global scala analysis files.

    Class files are retrieved directly into their final locations in the global classes dir.
    """
    self._ensure_analysis_tmpdir()

    # Get all the targets whose artifacts we found in the cache.
    cached_targets = []
    for vt in cached_vts:
      for target in vt.targets:
        cached_targets.append(target)

    # The current global analysis may contain old data for modified targets for
    # which we got cache hits. We need to strip out this old analysis, to ensure
    # that the new data incoming from the cache doesn't collide with it during the merge.
    sources_to_strip = []
    if os.path.exists(self._analysis_file):
      for target in cached_targets:
        sources_to_strip.extend(self._get_previous_sources_by_target(target))

    # Localize the cached analyses.
    analyses_to_merge = []
    for target in cached_targets:
      analysis_file = JvmCompileStrategy._analysis_for_target(self._analysis_tmpdir, target)
      portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
          self._analysis_tmpdir, target)
      if os.path.exists(portable_analysis_file):
        self._analysis_tools.localize(portable_analysis_file, analysis_file)
      if os.path.exists(analysis_file):
        analyses_to_merge.append(analysis_file)

    # Merge them into the global analysis.
    if analyses_to_merge:
      with temporary_dir() as tmpdir:
        if sources_to_strip:
          throwaway = os.path.join(tmpdir, 'throwaway')
          trimmed_analysis = os.path.join(tmpdir, 'trimmed')
          self._analysis_tools.split_to_paths(self._analysis_file,
                                              [(sources_to_strip, throwaway)],
                                              trimmed_analysis)
        else:
          trimmed_analysis = self._analysis_file
        if os.path.exists(trimmed_analysis):
          analyses_to_merge.append(trimmed_analysis)
        tmp_analysis = os.path.join(tmpdir, 'analysis')
        with self.context.new_workunit(name='merge_analysis'):
          self._analysis_tools.merge_from_paths(analyses_to_merge, tmp_analysis)

        sources_by_cached_target = self._sources_for_targets(cached_targets)

        # Record the cached target -> sources mapping for future use.
        for target, sources in sources_by_cached_target.items():
          self._record_previous_sources_by_target(target, sources)

        # Everything's good so move the merged analysis to its final location.
        if os.path.exists(tmp_analysis):
          self.move(tmp_analysis, self._analysis_file)
Example #2
  def post_process_cached_vts(self, cached_vts):
    """Special post processing for global scala analysis files.

    Class files are retrieved directly into their final locations in the global classes dir.
    """

    # Get all the targets whose artifacts we found in the cache.
    cached_targets = []
    for vt in cached_vts:
      for target in vt.targets:
        cached_targets.append(target)

    # The current global analysis may contain old data for modified targets for
    # which we got cache hits. We need to strip out this old analysis, to ensure
    # that the new data incoming from the cache doesn't collide with it during the merge.
    sources_to_strip = []
    if os.path.exists(self._analysis_file):
      for target in cached_targets:
        sources_to_strip.extend(self._get_previous_sources_by_target(target))

    # Localize the cached analyses.
    analyses_to_merge = []
    for target in cached_targets:
      analysis_file = JvmCompileStrategy._analysis_for_target(self.analysis_tmpdir, target)
      portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
          self.analysis_tmpdir, target)
      if os.path.exists(portable_analysis_file):
        self._analysis_tools.localize(portable_analysis_file, analysis_file)
      if os.path.exists(analysis_file):
        analyses_to_merge.append(analysis_file)

    # Merge them into the global analysis.
    if analyses_to_merge:
      with temporary_dir() as tmpdir:
        if sources_to_strip:
          throwaway = os.path.join(tmpdir, 'throwaway')
          trimmed_analysis = os.path.join(tmpdir, 'trimmed')
          self._analysis_tools.split_to_paths(self._analysis_file,
                                              [(sources_to_strip, throwaway)],
                                              trimmed_analysis)
        else:
          trimmed_analysis = self._analysis_file
        if os.path.exists(trimmed_analysis):
          analyses_to_merge.append(trimmed_analysis)
        tmp_analysis = os.path.join(tmpdir, 'analysis')
        with self.context.new_workunit(name='merge_analysis'):
          self._analysis_tools.merge_from_paths(analyses_to_merge, tmp_analysis)

        sources_by_cached_target = self._sources_for_targets(cached_targets)

        # Record the cached target -> sources mapping for future use.
        for target, sources in sources_by_cached_target.items():
          self._record_previous_sources_by_target(target, sources)

        # Everything's good so move the merged analysis to its final location.
        if os.path.exists(tmp_analysis):
          self.move(tmp_analysis, self._analysis_file)
Example #3
  def _write_to_artifact_cache(self, analysis_file, vts, get_update_artifact_cache_work):
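    # Index the versioned targets by target so each target's artifacts can be paired with its vt.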
    vt_by_target = dict([(vt.target, vt) for vt in vts.versioned_targets])

    vts_targets = [t for t in vts.targets if not t.has_label('no_cache')]

    # Determine locations for analysis files that will be split in the background.
    split_analysis_files = [
        JvmCompileStrategy._analysis_for_target(self.analysis_tmpdir, t) for t in vts_targets]
    portable_split_analysis_files = [
        JvmCompileStrategy._portable_analysis_for_target(self.analysis_tmpdir, t) for t in vts_targets]

    # Set up args for splitting the analysis into per-target files.
    splits = zip([self._sources_for_target(t) for t in vts_targets], split_analysis_files)
    splits_args_tuples = [(analysis_file, splits)]

    # Set up args for rebasing the splits.
    relativize_args_tuples = zip(split_analysis_files, portable_split_analysis_files)

    # Compute the classes and resources for each vts.
    compile_contexts = [self.compile_context(t) for t in vts_targets]
    vts_artifactfiles_pairs = []
    classes_by_source_by_context = self.compute_classes_by_source(compile_contexts)
    resources_by_target = self.context.products.get_data('resources_by_target')
    for compile_context in compile_contexts:
      target = compile_context.target
      if target.has_label('no_cache'):
        continue
      artifacts = []
      if resources_by_target is not None:
        for _, paths in resources_by_target[target].abs_paths():
          artifacts.extend(paths)
      classes_by_source = classes_by_source_by_context[compile_context]
      for source in compile_context.sources:
        classes = classes_by_source.get(source, [])
        artifacts.extend(classes)

      vt = vt_by_target.get(target)
      if vt is not None:
        # NOTE: analysis_file doesn't exist yet.
        vts_artifactfiles_pairs.append(
            (vt, artifacts + [JvmCompileStrategy._portable_analysis_for_target(
                self.analysis_tmpdir, target)]))

    update_artifact_cache_work = get_update_artifact_cache_work(vts_artifactfiles_pairs)
    if update_artifact_cache_work:
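      # Split the analysis per-target, relativize it, then upload, all chained in the background.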
      work_chain = [
        Work(self._analysis_tools.split_to_paths, splits_args_tuples, 'split'),
        Work(self._analysis_tools.relativize, relativize_args_tuples, 'relativize'),
        update_artifact_cache_work
      ]
      self.context.submit_background_work_chain(work_chain, parent_workunit_name='cache')
Example #5
            def work():
                progress_message = compile_context.target.address.spec
                cp_entries = self._compute_classpath_entries(
                    compile_classpaths, target_closure, compile_context,
                    extra_compile_time_classpath)

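                # Collect analysis files for upstream dependencies already on the classpath.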
                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                # Capture a compilation log if requested.
                log_file = self._capture_log_file(compile_context.target)

                # Mutate analysis within a temporary directory, and move it to the final location
                # on success.
                tmpdir = os.path.join(self.analysis_tmpdir,
                                      compile_context.target.id)
                safe_mkdir(tmpdir)
                tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
                    tmpdir, compile_context.target)
                if os.path.exists(compile_context.analysis_file):
                    shutil.copy(compile_context.analysis_file,
                                tmp_analysis_file)
                compile_vts(vts, compile_context.sources, tmp_analysis_file,
                            upstream_analysis, cp_entries,
                            compile_context.classes_dir, log_file,
                            progress_message)
                atomic_copy(tmp_analysis_file, compile_context.analysis_file)

                # Update the products with the latest classes.
                register_vts([compile_context])

                # Kick off the background artifact cache write.
                if update_artifact_cache_vts_work:
                    self._write_to_artifact_cache(
                        vts, compile_context, update_artifact_cache_vts_work)
  def compile_context(self, target):
    analysis_file = JvmCompileStrategy._analysis_for_target(self._analysis_dir, target)
    classes_dir = os.path.join(self._classes_dir, target.id)
    return self.CompileContext(target,
                               analysis_file,
                               classes_dir,
                               self._sources_for_target(target))
      def work():
        progress_message = vts.targets[0].address.spec
        cp_entries = self._compute_classpath_entries(compile_classpaths,
                                                     target_closure,
                                                     compile_context,
                                                     extra_compile_time_classpath)

        upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))
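        # Mutate analysis within a temporary directory, and copy it to the final location on success.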
        tmpdir = os.path.join(self.analysis_tmpdir, vts.targets[0].id)
        safe_mkdir(tmpdir)

        tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
            tmpdir, compile_context.target)
        if os.path.exists(compile_context.analysis_file):
          shutil.copy(compile_context.analysis_file, tmp_analysis_file)
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    progress_message)
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Update the products with the latest classes.
        register_vts([compile_context])

        # Kick off the background artifact cache write.
        if update_artifact_cache_vts_work:
          self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
  def _write_to_artifact_cache(self, vts, compile_context, get_update_artifact_cache_work):
    assert len(vts.targets) == 1
    assert vts.targets[0] == compile_context.target

    # Noop if the target is uncacheable.
    if compile_context.target.has_label('no_cache'):
      return
    vt = vts.versioned_targets[0]

    # Set up args to relativize analysis in the background.
    portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
        self._analysis_dir, compile_context.target)
    relativize_args_tuple = (compile_context.analysis_file, portable_analysis_file)

    # Compute the classes and resources for this target.
    artifacts = []
    resources_by_target = self.context.products.get_data('resources_by_target')
    if resources_by_target is not None:
      for _, paths in resources_by_target[compile_context.target].abs_paths():
        artifacts.extend(paths)
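    # Every class file under this target's dedicated classes dir belongs in the artifact.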
    for dirpath, _, filenames in safe_walk(compile_context.classes_dir):
      artifacts.extend([os.path.join(dirpath, f) for f in filenames])

    # Get the 'work' that will publish these artifacts to the cache.
    # NB: the portable analysis_file won't exist until we finish.
    vts_artifactfiles_pair = (vt, artifacts + [portable_analysis_file])
    update_artifact_cache_work = get_update_artifact_cache_work([vts_artifactfiles_pair])

    # And execute it.
    if update_artifact_cache_work:
      work_chain = [
          Work(self._analysis_tools.relativize, [relativize_args_tuple], 'relativize'),
          update_artifact_cache_work
      ]
      self.context.submit_background_work_chain(work_chain, parent_workunit_name='cache')
Example #9
  def compile_context(self, target):
    analysis_file = JvmCompileStrategy._analysis_for_target(self._analysis_dir, target)
    classes_dir = os.path.join(self._classes_dir, target.id)
    # TODO: this will be a fairly long path. should use an id hash to shorten it if possible
    jar_file = os.path.join(self._jars_dir, target.id + '.jar')
    return IsolatedCompileContext(target,
                                  analysis_file,
                                  classes_dir,
                                  jar_file,
                                  self._sources_for_target(target))
  def compile_context(self, target):
    analysis_file = JvmCompileStrategy._analysis_for_target(self._analysis_dir, target)
    classes_dir = os.path.join(self._classes_dir, target.id)
    # Generate a short unique path for the jar to allow for shorter classpaths.
    #   TODO: likely unnecessary after https://github.com/pantsbuild/pants/issues/1988
    jar_file = os.path.join(self._jars_dir, '{}.jar'.format(sha1(target.id).hexdigest()[:12]))
    return IsolatedCompileContext(target,
                                  analysis_file,
                                  classes_dir,
                                  jar_file,
                                  self._sources_for_target(target))
Example #13
    def _write_to_artifact_cache(self, vts, compile_context,
                                 get_update_artifact_cache_work):
        assert len(vts.targets) == 1
        assert vts.targets[0] == compile_context.target

        # Noop if the target is uncacheable.
        if compile_context.target.has_label('no_cache'):
            return
        vt = vts.versioned_targets[0]

        # Set up args to relativize analysis in the background.
        portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
            self._analysis_dir, compile_context.target)
        relativize_args_tuple = (compile_context.analysis_file,
                                 portable_analysis_file)

        # Collect the artifacts for this target.
        artifacts = []

        def add_abs_products(p):
            if p:
                for _, paths in p.abs_paths():
                    artifacts.extend(paths)

        # Resources.
        resources_by_target = self.context.products.get_data(
            'resources_by_target')
        add_abs_products(resources_by_target.get(compile_context.target))
        # Classes.
        classes_by_target = self.context.products.get_data('classes_by_target')
        add_abs_products(classes_by_target.get(compile_context.target))
        # Log file.
        log_file = self._capture_log_file(compile_context.target)
        if log_file and os.path.exists(log_file):
            artifacts.append(log_file)
        # Jar.
        if self._jar:
            artifacts.append(compile_context.jar_file)

        # Get the 'work' that will publish these artifacts to the cache.
        # NB: the portable analysis_file won't exist until we finish.
        vts_artifactfiles_pair = (vt, artifacts + [portable_analysis_file])
        update_artifact_cache_work = get_update_artifact_cache_work(
            [vts_artifactfiles_pair])

        # And execute it.
        if update_artifact_cache_work:
            work_chain = [
                Work(self._analysis_tools.relativize, [relativize_args_tuple],
                     'relativize'), update_artifact_cache_work
            ]
            self.context.submit_background_work_chain(
                work_chain, parent_workunit_name='cache')
  def post_process_cached_vts(self, cached_vts):
    """Localizes the fetched analysis for targets we found in the cache.

    This is the complement of `_write_to_artifact_cache`.
    """
    compile_contexts = []
    for vt in cached_vts:
      for target in vt.targets:
        compile_contexts.append(self.compile_context(target))

    for compile_context in compile_contexts:
      portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
          self._analysis_dir, compile_context.target)
      if os.path.exists(portable_analysis_file):
        self._analysis_tools.localize(portable_analysis_file, compile_context.analysis_file)
Example #16
  def _write_to_artifact_cache(self, vts, compile_context, get_update_artifact_cache_work):
    assert len(vts.targets) == 1
    assert vts.targets[0] == compile_context.target

    # Noop if the target is uncacheable.
    if compile_context.target.has_label('no_cache'):
      return
    vt = vts.versioned_targets[0]

    # Set up args to relativize analysis in the background.
    portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
        self._analysis_dir, compile_context.target)
    relativize_args_tuple = (compile_context.analysis_file, portable_analysis_file)

    # Collect the artifacts for this target.
    artifacts = []

    def add_abs_products(p):
      if p:
        for _, paths in p.abs_paths():
          artifacts.extend(paths)
    # Resources.
    resources_by_target = self.context.products.get_data('resources_by_target')
    add_abs_products(resources_by_target.get(compile_context.target))
    # Classes.
    classes_by_target = self.context.products.get_data('classes_by_target')
    add_abs_products(classes_by_target.get(compile_context.target))
    # Log file.
    log_file = self._capture_log_file(compile_context.target)
    if log_file and os.path.exists(log_file):
      artifacts.append(log_file)
    # Jar.
    if self._jar:
      artifacts.append(compile_context.jar_file)

    # Get the 'work' that will publish these artifacts to the cache.
    # NB: the portable analysis_file won't exist until we finish.
    vts_artifactfiles_pair = (vt, artifacts + [portable_analysis_file])
    update_artifact_cache_work = get_update_artifact_cache_work([vts_artifactfiles_pair])

    # And execute it.
    if update_artifact_cache_work:
      work_chain = [
          Work(self._analysis_tools.relativize, [relativize_args_tuple], 'relativize'),
          update_artifact_cache_work
      ]
      self.context.submit_background_work_chain(work_chain, parent_workunit_name='cache')
    def work_for_vts(vts, compile_context, target_closure):
      progress_message = compile_context.target.address.spec
      cp_entries = self._compute_classpath_entries(compile_classpaths,
                                                   target_closure,
                                                   compile_context,
                                                   extra_compile_time_classpath)

      upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

      # Capture a compilation log if requested.
      log_file = self._capture_log_file(compile_context.target)

      # Double check the cache before beginning compilation
      if not check_cache(vts):
        # Mutate analysis within a temporary directory, and move it to the final location
        # on success.
        tmpdir = os.path.join(self.analysis_tmpdir, compile_context.target.id)
        safe_mkdir(tmpdir)
        tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
            tmpdir, compile_context.target)
        if os.path.exists(compile_context.analysis_file):
          shutil.copy(compile_context.analysis_file, tmp_analysis_file)
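        # The isolated strategy compiles exactly one target per vts.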
        target, = vts.targets
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    log_file,
                    progress_message,
                    target.platform)
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Jar the compiled output.
        self._create_context_jar(compile_context)

      # Update the products with the latest classes.
      register_vts([compile_context])

      # Kick off the background artifact cache write.
      if update_artifact_cache_vts_work:
        self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
    def _write_to_artifact_cache(self, vts, compile_context, get_update_artifact_cache_work):
        assert len(vts.targets) == 1
        assert vts.targets[0] == compile_context.target

        # Noop if the target is uncacheable.
        if compile_context.target.has_label("no_cache"):
            return
        vt = vts.versioned_targets[0]

        # Set up args to relativize analysis in the background.
        portable_analysis_file = JvmCompileStrategy._portable_analysis_for_target(
            self._analysis_dir, compile_context.target
        )
        relativize_args_tuple = (compile_context.analysis_file, portable_analysis_file)

        # Collect the artifacts for this target.
        artifacts = []
        resources_by_target = self.context.products.get_data("resources_by_target")
        if resources_by_target is not None:
            for _, paths in resources_by_target[compile_context.target].abs_paths():
                artifacts.extend(paths)
        target_classes = self.context.products.get_data("classes_by_target").get(compile_context.target)
        for _, classfiles in target_classes.abs_paths():
            artifacts.extend(classfiles)
        log_file = self._capture_log_file(compile_context.target)
        if log_file and os.path.exists(log_file):
            artifacts.append(log_file)

        # Get the 'work' that will publish these artifacts to the cache.
        # NB: the portable analysis_file won't exist until we finish.
        vts_artifactfiles_pair = (vt, artifacts + [portable_analysis_file])
        update_artifact_cache_work = get_update_artifact_cache_work([vts_artifactfiles_pair])

        # And execute it.
        if update_artifact_cache_work:
            work_chain = [
                Work(self._analysis_tools.relativize, [relativize_args_tuple], "relativize"),
                update_artifact_cache_work,
            ]
            self.context.submit_background_work_chain(work_chain, parent_workunit_name="cache")
Example #19
def compile_context(self, target):
    analysis_file = JvmCompileStrategy._analysis_for_target(
        self._analysis_dir, target)
    classes_dir = os.path.join(self._classes_dir, target.id)
    return self.CompileContext(target, analysis_file, classes_dir,
                               self._sources_for_target(target))