def work():
        """Compile one invalid versioned-target set and publish its outputs.

        NOTE(review): this closure reads `self`, `vts`, `compile_context`,
        `compile_classpaths`, `target_closure`, `compile_contexts`,
        `extra_compile_time_classpath`, `compile_vts`, `register_vts` and
        `update_artifact_cache_vts_work` from its enclosing scope.
        """
        # Progress label shown for this compile unit.
        progress_message = vts.targets[0].address.spec
        # Classpath entries computed from the target closure for this compile.
        cp_entries = self._compute_classpath_entries(compile_classpaths,
                                                     target_closure,
                                                     compile_context,
                                                     extra_compile_time_classpath)

        # Analysis from already-compiled upstream contexts (presumably keyed
        # by classpath entry — confirm against _upstream_analysis).
        upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))
        # Mutate analysis in a temp dir; it is moved to the final location
        # only on success (see atomic_copy below).
        tmpdir = os.path.join(self.analysis_tmpdir, vts.targets[0].id)
        safe_mkdir(tmpdir)

        tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
            tmpdir, compile_context.target)
        # Seed the temp analysis from any prior analysis so the compile can
        # reuse previous results.
        if os.path.exists(compile_context.analysis_file):
           shutil.copy(compile_context.analysis_file, tmp_analysis_file)
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    progress_message)
        # Publish the updated analysis (atomic_copy presumably replaces the
        # destination atomically — confirm in the util module).
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Update the products with the latest classes.
        register_vts([compile_context])

        # Kick off the background artifact cache write.
        if update_artifact_cache_vts_work:
          self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
Exemple #2
0
            def work():
                """Compile one invalid target and publish its outputs.

                NOTE(review): closure over `self`, `vts`, `compile_context`,
                `compile_classpaths`, `target_closure`, `compile_contexts`,
                `extra_compile_time_classpath`, `compile_vts`, `register_vts`
                and `update_artifact_cache_vts_work` from the enclosing scope.
                """
                # Progress label shown for this compile unit.
                progress_message = compile_context.target.address.spec
                cp_entries = self._compute_classpath_entries(
                    compile_classpaths, target_closure, compile_context,
                    extra_compile_time_classpath)

                # Analysis from already-compiled upstream contexts.
                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                # Capture a compilation log if requested.
                log_file = self._capture_log_file(compile_context.target)

                # Mutate analysis within a temporary directory, and move it to the final location
                # on success.
                tmpdir = os.path.join(self.analysis_tmpdir,
                                      compile_context.target.id)
                safe_mkdir(tmpdir)
                tmp_analysis_file = JvmCompileStrategy._analysis_for_target(
                    tmpdir, compile_context.target)
                # Seed the temp analysis from any prior analysis so the
                # compile can reuse previous results.
                if os.path.exists(compile_context.analysis_file):
                    shutil.copy(compile_context.analysis_file,
                                tmp_analysis_file)
                compile_vts(vts, compile_context.sources, tmp_analysis_file,
                            upstream_analysis, cp_entries,
                            compile_context.classes_dir, log_file,
                            progress_message)
                # Publish the updated analysis only after a successful compile.
                atomic_copy(tmp_analysis_file, compile_context.analysis_file)

                # Update the products with the latest classes.
                register_vts([compile_context])

                # Kick off the background artifact cache write.
                if update_artifact_cache_vts_work:
                    self._write_to_artifact_cache(
                        vts, compile_context, update_artifact_cache_vts_work)
    def execute(self):
        """Build (or reuse) a ``.pex`` lambda for every python-lambda target,
        register it in the ``python_aws_lambda`` product, and publish a copy
        of each artifact into the distribution directory."""
        lambda_targets = self.get_targets(self._is_python_lambda)
        with self.invalidated(
                targets=lambda_targets,
                invalidate_dependents=True) as invalidation_check:
            product = self.context.products.get_data(
                "python_aws_lambda", dict)
            log = self.context.log
            for vt in invalidation_check.all_vts:
                pex_name = "{}.pex".format(vt.target.name)
                lambda_path = os.path.join(vt.results_dir, pex_name)
                if vt.valid:
                    log.debug(
                        "Using existing lambda for {}".format(vt.target))
                else:
                    log.debug(
                        "Existing lambda for {} is invalid, rebuilding".format(
                            vt.target))
                    self._create_lambda(vt.target, lambda_path)

                product[vt.target] = lambda_path
                log.debug("created {}".format(
                    os.path.relpath(lambda_path, get_buildroot())))

                # Publish a copy under the distribution directory.
                lambda_copy = os.path.join(self.get_options().pants_distdir,
                                           os.path.basename(lambda_path))
                safe_mkdir_for(lambda_copy)
                atomic_copy(lambda_path, lambda_copy)
                log.info("created lambda {}".format(
                    os.path.relpath(lambda_copy, get_buildroot())))
    def shade_jar(self, shading_rules, jar_path):
        """Shade a jar in place using the supplied shading rules.

        This *overwrites* the existing jar file at ``jar_path``.

        :param shading_rules: predefined rules for shading
        :param jar_path: The filepath to the jar that should be shaded.
        :returns: ``jar_path`` on success.
        :raises TaskError: if the shading tool fails or emits no output jar.
        """
        self.context.log.debug('Shading {}.'.format(jar_path))
        with temporary_dir() as scratch_dir:
            # Shade into a scratch file first; the input jar is replaced only
            # after the tool succeeds.
            shaded_jar = os.path.join(scratch_dir, os.path.basename(jar_path))
            runner_ctx = self.shader.binary_shader_for_rules(
                shaded_jar, jar_path, shading_rules)
            with runner_ctx as shade_runner:
                exit_code = execute_runner(
                    shade_runner,
                    workunit_factory=self.context.new_workunit,
                    workunit_name='jarjar')
                if exit_code != 0:
                    raise TaskError(
                        'Shading tool failed to shade {0} (error code {1})'.
                        format(jar_path, exit_code))
                if not os.path.exists(shaded_jar):
                    raise TaskError(
                        'Shading tool returned success for {0}, but '
                        'the output jar was not found at {1}'.format(
                            jar_path, shaded_jar))
                atomic_copy(shaded_jar, jar_path)
                return jar_path
  def execute(self):
    """Build a ``.pex`` for each python binary target, register it in the
    ``pex_archives`` and ``deployable_archives`` products, and publish a copy
    into the distribution directory.

    :raises TaskError: if two binary targets share the same name.
    """
    binaries = self.context.targets(self.is_binary)

    # Binaries land at <dist>/<name>.pex, so names must be unique per run.
    seen = {}
    for binary in binaries:
      previous = seen.setdefault(binary.name, binary)
      if previous is not binary:
        raise TaskError('Cannot build two binaries with the same name in a single invocation. '
                        '{} and {} both have the name {}.'.format(binary, previous, binary.name))

    with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
      deployable_archives = self.context.products.get('deployable_archives')
      pex_archives = self.context.products.get('pex_archives')
      for vt in invalidation_check.all_vts:
        pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if vt.valid:
          self.context.log.debug('using cache for {}'.format(vt.target))
        else:
          self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
          self._create_binary(vt.target, vt.results_dir)

        pex_dir, basename = os.path.split(pex_path)
        pex_archives.add(vt.target, pex_dir).append(basename)
        deployable_archives.add(vt.target, pex_dir).append(basename)
        self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))

        # Publish a copy of the pex under the distribution directory.
        pex_copy = os.path.join(self._distdir, basename)
        safe_mkdir_for(pex_copy)
        atomic_copy(pex_path, pex_copy)
        self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))
Exemple #6
0
    def work_for_vts(vts, compile_context, target_closure):
      """Compile one versioned-target set, double-checking the cache first.

      NOTE(review): closure over `self`, `classpath_products`,
      `compile_contexts`, `extra_compile_time_classpath`, `check_cache`,
      `compile_vts`, `register_vts` and `update_artifact_cache_vts_work`
      from the enclosing scope.
      """
      progress_message = compile_context.target.address.spec
      cp_entries = self._compute_classpath_entries(classpath_products,
                                                   target_closure,
                                                   compile_context,
                                                   extra_compile_time_classpath)

      # Analysis from already-compiled upstream contexts.
      upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

      # Capture a compilation log if requested.
      log_file = self._capture_log_file(compile_context.target)

      # Double check the cache before beginning compilation
      hit_cache = check_cache(vts)
      incremental = False

      if not hit_cache:
        # Mutate analysis within a temporary directory, and move it to the final location
        # on success.
        tmpdir = os.path.join(self.analysis_tmpdir, compile_context.target.id)
        safe_mkdir(tmpdir)
        tmp_analysis_file = self._analysis_for_target(
            tmpdir, compile_context.target)
        # If the analysis exists for this context, it is an incremental compile.
        if os.path.exists(compile_context.analysis_file):
          incremental = True
          shutil.copy(compile_context.analysis_file, tmp_analysis_file)
        # Exactly one target is expected in this vts (tuple-unpack asserts it).
        target, = vts.targets
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    log_file,
                    progress_message,
                    target.platform)
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Jar the compiled output.
        self._create_context_jar(compile_context)

      # Update the products with the latest classes.
      register_vts([compile_context])

      # We write to the cache only if we didn't hit during the double check, and optionally
      # only for clean builds.
      is_cacheable = not hit_cache and (self.get_options().incremental_caching or not incremental)
      self.context.log.debug(
          'Completed compile for {}. '
          'Hit cache: {}, was incremental: {}, is cacheable: {}, cache writes enabled: {}.'.format(
            compile_context.target.address.spec,
            hit_cache,
            incremental,
            is_cacheable,
            update_artifact_cache_vts_work is not None
            ))
      if is_cacheable and update_artifact_cache_vts_work:
        # Kick off the background artifact cache write.
        self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
  def execute(self):
    """Build a ``.pex`` for each python binary target, register it in the
    ``pex_archives`` and ``deployable_archives`` products, and publish a copy
    into the distribution directory.

    :raises TaskError: if two binary targets share the same name.
    """
    binaries = self.context.targets(self.is_binary)

    # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex.
    names = {}
    for binary in binaries:
      name = binary.name
      if name in names:
        raise TaskError('Cannot build two binaries with the same name in a single invocation. '
                        '{} and {} both have the name {}.'.format(binary, names[name], name))
      names[name] = binary

    with self.invalidated(binaries, invalidate_dependents=True) as invalidation_check:
      python_deployable_archive = self.context.products.get('deployable_archives')
      python_pex_product = self.context.products.get('pex_archives')
      for vt in invalidation_check.all_vts:
        pex_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if not vt.valid:
          self.context.log.debug('cache for {} is invalid, rebuilding'.format(vt.target))
          self._create_binary(vt.target, vt.results_dir)
        else:
          self.context.log.debug('using cache for {}'.format(vt.target))

        basename = os.path.basename(pex_path)
        # BUG FIX: register products under the current vt.target. The previous
        # code used `binary`, the stale loop variable left over from the
        # duplicate-name check above, so every pex was attributed to the last
        # binary in that list rather than the one just built.
        python_pex_product.add(vt.target, os.path.dirname(pex_path)).append(basename)
        python_deployable_archive.add(vt.target, os.path.dirname(pex_path)).append(basename)
        self.context.log.debug('created {}'.format(os.path.relpath(pex_path, get_buildroot())))

        # Create a copy for pex.
        pex_copy = os.path.join(self._distdir, os.path.basename(pex_path))
        safe_mkdir_for(pex_copy)
        atomic_copy(pex_path, pex_copy)
        self.context.log.info('created pex {}'.format(os.path.relpath(pex_copy, get_buildroot())))
 def test_atomic_copy(self):
   """atomic_copy should reproduce the source file's content at the destination."""
   with temporary_file() as src:
     # NOTE(review): writes the path string itself as the file content;
     # assumes a text-mode temporary_file — later variants encode to bytes.
     src.write(src.name)
     src.flush()
     with temporary_file() as dst:
       atomic_copy(src.name, dst.name)
       dst.close()
       with open(dst.name) as new_dst:
         # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
         # use assertEqual.
         self.assertEqual(src.name, new_dst.read())
Exemple #9
0
 def test_atomic_copy(self):
     """atomic_copy should reproduce the source file's content at the destination."""
     with temporary_file() as src:
         # Writes the path string itself as the file content.
         src.write(src.name)
         src.flush()
         with temporary_file() as dst:
             atomic_copy(src.name, dst.name)
             dst.close()
             with open(dst.name) as new_dst:
                 # Fix: assertEquals is a deprecated alias (removed in
                 # Python 3.12); use assertEqual.
                 self.assertEqual(src.name, new_dst.read())
Exemple #10
0
        def work_for_vts(vts, compile_context, target_closure):
            """Compile one versioned-target set, double-checking the cache first.

            NOTE(review): closure over `self`, `classpath_products`,
            `compile_contexts`, `extra_compile_time_classpath`, `check_cache`,
            `compile_vts`, `register_vts` and `update_artifact_cache_vts_work`
            from the enclosing scope.
            """
            progress_message = compile_context.target.address.spec
            cp_entries = self._compute_classpath_entries(
                classpath_products, target_closure, compile_context,
                extra_compile_time_classpath)

            # Analysis from already-compiled upstream contexts.
            upstream_analysis = dict(
                self._upstream_analysis(compile_contexts, cp_entries))

            # Capture a compilation log if requested.
            log_file = self._capture_log_file(compile_context.target)

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)
            incremental = False

            if not hit_cache:
                # Mutate analysis within a temporary directory, and move it to the final location
                # on success.
                tmpdir = os.path.join(self.analysis_tmpdir,
                                      compile_context.target.id)
                safe_mkdir(tmpdir)
                tmp_analysis_file = self._analysis_for_target(
                    tmpdir, compile_context.target)
                # If the analysis exists for this context, it is an incremental compile.
                if os.path.exists(compile_context.analysis_file):
                    incremental = True
                    shutil.copy(compile_context.analysis_file,
                                tmp_analysis_file)
                # Exactly one target is expected (tuple-unpack asserts it).
                target, = vts.targets
                compile_vts(vts, compile_context.sources, tmp_analysis_file,
                            upstream_analysis, cp_entries,
                            compile_context.classes_dir, log_file,
                            progress_message, target.platform)
                atomic_copy(tmp_analysis_file, compile_context.analysis_file)

                # Jar the compiled output.
                self._create_context_jar(compile_context)

            # Update the products with the latest classes.
            register_vts([compile_context])

            # We write to the cache only if we didn't hit during the double check, and optionally
            # only for clean builds.
            is_cacheable = not hit_cache and (
                self.get_options().incremental_caching or not incremental)
            self.context.log.debug(
                'Completed compile for {}. '
                'Hit cache: {}, was incremental: {}, is cacheable: {}, cache writes enabled: {}.'
                .format(compile_context.target.address.spec, hit_cache,
                        incremental, is_cacheable,
                        update_artifact_cache_vts_work is not None))
            if is_cacheable and update_artifact_cache_vts_work:
                # Kick off the background artifact cache write.
                self._write_to_artifact_cache(vts, compile_context,
                                              update_artifact_cache_vts_work)
Exemple #11
0
 def test_atomic_copy(self):
   """atomic_copy should copy both the file content and its permission bits."""
   with temporary_file() as src:
     src.write(src.name.encode('utf-8'))
     src.flush()
     with temporary_file() as dst:
       atomic_copy(src.name, dst.name)
       dst.close()
       with open(dst.name) as new_dst:
         # Fix: use assertEqual consistently — assertEquals is a deprecated
         # alias (removed in Python 3.12) and the mode check below already
         # uses assertEqual.
         self.assertEqual(src.name, new_dst.read())
       # The copy must preserve the source's permission bits.
       self.assertEqual(os.stat(src.name).st_mode, os.stat(dst.name).st_mode)
Exemple #12
0
 def test_atomic_copy(self) -> None:
     """atomic_copy must reproduce both a file's content and its mode bits."""
     with temporary_file() as src:
         src.write(src.name.encode())
         src.flush()
         with temporary_file() as dst:
             atomic_copy(src.name, dst.name)
             dst.close()
             # Read the copy back, then compare content and permissions.
             with open(dst.name, "r") as copied:
                 copied_content = copied.read()
             self.assertEqual(src.name, copied_content)
             self.assertEqual(os.stat(src.name).st_mode, os.stat(dst.name).st_mode)
Exemple #13
0
 def _copy_ivy_reports(cls, workdir_report_paths_by_conf, confs, ivy_cache_dir, resolve_hash_name):
   """Copy each per-conf ivy resolution report from the ivy cache into the workdir.

   :param workdir_report_paths_by_conf: destination report path for each conf.
   :param confs: the ivy configurations to copy reports for.
   :param ivy_cache_dir: the ivy cache holding the source reports.
   :param resolve_hash_name: hash naming the resolve whose reports are copied.
   :raises cls.IvyError: if any report cannot be copied.
   """
   for conf in confs:
     source_report = IvyUtils.xml_report_path(ivy_cache_dir, resolve_hash_name, conf)
     dest_report = workdir_report_paths_by_conf[conf]
     try:
       atomic_copy(source_report, dest_report)
     except IOError as e:
       raise cls.IvyError('Failed to copy report into workdir from {} to {}: {}'
                          .format(source_report, dest_report, e))
Exemple #14
0
  def publish_results(self, dist_dir, use_basename_prefix, vt, bundle_dir, archivepath, id, archive_ext):
    """Publish a copy of the bundle and archive from the results dir in dist.

    :param dist_dir: destination distribution directory.
    :param use_basename_prefix: name outputs after the target basename instead of ``id``.
    :param vt: the versioned target whose results are being published.
    :param bundle_dir: directory holding the built bundle.
    :param archivepath: path to the archive, or falsy to skip archive publishing.
    :param id: fallback name used when ``use_basename_prefix`` is false.
    :param archive_ext: archive type key used to look up the file extension.
    """
    # TODO (from mateor) move distdir management somewhere more general purpose.
    name = vt.target.basename if use_basename_prefix else id
    bundle_copy = os.path.join(dist_dir, '{}-bundle'.format(name))
    absolute_symlink(bundle_dir, bundle_copy)
    self.context.log.info(
      'created bundle copy {}'.format(os.path.relpath(bundle_copy, get_buildroot())))

    # Archive publishing is optional; bail out early when there is none.
    if not archivepath:
      return
    ext = archive.archive_extensions.get(archive_ext, archive_ext)
    archive_copy = os.path.join(dist_dir, '{}.{}'.format(name, ext))
    safe_mkdir_for(archive_copy)  # Ensure parent dir exists
    atomic_copy(archivepath, archive_copy)
    self.context.log.info(
      'created archive copy {}'.format(os.path.relpath(archive_copy, get_buildroot())))
Exemple #15
0
    def work_for_vts(vts, compile_context, target_closure):
      """Compile one versioned-target set, double-checking the cache first.

      NOTE(review): closure over `self`, `compile_classpaths`,
      `compile_contexts`, `extra_compile_time_classpath`, `check_cache`,
      `compile_vts`, `register_vts` and `update_artifact_cache_vts_work`
      from the enclosing scope.
      """
      progress_message = compile_context.target.address.spec
      cp_entries = self._compute_classpath_entries(compile_classpaths,
                                                   target_closure,
                                                   compile_context,
                                                   extra_compile_time_classpath)

      # Analysis from already-compiled upstream contexts.
      upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

      # Capture a compilation log if requested.
      log_file = self._capture_log_file(compile_context.target)

      # Double check the cache before beginning compilation
      if not check_cache(vts):
        # Mutate analysis within a temporary directory, and move it to the final location
        # on success.
        tmpdir = os.path.join(self.analysis_tmpdir, compile_context.target.id)
        safe_mkdir(tmpdir)
        tmp_analysis_file = self._analysis_for_target(
            tmpdir, compile_context.target)
        # Seed the temp analysis from any prior analysis so the compile can
        # reuse previous results.
        if os.path.exists(compile_context.analysis_file):
          shutil.copy(compile_context.analysis_file, tmp_analysis_file)
        # Exactly one target is expected (tuple-unpack asserts it).
        target, = vts.targets
        compile_vts(vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    log_file,
                    progress_message,
                    target.platform)
        atomic_copy(tmp_analysis_file, compile_context.analysis_file)

        # Jar the compiled output.
        self._create_context_jar(compile_context)

      # Update the products with the latest classes.
      register_vts([compile_context])

      # Kick off the background artifact cache write.
      if update_artifact_cache_vts_work:
        self._write_to_artifact_cache(vts, compile_context, update_artifact_cache_vts_work)
  def execute(self):
    """Publish every locally built wheel recorded in the ``local_wheels``
    product into the distribution directory."""
    dist_targets = self.context.targets(is_local_python_dist)

    local_wheels_product = self.context.products.get('local_wheels')
    if not local_wheels_product:
      return
    safe_mkdir(self.dist_dir)  # Make sure dist dir is present.
    for target in dist_targets:
      wheels_by_dir = local_wheels_product.get(target)
      if not wheels_by_dir:
        continue
      for output_dir, base_names in wheels_by_dir.items():
        for base_name in base_names:
          wheel_output = os.path.join(output_dir, base_name)
          self.context.log.debug('found local built wheels {}'.format(wheel_output))
          # Publish a copy of the wheel under the dist dir.
          wheel_copy = os.path.join(self.dist_dir, base_name)
          atomic_copy(wheel_output, wheel_copy)
          self.context.log.info(
            'created wheel {}'.format(os.path.relpath(wheel_copy, get_buildroot())))
Exemple #17
0
  def execute(self):
    """Build (or reuse) a ``.pex`` lambda for each python-lambda target,
    register it in the ``python_aws_lambda`` product, and copy it into distdir."""
    lambda_targets = self.get_targets(self._is_python_lambda)
    with self.invalidated(targets=lambda_targets) as invalidation_check:
      python_lambda_product = self.context.products.get_data('python_aws_lambda', dict)
      for vt in invalidation_check.all_vts:
        lambda_path = os.path.join(vt.results_dir, '{}.pex'.format(vt.target.name))
        if vt.valid:
          self.context.log.debug('Using existing lambda for {}'.format(vt.target))
        else:
          self.context.log.debug('Existing lambda for {} is invalid, rebuilding'.format(vt.target))
          self._create_lambda(vt.target, lambda_path)

        python_lambda_product[vt.target] = lambda_path
        self.context.log.debug('created {}'.format(os.path.relpath(lambda_path, get_buildroot())))

        # Put a copy in distdir.
        distdir = self.get_options().pants_distdir
        lambda_copy = os.path.join(distdir, os.path.basename(lambda_path))
        safe_mkdir_for(lambda_copy)
        atomic_copy(lambda_path, lambda_copy)
        self.context.log.info('created lambda {}'.format(
          os.path.relpath(lambda_copy, get_buildroot())))
Exemple #18
0
  def execute(self):
    """Publish every locally built wheel recorded in the ``local_wheels``
    product into the distribution directory."""
    dist_targets = self.context.targets(is_local_python_dist)

    local_wheels_product = self.context.products.get('local_wheels')
    if not local_wheels_product:
      return
    safe_mkdir(self.dist_dir)  # Make sure dist dir is present.
    for dist_target in dist_targets:
      wheels_by_dir = local_wheels_product.get(dist_target)
      if not wheels_by_dir:
        continue
      for output_dir, base_names in wheels_by_dir.items():
        for base_name in base_names:
          wheel_output = os.path.join(output_dir, base_name)
          self.context.log.debug(f'found local built wheels {wheel_output}')
          # Publish a copy of the wheel under the dist dir.
          wheel_copy = os.path.join(self.dist_dir, base_name)
          atomic_copy(wheel_output, wheel_copy)
          self.context.log.info(
            'created wheel {}'.format(os.path.relpath(wheel_copy, get_buildroot())))
Exemple #19
0
  def shade_jar(self, shading_rules, jar_path):
    """Shade a jar in place using the given shading rules.

    This *overwrites* the existing jar file at ``jar_path``.

    :param shading_rules: predefined rules for shading
    :param jar_path: The filepath to the jar that should be shaded.
    :returns: ``jar_path`` on success.
    :raises TaskError: if the shading tool fails or emits no output jar.
    """
    self.context.log.debug('Shading {}.'.format(jar_path))
    with temporary_dir() as scratch_dir:
      # Shade into a scratch file; the input jar is replaced only on success.
      shaded_jar = os.path.join(scratch_dir, os.path.basename(jar_path))
      with self.shader.binary_shader_for_rules(shaded_jar, jar_path, shading_rules) as shade_runner:
        exit_code = execute_runner(shade_runner, workunit_factory=self.context.new_workunit,
                                   workunit_name='jarjar')
        if exit_code != 0:
          raise TaskError('Shading tool failed to shade {0} (error code {1})'.format(jar_path,
                                                                                     exit_code))
        if not os.path.exists(shaded_jar):
          raise TaskError('Shading tool returned success for {0}, but '
                          'the output jar was not found at {1}'.format(jar_path, shaded_jar))
        atomic_copy(shaded_jar, jar_path)
        return jar_path
Exemple #20
0
  def _do_resolve(self, confs, executor, extra_args, global_vts, pinned_artifacts,
                       raw_target_classpath_file, resolve_hash_name, resolve_workdir,
                       workunit_name):
    """Run an ivy resolve and materialize the classpath file and reports.

    :param confs: ivy configurations to resolve.
    :param executor: executor used to run ivy.
    :param extra_args: extra command-line args passed through to ivy.
    :param global_vts: versioned targets whose closure is resolved.
    :param pinned_artifacts: artifacts whose versions are pinned for the resolve.
    :param raw_target_classpath_file: final destination of ivy's cachepath output.
    :param resolve_hash_name: hash naming this resolve in the ivy cache.
    :param resolve_workdir: workdir for ivyxml and the copied reports.
    :param workunit_name: name for the ivy execution workunit.
    :raises self.Error: if ivy prep fails or no classpath file is produced.
    """
    safe_mkdir(resolve_workdir)
    ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=self.context.new_workunit)

    # Write the classpath to a temp path; safe_concurrent_creation presumably
    # moves it into place only on success — confirm in the util module.
    with safe_concurrent_creation(raw_target_classpath_file) as raw_target_classpath_file_tmp:
      args = ['-cachepath', raw_target_classpath_file_tmp] + extra_args

      targets = global_vts.targets
      # TODO(John Sirois): merge the code below into IvyUtils or up here; either way, better
      # diagnostics can be had in `IvyUtils.generate_ivy` if this is done.
      # See: https://github.com/pantsbuild/pants/issues/2239
      jars, global_excludes = IvyUtils.calculate_classpath(targets)

      # Don't pass global excludes to ivy when using soft excludes.
      if self.get_options().soft_excludes:
        global_excludes = []

      ivyxml = self._ivy_xml_path(resolve_workdir)
      # Serialize ivyxml generation, execution and report copying under the
      # shared ivy lock.
      with IvyUtils.ivy_lock:
        try:
          IvyUtils.generate_ivy(targets, jars, global_excludes, ivyxml, confs,
                                resolve_hash_name, pinned_artifacts)
        except IvyUtils.IvyError as e:
          raise self.Error('Failed to prepare ivy resolve: {}'.format(e))

        self._exec_ivy(ivy, executor, confs, ivyxml, args, workunit_name)

        # Copy ivy resolve file into resolve workdir.
        for conf in confs:
          atomic_copy(IvyUtils.xml_report_path(self.ivy_cache_dir, resolve_hash_name, conf),
                      self._resolve_report_path(resolve_workdir, conf))

      # A missing cachepath file means ivy silently failed to resolve.
      if not os.path.exists(raw_target_classpath_file_tmp):
        raise self.Error('Ivy failed to create classpath file at {}'
                         .format(raw_target_classpath_file_tmp))

    logger.debug('Moved ivy classfile file to {dest}'.format(dest=raw_target_classpath_file))