Example 1
  def _link_current_reports(self, report_dir, link_dir, preserve):
    # Kill everything not preserved.
    for name in os.listdir(link_dir):
      path = os.path.join(link_dir, name)
      if name not in preserve:
        if os.path.isdir(path):
          safe_rmtree(path)
        else:
          os.unlink(path)

    # Link ~all the isolated run/ dir contents back up to the stable workdir
    # NB: When batching is enabled, files can be emitted under different subdirs. If those files
    # have the same name, the last file with that name will be the one that is used. This may
    # result in a loss of information from the ignored files. We're OK with this because:
    # a) We're planning on deprecating this loss of information.
    # b) It is the same behavior as existed before batching was added.
    for root, dirs, files in safe_walk(report_dir, topdown=True):
      dirs.sort()  # Ensure a consistent walk order for sanity's sake.
      for f in itertools.chain(fnmatch.filter(files, '*.err.txt'),
                               fnmatch.filter(files, '*.out.txt'),
                               fnmatch.filter(files, 'TEST-*.xml')):
        src = os.path.join(root, f)
        dst = os.path.join(link_dir, f)
        safe_delete(dst)
        os.symlink(src, dst)

    for path in os.listdir(report_dir):
      if path in ('coverage', 'reports'):
        src = os.path.join(report_dir, path)
        dst = os.path.join(link_dir, path)
        os.symlink(src, dst)
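
The delete-then-relink step above is a common idiom: os.symlink fails if the destination already exists, so any previous file or link is removed first. A minimal standalone sketch of the same idea, outside of Pants (names and paths are illustrative):

import errno
import os

def relink(src, dst):
  # Remove whatever currently sits at dst (a file or a symlink), then point dst at src.
  try:
    os.unlink(dst)
  except OSError as e:
    if e.errno != errno.ENOENT:
      raise
  os.symlink(src, dst)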
Example 2
  def store_and_use_artifact(self, cache_key, src, results_dir=None):
    """Store and then extract the artifact from the given `src` iterator for the given cache_key.

    :param cache_key: Cache key for the artifact.
    :param src: Iterator over binary data to store for the artifact.
    :param str results_dir: The path to the expected destination of the artifact extraction: will
      be cleared both before extraction, and after a failure to extract.
    """
    with self._tmpfile(cache_key, 'read') as tmp:
      for chunk in src:
        tmp.write(chunk)
      tmp.close()
      tarball = self._store_tarball(cache_key, tmp.name)
      artifact = self._artifact(tarball)

      # NOTE(mateo): The two clean=True args passed in this method are likely safe, since the cache will by
      # definition be dealing with unique results_dir, as opposed to the stable vt.results_dir (aka 'current').
      # But if by chance it's passed the stable results_dir, safe_makedir(clean=True) will silently convert it
      # from a symlink to a real dir and cause mysterious 'Operation not permitted' errors until the workdir is cleaned.
      if results_dir is not None:
        safe_mkdir(results_dir, clean=True)

      try:
        artifact.extract()
      except Exception:
        # Do our best to clean up after a failed artifact extraction. If a results_dir has been
        # specified, it is "expected" to represent the output destination of the extracted
        # artifact, and so removing it should clear any partially extracted state.
        if results_dir is not None:
          safe_mkdir(results_dir, clean=True)
        safe_delete(tarball)
        raise

      return True
Example 3
  def store_and_use_artifact(self, cache_key, src, results_dir=None):
    """Store and then extract the artifact from the given `src` iterator for the given cache_key.

    :param cache_key: Cache key for the artifact.
    :param src: Iterator over binary data to store for the artifact.
    :param str results_dir: The path to the expected destination of the artifact extraction: will
      be cleared both before extraction, and after a failure to extract.
    """
    with self._tmpfile(cache_key, 'read') as tmp:
      for chunk in src:
        tmp.write(chunk)
      tmp.close()
      tarball = self._store_tarball(cache_key, tmp.name)
      artifact = self._artifact(tarball)

      if results_dir is not None:
        safe_mkdir(results_dir, clean=True)

      try:
        artifact.extract()
      except Exception:
        # Do our best to clean up after a failed artifact extraction. If a results_dir has been
        # specified, it is "expected" to represent the output destination of the extracted
        # artifact, and so removing it should clear any partially extracted state.
        if results_dir is not None:
          safe_mkdir(results_dir, clean=True)
        safe_delete(tarball)
        raise

      return True
Example 4
  def test_invalid_frozen_resolve_file_runs_resolve(self):
    junit_jar_lib = self._make_junit_target()

    with self._temp_workdir() as workdir:
      self.resolve([junit_jar_lib])

      # Find the resolution work dir.
      ivy_workdir = os.path.join(workdir, 'ivy')
      ivy_subdirs = os.listdir(ivy_workdir)
      ivy_subdirs.remove('jars')
      self.assertEqual(1, len(ivy_subdirs))
      resolve_workdir = os.path.join(ivy_workdir, ivy_subdirs[0])

      # Remove a required file for a simple load to force a fetch.
      resolve_report = os.path.join(resolve_workdir, 'resolve-report-default.xml')
      self._assertIsFile(resolve_report)
      safe_delete(resolve_report)

      # Open resolution.json, and make it invalid json.
      frozen_resolve_filename = os.path.join(resolve_workdir, 'resolution.json')
      with open(frozen_resolve_filename, 'w') as f:
        f.write('not json!')

      self.resolve([junit_jar_lib])

      self._assertIsFile(resolve_report)
Example 5
  def select_binary(self, name):
    """Selects a binary matching the current os and architecture.

    :param name: the name of the binary to fetch.
    :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
      and name could be found.
    """
    # TODO(John Sirois): finish doc of the path structure expected under base_path
    binary_path = BinaryUtil.select_binary_base_path(self._supportdir, self._version, name)
    bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir))
    bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
    if not os.path.exists(bootstrapped_binary_path):
      downloadpath = bootstrapped_binary_path + '~'
      try:
        with self.select_binary_stream(name) as stream:
          with safe_open(downloadpath, 'wb') as bootstrapped_binary:
            bootstrapped_binary.write(stream())
          os.rename(downloadpath, bootstrapped_binary_path)
          chmod_plus_x(bootstrapped_binary_path)
      finally:
        safe_delete(downloadpath)

    logger.debug('Selected {binary} binary bootstrapped to: {path}'
                 .format(binary=name, path=bootstrapped_binary_path))
    return bootstrapped_binary_path
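
The '~'-suffixed download path above is a write-then-rename pattern: the binary is written to a scratch path and only renamed into place once the write completes, so a partial download never sits at the final location. A standalone sketch of the same idea (the helper name and arguments are invented for illustration):

import os

def atomic_write(path, data):
  tmp = path + '~'
  try:
    with open(tmp, 'wb') as f:
      f.write(data)
    os.rename(tmp, path)  # atomic when tmp and path are on the same filesystem
  finally:
    if os.path.exists(tmp):
      os.remove(tmp)  # clean up a leftover scratch file after a failed write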
Example 6
  def __init__(self,
               results_dir_root,
               cache_key_generator,
               build_invalidator_dir,
               invalidate_dependents,
               fingerprint_strategy=None,
               invalidation_report=None,
               task_name=None,
               task_version=None,
               artifact_write_callback=lambda _: None):
    """
    :API: public
    """
    self._cache_key_generator = cache_key_generator
    self._task_name = task_name or 'UNKNOWN'
    self._task_version = task_version or 'Unknown_0'
    self._invalidate_dependents = invalidate_dependents
    self._invalidator = BuildInvalidator(build_invalidator_dir)
    self._fingerprint_strategy = fingerprint_strategy
    self._artifact_write_callback = artifact_write_callback
    self.invalidation_report = invalidation_report

    # Create the task-versioned prefix of the results dir, and a stable symlink to it (useful when debugging).
    self._results_dir_prefix = os.path.join(results_dir_root, sha1(self._task_version).hexdigest()[:12])
    safe_mkdir(self._results_dir_prefix)
    stable_prefix = os.path.join(results_dir_root, self._STABLE_DIR_NAME)
    safe_delete(stable_prefix)
    relative_symlink(self._results_dir_prefix, stable_prefix)
Example 7
  def test_deploy_excludes(self):
    jar_filename = os.path.join('dist', 'deployexcludes.jar')
    safe_delete(jar_filename)
    command = [
      '--no-compile-zinc-capture-classpath',
      'binary',
      'testprojects/src/java/org/pantsbuild/testproject/deployexcludes',
    ]
    with self.pants_results(command) as pants_run:
      self.assert_success(pants_run)
      # The resulting binary should not contain any guava classes
      with open_zip(jar_filename) as jar_file:
        self.assertEquals({'META-INF/',
                           'META-INF/MANIFEST.MF',
                           'org/',
                           'org/pantsbuild/',
                           'org/pantsbuild/testproject/',
                           'org/pantsbuild/testproject/deployexcludes/',
                           'org/pantsbuild/testproject/deployexcludes/DeployExcludesMain.class'},
                          set(jar_file.namelist()))

      # This jar should not run by itself, missing symbols
      self.run_java(java_args=['-jar', jar_filename],
                    expected_returncode=1,
                    expected_output='java.lang.NoClassDefFoundError: '
                                    'com/google/common/collect/ImmutableSortedSet')

      # But adding back the deploy_excluded symbols should result in a clean run.
      classpath = [jar_filename,
                   os.path.join(pants_run.workdir,
                                'ivy/jars/com.google.guava/guava/jars/guava-18.0.jar')]

      self.run_java(java_args=['-cp', os.pathsep.join(classpath),
                               'org.pantsbuild.testproject.deployexcludes.DeployExcludesMain'],
                    expected_output='DeployExcludes Hello World')
Example 8
 def instrument(self, targets, tests, compute_junit_classpath):
   junit_classpath = compute_junit_classpath()
   cobertura_cp = self._task_exports.tool_classpath('cobertura-instrument')
   aux_classpath = os.pathsep.join(relativize_paths(junit_classpath, get_buildroot()))
   safe_delete(self._coverage_datafile)
   classes_by_target = self._context.products.get_data('classes_by_target')
   for target in targets:
     if self.is_coverage_target(target):
       classes_by_rootdir = classes_by_target.get(target)
       if classes_by_rootdir:
         for root, products in classes_by_rootdir.rel_paths():
           self._rootdirs[root].update(products)
   # Cobertura uses regular expressions for filters, and even then there are still problems
   # with filtering. It turned out to be easier to just select which classes to instrument
   # by filtering them here.
   # TODO(ji): Investigate again how we can use cobertura's own filtering mechanisms.
   if self._coverage_filters:
     for basedir, classes in self._rootdirs.items():
       updated_classes = []
       for cls in classes:
         does_match = False
         for positive_filter in self._include_filters:
           if fnmatch.fnmatchcase(_classfile_to_classname(cls), positive_filter):
             does_match = True
         for negative_filter in self._exclude_filters:
           if fnmatch.fnmatchcase(_classfile_to_classname(cls), negative_filter):
             does_match = False
         if does_match:
           updated_classes.append(cls)
       self._rootdirs[basedir] = updated_classes
   for basedir, classes in self._rootdirs.items():
     if not classes:
       continue  # No point in running instrumentation if there is nothing to instrument!
     self._nothing_to_instrument = False
     args = [
       '--basedir',
       basedir,
       '--datafile',
       self._coverage_datafile,
       '--auxClasspath',
       aux_classpath,
       ]
     with temporary_file_path(cleanup=False) as instrumented_classes_file:
       with file(instrumented_classes_file, 'wb') as icf:
         icf.write(('\n'.join(classes) + '\n').encode('utf-8'))
       self._context.log.debug('instrumented classes in {0}'.format(instrumented_classes_file))
       args.append('--listOfFilesToInstrument')
       args.append(instrumented_classes_file)
       main = 'net.sourceforge.cobertura.instrument.InstrumentMain'
       execute_java = self.preferred_jvm_distribution_for_targets(targets).execute_java
       result = execute_java(classpath=cobertura_cp,
                             main=main,
                             jvm_options=self._coverage_jvm_options,
                             args=args,
                             workunit_factory=self._context.new_workunit,
                             workunit_name='cobertura-instrument')
     if result != 0:
       raise TaskError("java {0} ... exited non-zero ({1})"
                       " 'failed to instrument'".format(main, result))
Example 9
  def _default_work_for_vts(self, vts, ctx, input_classpath_product_key, counter, all_compile_contexts, output_classpath_product):
    progress_message = ctx.target.address.spec

    # Double check the cache before beginning compilation
    hit_cache = self.check_cache(vts, counter)

    if not hit_cache:
      # Compute the compile classpath for this target.
      dependency_cp_entries = self._zinc.compile_classpath_entries(
        input_classpath_product_key,
        ctx.target,
        extra_cp_entries=self._extra_compile_time_classpath,
      )

      upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, dependency_cp_entries))

      is_incremental = self.should_compile_incrementally(vts, ctx)
      if not is_incremental:
        # Purge existing analysis file in non-incremental mode.
        safe_delete(ctx.analysis_file)
        # Work around https://github.com/pantsbuild/pants/issues/3670
        safe_rmtree(ctx.classes_dir.path)

      dep_context = DependencyContext.global_instance()
      tgt, = vts.targets
      compiler_option_sets = dep_context.defaulted_property(tgt, 'compiler_option_sets')
      zinc_file_manager = dep_context.defaulted_property(tgt, 'zinc_file_manager')
      with Timer() as timer:
        directory_digest = self._compile_vts(vts,
                          ctx,
                          upstream_analysis,
                          dependency_cp_entries,
                          progress_message,
                          tgt.platform,
                          compiler_option_sets,
                          zinc_file_manager,
                          counter)

      ctx.classes_dir = ClasspathEntry(ctx.classes_dir.path, directory_digest)

      self._record_target_stats(tgt,
                                len(dependency_cp_entries),
                                len(ctx.sources),
                                timer.elapsed,
                                is_incremental,
                                'compile')

      # Write any additional resources for this target to the target workdir.
      self.write_extra_resources(ctx)

      # Jar the compiled output.
      self._create_context_jar(ctx)

    # Update the products with the latest classes.
    output_classpath_product.add_for_target(
      ctx.target,
      [(conf, self._classpath_for_context(ctx)) for conf in self._confs],
    )
    self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)
Example 10
  def test_list_changed(self):
    deleted_file = 'src/python/sources/sources.py'

    with create_isolated_git_repo() as worktree:
      safe_delete(os.path.join(worktree, deleted_file))
      pants_run = self.run_pants(['--changed-parent=HEAD', 'list'])
      self.assert_success(pants_run)
      self.assertEqual(pants_run.stdout_data.strip(), 'src/python/sources:sources')
Example 11
 def is_active_pidfile(pid_filename):
   pid = pid_from_pidfile(pid_filename)
   if not psutil.pid_exists(pid):
     logger.info("Pid {pid} not active. Deleting stale pidfile: {pid_filename}"
                 .format(pid=pid, pid_filename=pid_filename))
     safe_delete(pid_filename)
     return False
   return True
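
A hypothetical caller of the helper above, assuming the pid_from_pidfile and psutil dependencies it uses; the pidfile path is illustrative:

pidfile = '/var/run/mydaemon.pid'
if is_active_pidfile(pidfile):
  print('daemon is still running; leaving {} in place'.format(pidfile))
else:
  print('pid was dead; the stale pidfile has been deleted and a restart is safe')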
Example 12
    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Capture a compilation log if requested.
      log_file = ctx.log_file if self._capture_log else None

      # Double check the cache before beginning compilation
      hit_cache = check_cache(vts)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = [compile_context.classes_dir]
        cp_entries.extend(ClasspathUtil.compute_classpath(ctx.dependencies(self._dep_context),
                                                          classpath_products,
                                                          extra_compile_time_classpath,
                                                          self._confs))
        # TODO: always provide transitive analysis, but not always all classpath entries?
        upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

        # Write analysis to a temporary file, and move it to the final location on success.
        tmp_analysis_file = "{}.tmp".format(ctx.analysis_file)
        if should_compile_incrementally(vts):
          # If this is an incremental compile, rebase the analysis to our new classes directory.
          self._analysis_tools.rebase_from_path(ctx.analysis_file,
                                                tmp_analysis_file,
                                                vts.previous_results_dir,
                                                vts.results_dir)
        else:
          # Otherwise, simply ensure that it is empty.
          safe_delete(tmp_analysis_file)
        tgt, = vts.targets
        fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
        self._compile_vts(vts,
                          ctx.sources,
                          tmp_analysis_file,
                          upstream_analysis,
                          cp_entries,
                          ctx.classes_dir,
                          log_file,
                          progress_message,
                          tgt.platform,
                          fatal_warnings,
                          counter)
        os.rename(tmp_analysis_file, ctx.analysis_file)
        self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self._register_vts([ctx])

      # Once products are registered, check for unused dependencies (if enabled).
      if not hit_cache and self._unused_deps_check_enabled:
        self._check_unused_deps(ctx)
Example 13
        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Capture a compilation log if requested.
            log_file = ctx.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = [ctx.classes_dir]
                cp_entries.extend(
                    ClasspathUtil.compute_classpath(
                        ctx.dependencies(self._dep_context),
                        classpath_products,
                        extra_compile_time_classpath,
                        self._confs,
                    )
                )
                upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

                if not should_compile_incrementally(vts, ctx):
                    # Purge existing analysis file in non-incremental mode.
                    safe_delete(ctx.analysis_file)
                    # Work around https://github.com/pantsbuild/pants/issues/3670
                    safe_rmtree(ctx.classes_dir)

                tgt, = vts.targets
                fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
                self._compile_vts(
                    vts,
                    ctx.sources,
                    ctx.analysis_file,
                    upstream_analysis,
                    cp_entries,
                    ctx.classes_dir,
                    log_file,
                    progress_message,
                    tgt.platform,
                    fatal_warnings,
                    counter,
                )
                self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            self._register_vts([ctx])

            # Once products are registered, check for unused dependencies (if enabled).
            if not hit_cache and self._unused_deps_check_enabled:
                self._check_unused_deps(ctx)
Example 14
  def _handle_duplicate_sources(self, vt, sources):
    """Handles duplicate sources generated by the given gen target by either failure or deletion.

    This method should be called after all dependencies have been injected into the graph, but
    before injecting the synthetic version of this target.

    Returns a boolean indicating whether it modified the underlying filesystem.

    NB(gm): Some code generators may re-generate code that their dependent libraries generate.
    This results in targets claiming to generate sources that they really don't, so we try to
    filter out sources that were actually generated by dependencies of the target. This causes
    the code generated by the dependencies to 'win' over the code generated by dependees. By
    default, this behavior is disabled, and duplication in generated sources will raise a
    TaskError. This is controlled by the --allow-dups flag.
    """
    target = vt.target
    target_workdir = vt.results_dir

    # Walk dependency gentargets and record any sources owned by those targets that are also
    # owned by this target.
    duplicates_by_target = OrderedDict()
    def record_duplicates(dep):
      if dep == target or not self.is_gentarget(dep.concrete_derived_from):
        return False
      duped_sources = [s for s in dep.sources_relative_to_source_root() if s in sources.files and
                       not self.ignore_dup(target, dep, s)]
      if duped_sources:
        duplicates_by_target[dep] = duped_sources
    target.walk(record_duplicates)

    # If there were no dupes, we're done.
    if not duplicates_by_target:
      return False

    # If there were duplicates warn or error.
    messages = ['{target} generated sources that had already been generated by dependencies.'
                .format(target=target.address.spec)]
    for dep, duped_sources in duplicates_by_target.items():
      messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
      messages.extend(['\t\t{}'.format(source) for source in duped_sources])
    message = '\n'.join(messages)
    if self.get_options().allow_dups:
      logger.warn(message)
    else:
      raise self.DuplicateSourceError(message)

    did_modify = False

    # Finally, remove duplicates from the workdir. This prevents us from having to worry
    # about them during future incremental compiles.
    for dep, duped_sources in duplicates_by_target.items():
      for duped_source in duped_sources:
        safe_delete(os.path.join(target_workdir, duped_source))
        did_modify = True
    if did_modify:
      Digest.clear(vt.current_results_dir)
    return did_modify
Example 15
 def delete_old_target_output_files(classpath_prefix):
     """Delete existing output files or symlinks for target."""
     directory, basename = os.path.split(classpath_prefix)
     pattern = re.compile(r"^{basename}(([0-9]+)(\.jar)?|classpath\.txt)$".format(basename=re.escape(basename)))
     files = [filename for filename in os.listdir(directory) if pattern.match(filename)]
     for rel_path in files:
         path = os.path.join(directory, rel_path)
         if os.path.islink(path) or os.path.isfile(path):
             safe_delete(path)
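
To make the filename pattern above concrete, here is a small self-contained illustration (the 'libfoo-' prefix is invented): the regex matches numbered jar entries, bare numbered entries, and the classpath manifest sharing the prefix, and nothing else.

import re

basename = 'libfoo-'  # stands in for os.path.split(classpath_prefix)[1]
pattern = re.compile(r"^{basename}(([0-9]+)(\.jar)?|classpath\.txt)$".format(basename=re.escape(basename)))
assert pattern.match('libfoo-0.jar')            # numbered jar entry
assert pattern.match('libfoo-12')               # bare numbered entry
assert pattern.match('libfoo-classpath.txt')    # classpath manifest
assert not pattern.match('libfoo-sources.zip')  # unrelated files are left alone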
Example 16
 def test_go_crosscompile(self):
   # We assume that targeting windows is cross-compiling.
   output_file = "dist/go/bin/hello.exe"
   safe_delete(output_file)
   args = ['binary',
           'contrib/go/examples/src/go/hello']
   pants_run = self.run_pants(args, extra_env={"GOOS": "windows"})
   self.assert_success(pants_run)
   self.assertIn(b"for MS Windows", subprocess.check_output(["file", output_file]))
Example 17
  def instrument(self, targets, compute_junit_classpath, execute_java_for_targets):
    # Setup an instrumentation classpath based on the existing runtime classpath.
    runtime_classpath = self._context.products.get_data('runtime_classpath')
    instrumentation_classpath = self._context.products.safe_create_data('instrument_classpath',
                                                                        runtime_classpath.copy)
    self.initialize_instrument_classpath(targets, instrumentation_classpath)

    cobertura_cp = self._settings.tool_classpath('cobertura-instrument')
    safe_delete(self._coverage_datafile)
    files_to_instrument = []
    for target in targets:
      if self.is_coverage_target(target):
        paths = instrumentation_classpath.get_for_target(target)
        for (name, path) in paths:
          files_to_instrument.append(path)

    if len(files_to_instrument) > 0:
      self._nothing_to_instrument = False

      unique_files = list(set(files_to_instrument))
      relativize_paths(unique_files, self._settings.workdir)

      args = [
        '--basedir',
        self._settings.workdir,
        '--datafile',
        self._coverage_datafile,
      ]
      # apply class incl/excl filters
      if len(self._include_classes) > 0:
        for pattern in self._include_classes:
          args += ["--includeClasses", pattern]
      else:
        args += ["--includeClasses", '.*']  # default to instrumenting all classes
      for pattern in self._exclude_classes:
        args += ["--excludeClasses", pattern]

      with temporary_file() as tmp_file:
        tmp_file.write("\n".join(unique_files))
        tmp_file.flush()

        args += ["--listOfFilesToInstrument", tmp_file.name]

        main = 'net.sourceforge.cobertura.instrument.InstrumentMain'
        self._context.log.debug(
          "executing cobertura instrumentation with the following args: {}".format(args))
        result = execute_java_for_targets(targets,
                                          classpath=cobertura_cp,
                                          main=main,
                                          jvm_options=self._coverage_jvm_options,
                                          args=args,
                                          workunit_factory=self._context.new_workunit,
                                          workunit_name='cobertura-instrument')
        if result != 0:
          raise TaskError("java {0} ... exited non-zero ({1})"
                          " 'failed to instrument'".format(main, result))
Example 18
 def safe_delete_current_directory(directory):
   """Delete only the files or symlinks under the current directory."""
   try:
     for name in os.listdir(directory):
       path = os.path.join(directory, name)
       if os.path.islink(path) or os.path.isfile(path):
         safe_delete(path)
   except OSError as e:
     if e.errno != errno.ENOENT:
       raise
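
A brief usage sketch for the helper above (paths are illustrative): only regular files and symlinks directly under the directory are deleted, subdirectories are kept, and a missing directory is ignored because ENOENT is swallowed.

safe_delete_current_directory('/tmp/scratch')          # removes /tmp/scratch/foo.log, keeps /tmp/scratch/subdir/
safe_delete_current_directory('/tmp/does-not-exist')   # no exception is raised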
Example 19
  def _purge_metadata(self):
    assert not self.is_alive(), 'aborting attempt to purge metadata for a running process!'

    for f in (self.get_pid_path(), self.get_socket_path()):
      if f and os.path.exists(f):
        try:
          logging.debug('purging {file}'.format(file=f))
          safe_delete(f)
        except OSError as e:
          logging.warning('failed to unlink {file}: {exc}'.format(file=f, exc=e))
Example 20
 def test_changed_with_deleted_resource(self):
   with create_isolated_git_repo() as worktree:
     safe_delete(os.path.join(worktree, 'src/python/sources/sources.txt'))
     pants_run = self.run_pants(['list', '--changed-parent=HEAD'])
     self.assert_success(pants_run)
     changed_targets = [
       'src/python/sources:overlapping-globs',
       'src/python/sources:some-missing-some-not',
       'src/python/sources:text',
     ]
     self.assertEqual(pants_run.stdout_data.strip(),
                      '\n'.join(changed_targets))
Example 21
  def setUp(self):
    self.basedir = safe_mkdtemp()

    self.file_list = ['a', 'b', 'c']
    self.file_tar = os.path.join(self.basedir, 'test.tar')

    with TarFile.open(self.file_tar, mode='w') as tar:
      for f in self.file_list:
        full_path = os.path.join(self.basedir, f)
        touch(full_path)
        tar.add(full_path, f)
        safe_delete(full_path)
Example 22
 def validate_analysis(self, path):
   """Throws a TaskError for invalid analysis files."""
   try:
     self._analysis_parser.validate_analysis(path)
   except Exception as e:
     if self._clear_invalid_analysis:
       self.context.log.warn("Invalid analysis detected at path {} ... pants will remove these "
                             "automatically, but\nyou may experience spurious warnings until "
                             "clean-all is executed.\n{}".format(path, e))
       safe_delete(path)
     else:
       raise TaskError("An internal build directory contains invalid/mismatched analysis: please "
                       "run `clean-all` if your tools versions changed recently:\n{}".format(e))
Example 23
  def _empty_analysis_cleanup(self, compile_context):
    """Addresses cases where failed compilations leave behind invalid analysis.

    If compilation was creating analysis for the first time, and it fails, then the analysis
    will be empty/invalid.
    """
    preexisting_analysis = os.path.exists(compile_context.analysis_file)
    try:
      yield
    except:
      if not preexisting_analysis:
        safe_delete(compile_context.analysis_file)
      raise
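
A sketch of how a guard like this is typically used from another method of the same class, assuming it is exposed as a context manager (decorated with contextlib.contextmanager in the original code); the compile call is hypothetical:

with self._empty_analysis_cleanup(compile_context):
  self._run_compiler(compile_context)  # hypothetical call that may raise
# If the call raises and no analysis file existed beforehand, the partial/empty file is
# deleted so the next attempt does not trip over invalid analysis.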
Example 24
def temporary_file(root_dir=None, cleanup=True):
  """
    A with-context that creates a temporary file and returns a writeable file descriptor to it.

    You may specify the following keyword args:
      root_dir [path]: The parent directory to create the temporary file.
      cleanup [True/False]: Whether or not to clean up the temporary file.
  """
  with tempfile.NamedTemporaryFile(dir=root_dir, delete=False) as fd:
    try:
      yield fd
    finally:
      if cleanup:
        safe_delete(fd.name)
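
A minimal usage sketch for the helper above, assuming it is wrapped with contextlib.contextmanager in its original module (the payload is illustrative):

with temporary_file() as fd:
  fd.write(b'scratch data')  # NamedTemporaryFile defaults to binary mode
  fd.flush()
  scratch_path = fd.name
# With cleanup=True (the default), safe_delete removes the file when the block exits.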
Example 25
        def work_for_vts(vts, compile_context):
            progress_message = compile_context.target.address.spec

            # Capture a compilation log if requested.
            log_file = compile_context.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = self._compute_classpath_entries(
                    classpath_products, compile_context, extra_compile_time_classpath
                )
                # TODO: always provide transitive analysis, but not always all classpath entries?
                upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

                # Write analysis to a temporary file, and move it to the final location on success.
                tmp_analysis_file = "{}.tmp".format(compile_context.analysis_file)
                if should_compile_incrementally(vts):
                    # If this is an incremental compile, rebase the analysis to our new classes directory.
                    self._analysis_tools.rebase_from_path(
                        compile_context.analysis_file, tmp_analysis_file, vts.previous_results_dir, vts.results_dir
                    )
                else:
                    # Otherwise, simply ensure that it is empty.
                    safe_delete(tmp_analysis_file)
                target, = vts.targets
                self._compile_vts(
                    vts,
                    compile_context.sources,
                    tmp_analysis_file,
                    upstream_analysis,
                    cp_entries,
                    compile_context.classes_dir,
                    log_file,
                    progress_message,
                    target.platform,
                )
                os.rename(tmp_analysis_file, compile_context.analysis_file)
                self._analysis_tools.relativize(compile_context.analysis_file, compile_context.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(compile_context)

                # Jar the compiled output.
                self._create_context_jar(compile_context)

            # Update the products with the latest classes.
            self._register_vts([compile_context])
Example 26
  def test_when_a_report_for_a_conf_is_missing_fall_back_to_full_resolve(self):
    junit_dep = JarDependency('junit', 'junit', rev='4.12')
    junit_jar_lib = self.make_target('//:a', JarLibrary, jars=[junit_dep])
    with self._temp_workdir() as workdir:
      self.resolve([junit_jar_lib])

      # Remove report from workdir.
      ivy_resolve_workdir = self._find_resolve_workdir(workdir)
      report_path = os.path.join(ivy_resolve_workdir, 'resolve-report-default.xml')
      safe_delete(report_path)

      self.resolve([junit_jar_lib])

      self.assertTrue(os.path.isfile(report_path),
                      'Expected {} to exist as a file'.format(report_path))
Example 27
    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Double check the cache before beginning compilation
      hit_cache = self.check_cache(vts, counter)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = self._cp_entries_for_ctx(ctx, 'runtime_classpath')

        upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, cp_entries))

        is_incremental = self.should_compile_incrementally(vts, ctx)
        if not is_incremental:
          # Purge existing analysis file in non-incremental mode.
          safe_delete(ctx.analysis_file)
          # Work around https://github.com/pantsbuild/pants/issues/3670
          safe_rmtree(ctx.classes_dir)

        dep_context = DependencyContext.global_instance()
        tgt, = vts.targets
        fatal_warnings = dep_context.defaulted_property(tgt, lambda x: x.fatal_warnings)
        zinc_file_manager = dep_context.defaulted_property(tgt, lambda x: x.zinc_file_manager)
        with Timer() as timer:
          self._compile_vts(vts,
                            ctx,
                            upstream_analysis,
                            cp_entries,
                            progress_message,
                            tgt.platform,
                            fatal_warnings,
                            zinc_file_manager,
                            counter)
        self._record_target_stats(tgt,
                                  len(cp_entries),
                                  len(ctx.sources),
                                  timer.elapsed,
                                  is_incremental,
                                  'compile')

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)
Example 28
    def instrument(self, targets, tests, compute_junit_classpath, execute_java_for_targets):
        instrumentation_classpath = self.initialize_instrument_classpath(targets)
        junit_classpath = compute_junit_classpath()
        cobertura_cp = self._settings.tool_classpath("cobertura-instrument")
        aux_classpath = os.pathsep.join(relativize_paths(junit_classpath, get_buildroot()))
        safe_delete(self._coverage_datafile)
        files_to_instrument = []
        for target in targets:
            if self.is_coverage_target(target):
                paths = instrumentation_classpath.get_for_target(target)
                for (name, path) in paths:
                    files_to_instrument.append(path)

        if len(files_to_instrument) > 0:
            self._nothing_to_instrument = False

            unique_files = list(set(files_to_instrument))
            relativize_paths(unique_files, self._settings.workdir)

            args = ["--basedir", self._settings.workdir, "--datafile", self._coverage_datafile]
            # apply class incl/excl filters
            if len(self._include_classes) > 0:
                for pattern in self._include_classes:
                    args += ["--includeClasses", pattern]
            else:
                args += ["--includeClasses", ".*"]  # default to instrumenting all classes
            for pattern in self._exclude_classes:
                args += ["--excludeClasses", pattern]

            with temporary_file() as tmp_file:
                tmp_file.write("\n".join(unique_files))
                tmp_file.flush()

                args += ["--listOfFilesToInstrument", tmp_file.name]

                main = "net.sourceforge.cobertura.instrument.InstrumentMain"
                self._context.log.debug("executing cobertura instrumentation with the following args: {}".format(args))
                result = execute_java_for_targets(
                    targets,
                    classpath=cobertura_cp,
                    main=main,
                    jvm_options=self._coverage_jvm_options,
                    args=args,
                    workunit_factory=self._context.new_workunit,
                    workunit_name="cobertura-instrument",
                )
                if result != 0:
                    raise TaskError("java {0} ... exited non-zero ({1})" " 'failed to instrument'".format(main, result))
Example 29
  def use_cached_files(self, cache_key, results_dir=None):
    tarfile = self._cache_file_for_key(cache_key)
    try:
      artifact = self._artifact_for(cache_key)
      if artifact.exists():
        if results_dir is not None:
          safe_rmtree(results_dir)
        artifact.extract()
        return True
    except Exception as e:
      # TODO(davidt): Consider being more granular in what is caught.
      logger.warn('Error while reading {0} from local artifact cache: {1}'.format(tarfile, e))
      safe_delete(tarfile)
      return UnreadableArtifact(cache_key, e)

    return False
Example 30
  def test_when_a_report_for_a_conf_is_missing_fall_back_to_fetch(self):
    junit_jar_lib = self._make_junit_target()
    with self._temp_cache_dir():
      with self._temp_workdir():
        self.resolve([junit_jar_lib])

      with self._temp_workdir() as workdir:
        self.resolve([junit_jar_lib])

        # Remove report from workdir.
        ivy_resolve_workdir = self._find_resolve_workdir(workdir)
        report_path = os.path.join(ivy_resolve_workdir, 'fetch-report-default.xml')
        safe_delete(report_path)

        self.resolve([junit_jar_lib])

        self._assertIsFile(report_path)
Example 31
    def prune(self, root):
        """Prune stale cache files

    If the user specifies the option --cache-target-max-entry then prune will remove all but n old
    cache files for each target/task.

    If the user has not specified the option --cache-target-max-entry then behavior is unchanged and
    files will remain in cache indefinitely.

    :param str root: The path under which cacheable artifacts will be cleaned
    """

        max_entries_per_target = self._max_entries_per_target
        if os.path.isdir(root) and max_entries_per_target:
            found_files = []
            for old_file in os.listdir(root):
                full_path = os.path.join(root, old_file)
                found_files.append((full_path, os.path.getmtime(full_path)))
            found_files = sorted(found_files, key=lambda x: x[1], reverse=True)
            for cur_file in found_files[self._max_entries_per_target:]:
                safe_delete(cur_file[0])
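
The loop above keeps only the newest max_entries_per_target entries by modification time. The same keep-newest-N idiom in isolation (names are illustrative; it assumes every entry is a regular file, as in the cache layout above):

import os

def keep_newest(root, n):
  # Sort entries newest-first by mtime and delete everything past the first n.
  entries = sorted((os.path.join(root, name) for name in os.listdir(root)),
                   key=os.path.getmtime, reverse=True)
  for stale in entries[n:]:
    os.remove(stale)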
Example 32
  def dist(self) -> Distribution:
    """Return the `Distribution` selected for Zinc based on execution strategy."""
    underlying_dist = self.underlying_dist
    if self._execution_strategy == NailgunTaskBase.ExecutionStrategy.hermetic:
      return underlying_dist
    # symlink .pants.d/.jdk -> /some/java/home/
    jdk_home_symlink = Path(
      self._zinc_factory.get_options().pants_workdir, '.jdk'
    ).relative_to(get_buildroot())

    # Since this code can be run in multi-threading mode due to multiple
    # zinc workers, we need to make sure the file operations below are atomic.
    with self._lock:
      # Create the symlink if it does not exist, or points to a file that doesn't exist,
      # (e.g., a JDK that is no longer present), or points to the wrong JDK.
      if not jdk_home_symlink.exists() or jdk_home_symlink.resolve() != Path(underlying_dist.home):
        safe_delete(str(jdk_home_symlink))  # Safe-delete, in case it's a broken symlink.
        safe_mkdir_for(jdk_home_symlink)
        jdk_home_symlink.symlink_to(underlying_dist.home)

    return Distribution(home_path=jdk_home_symlink)
Example 33
    def test_deploy_excludes(self):
        jar_filename = os.path.join('dist', 'deployexcludes.jar')
        safe_delete(jar_filename)
        command = [
            '--no-compile-zinc-capture-classpath',
            'binary',
            'testprojects/src/java/org/pantsbuild/testproject/deployexcludes',
        ]
        with self.pants_results(command) as pants_run:
            self.assert_success(pants_run)
            # The resulting binary should not contain any guava classes
            with open_zip(jar_filename) as jar_file:
                self.assertEquals(
                    {
                        'META-INF/', 'META-INF/MANIFEST.MF', 'org/',
                        'org/pantsbuild/', 'org/pantsbuild/testproject/',
                        'org/pantsbuild/testproject/deployexcludes/',
                        'org/pantsbuild/testproject/deployexcludes/DeployExcludesMain.class'
                    }, set(jar_file.namelist()))

            # This jar should not run by itself, missing symbols
            self.run_java(java_args=['-jar', jar_filename],
                          expected_returncode=1,
                          expected_output='java.lang.NoClassDefFoundError: '
                          'com/google/common/collect/ImmutableSortedSet')

            # But adding back the deploy_excluded symbols should result in a clean run.
            classpath = [
                jar_filename,
                os.path.join(
                    pants_run.workdir,
                    'ivy/jars/com.google.guava/guava/jars/guava-18.0.jar')
            ]

            self.run_java(java_args=[
                '-cp',
                os.pathsep.join(classpath),
                'org.pantsbuild.testproject.deployexcludes.DeployExcludesMain'
            ],
                          expected_output='DeployExcludes Hello World')
Example 34
def temporary_file(root_dir=None, cleanup=True, suffix='', permissions=None):
  """
    A with-context that creates a temporary file and returns a writeable file descriptor to it.

    You may specify the following keyword args:
    :param str root_dir: The parent directory to create the temporary file.
    :param bool cleanup: Whether or not to clean up the temporary file.
    :param str suffix: If suffix is specified, the file name will end with that suffix.
                       Otherwise there will be no suffix.
                       mkstemp() does not put a dot between the file name and the suffix;
                       if you need one, put it at the beginning of suffix.
                       See :py:class:`tempfile.NamedTemporaryFile`.
    :param int permissions: If provided, sets the file to use these permissions.
  """
  with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False) as fd:
    try:
      if permissions is not None:
        os.chmod(fd.name, permissions)
      yield fd
    finally:
      if cleanup:
        safe_delete(fd.name)
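
A sketch showing the extra keyword arguments this variant accepts (the values are illustrative):

with temporary_file(suffix='.json', permissions=0o600) as fd:
  fd.write(b'{}')
  fd.flush()
  # fd.name ends with '.json' and is readable and writable only by its owner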
Example 35
def safe_file(path: str,
              suffix: Optional[str] = None,
              cleanup: bool = True) -> Iterator[str]:
    """A with-context that copies a file, and copies the copy back to the original file on success.

    This is useful for doing work on a file but only changing its state on success.

    :param suffix: Use this suffix to create the copy. Otherwise use a random string.
    :param cleanup: Whether or not to clean up the copy.
    """
    safe_path = f"{path}.{(suffix or uuid.uuid4())}"
    if os.path.exists(path):
        shutil.copy(path, safe_path)
    try:
        yield safe_path
        if cleanup:
            shutil.move(safe_path, path)
        else:
            shutil.copy(safe_path, path)
    finally:
        if cleanup:
            safe_delete(safe_path)
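
A minimal usage sketch, assuming safe_file is used as a context manager (its Iterator[str] return annotation is consistent with a contextlib.contextmanager wrapper); 'settings.json' is an illustrative path:

with safe_file('settings.json') as scratch:
  with open(scratch, 'a') as f:
    f.write('\n# appended safely')
# On a clean exit the edited copy is moved back over settings.json; if the body raises,
# the copy is discarded and the original file is left untouched.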
Example 36
    def test_invalid_frozen_resolve_file_runs_resolve(self):
        junit_jar_lib = self._make_junit_target()

        with self._temp_workdir() as workdir:
            self.resolve([junit_jar_lib])

            # Find the resolution work dir.
            resolve_workdir = self._find_resolve_workdir(workdir)

            # Remove a required file for a simple load to force a fetch.
            resolve_report = os.path.join(resolve_workdir, "resolve-report-default.xml")
            self._assertIsFile(resolve_report)
            safe_delete(resolve_report)

            # Open resolution.json, and make it invalid json.
            frozen_resolve_filename = os.path.join(resolve_workdir, "resolution.json")
            with open(frozen_resolve_filename, "w") as f:
                f.write("not json!")

            self.resolve([junit_jar_lib])

            self._assertIsFile(resolve_report)
Example 37
    def test_when_invalid_hardlink_and_coursier_cache_should_trigger_resolve(self):
        jar_lib = self._make_junit_target()
        with self._temp_workdir():
            with temporary_dir() as coursier_cache_dir, self._temp_task_cache_dir():
                self.set_options_for_scope("coursier", cache_dir=coursier_cache_dir)

                context = self.context(target_roots=[jar_lib])
                task = self.execute(context)
                compile_classpath = context.products.get_data(
                    "compile_classpath")

                jar_cp = compile_classpath.get_for_target(jar_lib)

                # └─ junit:junit:4.12
                #    └─ org.hamcrest:hamcrest-core:1.3
                self.assertEqual(2, len(jar_cp))

                # Take a sample jar path, remove it, then call the task again, it should invoke
                # coursier again
                conf, path = jar_cp[0]

                # Remove the hard link under .pants.d/
                safe_delete(path)

                # Remove coursier's cache
                safe_rmtree(coursier_cache_dir)

                util.execute_runner = MagicMock()

                # Ignore any error because runjava may fail due to undefined behavior
                try:
                    task.execute()
                except TaskError:
                    pass

                util.execute_runner.assert_called()
Example 38
  def _run_pants_get_artifact_dir(self, args, cache_dir, subdir, num_files_to_insert, expected_num_files, config=None, prev_dirs=[]):
    """Run Pants with the given `args` and `config`, delete the results, add
    some files, then run pants again and ensure there are exactly
    `expected_num_files` in the output.

    Pants needs to be run twice because we don't know what the results directory
    will be named before we run Pants, and we want to insert files into that
    specific directory to test cache cleanup procedures.
    """
    self.assert_success(self.run_pants(args, config=config))

    artifact_base_dir = self.get_cache_subdir(cache_dir, other_dirs=prev_dirs)
    artifact_dir = os.path.join(artifact_base_dir, subdir)

    for tgz in glob.glob(os.path.join(artifact_dir, '*.tgz')):
      safe_delete(tgz)
    for i in range(0, num_files_to_insert):
      touch(os.path.join(artifact_dir, 'old_cache_test{}'.format(i + 1)))

    self.assert_success(self.run_pants(args, config=config))
    self.assertEqual(len(os.listdir(artifact_dir)), expected_num_files)

    return artifact_base_dir
Example 39
  def _bootstrap_ivy_classpath(self, workunit_factory, retry=True):
    # TODO(John Sirois): Extract a ToolCache class to control the path structure:
    # https://jira.twitter.biz/browse/DPB-283

    ivy_bootstrap_dir = os.path.join(self._ivy_subsystem.get_options().pants_bootstrapdir,
                                     'tools', 'jvm', 'ivy')
    digest = hashlib.sha1()
    if os.path.isfile(self._version_or_ivyxml):
      with open(self._version_or_ivyxml) as fp:
        digest.update(fp.read())
    else:
      digest.update(self._version_or_ivyxml)
    classpath = os.path.join(ivy_bootstrap_dir, '{}.classpath'.format(digest.hexdigest()))

    if not os.path.exists(classpath):
      ivy = self._bootstrap_ivy(os.path.join(ivy_bootstrap_dir, 'bootstrap.jar'))
      args = ['-confs', 'default', '-cachepath', classpath]
      if os.path.isfile(self._version_or_ivyxml):
        args.extend(['-ivy', self._version_or_ivyxml])
      else:
        args.extend(['-dependency', 'org.apache.ivy', 'ivy', self._version_or_ivyxml])

      try:
        ivy.execute(args=args, workunit_factory=workunit_factory, workunit_name='ivy-bootstrap')
      except ivy.Error as e:
        safe_delete(classpath)
        raise self.Error('Failed to bootstrap an ivy classpath! {}'.format(e))

    with open(classpath) as fp:
      cp = fp.read().strip().split(os.pathsep)
      if not all(map(os.path.exists, cp)):
        safe_delete(classpath)
        if retry:
          return self._bootstrap_ivy_classpath(workunit_factory, retry=False)
        raise self.Error('Ivy bootstrapping failed - invalid classpath: {}'.format(':'.join(cp)))
      return cp
Example 40
  def execute_codegen(self, target, target_workdir):
    super().execute_codegen(target, target_workdir)

    # Thrift generates code with all parent namespaces with empty __init__.py's. Since pants allows
    # splitting a thrift namespace hierarchy across multiple packages, we explicitly insert
    # namespace packages to allow for consumption of 2 or more of these packages in the same
    # PYTHONPATH.
    for root, _, files in safe_walk(target_workdir):
      if '__init__.py' not in files:  # skip non-packages
        continue

      init_py_abspath = os.path.join(root, '__init__.py')

      # Thrift puts an __init__.py file at the root, and we don't want one there (it's not needed,
      # and it confuses some import mechanisms).
      if root == target_workdir:
        safe_delete(init_py_abspath)
      elif os.path.getsize(init_py_abspath) == 0:  # empty __init__, translate to namespace package
        with open(init_py_abspath, 'wb') as f:
          f.write(b"__import__('pkg_resources').declare_namespace(__name__)")
      else:
        # A non-empty __init__, this is a leaf package, usually with ttypes and constants; so we
        # leave as-is.
        pass
Example 41
  def select_binary(self, base_path, version, name):
    """Selects a binary matching the current os and architecture.

    :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
      and name could be found.
    """
    # TODO(John Sirois): finish doc of the path structure expected under base_path
    bootstrap_dir = self._boostrap_dir
    binary_path = self.select_binary_base_path(base_path, version, name)
    bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
    if not os.path.exists(bootstrapped_binary_path):
      downloadpath = bootstrapped_binary_path + '~'
      try:
        with self.select_binary_stream(base_path, version, name) as stream:
          with safe_open(downloadpath, 'wb') as bootstrapped_binary:
            bootstrapped_binary.write(stream())
          os.rename(downloadpath, bootstrapped_binary_path)
          chmod_plus_x(bootstrapped_binary_path)
      finally:
        safe_delete(downloadpath)

    log.debug('Selected {binary} binary bootstrapped to: {path}'
              .format(binary=name, path=bootstrapped_binary_path))
    return bootstrapped_binary_path
Example 42
 def clear(cls, digested_path):
   """Clear any existing Digest file adjacent to the given digested_path."""
   safe_delete(cls._path(digested_path))
Example 43
 def test_changed_with_deleted_target_transitive(self):
   with create_isolated_git_repo() as worktree:
     safe_delete(os.path.join(worktree, 'src/resources/org/pantsbuild/resourceonly/BUILD'))
     pants_run = self.run_pants(['list', '--changed-parent=HEAD', '--changed-include-dependees=transitive'])
     self.assert_failure(pants_run)
     self.assertIn('src/resources/org/pantsbuild/resourceonly', pants_run.stderr_data)
Example 44
    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Capture a compilation log if requested.
      log_file = ctx.log_file if self._capture_log else None

      # Double check the cache before beginning compilation
      hit_cache = check_cache(vts)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = [ctx.classes_dir]
        cp_entries.extend(ClasspathUtil.compute_classpath(ctx.dependencies(self._dep_context),
                                                          classpath_products,
                                                          extra_compile_time_classpath,
                                                          self._confs))
        upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

        is_incremental = should_compile_incrementally(vts, ctx)
        if not is_incremental:
          # Purge existing analysis file in non-incremental mode.
          safe_delete(ctx.analysis_file)
          # Work around https://github.com/pantsbuild/pants/issues/3670
          safe_rmtree(ctx.classes_dir)

        tgt, = vts.targets
        fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
        zinc_file_manager = self._compute_language_property(tgt, lambda x: x.zinc_file_manager)
        with Timer() as timer:
          self._compile_vts(vts,
                            ctx.target,
                            ctx.sources,
                            ctx.analysis_file,
                            upstream_analysis,
                            cp_entries,
                            ctx.classes_dir,
                            log_file,
                            ctx.zinc_args_file,
                            progress_message,
                            tgt.platform,
                            fatal_warnings,
                            zinc_file_manager,
                            counter)
        self._record_target_stats(tgt,
                                  len(cp_entries),
                                  len(ctx.sources),
                                  timer.elapsed,
                                  is_incremental)
        self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self._register_vts([ctx])

      # Once products are registered, check for unused dependencies (if enabled).
      if not hit_cache and self._unused_deps_check_enabled:
        self._check_unused_deps(ctx)
Example 45
 def setUp(self):
     safe_delete('/tmp/running-in-goal-test')
     safe_delete('/tmp/running-in-goal-binary')
     safe_delete('/tmp/running-in-goal-compile.jar')
Example 46
    def generate_project(self, project):
        def linked_folder_id(source_set):
            return source_set.source_base.replace(os.path.sep, '.')

        def base_path(source_set):
            return os.path.join(source_set.root_dir, source_set.source_base)

        def create_source_base_template(source_set):
            source_base = base_path(source_set)
            return source_base, TemplateData(id=linked_folder_id(source_set),
                                             path=source_base)

        source_bases = dict(map(create_source_base_template, project.sources))
        if project.has_python:
            source_bases.update(
                map(create_source_base_template, project.py_sources))
            source_bases.update(
                map(create_source_base_template, project.py_libs))

        def create_source_template(base_id, includes=None, excludes=None):
            return TemplateData(
                base=base_id,
                includes='|'.join(OrderedSet(includes)) if includes else None,
                excludes='|'.join(OrderedSet(excludes)) if excludes else None,
            )

        def create_sourcepath(base_id, sources):
            def normalize_path_pattern(path):
                return '{}/'.format(path) if not path.endswith('/') else path

            includes = [
                normalize_path_pattern(src_set.path) for src_set in sources
                if src_set.path
            ]
            excludes = []
            for source_set in sources:
                excludes.extend(
                    normalize_path_pattern(exclude)
                    for exclude in source_set.excludes)

            return create_source_template(base_id, includes, excludes)

        pythonpaths = []
        if project.has_python:
            for source_set in project.py_sources:
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set)))
            for source_set in project.py_libs:
                lib_path = source_set.path if source_set.path.endswith(
                    '.egg') else '{}/'.format(source_set.path)
                pythonpaths.append(
                    create_source_template(linked_folder_id(source_set),
                                           includes=[lib_path]))

        configured_project = TemplateData(
            name=self.project_name,
            java=TemplateData(jdk=self.java_jdk,
                              language_level=('1.{}'.format(
                                  self.java_language_level))),
            python=project.has_python,
            scala=project.has_scala and not project.skip_scala,
            source_bases=source_bases.values(),
            pythonpaths=pythonpaths,
            debug_port=project.debug_port,
        )

        outdir = os.path.abspath(os.path.join(self.gen_project_workdir, 'bin'))
        safe_mkdir(outdir)

        source_sets = defaultdict(OrderedSet)  # base_id -> source_set
        for source_set in project.sources:
            source_sets[linked_folder_id(source_set)].add(source_set)
        sourcepaths = [
            create_sourcepath(base_id, sources)
            for base_id, sources in source_sets.items()
        ]

        libs = list(project.internal_jars)
        libs.extend(project.external_jars)

        configured_classpath = TemplateData(
            sourcepaths=sourcepaths,
            has_tests=project.has_tests,
            libs=libs,
            scala=project.has_scala,

            # Eclipse insists the outdir be a relative path unlike other paths
            outdir=os.path.relpath(outdir, get_buildroot()),
        )

        def apply_template(output_path, template_relpath, **template_data):
            with safe_open(output_path, 'w') as output:
                Generator(pkgutil.get_data(__name__, template_relpath),
                          **template_data).write(output)

        apply_template(self.project_filename,
                       self.project_template,
                       project=configured_project)
        apply_template(self.classpath_filename,
                       self.classpath_template,
                       classpath=configured_classpath)
        apply_template(os.path.join(
            self.gen_project_workdir,
            'Debug on port {}.launch'.format(project.debug_port)),
                       self.debug_template,
                       project=configured_project)
        apply_template(self.coreprefs_filename,
                       self.coreprefs_template,
                       project=configured_project)

        for resource in _SETTINGS:
            with safe_open(os.path.join(self.cwd, '.settings', resource),
                           'w') as prefs:
                prefs.write(
                    pkgutil.get_data(__name__,
                                     os.path.join(_TEMPLATE_BASEDIR,
                                                  resource)))

        factorypath = TemplateData(
            project_name=self.project_name,

            # The easiest way to make sure eclipse sees all annotation processors is to put all libs on
            # the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
            jarpaths=libs)
        apply_template(self.apt_filename,
                       self.apt_template,
                       factorypath=factorypath)

        if project.has_python:
            apply_template(self.pydev_filename,
                           self.pydev_template,
                           project=configured_project)
        else:
            safe_delete(self.pydev_filename)

        print('\nGenerated project at {}{}'.format(self.gen_project_workdir,
                                                   os.sep))
Example #47
0
 def execute_codegen(self, target, target_workdir):
   super(ApacheThriftPyGen, self).execute_codegen(target, target_workdir)
   # Thrift puts an __init__.py file at the root, and we don't want one there
   # (it's not needed, and it confuses some import mechanisms).
   safe_delete(os.path.join(target_workdir, '__init__.py'))
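A brief usage sketch of the same cleanup idea, with an illustrative output directory (the path is not from the source):

 import os

 generated_root = '/tmp/thrift-py-gen'  # illustrative output directory
 # ... run the code generator into generated_root ...
 safe_delete(os.path.join(generated_root, '__init__.py'))  # drop the unwanted root package marker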
Example #48
0
 def release(self):
     if self.acquired:
         safe_delete(self.message_path)
     return super(OwnerPrintingInterProcessFileLock, self).release()
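Several later examples repeat this release pattern. A hypothetical sketch of the surrounding lifecycle, showing only the message-file bookkeeping and omitting the actual OS-level locking, could be:

 class OwnerPrintingLockSketch(object):
     """Hypothetical lock wrapper; only the owner-message handling is shown."""

     def __init__(self, message_path):
         self.message_path = message_path
         self.acquired = False

     def acquire(self, message):
         # A real implementation would also take an OS-level file lock here.
         with open(self.message_path, 'w') as f:
             f.write(message)
         self.acquired = True

     def release(self):
         if self.acquired:
             safe_delete(self.message_path)
             self.acquired = False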
Example #49
0
File: fs.py Project: thoward/pants
 def clear(cls, directory):
   """Clear any existing Digest file adjacent to the given directory."""
   safe_delete(cls._path(directory))
Example #50
0
    def instrument(self, targets, compute_junit_classpath,
                   execute_java_for_targets):
        # Setup an instrumentation classpath based on the existing runtime classpath.
        runtime_classpath = self._context.products.get_data(
            'runtime_classpath')
        instrumentation_classpath = self._context.products.safe_create_data(
            'instrument_classpath', runtime_classpath.copy)
        self.initialize_instrument_classpath(targets,
                                             instrumentation_classpath)

        cobertura_cp = self._settings.tool_classpath('cobertura-instrument')
        safe_delete(self._coverage_datafile)
        files_to_instrument = []
        for target in targets:
            if self.is_coverage_target(target):
                paths = instrumentation_classpath.get_for_target(target)
                for (name, path) in paths:
                    files_to_instrument.append(path)

        if len(files_to_instrument) > 0:
            self._nothing_to_instrument = False

            unique_files = list(set(files_to_instrument))
            relativize_paths(unique_files, self._settings.workdir)

            args = [
                '--basedir',
                self._settings.workdir,
                '--datafile',
                self._coverage_datafile,
            ]
            # apply class incl/excl filters
            if len(self._include_classes) > 0:
                for pattern in self._include_classes:
                    args += ["--includeClasses", pattern]
            else:
                args += ["--includeClasses",
                         '.*']  # default to instrumenting all classes
            for pattern in self._exclude_classes:
                args += ["--excludeClasses", pattern]

            with temporary_file() as tmp_file:
                tmp_file.write("\n".join(unique_files))
                tmp_file.flush()

                args += ["--listOfFilesToInstrument", tmp_file.name]

                main = 'net.sourceforge.cobertura.instrument.InstrumentMain'
                self._context.log.debug(
                    "executing cobertura instrumentation with the following args: {}"
                    .format(args))
                result = execute_java_for_targets(
                    targets,
                    classpath=cobertura_cp,
                    main=main,
                    jvm_options=self._coverage_jvm_options,
                    args=args,
                    workunit_factory=self._context.new_workunit,
                    workunit_name='cobertura-instrument')
                if result != 0:
                    raise TaskError("java {0} ... exited non-zero ({1})"
                                    " 'failed to instrument'".format(
                                        main, result))
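The heart of the example above is the argument list handed to Cobertura. A condensed sketch of just that list-building logic, with the filters and file list passed in as plain values, is:

 def build_cobertura_args(basedir, datafile, include_classes, exclude_classes, files_list_path):
     # Condensed from the snippet above; inputs are plain lists/paths for illustration.
     args = ['--basedir', basedir, '--datafile', datafile]
     for pattern in (include_classes or ['.*']):  # default to instrumenting all classes
         args += ['--includeClasses', pattern]
     for pattern in exclude_classes:
         args += ['--excludeClasses', pattern]
     args += ['--listOfFilesToInstrument', files_list_path]
     return args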
Example #51
0
 def test_changed_with_deleted_resource(self):
   with create_isolated_git_repo() as worktree:
     safe_delete(os.path.join(worktree, 'src/python/sources/sources.txt'))
     pants_run = self.run_pants(['list', '--changed-parent=HEAD'])
     self.assert_success(pants_run)
     self.assertEqual(pants_run.stdout_data.strip(), 'src/python/sources:text')
Example #52
0
 def test_changed_with_deleted_source(self):
   with create_isolated_git_repo() as worktree:
     safe_delete(os.path.join(worktree, 'src/python/sources/sources.py'))
     pants_run = self.run_pants(['changed'])
     self.assert_success(pants_run)
     self.assertEqual(pants_run.stdout_data.strip(), 'src/python/sources:sources')
Example #53
0
 def setUp(self):
     safe_delete("/tmp/running-in-goal-test")
     safe_delete("/tmp/running-in-goal-binary")
     safe_delete("/tmp/running-in-goal-compile.jar")
Example #54
0
    def _handle_duplicate_sources(self, vt, sources):
        """Handles duplicate sources generated by the given gen target by either failure or deletion.

        This method should be called after all dependencies have been injected into the graph, but
        before injecting the synthetic version of this target.

        Returns a boolean indicating whether it modified the underlying filesystem.

        NB(gm): Some code generators may re-generate code that their dependent libraries generate.
        This results in targets claiming to generate sources that they really don't, so we try to
        filter out sources that were actually generated by dependencies of the target. This causes
        the code generated by the dependencies to 'win' over the code generated by dependees. By
        default, this behavior is disabled, and duplication in generated sources will raise a
        TaskError. This is controlled by the --allow-dups flag.
        """
        target = vt.target
        target_workdir = vt.results_dir

        # Walk dependency gentargets and record any sources owned by those targets that are also
        # owned by this target.
        duplicates_by_target = OrderedDict()

        def record_duplicates(dep):
            if dep == target or not self.is_gentarget(
                    dep.concrete_derived_from):
                return False
            duped_sources = [
                s for s in dep.sources_relative_to_source_root()
                if s in sources.files and not self.ignore_dup(target, dep, s)
            ]
            if duped_sources:
                duplicates_by_target[dep] = duped_sources

        target.walk(record_duplicates)

        # If there were no dupes, we're done.
        if not duplicates_by_target:
            return False

        # If there were duplicates warn or error.
        messages = [
            '{target} generated sources that had already been generated by dependencies.'
            .format(target=target.address.spec)
        ]
        for dep, duped_sources in duplicates_by_target.items():
            messages.append('\t{} also generated:'.format(
                dep.concrete_derived_from.address.spec))
            messages.extend(
                ['\t\t{}'.format(source) for source in duped_sources])
        message = '\n'.join(messages)
        if self.get_options().allow_dups:
            logger.warn(message)
        else:
            raise self.DuplicateSourceError(message)

        did_modify = False

        # Finally, remove duplicates from the workdir. This prevents us from having to worry
        # about them during future incremental compiles.
        for dep, duped_sources in duplicates_by_target.items():
            for duped_source in duped_sources:
                safe_delete(os.path.join(target_workdir, duped_source))
                did_modify = True
        if did_modify:
            Digest.clear(vt.current_results_dir)
        return did_modify
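The deletion step at the end of `_handle_duplicate_sources` can be summarized as a small helper; this sketch assumes a mapping from dependency targets to their duplicated relative source paths:

 import os

 def delete_duplicated_sources(target_workdir, duplicates_by_target):
     # duplicates_by_target: mapping of dependency target -> list of relative source paths.
     did_modify = False
     for duped_sources in duplicates_by_target.values():
         for duped_source in duped_sources:
             safe_delete(os.path.join(target_workdir, duped_source))
             did_modify = True
     return did_modify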
Example #55
0
 def test_changed_with_deleted_resource(self):
     with create_isolated_git_repo() as worktree:
         safe_delete(os.path.join(worktree, "src/python/sources/sources.txt"))
         pants_run = self.run_pants(["list", "--changed-since=HEAD"])
         self.assert_success(pants_run)
         self.assertEqual(pants_run.stdout_data.strip(), "src/python/sources:text")
Example #56
0
 def release(self):
     logger.debug('releasing lock: {!r}'.format(self))
     if self.acquired:
         safe_delete(self.message_path)
     return super(OwnerPrintingInterProcessFileLock, self).release()
Example #57
0
 def post_process(cached_vts):
     for vt in cached_vts:
         cc = self._compile_context(vt.target, vt.results_dir)
         safe_delete(cc.analysis_file)
         self._analysis_tools.localize(cc.portable_analysis_file,
                                       cc.analysis_file)
Example #58
0
 def release(self):
     logger.debug('releasing lock: {!r}'.format(self))
     if self.acquired:
         safe_delete(self.message_path)
     return super().release()
Example #59
0
 def release(self):
     logger.debug(f"releasing lock: {self!r}")
     if self.acquired:
         safe_delete(self.message_path)
     return super().release()
Example #60
0
 def delete(self, cache_key):
     safe_delete(self._cache_file_for_key(cache_key))
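For context, a hypothetical illustration of the key-to-file mapping such a `delete` assumes (the layout and the `cache_key.hash` attribute are assumptions, not taken from the source):

 import os

 def _cache_file_for_key(cache_root, cache_key):
     # Hypothetical layout: one artifact file per cache key hash.
     return os.path.join(cache_root, '{}.tgz'.format(cache_key.hash))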