def harness():
  try:
    for name, content in BUILD_FILES.items():
      safe_file_dump(name, dedent(content))
    yield
  finally:
    safe_rmtree(SUBPROJ_SPEC)
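The try/finally around the bare yield above is the standard setup/teardown shape of a generator-based context manager. A minimal usage sketch, assuming harness is wrapped with contextlib.contextmanager somewhere outside this excerpt and that run_test_body is a hypothetical stand-in for the calling test:

from contextlib import contextmanager

harness_cm = contextmanager(harness)  # assumption: harness is not already decorated

with harness_cm():
  # The BUILD files written by safe_file_dump exist only inside this block.
  run_test_body()  # hypothetical test body
# On exit, the finally clause removes SUBPROJ_SPEC via safe_rmtree, even if the body raised.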
Example 2
 def _configure_eslinter(self, bootstrapped_support_path):
   logger.debug('Copying {setupdir} to bootstrapped dir: {support_path}'
                          .format(setupdir=self.eslint_setupdir,
                                  support_path=bootstrapped_support_path))
   safe_rmtree(bootstrapped_support_path)
   shutil.copytree(self.eslint_setupdir, bootstrapped_support_path)
   return True
Example 3
  def _link_current_reports(self, report_dir, link_dir, preserve):
    # Kill everything not preserved.
    for name in os.listdir(link_dir):
      path = os.path.join(link_dir, name)
      if name not in preserve:
        if os.path.isdir(path):
          safe_rmtree(path)
        else:
          os.unlink(path)

    # Link ~all the isolated run/ dir contents back up to the stable workdir
    # NB: When batching is enabled, files can be emitted under different subdirs. If those files
    # share a name, the last file with that name will be the one that is used. This may
    # result in a loss of information from the ignored files. We're OK with this because:
    # a) We're planning on deprecating this loss of information.
    # b) It is the same behavior as existed before batching was added.
    for root, dirs, files in safe_walk(report_dir, topdown=True):
      dirs.sort()  # Ensure a consistent walk order for sanity's sake.
      for f in itertools.chain(fnmatch.filter(files, '*.err.txt'),
                               fnmatch.filter(files, '*.out.txt'),
                               fnmatch.filter(files, 'TEST-*.xml')):
        src = os.path.join(root, f)
        dst = os.path.join(link_dir, f)
        safe_delete(dst)
        os.symlink(src, dst)

    for path in os.listdir(report_dir):
      if path in ('coverage', 'reports'):
        src = os.path.join(report_dir, path)
        dst = os.path.join(link_dir, path)
        os.symlink(src, dst)
Example 4
  def test_mkdtemp_setup_teardown(self):
    def faux_cleaner():
      pass

    DIR1, DIR2 = 'fake_dir1__does_not_exist', 'fake_dir2__does_not_exist'
    self._mox.StubOutWithMock(atexit, 'register')
    self._mox.StubOutWithMock(os, 'getpid')
    self._mox.StubOutWithMock(tempfile, 'mkdtemp')
    self._mox.StubOutWithMock(dirutil, 'safe_rmtree')
    atexit.register(faux_cleaner)  # Ensure only called once.
    tempfile.mkdtemp(dir='1').AndReturn(DIR1)
    tempfile.mkdtemp(dir='2').AndReturn(DIR2)
    os.getpid().MultipleTimes().AndReturn('unicorn')
    dirutil.safe_rmtree(DIR1)
    dirutil.safe_rmtree(DIR2)
    # Make sure other "pids" are not cleaned.
    dirutil._MKDTEMP_DIRS['fluffypants'].add('yoyo')

    try:
      self._mox.ReplayAll()
      self.assertEquals(DIR1, dirutil.safe_mkdtemp(dir='1', cleaner=faux_cleaner))
      self.assertEquals(DIR2, dirutil.safe_mkdtemp(dir='2', cleaner=faux_cleaner))
      self.assertIn('unicorn', dirutil._MKDTEMP_DIRS)
      self.assertEquals({DIR1, DIR2}, dirutil._MKDTEMP_DIRS['unicorn'])
      dirutil._mkdtemp_atexit_cleaner()
      self.assertNotIn('unicorn', dirutil._MKDTEMP_DIRS)
      self.assertEquals({'yoyo'}, dirutil._MKDTEMP_DIRS['fluffypants'])
    finally:
      dirutil._MKDTEMP_DIRS.pop('unicorn', None)
      dirutil._MKDTEMP_DIRS.pop('fluffypants', None)
      dirutil._mkdtemp_unregister_cleaner()

    self._mox.VerifyAll()
Example 5
  def execute(self):
    pants_wd = self.get_options().pants_workdir
    pants_trash = os.path.join(pants_wd, "trash")

    # Creates, and eventually deletes, trash dir created in .pants_cleanall.
    with temporary_dir(cleanup=False, root_dir=os.path.dirname(pants_wd), prefix=".pants_cleanall") as tmpdir:
      logger.debug('Moving trash to {} for deletion'.format(tmpdir))

      tmp_trash = os.path.join(tmpdir, "trash")

      # Moves contents of .pants.d to cleanup dir.
      safe_concurrent_rename(pants_wd, tmp_trash)
      safe_concurrent_rename(tmpdir, pants_wd)

      # 'async' is a reserved word in Python 3.7+, so fetch the option via getattr.
      if getattr(self.get_options(), 'async'):
        # The trash directory is deleted in a child process.
        pid = os.fork()
        if pid == 0:
          try:
            safe_rmtree(pants_trash)
          except (IOError, OSError):
            logger.warning("Async clean-all failed. Please try again.")
          finally:
            os._exit(0)
        else:
          logger.debug("Forked an asynchronous clean-all worker at pid: {}".format(pid))
      else:
        # Recursively removes pants cache; user waits patiently.

        logger.info('For async removal, run `./pants clean-all --async`')
        safe_rmtree(pants_trash)
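The async branch above uses a fork-and-forget pattern: the child process removes the trash directory and leaves via os._exit so it never falls back into the parent's control flow. A stripped-down sketch of that pattern (the function name is illustrative, not a Pants API):

import os
import shutil

def remove_tree_in_background(path):
  """Delete path from a forked child so the caller can return immediately."""
  pid = os.fork()
  if pid == 0:  # child process
    try:
      shutil.rmtree(path, ignore_errors=True)
    finally:
      os._exit(0)  # exit the child without running the parent's cleanup handlers
  return pid  # parent: pid of the background cleanup worker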
Example 6
  def execute(self):
    targets = self.context.targets(self._is_remote)
    runtime_classpath_product = self.context.products.get_data(
      'runtime_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir)
    )
    with self.invalidated(targets, invalidate_dependents=True, topological_order=True) as invalidation_check:
      # The fetches are idempotent operations from the subsystem, invalidation only controls recreating the symlinks.
      for vt in invalidation_check.all_vts:
        remote_source = RemoteSourceFetcher(vt.target)
        fetched = remote_source.path
        safe_mkdir(fetched)

        # Some unfortunate rigmarole to cover for the case where different targets want the same fetched file
        # but extracted/not. Both cases use the same base namespacing so we rely on the target to tell us.
        fetch_dir = fetched if remote_source.extracted else os.path.dirname(fetched)
        filenames = os.listdir(fetched) if remote_source.extracted else [os.path.basename(fetched)]
        stable_outpath = vt.target.namespace + '-{}'.format('extracted') if vt.target.extract else ''

        for filename in filenames:
          symlink_file = os.path.join(vt.results_dir, filename)
          if not vt.valid or not os.path.isfile(symlink_file):
            safe_rmtree(symlink_file)
            relative_symlink(os.path.join(fetch_dir, filename), symlink_file)
          self.context.products.get('remote_files').add(vt.target, vt.results_dir).append(filename)

        # The runtime_classpath product is a constructed object that is rooted in the results_dir.
        runtime_classpath_product.add_for_target(vt.target, [('default', vt.results_dir)])
Example 7
  def test_when_invalid_hardlink_and_coursier_cache_should_trigger_resolve(self):
    jar_lib = self._make_junit_target()
    with self._temp_workdir():
      with temporary_dir() as coursier_cache_dir:
        self.set_options_for_scope('coursier', cache_dir=coursier_cache_dir)

        context = self.context(target_roots=[jar_lib])
        task = self.execute(context)
        compile_classpath = context.products.get_data('compile_classpath')

        jar_cp = compile_classpath.get_for_target(jar_lib)

        # └─ junit:junit:4.12
        #    └─ org.hamcrest:hamcrest-core:1.3
        self.assertEqual(2, len(jar_cp))


        # Take a sample jar path, remove it, then call the task again, it should invoke coursier again
        conf, path = jar_cp[0]

        # Remove the hard link under .pants.d/
        safe_rmpath(path)

        # Remove coursier's cache
        safe_rmtree(coursier_cache_dir)

        util.execute_runner = MagicMock()

        # Ignore any error because runjava may fail due to undefined behavior
        try:
          task.execute()
        except TaskError:
          pass

        util.execute_runner.assert_called()
Example 8
  def test_live_dirs(self):
    task, vtA, _ = self._run_fixture(incremental=True)

    vtA_live = list(vtA.live_dirs())
    self.assertIn(vtA.results_dir, vtA_live)
    self.assertIn(vtA.current_results_dir, vtA_live)
    self.assertEqual(len(vtA_live), 2)

    self._create_clean_file(vtA.target, 'bar')
    vtB, _ = task.execute()
    vtB_live = list(vtB.live_dirs())

    # This time it contains the previous_results_dir.
    self.assertIn(vtB.results_dir, vtB_live)
    self.assertIn(vtB.current_results_dir, vtB_live)
    self.assertIn(vtA.current_results_dir, vtB_live)
    self.assertEqual(len(vtB_live), 3)

    # Delete vtB results_dir. live_dirs() should only return existing dirs, even if it knows the previous_cache_key.
    safe_rmtree(vtB.current_results_dir)

    self._create_clean_file(vtB.target, 'baz')
    vtC, _ = task.execute()
    vtC_live = list(vtC.live_dirs())
    self.assertNotIn(vtB.current_results_dir, vtC_live)
    self.assertEqual(len(vtC_live), 2)
Example 9
    def test_mkdtemp_setup_teardown(self):
        def faux_cleaner():
            pass

        DIR1, DIR2 = "fake_dir1__does_not_exist", "fake_dir2__does_not_exist"
        self._mox.StubOutWithMock(atexit, "register")
        self._mox.StubOutWithMock(os, "getpid")
        self._mox.StubOutWithMock(tempfile, "mkdtemp")
        self._mox.StubOutWithMock(dirutil, "safe_rmtree")
        atexit.register(faux_cleaner)  # Ensure only called once.
        tempfile.mkdtemp(dir="1").AndReturn(DIR1)
        tempfile.mkdtemp(dir="2").AndReturn(DIR2)
        os.getpid().MultipleTimes().AndReturn("unicorn")
        dirutil.safe_rmtree(DIR1)
        dirutil.safe_rmtree(DIR2)
        # Make sure other "pids" are not cleaned.
        dirutil._MKDTEMP_DIRS["fluffypants"].add("yoyo")

        try:
            self._mox.ReplayAll()
            self.assertEquals(DIR1, dirutil.safe_mkdtemp(dir="1", cleaner=faux_cleaner))
            self.assertEquals(DIR2, dirutil.safe_mkdtemp(dir="2", cleaner=faux_cleaner))
            self.assertIn("unicorn", dirutil._MKDTEMP_DIRS)
            self.assertEquals({DIR1, DIR2}, dirutil._MKDTEMP_DIRS["unicorn"])
            dirutil._mkdtemp_atexit_cleaner()
            self.assertNotIn("unicorn", dirutil._MKDTEMP_DIRS)
            self.assertEquals({"yoyo"}, dirutil._MKDTEMP_DIRS["fluffypants"])
        finally:
            dirutil._MKDTEMP_DIRS.pop("unicorn", None)
            dirutil._MKDTEMP_DIRS.pop("fluffypants", None)
            dirutil._mkdtemp_unregister_cleaner()

        self._mox.VerifyAll()
Example 10
  def test_bundled_classpath(self):
    """This creates the following classpath
      basedir/libs/A.jar:basedir/resources
    """
    RESOURCES = 'resources'
    LIB_DIR = 'libs'
    JAR_FILE = 'A.jar'

    basedir = safe_mkdtemp()
    lib_dir = os.path.join(basedir, LIB_DIR)
    resource_dir = os.path.join(lib_dir, RESOURCES)
    jar_file = os.path.join(lib_dir, JAR_FILE)

    for dir in (lib_dir, resource_dir):
      safe_mkdir(dir)
    touch(jar_file)

    classpath = [jar_file, resource_dir]

    with bundled_classpath(classpath) as bundled_cp:
      self.assertEquals(1, len(bundled_cp))
      bundled_jar = bundled_cp[0]
      self.assertTrue(os.path.exists(bundled_jar))

      with open_zip(bundled_jar) as synthetic_jar:
        self.assertListEqual([Manifest.PATH], synthetic_jar.namelist())
        # manifest should contain the absolute path of both jar and resource directory
        self.assertEquals('{}: {} {}/\n'.format(Manifest.CLASS_PATH, os.path.realpath(jar_file),
                                                os.path.realpath(resource_dir)),
                          synthetic_jar.read(Manifest.PATH).replace('\n ', ''))

    safe_rmtree(resource_dir)
Example 11
  def _default_work_for_vts(self, vts, ctx, input_classpath_product_key, counter, all_compile_contexts, output_classpath_product):
    progress_message = ctx.target.address.spec

    # Double check the cache before beginning compilation
    hit_cache = self.check_cache(vts, counter)

    if not hit_cache:
      # Compute the compile classpath for this target.
      dependency_cp_entries = self._zinc.compile_classpath_entries(
        input_classpath_product_key,
        ctx.target,
        extra_cp_entries=self._extra_compile_time_classpath,
      )

      upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, dependency_cp_entries))

      is_incremental = self.should_compile_incrementally(vts, ctx)
      if not is_incremental:
        # Purge existing analysis file in non-incremental mode.
        safe_delete(ctx.analysis_file)
        # Work around https://github.com/pantsbuild/pants/issues/3670
        safe_rmtree(ctx.classes_dir.path)

      dep_context = DependencyContext.global_instance()
      tgt, = vts.targets
      compiler_option_sets = dep_context.defaulted_property(tgt, 'compiler_option_sets')
      zinc_file_manager = dep_context.defaulted_property(tgt, 'zinc_file_manager')
      with Timer() as timer:
        directory_digest = self._compile_vts(vts,
                          ctx,
                          upstream_analysis,
                          dependency_cp_entries,
                          progress_message,
                          tgt.platform,
                          compiler_option_sets,
                          zinc_file_manager,
                          counter)

      ctx.classes_dir = ClasspathEntry(ctx.classes_dir.path, directory_digest)

      self._record_target_stats(tgt,
                                len(dependency_cp_entries),
                                len(ctx.sources),
                                timer.elapsed,
                                is_incremental,
                                'compile')

      # Write any additional resources for this target to the target workdir.
      self.write_extra_resources(ctx)

      # Jar the compiled output.
      self._create_context_jar(ctx)

    # Update the products with the latest classes.
    output_classpath_product.add_for_target(
      ctx.target,
      [(conf, self._classpath_for_context(ctx)) for conf in self._confs],
    )
    self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)
Example 12
 def test_raises_both_clobbered_symlink_and_missing_current_results_dir(self):
   vt = self.make_vt()
   self.clobber_symlink(vt)
   safe_rmtree(vt.current_results_dir)
   with self.assertRaisesRegexp(VersionedTargetSet.IllegalResultsDir, r'The.*symlink*'):
     vt.ensure_legal()
   with self.assertRaisesRegexp(VersionedTargetSet.IllegalResultsDir, r'The.*current_results_dir*'):
     vt.ensure_legal()
Example 13
 def execute_codegen(self, targets):
   with self._task.context.new_workunit(name='execute', labels=[WorkUnit.MULTITOOL]):
     ordered = [target for target in reversed(sort_targets(targets)) if target in targets]
     for target in ordered:
       with self._task.context.new_workunit(name=target.address.spec):
         # TODO(gm): add a test-case to ensure this is correctly eliminating stale generated code.
         safe_rmtree(self._task.codegen_workdir(target))
         self._do_execute_codegen([target])
Example 14
        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Capture a compilation log if requested.
            log_file = ctx.log_file if self._capture_log else None

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = [ctx.classes_dir]
                cp_entries.extend(
                    ClasspathUtil.compute_classpath(
                        ctx.dependencies(self._dep_context),
                        classpath_products,
                        extra_compile_time_classpath,
                        self._confs,
                    )
                )
                upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

                if not should_compile_incrementally(vts, ctx):
                    # Purge existing analysis file in non-incremental mode.
                    safe_delete(ctx.analysis_file)
                    # Work around https://github.com/pantsbuild/pants/issues/3670
                    safe_rmtree(ctx.classes_dir)

                tgt, = vts.targets
                fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
                self._compile_vts(
                    vts,
                    ctx.sources,
                    ctx.analysis_file,
                    upstream_analysis,
                    cp_entries,
                    ctx.classes_dir,
                    log_file,
                    progress_message,
                    tgt.platform,
                    fatal_warnings,
                    counter,
                )
                self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            self._register_vts([ctx])

            # Once products are registered, check for unused dependencies (if enabled).
            if not hit_cache and self._unused_deps_check_enabled:
                self._check_unused_deps(ctx)
Example 15
  def execute(self):
    # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
    # sources when needed. We ignore PythonDistribution targets.
    def is_exported_python_target(t):
      return t.is_original and self.has_provides(t) and not is_local_python_dist(t)

    exported_python_targets = OrderedSet(t for t in self.context.target_roots
                                         if is_exported_python_target(t))
    if not exported_python_targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(exported_python_target):
      if exported_python_target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(exported_python_target))
        subject = self.derived_by_original.get(exported_python_target, exported_python_target)
        setup_dir, dependencies = self.create_setup_py(subject, dist_dir)
        created[exported_python_target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if is_exported_python_target(dep):
              create(dep)

    for exported_python_target in exported_python_targets:
      create(exported_python_target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    for exported_python_target in reversed(sort_targets(list(created.keys()))):
      setup_dir = created.get(exported_python_target)
      if setup_dir:
        if not self._run:
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[exported_python_target] = sdist_path
        else:
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          split_command = safe_shlex_split(self._run)
          setup_runner = SetupPyRunner(setup_dir, split_command, interpreter=interpreter)
          installed = setup_runner.run()
          if not installed:
            raise TaskError('Install failed.')
          python_dists[exported_python_target] = setup_dir
Example 16
 def test_cache_read_from(self):
   all_vts, invalid_vts = self.task.execute()
   # Executing the task for the first time the vt is expected to be in the invalid_vts list
   self.assertGreater(len(invalid_vts), 0)
   # Delete .pants.d
   safe_rmtree(self.task._workdir)
   all_vts2, invalid_vts2 = self.task.execute()
   # Check that running the task a second time results in a valid vt,
   # implying the artifact cache was hit.
   self.assertListEqual(invalid_vts2, [])
Example 17
def initial_reporting(config, run_tracker):
    """Sets up the initial reporting configuration.

  Will be changed after we parse cmd-line flags.
  """
    reports_dir = os.path.join(config.get_option(Config.DEFAULT_PANTS_WORKDIR),
                               'reports')
    link_to_latest = os.path.join(reports_dir, 'latest')
    if os.path.lexists(link_to_latest):
        os.unlink(link_to_latest)

    run_id = run_tracker.run_info.get_info('id')
    if run_id is None:
        raise ReportingError('No run_id set')
    run_dir = os.path.join(reports_dir, run_id)
    safe_rmtree(run_dir)

    html_dir = os.path.join(run_dir, 'html')
    safe_mkdir(html_dir)
    os.symlink(run_dir, link_to_latest)

    report = Report()

    # Capture initial console reporting into a buffer. We'll do something with it once
    # we know what the cmd-line flag settings are.
    outfile = StringIO()
    capturing_reporter_settings = PlainTextReporter.Settings(
        outfile=outfile,
        log_level=Report.INFO,
        color=False,
        indent=True,
        timing=False,
        cache_stats=False)
    capturing_reporter = PlainTextReporter(run_tracker,
                                           capturing_reporter_settings)
    report.add_reporter('capturing', capturing_reporter)

    # Set up HTML reporting. We always want that.
    template_dir = config.get('reporting', 'reports_template_dir')
    html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                   html_dir=html_dir,
                                                   template_dir=template_dir)
    html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
    report.add_reporter('html', html_reporter)

    # Add some useful RunInfo.
    run_tracker.run_info.add_info('default_report',
                                  html_reporter.report_path())
    port = ReportingServerManager.get_current_server_port()
    if port:
        run_tracker.run_info.add_info(
            'report_url', 'http://localhost:%d/run/%s' % (port, run_id))

    return report
Example 18
 def test_raises_both_clobbered_symlink_and_missing_current_results_dir(
         self):
     vt = self.make_vt()
     self.clobber_symlink(vt)
     safe_rmtree(vt.current_results_dir)
     with self.assertRaisesRegex(VersionedTargetSet.IllegalResultsDir,
                                 r"The.*symlink*"):
         vt.ensure_legal()
     with self.assertRaisesRegex(VersionedTargetSet.IllegalResultsDir,
                                 r"The.*current_results_dir*"):
         vt.ensure_legal()
Example 19
 def create_setup_py(self, target, dist_dir):
   chroot = Chroot(dist_dir, name=target.provides.name)
   dependency_calculator = self.DependencyCalculator(self.context.build_graph)
   reduced_deps = dependency_calculator.reduced_dependencies(target)
   self.write_contents(target, reduced_deps, chroot)
   self.write_setup(target, reduced_deps, chroot)
   target_base = '{}-{}'.format(target.provides.name, target.provides.version)
   setup_dir = os.path.join(dist_dir, target_base)
   safe_rmtree(setup_dir)
   shutil.move(chroot.path(), setup_dir)
   return setup_dir, reduced_deps
Example 20
  def execute(self):
    # We drive creation of setup.py distributions from the original target graph, grabbing codegen'd
    # sources when needed. We ignore PythonDistribution targets.
    def is_exported_python_target(t):
      return t.is_original and self.has_provides(t) and not is_local_python_dist(t)

    exported_python_targets = OrderedSet(t for t in self.context.target_roots
                                         if is_exported_python_target(t))
    if not exported_python_targets:
      raise TaskError('setup-py target(s) must provide an artifact.')

    dist_dir = self.get_options().pants_distdir

    # NB: We have to create and then run in 2 steps so that we can discover all exported targets
    # in-play in the creation phase which then allows a tsort of these exported targets in the run
    # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
    # exported target that depends on it is uploaded.

    created = {}

    def create(exported_python_target):
      if exported_python_target not in created:
        self.context.log.info('Creating setup.py project for {}'.format(exported_python_target))
        subject = self.derived_by_original.get(exported_python_target, exported_python_target)
        setup_dir, dependencies = self.create_setup_py(subject, dist_dir)
        created[exported_python_target] = setup_dir
        if self._recursive:
          for dep in dependencies:
            if is_exported_python_target(dep):
              create(dep)

    for exported_python_target in exported_python_targets:
      create(exported_python_target)

    interpreter = self.context.products.get_data(PythonInterpreter)
    python_dists = self.context.products.register_data(self.PYTHON_DISTS_PRODUCT, {})
    for exported_python_target in reversed(sort_targets(list(created.keys()))):
      setup_dir = created.get(exported_python_target)
      if setup_dir:
        if not self._run:
          self.context.log.info('Running packager against {}'.format(setup_dir))
          setup_runner = Packager(setup_dir, interpreter=interpreter)
          tgz_name = os.path.basename(setup_runner.sdist())
          sdist_path = os.path.join(dist_dir, tgz_name)
          self.context.log.info('Writing {}'.format(sdist_path))
          shutil.move(setup_runner.sdist(), sdist_path)
          safe_rmtree(setup_dir)
          python_dists[exported_python_target] = sdist_path
        else:
          self.context.log.info('Running {} against {}'.format(self._run, setup_dir))
          split_command = safe_shlex_split(self._run)
          setup_runner = SetupPyRunner(setup_dir, split_command, interpreter=interpreter)
          setup_runner.run()
          python_dists[exported_python_target] = setup_dir
Example 21
 def test_safe_rmtree_link(self):
     with temporary_dir() as td:
         real = os.path.join(td, "real")
         link = os.path.join(td, "link")
         os.mkdir(real)
         os.symlink(real, link)
         self.assertTrue(os.path.exists(real))
         self.assertTrue(os.path.exists(link))
         safe_rmtree(link)
         self.assertTrue(os.path.exists(real))
         self.assertFalse(os.path.exists(link))
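The test above pins down two properties of safe_rmtree: removing a symlink deletes only the link, never its target, and removing a path that does not exist is a no-op. A minimal sketch consistent with that behavior (illustrative only, not the Pants implementation):

import os
import shutil

def safe_rmtree_sketch(directory):
  """Remove a directory tree if present; no-op if absent; unlink symlinks."""
  if os.path.islink(directory):
    os.unlink(directory)  # remove the link itself, leaving its target intact
  else:
    shutil.rmtree(directory, ignore_errors=True)  # silently succeeds if already gone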
Example 22
    def initial_reporting(self, run_tracker):
        """Sets up the initial reporting configuration.

    Will be changed after we parse cmd-line flags.
    """
        link_to_latest = os.path.join(self.get_options().reports_dir, 'latest')

        run_id = run_tracker.run_info.get_info('id')
        if run_id is None:
            raise ReportingError('No run_id set')
        run_dir = os.path.join(self.get_options().reports_dir, run_id)
        safe_rmtree(run_dir)

        html_dir = os.path.join(run_dir, 'html')
        safe_mkdir(html_dir)

        relative_symlink(run_dir, link_to_latest)

        report = Report()

        # Capture initial console reporting into a buffer. We'll do something with it once
        # we know what the cmd-line flag settings are.
        outfile = StringIO()
        capturing_reporter_settings = PlainTextReporter.Settings(
            outfile=outfile,
            log_level=Report.INFO,
            color=False,
            indent=True,
            timing=False,
            cache_stats=False,
            label_format=self.get_options().console_label_format,
            tool_output_format=self.get_options().console_tool_output_format)
        capturing_reporter = PlainTextReporter(run_tracker,
                                               capturing_reporter_settings)
        report.add_reporter('capturing', capturing_reporter)

        # Set up HTML reporting. We always want that.
        html_reporter_settings = HtmlReporter.Settings(
            log_level=Report.INFO,
            html_dir=html_dir,
            template_dir=self.get_options().template_dir)
        html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
        report.add_reporter('html', html_reporter)

        # Add some useful RunInfo.
        run_tracker.run_info.add_info('default_report',
                                      html_reporter.report_path())
        port = ReportingServerManager().socket
        if port:
            run_tracker.run_info.add_info(
                'report_url',
                'http://localhost:{}/run/{}'.format(port, run_id))

        return report
Example 23
    def execute(self):
        targets = [
            target for target in self.context.target_roots
            if self.has_provides(target)
        ]
        if not targets:
            raise TaskError('setup-py target(s) must provide an artifact.')

        dist_dir = self.get_options().pants_distdir

        # NB: We have to create and then run in 2 steps so that we can discover all exported targets
        # in-play in the creation phase which then allows a tsort of these exported targets in the run
        # phase to ensure an exported target is, for example (--run="sdist upload"), uploaded before any
        # exported target that depends on it is uploaded.

        created = {}

        def create(target):
            if target not in created:
                self.context.log.info(
                    'Creating setup.py project for {}'.format(target))
                setup_dir, dependencies = self.create_setup_py(
                    target, dist_dir)
                created[target] = setup_dir
                if self._recursive:
                    for dep in dependencies:
                        if self.has_provides(dep):
                            create(dep)

        for target in targets:
            create(target)

        executed = {
        }  # Collected and returned for tests, processed target -> sdist|setup_dir.
        for target in reversed(sort_targets(created.keys())):
            setup_dir = created.get(target)
            if setup_dir:
                if not self._run:
                    self.context.log.info(
                        'Running packager against {}'.format(setup_dir))
                    setup_runner = Packager(setup_dir)
                    tgz_name = os.path.basename(setup_runner.sdist())
                    sdist_path = os.path.join(dist_dir, tgz_name)
                    self.context.log.info('Writing {}'.format(sdist_path))
                    shutil.move(setup_runner.sdist(), sdist_path)
                    safe_rmtree(setup_dir)
                    executed[target] = sdist_path
                else:
                    self.context.log.info('Running {} against {}'.format(
                        self._run, setup_dir))
                    setup_runner = SetupPyRunner(setup_dir, self._run)
                    setup_runner.run()
                    executed[target] = setup_dir
        return executed
Example 24
        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Double check the cache before beginning compilation
            hit_cache = self.check_cache(vts, counter)

            if not hit_cache:
                # Compute the compile classpath for this target.
                dependency_cp_entries = self._zinc.compile_classpath_entries(
                    'runtime_classpath',
                    ctx.target,
                    extra_cp_entries=self._extra_compile_time_classpath,
                )

                upstream_analysis = dict(
                    self._upstream_analysis(all_compile_contexts,
                                            dependency_cp_entries))

                is_incremental = self.should_compile_incrementally(vts, ctx)
                if not is_incremental:
                    # Purge existing analysis file in non-incremental mode.
                    safe_delete(ctx.analysis_file)
                    # Work around https://github.com/pantsbuild/pants/issues/3670
                    safe_rmtree(ctx.classes_dir)

                dep_context = DependencyContext.global_instance()
                tgt, = vts.targets
                compiler_option_sets = dep_context.defaulted_property(
                    tgt, lambda x: x.compiler_option_sets)
                zinc_file_manager = dep_context.defaulted_property(
                    tgt, lambda x: x.zinc_file_manager)
                with Timer() as timer:
                    self._compile_vts(vts, ctx, upstream_analysis,
                                      dependency_cp_entries, progress_message,
                                      tgt.platform, compiler_option_sets,
                                      zinc_file_manager, counter)
                self._record_target_stats(tgt, len(dependency_cp_entries),
                                          len(ctx.sources), timer.elapsed,
                                          is_incremental, 'compile')

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            classpath_product.add_for_target(
                ctx.target,
                [(conf, self._classpath_for_context(ctx))
                 for conf in self._confs],
            )
            self.register_extra_products_from_contexts([ctx.target],
                                                       all_compile_contexts)
Example 25
    def _default_work_for_vts(self, vts, ctx, input_classpath_product_key,
                              counter, all_compile_contexts,
                              output_classpath_product):
        progress_message = ctx.target.address.spec

        # See whether the cache-doublecheck job hit the cache: if so, noop: otherwise, compile.
        if vts.valid:
            counter()
        else:
            # Compute the compile classpath for this target.
            dependency_cp_entries = self._zinc.compile_classpath_entries(
                input_classpath_product_key,
                ctx.target,
                extra_cp_entries=self._extra_compile_time_classpath,
            )

            upstream_analysis = dict(
                self._upstream_analysis(all_compile_contexts,
                                        dependency_cp_entries))

            is_incremental = self.should_compile_incrementally(vts, ctx)
            if not is_incremental:
                # Purge existing analysis file in non-incremental mode.
                safe_delete(ctx.analysis_file)
                # Work around https://github.com/pantsbuild/pants/issues/3670
                safe_rmtree(ctx.classes_dir.path)

            dep_context = DependencyContext.global_instance()
            tgt, = vts.targets
            compiler_option_sets = dep_context.defaulted_property(
                tgt, 'compiler_option_sets')
            zinc_file_manager = dep_context.defaulted_property(
                tgt, 'zinc_file_manager')
            with Timer() as timer:
                directory_digest = self._compile_vts(
                    vts, ctx, upstream_analysis, dependency_cp_entries,
                    progress_message, tgt.platform, compiler_option_sets,
                    zinc_file_manager, counter)

            # Store the produced Digest (if any).
            self._set_directory_digest_for_compile_context(
                ctx, directory_digest)

            self._record_target_stats(tgt, len(dependency_cp_entries),
                                      len(ctx.sources), timer.elapsed,
                                      is_incremental, 'compile')

        # Update the products with the latest classes.
        output_classpath_product.add_for_target(
            ctx.target,
            [(conf, self._classpath_for_context(ctx)) for conf in self._confs],
        )
        self.register_extra_products_from_contexts([ctx.target],
                                                   all_compile_contexts)
Example 26
  def _isolation(self, all_targets):
    run_dir = '_runs'
    output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
    safe_mkdir(output_dir, clean=True)

    coverage = None
    options = self.get_options()
    if options.coverage or options.is_flagged('coverage_open'):
      coverage_processor = options.coverage_processor
      if coverage_processor == 'cobertura':
        settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
        coverage = Cobertura(settings)
      else:
        raise TaskError('unknown coverage processor {0}'.format(coverage_processor))

    self.context.release_lock()
    if coverage:
      coverage.instrument(targets=all_targets,
                          compute_junit_classpath=lambda: self.classpath(all_targets),
                          execute_java_for_targets=self.execute_java_for_coverage)

    def do_report(exc=None):
      if coverage:
        coverage.report(all_targets, self.execute_java_for_coverage, tests_failed_exception=exc)
      if self._html_report:
        self.context.log.debug('Generating JUnit HTML report...')
        html_file_path = JUnitHtmlReport().report(output_dir, os.path.join(output_dir, 'reports'))
        self.context.log.debug('JUnit HTML report generated to {}'.format(html_file_path))
        if self._open:
          desktop.ui_open(html_file_path)

    try:
      yield output_dir, do_report, coverage
    finally:
      # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
      # de facto public API and so we implement that behavior here to maintain backwards
      # compatibility for non-pants report file consumers.
      # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
      # output: https://github.com/pantsbuild/pants/issues/3879
      lock_file = '.file_lock'
      with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
        # Kill everything except the isolated runs/ dir.
        for name in os.listdir(self.workdir):
          path = os.path.join(self.workdir, name)
          if name not in (run_dir, lock_file):
            if os.path.isdir(path):
              safe_rmtree(path)
            else:
              os.unlink(path)

        # Link all the isolated run/ dir contents back up to the stable workdir
        for name in os.listdir(output_dir):
          path = os.path.join(output_dir, name)
          os.symlink(path, os.path.join(self.workdir, name))
Example 27
 def test_safe_rmtree_link(self):
   with temporary_dir() as td:
     real = os.path.join(td, 'real')
     link = os.path.join(td, 'link')
     os.mkdir(real)
     os.symlink(real, link)
     self.assertTrue(os.path.exists(real))
     self.assertTrue(os.path.exists(link))
     safe_rmtree(link)
     self.assertTrue(os.path.exists(real))
     self.assertFalse(os.path.exists(link))
Example 28
 def create_setup_py(self, target, dist_dir):
   chroot = Chroot(dist_dir, name=target.provides.name)
   dependency_calculator = self.DependencyCalculator(self.context.build_graph)
   reduced_deps = dependency_calculator.reduced_dependencies(target)
   self.write_contents(target, reduced_deps, chroot)
   self.write_setup(target, reduced_deps, chroot)
   target_base = '{}-{}'.format(target.provides.name, target.provides.version)
   setup_dir = os.path.join(dist_dir, target_base)
   safe_rmtree(setup_dir)
   shutil.move(chroot.path(), setup_dir)
   return setup_dir, reduced_deps
Example 29
    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Capture a compilation log if requested.
      log_file = ctx.log_file if self._capture_log else None

      # Double check the cache before beginning compilation
      hit_cache = check_cache(vts)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = [ctx.classes_dir]
        cp_entries.extend(ClasspathUtil.compute_classpath(ctx.dependencies(self._dep_context),
                                                          classpath_products,
                                                          extra_compile_time_classpath,
                                                          self._confs))
        upstream_analysis = dict(self._upstream_analysis(compile_contexts, cp_entries))

        if not should_compile_incrementally(vts, ctx):
          # Purge existing analysis file in non-incremental mode.
          safe_delete(ctx.analysis_file)
          # Work around https://github.com/pantsbuild/pants/issues/3670
          safe_rmtree(ctx.classes_dir)

        tgt, = vts.targets
        fatal_warnings = self._compute_language_property(tgt, lambda x: x.fatal_warnings)
        zinc_file_manager = self._compute_language_property(tgt, lambda x: x.zinc_file_manager)
        self._compile_vts(vts,
                          ctx.sources,
                          ctx.analysis_file,
                          upstream_analysis,
                          cp_entries,
                          ctx.classes_dir,
                          log_file,
                          progress_message,
                          tgt.platform,
                          fatal_warnings,
                          zinc_file_manager,
                          counter)
        self._analysis_tools.relativize(ctx.analysis_file, ctx.portable_analysis_file)

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self._register_vts([ctx])

      # Once products are registered, check for unused dependencies (if enabled).
      if not hit_cache and self._unused_deps_check_enabled:
        self._check_unused_deps(ctx)
Example 30
  def _isolation(self, all_targets):
    run_dir = '_runs'
    output_dir = os.path.join(self.workdir, run_dir, Target.identify(all_targets))
    safe_mkdir(output_dir, clean=True)

    coverage = None
    options = self.get_options()
    if options.coverage or options.is_flagged('coverage_open'):
      coverage_processor = options.coverage_processor
      if coverage_processor == 'cobertura':
        settings = CoberturaTaskSettings.from_task(self, workdir=output_dir)
        coverage = Cobertura(settings)
      else:
        raise TaskError('unknown coverage processor {0}'.format(coverage_processor))

    self.context.release_lock()
    if coverage:
      coverage.instrument(targets=all_targets,
                          compute_junit_classpath=lambda: self.classpath(all_targets),
                          execute_java_for_targets=self.execute_java_for_coverage)

    def do_report(exc=None):
      if coverage:
        coverage.report(all_targets, self.execute_java_for_coverage, tests_failed_exception=exc)
      if self._html_report:
        html_file_path = JUnitHtmlReport().report(output_dir, os.path.join(output_dir, 'reports'))
        if self._open:
          desktop.ui_open(html_file_path)

    try:
      yield output_dir, do_report, coverage
    finally:
      # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
      # de facto public API and so we implement that behavior here to maintain backwards
      # compatibility for non-pants report file consumers.
      # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
      # output: https://github.com/pantsbuild/pants/issues/3879
      lock_file = '.file_lock'
      with OwnerPrintingInterProcessFileLock(os.path.join(self.workdir, lock_file)):
        # Kill everything except the isolated runs/ dir.
        for name in os.listdir(self.workdir):
          path = os.path.join(self.workdir, name)
          if name not in (run_dir, lock_file):
            if os.path.isdir(path):
              safe_rmtree(path)
            else:
              os.unlink(path)

        # Link all the isolated run/ dir contents back up to the stable workdir
        for name in os.listdir(output_dir):
          path = os.path.join(output_dir, name)
          os.symlink(path, os.path.join(self.workdir, name))
Example 31
def initial_reporting(config, run_tracker):
  """Sets up the initial reporting configuration.

  Will be changed after we parse cmd-line flags.
  """
  reports_dir = os.path.join(config.getdefault('pants_workdir'), 'reports')
  link_to_latest = os.path.join(reports_dir, 'latest')

  run_id = run_tracker.run_info.get_info('id')
  if run_id is None:
    raise ReportingError('No run_id set')
  run_dir = os.path.join(reports_dir, run_id)
  safe_rmtree(run_dir)

  html_dir = os.path.join(run_dir, 'html')
  safe_mkdir(html_dir)

  try:
    if os.path.lexists(link_to_latest):
      os.unlink(link_to_latest)
    os.symlink(run_dir, link_to_latest)
  except OSError as e:
    # Another run may beat us to deletion or creation.
    if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
      raise

  report = Report()

  # Capture initial console reporting into a buffer. We'll do something with it once
  # we know what the cmd-line flag settings are.
  outfile = StringIO()
  capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,
                                                           color=False, indent=True, timing=False,
                                                           cache_stats=False)
  capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
  report.add_reporter('capturing', capturing_reporter)

  # Set up HTML reporting. We always want that.
  template_dir = config.get('reporting', 'reports_template_dir')
  html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                 html_dir=html_dir,
                                                 template_dir=template_dir)
  html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
  report.add_reporter('html', html_reporter)

  # Add some useful RunInfo.
  run_tracker.run_info.add_info('default_report', html_reporter.report_path())
  port = ReportingServerManager.get_current_server_port()
  if port:
    run_tracker.run_info.add_info('report_url', 'http://localhost:%d/run/%s' % (port, run_id))

  return report
Example 32
def initialize_repo(worktree):
  """Initialize git repository for the given worktree."""
  gitdir = safe_mkdtemp()
  with environment_as(GIT_DIR=gitdir, GIT_WORK_TREE=worktree):
    subprocess.check_call(['git', 'init'])
    subprocess.check_call(['git', 'config', 'user.email', '*****@*****.**'])
    subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
    subprocess.check_call(['git', 'add', '.'])
    subprocess.check_call(['git', 'commit', '-am', 'Add project files.'])

    yield Git(gitdir=gitdir, worktree=worktree)

    safe_rmtree(gitdir)
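initialize_repo yields a Git object between setup (git init plus an initial commit) and teardown (safe_rmtree of the temporary git dir), so callers are expected to drive it as a context manager. A usage sketch, assuming the generator is wrapped with contextlib.contextmanager and that the worktree path already contains the files to commit:

from contextlib import contextmanager

with contextmanager(initialize_repo)('/tmp/my_worktree') as git:
  # The temporary GIT_DIR is valid only inside this block.
  print(git.commit_id)  # assumption: the Git wrapper exposes a commit_id property
# On exit, the temporary git dir is removed with safe_rmtree.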
Example 33
    def test_pantsd_pid_deleted(self):
        with self.pantsd_successful_run_context() as ctx:
            ctx.runner(["help"])
            ctx.checker.assert_started()

            # Let any fs events quiesce.
            time.sleep(10)

            ctx.checker.assert_running()
            subprocess_dir = ctx.pantsd_config["GLOBAL"]["pants_subprocessdir"]
            safe_rmtree(subprocess_dir)

            ctx.checker.assert_stopped()
Example 34
 def execute_codegen(self, targets):
     with self._task.context.new_workunit(
             name='execute', labels=[WorkUnitLabel.MULTITOOL]):
         ordered = [
             target for target in reversed(sort_targets(targets))
             if target in targets
         ]
         for target in ordered:
             with self._task.context.new_workunit(
                     name=target.address.spec):
                 # TODO(gm): add a test-case to ensure this is correctly eliminating stale generated code.
                 safe_rmtree(self._task.codegen_workdir(target))
                 self._do_execute_codegen([target])
Example 35
  def resolve_target(self, node_task, target, results_dir, node_paths, resolve_locally=False, **kwargs):
    if not resolve_locally:
      self._copy_sources(target, results_dir)

    with temporary_dir() as temp_dir:
      archive_file_name = urllib_parse.urlsplit(target.dependencies_archive_url).path.split('/')[-1]
      if not archive_file_name:
        raise TaskError('Could not determine archive file name for {target} from {url}'
                        .format(target=target.address.reference(),
                                url=target.dependencies_archive_url))

      download_path = os.path.join(temp_dir, archive_file_name)

      node_task.context.log.info(
        'Downloading archive {archive_file_name} from '
        '{dependencies_archive_url} to {path}'
        .format(archive_file_name=archive_file_name,
                dependencies_archive_url=target.dependencies_archive_url,
                path=download_path))

      try:
        Fetcher(get_buildroot()).download(target.dependencies_archive_url,
                                          listener=Fetcher.ProgressListener(),
                                          path_or_fd=download_path,
                                          timeout_secs=self.get_options().fetch_timeout_secs)
      except Fetcher.Error as error:
        raise TaskError('Failed to fetch preinstalled node_modules for {target} from {url}: {error}'
                        .format(target=target.address.reference(),
                                url=target.dependencies_archive_url,
                                error=error))

      node_task.context.log.info(
        'Fetched archive {archive_file_name} from {dependencies_archive_url} to {path}'
        .format(archive_file_name=archive_file_name,
                dependencies_archive_url=target.dependencies_archive_url,
                path=download_path))

      archiver_for_path(archive_file_name).extract(download_path, temp_dir)

      extracted_node_modules = os.path.join(temp_dir, 'node_modules')
      if not os.path.isdir(extracted_node_modules):
        raise TaskError('Did not find an extracted node_modules directory for {target} '
                        'inside {dependencies_archive_url}'
                        .format(target=target.address.reference(),
                                dependencies_archive_url=target.dependencies_archive_url))

      # shutil.move doesn't handle directory collision nicely. This is mainly to address
      # installing within the source directory for local resolves.
      node_modules_path = os.path.join(results_dir, 'node_modules')
      safe_rmtree(node_modules_path)
      shutil.move(extracted_node_modules, node_modules_path)
Example 36
def initialize_repo(worktree):
    """Initialize git repository for the given worktree."""
    gitdir = safe_mkdtemp()
    with environment_as(GIT_DIR=gitdir, GIT_WORK_TREE=worktree):
        subprocess.check_call(['git', 'init'])
        subprocess.check_call(
            ['git', 'config', 'user.email', '*****@*****.**'])
        subprocess.check_call(['git', 'config', 'user.name', 'Your Name'])
        subprocess.check_call(['git', 'add', '.'])
        subprocess.check_call(['git', 'commit', '-am', 'Add project files.'])

        yield Git(gitdir=gitdir, worktree=worktree)

        safe_rmtree(gitdir)
Example 37
  def store_and_use_artifact(self, cache_key, src, results_dir=None):
    """Read the content of a tarball from an iterator and return an artifact stored in the cache."""
    with self._tmpfile(cache_key, 'read') as tmp:
      for chunk in src:
        tmp.write(chunk)
      tmp.close()
      tarball = self._store_tarball(cache_key, tmp.name)
      artifact = self._artifact(tarball)

      if results_dir is not None:
        safe_rmtree(results_dir)

      artifact.extract()
      return True
Example 38
    def store_and_use_artifact(self, cache_key, src, results_dir=None):
        """Read the content of a tarball from an iterator and return an artifact stored in the cache."""
        with self._tmpfile(cache_key, 'read') as tmp:
            for chunk in src:
                tmp.write(chunk)
            tmp.close()
            tarball = self._store_tarball(cache_key, tmp.name)
            artifact = self._artifact(tarball)

            if results_dir is not None:
                safe_rmtree(results_dir)

            artifact.extract()
            return True
Example 39
        def work_for_vts(vts, ctx):
            progress_message = ctx.target.address.spec

            # Double check the cache before beginning compilation
            hit_cache = check_cache(vts)

            if not hit_cache:
                # Compute the compile classpath for this target.
                cp_entries = [ctx.classes_dir]
                cp_entries.extend(
                    self._zinc.compile_classpath(
                        classpath_product_key,
                        ctx.target,
                        extra_cp_entries=self._extra_compile_time_classpath,
                        zinc_compile_instance=self))
                upstream_analysis = dict(
                    self._upstream_analysis(compile_contexts, cp_entries))

                is_incremental = should_compile_incrementally(vts, ctx)
                if not is_incremental:
                    # Purge existing analysis file in non-incremental mode.
                    safe_delete(ctx.analysis_file)
                    # Work around https://github.com/pantsbuild/pants/issues/3670
                    safe_rmtree(ctx.classes_dir)

                dep_context = DependencyContext.global_instance()
                tgt, = vts.targets
                fatal_warnings = dep_context.defaulted_property(
                    tgt, lambda x: x.fatal_warnings)
                zinc_file_manager = dep_context.defaulted_property(
                    tgt, lambda x: x.zinc_file_manager)
                with Timer() as timer:
                    self._compile_vts(vts, ctx.target, ctx.sources,
                                      ctx.analysis_file, upstream_analysis,
                                      cp_entries, ctx.classes_dir, ctx.log_dir,
                                      ctx.zinc_args_file, progress_message,
                                      tgt.platform, fatal_warnings,
                                      zinc_file_manager, counter)
                self._record_target_stats(tgt, len(cp_entries),
                                          len(ctx.sources), timer.elapsed,
                                          is_incremental)

                # Write any additional resources for this target to the target workdir.
                self.write_extra_resources(ctx)

                # Jar the compiled output.
                self._create_context_jar(ctx)

            # Update the products with the latest classes.
            self._register_vts([ctx])
Example 40
  def use_cached_files(self, cache_key, results_dir=None):
    try:
      artifact = self._artifact_for(cache_key)
      if artifact.exists():
        if results_dir is not None:
          safe_rmtree(results_dir)
        artifact.extract()
        return True
    except Exception as e:
      # TODO(davidt): Consider being more granular in what is caught.
      logger.warn('Error while reading from local artifact cache: {0}'.format(e))
      return UnreadableArtifact(cache_key, e)

    return False
Example 41
  def test_cache_read_from(self):
    all_vts, invalid_vts = self.task.execute()
    # On the first execution the vt is expected to be in the invalid_vts list.
    self.assertGreater(len(invalid_vts), 0)
    first_vt = invalid_vts[0]
    # Delete .pants.d.
    safe_rmtree(self.task._workdir)
    all_vts2, invalid_vts2 = self.task.execute()
    # Check that running the task a second time results in a valid vt,
    # implying the artifact cache was hit.
    self.assertGreater(len(all_vts2), 0)
    second_vt = all_vts2[0]
    self.assertEqual(first_vt.cache_key.hash, second_vt.cache_key.hash)
    self.assertListEqual(invalid_vts2, [])
Example 42
    def _do_test_inexact_requirements(self, sdist: bool) -> None:
        with self.plugin_resolution(plugins=[("jake", "1.2.3"), "jane"], sdist=sdist) as results:

            working_set, chroot, repo_dir, cache_dir = results

            # Kill the cache and the repo source dir and wait past our 1s test TTL. If the PluginResolver
            # truly detects inexact plugin requirements, it should skip perma-caching and fall through to
            # a pex resolve and then fail.
            safe_rmtree(repo_dir)
            safe_rmtree(cache_dir)

            with pytest.raises(Unsatisfiable):
                with self.plugin_resolution(chroot=chroot, plugins=[("jake", "1.2.3"), "jane"]):
                    pytest.fail("Should not reach here, should raise first.")
Example 43
def test_exact_requirements():
  with plugin_resolution(plugins=[('jake', '1.2.3'), ('jane', '3.4.5')]) as results:
    working_set, chroot, repo_dir, cache_dir = results

    assert 2 == len(working_set.entries)

    # Kill the repo source dir and re-resolve.  If the PluginResolver truly detects exact
    # requirements it should skip any resolves and load directly from the still intact cache.
    safe_rmtree(repo_dir)

    with plugin_resolution(chroot=chroot,
                           plugins=[('jake', '1.2.3'), ('jane', '3.4.5')]) as results2:
      working_set2, _, _, _ = results2

      assert working_set.entries == working_set2.entries
Example 44
    def use_cached_files(self, cache_key, results_dir=None):
        try:
            artifact = self._artifact_for(cache_key)
            if artifact.exists():
                if results_dir is not None:
                    safe_rmtree(results_dir)
                artifact.extract()
                return True
        except Exception as e:
            # TODO(davidt): Consider being more granular in what is caught.
            logger.warn(
                'Error while reading from local artifact cache: {0}'.format(e))
            return UnreadableArtifact(cache_key, e)

        return False
Example 45
    def _isolation(self, all_targets):
        run_dir = '_runs'
        output_dir = os.path.join(self.workdir, run_dir,
                                  Target.identify(all_targets))
        safe_mkdir(output_dir, clean=False)

        if self._html_report:
            junit_html_report = JUnitHtmlReport.create(output_dir,
                                                       self.context.log)
        else:
            junit_html_report = NoJunitHtmlReport()

        if self.get_options().coverage or self.get_options().is_flagged(
                'coverage_open'):
            settings = CoberturaTaskSettings.from_task(self,
                                                       workdir=output_dir)
            coverage = Cobertura(settings, all_targets,
                                 self.execute_java_for_coverage)
        else:
            coverage = NoCoverage()

        reports = self.Reports(junit_html_report, coverage)

        self.context.release_lock()
        try:
            yield output_dir, reports, coverage
        finally:
            # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
            # de facto public API and so we implement that behavior here to maintain backwards
            # compatibility for non-pants report file consumers.
            # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
            # output: https://github.com/pantsbuild/pants/issues/3879
            lock_file = '.file_lock'
            with OwnerPrintingInterProcessFileLock(
                    os.path.join(self.workdir, lock_file)):
                # Kill everything except the isolated `_runs/` dir.
                for name in os.listdir(self.workdir):
                    path = os.path.join(self.workdir, name)
                    if name not in (run_dir, lock_file):
                        if os.path.isdir(path):
                            safe_rmtree(path)
                        else:
                            os.unlink(path)

                # Link all the isolated run/ dir contents back up to the stable workdir
                for name in os.listdir(output_dir):
                    path = os.path.join(output_dir, name)
                    os.symlink(path, os.path.join(self.workdir, name))
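
The end of the context manager above clears the stable workdir (except the isolated run dir and the lock file) and re-points it at the isolated output via symlinks. A standalone sketch of that pattern, assuming only the safe_rmtree helper, is:

import os
from pants.util.dirutil import safe_rmtree

def relink_workdir(stable_dir, isolated_dir, preserve=()):
    # Remove everything in stable_dir except the names in `preserve` ...
    for name in os.listdir(stable_dir):
        path = os.path.join(stable_dir, name)
        if name in preserve:
            continue
        if os.path.isdir(path):
            safe_rmtree(path)
        else:
            os.unlink(path)
    # ... then symlink the isolated run's contents back into stable_dir.
    for name in os.listdir(isolated_dir):
        os.symlink(os.path.join(isolated_dir, name), os.path.join(stable_dir, name))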
Example 46
    def work_for_vts(vts, ctx):
      progress_message = ctx.target.address.spec

      # Double check the cache before beginning compilation
      hit_cache = self.check_cache(vts, counter)

      if not hit_cache:
        # Compute the compile classpath for this target.
        cp_entries = self._cp_entries_for_ctx(ctx, 'runtime_classpath')

        upstream_analysis = dict(self._upstream_analysis(all_compile_contexts, cp_entries))

        is_incremental = self.should_compile_incrementally(vts, ctx)
        if not is_incremental:
          # Purge existing analysis file in non-incremental mode.
          safe_delete(ctx.analysis_file)
          # Work around https://github.com/pantsbuild/pants/issues/3670
          safe_rmtree(ctx.classes_dir)

        dep_context = DependencyContext.global_instance()
        tgt, = vts.targets
        fatal_warnings = dep_context.defaulted_property(tgt, lambda x: x.fatal_warnings)
        zinc_file_manager = dep_context.defaulted_property(tgt, lambda x: x.zinc_file_manager)
        with Timer() as timer:
          self._compile_vts(vts,
                            ctx,
                            upstream_analysis,
                            cp_entries,
                            progress_message,
                            tgt.platform,
                            fatal_warnings,
                            zinc_file_manager,
                            counter)
        self._record_target_stats(tgt,
                                  len(cp_entries),
                                  len(ctx.sources),
                                  timer.elapsed,
                                  is_incremental,
                                  'compile')

        # Write any additional resources for this target to the target workdir.
        self.write_extra_resources(ctx)

        # Jar the compiled output.
        self._create_context_jar(ctx)

      # Update the products with the latest classes.
      self.register_extra_products_from_contexts([ctx.target], all_compile_contexts)
Example 47
  def _ensure_analysis_tmpdir(self):
    # Do this lazily, so we don't trigger creation of a worker pool unless we need it.
    if not os.path.exists(self._analysis_tmpdir):
      os.makedirs(self._analysis_tmpdir)
      if self._delete_scratch:
        self.context.background_worker_pool().add_shutdown_hook(
            lambda: safe_rmtree(self._analysis_tmpdir))
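
The same lazy-creation-plus-cleanup idea can be expressed without a background worker pool; a sketch using atexit (an assumption for illustration, not how Pants schedules the cleanup) is:

import atexit
import os
from pants.util.dirutil import safe_rmtree

def ensure_scratch_dir(path, delete_on_exit=True):
    # Create the scratch dir only when first needed, and optionally register
    # a hook so it is removed when the process exits.
    if not os.path.exists(path):
        os.makedirs(path)
        if delete_on_exit:
            atexit.register(lambda: safe_rmtree(path))
    return path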
Example 48
def test_exact_requirements(unused_test_name, packager_cls):
    with plugin_resolution(plugins=[("jake", "1.2.3"), ("jane", "3.4.5")],
                           packager_cls=packager_cls) as results:
        working_set, chroot, repo_dir, cache_dir = results

        # Kill the repo source dir and re-resolve.  If the PluginResolver truly detects exact
        # requirements it should skip any resolves and load directly from the still intact cache.
        safe_rmtree(repo_dir)

        with plugin_resolution(chroot=chroot,
                               plugins=[("jake", "1.2.3"),
                                        ("jane", "3.4.5")]) as results2:

            working_set2, _, _, _ = results2

            assert list(working_set) == list(working_set2)
Example 49
def test_inexact_requirements():
  with plugin_resolution(plugins=[('jake', '1.2.3'), 'jane']) as results:
    working_set, chroot, repo_dir, cache_dir = results

    assert 2 == len(working_set.entries)

    # Kill the cache and the repo source dir and wait past our 1s test TTL. If the PluginResolver
    # truly detects inexact plugin requirements, it should skip perma-caching and fall through to
    # a pex resolve once the TTL expires, and then fail.
    safe_rmtree(repo_dir)
    safe_rmtree(cache_dir)
    time.sleep(1.5)

    with pytest.raises(Unsatisfiable):
      with plugin_resolution(chroot=chroot, plugins=[('jake', '1.2.3'), 'jane']):
        assert False, 'Should not reach here, should raise first.'
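
The sleep past the 1s TTL is what forces the resolver to re-resolve instead of reusing a stale cache entry. A freshness check along these lines (a sketch of the idea; the real resolver's bookkeeping is more involved) captures it:

import os
import time

def cache_entry_is_fresh(path, ttl_seconds=1.0):
    # A cached resolve is only reusable while it is younger than the TTL.
    try:
        age = time.time() - os.path.getmtime(path)
    except OSError:
        return False  # no cached entry at all
    return age < ttl_seconds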
Example 50
  def test_exact_requirements(self, unused_test_name, packager_cls):
    with self.plugin_resolution(plugins=[('jake', '1.2.3'), ('jane', '3.4.5')],
                                packager_cls=packager_cls) as results:
      working_set, chroot, repo_dir, cache_dir = results

      self.assertEqual(2, len(working_set.entries))

      # Kill the repo source dir and re-resolve.  If the PluginResolver truly detects exact
      # requirements it should skip any resolves and load directly from the still intact cache.
      safe_rmtree(repo_dir)

      with self.plugin_resolution(chroot=chroot,
                                  plugins=[('jake', '1.2.3'), ('jane', '3.4.5')]) as results2:
        working_set2, _, _, _ = results2

        self.assertEqual(working_set.entries, working_set2.entries)
Example 51
  def _ensure_analysis_tmpdir(self):
    # Do this lazily, so we don't trigger creation of a worker pool unless we need it.
    if not os.path.exists(self._analysis_tmpdir):
      os.makedirs(self._analysis_tmpdir)
      if self._delete_scratch:
        self.context.background_worker_pool().add_shutdown_hook(
            lambda: safe_rmtree(self._analysis_tmpdir))
Example 52
  def test_exact_requirements(self, unused_test_name, packager_cls):
    with self.plugin_resolution(plugins=[('jake', '1.2.3'), ('jane', '3.4.5')],
                                packager_cls=packager_cls) as results:
      working_set, chroot, repo_dir, cache_dir = results

      self.assertEqual(2, len(working_set.entries))

      # Kill the repo source dir and re-resolve.  If the PluginResolver truly detects exact
      # requirements it should skip any resolves and load directly from the still intact cache.
      safe_rmtree(repo_dir)

      with self.plugin_resolution(chroot=chroot,
                                  plugins=[('jake', '1.2.3'), ('jane', '3.4.5')]) as results2:
        working_set2, _, _, _ = results2

        self.assertEqual(working_set.entries, working_set2.entries)
Example 53
    def isolated_local_store(self):
        """Temporarily use an anonymous, empty Store for the Scheduler.

        In most cases we re-use a Store across all tests, since `file` and `directory` entries are
        content addressed, and `process` entries are intended to have strong cache keys. But when
        dealing with non-referentially transparent `process` executions, it can sometimes be
        necessary to avoid this cache.
        """
        self._scheduler = None
        local_store_dir = os.path.realpath(safe_mkdtemp())
        self._init_engine(local_store_dir=local_store_dir)
        try:
            yield
        finally:
            self._scheduler = None
            safe_rmtree(local_store_dir)
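
Given the yield, isolated_local_store is presumably exposed as a context manager (e.g. via contextlib.contextmanager, not shown above). A hypothetical test using it, where run_uncacheable_process is an assumed helper, could look like:

# Hypothetical method on the same test class; run_uncacheable_process is an assumed helper.
def test_no_cross_run_caching(self):
    with self.isolated_local_store():
        first = self.run_uncacheable_process()
    with self.isolated_local_store():
        second = self.run_uncacheable_process()
    # Each run used its own empty store, so nothing could be served from cache.
    assert first != second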
Example 54
    def create_canonical_classpath(cls, classpath_products, targets, basedir, save_classpath_file=False):
        """Create a stable classpath of symlinks with standardized names.

    :param classpath_products: Classpath products.
    :param targets: Targets to create canonical classpath for.
    :param basedir: Directory to create symlinks.
    :param save_classpath_file: An optional file with original classpath entries that symlinks
      are created from.

    :returns: Converted canonical classpath.
    :rtype: list of strings
    """

        def _stable_output_folder(basedir, target):
            address = target.address
            return os.path.join(
                basedir,
                # target.address.spec is used in export goal to identify targets
                address.spec.replace(":", os.sep) if address.spec_path else address.target_name,
            )

        canonical_classpath = []
        for target in targets:
            folder_for_target_symlinks = _stable_output_folder(basedir, target)
            safe_rmtree(folder_for_target_symlinks)

            classpath_entries_for_target = classpath_products.get_internal_classpath_entries_for_targets([target])

            if len(classpath_entries_for_target) > 0:
                safe_mkdir(folder_for_target_symlinks)

                classpath = []
                for (index, (conf, entry)) in enumerate(classpath_entries_for_target):
                    classpath.append(entry.path)
                    # Create a unique symlink path by prefixing the base file name with a monotonically
                    # increasing `index` to avoid name collisions.
                    file_name = os.path.basename(entry.path)
                    symlink_path = os.path.join(folder_for_target_symlinks, "{}-{}".format(index, file_name))
                    os.symlink(entry.path, symlink_path)
                    canonical_classpath.append(symlink_path)

                if save_classpath_file:
                    with safe_open(os.path.join(folder_for_target_symlinks, "classpath.txt"), "w") as classpath_file:
                        classpath_file.write(os.pathsep.join(classpath))
                        classpath_file.write("\n")

        return canonical_classpath
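
The _stable_output_folder helper above maps a target address to a deterministic directory, e.g. a spec like 'src/java/foo:bar' under basedir becomes 'basedir/src/java/foo/bar'. A minimal sketch of that mapping:

import os

def stable_output_folder(basedir, spec, spec_path=True):
    # Mirror of the naming rule above: replace ':' with the path separator so the
    # same target always yields the same symlink folder across runs.
    return os.path.join(basedir, spec.replace(':', os.sep) if spec_path else spec)

# e.g. stable_output_folder('/tmp/cp', 'src/java/foo:bar') -> '/tmp/cp/src/java/foo/bar' on POSIX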
Example 55
    def _do_test_exact_requirements(self, sdist: bool) -> None:
        with self.plugin_resolution(
            plugins=[("jake", "1.2.3"), ("jane", "3.4.5")], sdist=sdist
        ) as results:
            working_set, chroot, repo_dir, cache_dir = results

            # Kill the repo source dir and re-resolve.  If the PluginResolver truly detects exact
            # requirements it should skip any resolves and load directly from the still intact cache.
            safe_rmtree(repo_dir)

            with self.plugin_resolution(
                chroot=chroot, plugins=[("jake", "1.2.3"), ("jane", "3.4.5")]
            ) as results2:

                working_set2, _, _, _ = results2

                assert list(working_set) == list(working_set2)
Example 56
    def test_inexact_requirements(self, unused_test_name, packager_cls):
        with self.plugin_resolution(plugins=[("jake", "1.2.3"), "jane"],
                                    packager_cls=packager_cls) as results:

            working_set, chroot, repo_dir, cache_dir = results

            # Kill the cache and the repo source dir and wait past our 1s test TTL. If the PluginResolver
            # truly detects inexact plugin requirements, it should skip perma-caching and fall through to
            # a pex resolve and then fail.
            safe_rmtree(repo_dir)
            safe_rmtree(cache_dir)

            with self.assertRaises(Unsatisfiable):
                with self.plugin_resolution(chroot=chroot,
                                            plugins=[("jake", "1.2.3"),
                                                     "jane"]):
                    self.fail("Should not reach here, should raise first.")
Example 57
  def test_inexact_requirements(self, unused_test_name, packager_cls):
    with self.plugin_resolution(plugins=[('jake', '1.2.3'), 'jane'],
                                packager_cls=packager_cls) as results:
      working_set, chroot, repo_dir, cache_dir = results

      self.assertEqual(2, len(working_set.entries))

      # Kill the cache and the repo source dir and wait past our 1s test TTL. If the PluginResolver
      # truly detects inexact plugin requirements, it should skip perma-caching and fall through to
      # a pex resolve once the TTL expires, and then fail.
      safe_rmtree(repo_dir)
      safe_rmtree(cache_dir)
      Crawler.reset_cache()
      time.sleep(1.5)

      with self.assertRaises(Unsatisfiable):
        with self.plugin_resolution(chroot=chroot, plugins=[('jake', '1.2.3'), 'jane']):
          self.fail('Should not reach here, should raise first.')
Example 58
  def copy_previous_results(self):
    """Use the latest valid results_dir as the starting contents of the current results_dir.

    Should be called after the cache is checked, since previous_results are not useful if there is
    a cached artifact.
    """
    # TODO(mateo): This should probably be managed by the task, which manages the rest of the
    # incremental support.
    if not self.previous_cache_key:
      return None
    previous_path = self._cache_manager.results_dir_path(self.previous_cache_key, stable=False)
    if os.path.isdir(previous_path):
      self.is_incremental = True
      safe_rmtree(self._current_results_dir)
      shutil.copytree(previous_path, self._current_results_dir)
    safe_mkdir(self._current_results_dir)
    relative_symlink(self._current_results_dir, self.results_dir)
    # Set self._previous_results_dir last, so that it is only set once the copy has completed.
    self._previous_results_dir = previous_path
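
Stripped of the cache-manager plumbing, the seeding logic above reduces to: copy the previous valid results_dir when one exists, otherwise start empty. A sketch under that assumption:

import os
import shutil
from pants.util.dirutil import safe_mkdir, safe_rmtree

def seed_results_dir(current_dir, previous_dir=None):
    # Start the new results_dir from the last valid one when possible (incremental),
    # otherwise create it empty (a full run).
    if previous_dir and os.path.isdir(previous_dir):
        safe_rmtree(current_dir)
        shutil.copytree(previous_dir, current_dir)
        return True
    safe_mkdir(current_dir)
    return False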