Example #1
def hold_lock_until_terminate(path, lock_held, terminate):
  lock = OwnerPrintingInterProcessFileLock(path)
  lock.acquire()
  lock_held.set()
  # NOTE: We shouldn't ever wait this long, this is just to ensure
  # we don't somehow leak child processes.
  terminate.wait(60)
  lock.release()
  lock_held.clear()
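
A minimal sketch of how a parent process might drive this helper, mirroring the test fixtures in Examples #3 and #10 below. The pants.process.lock import path is an assumption:

import os
import tempfile
from multiprocessing import Manager, Process

from pants.process.lock import OwnerPrintingInterProcessFileLock  # assumed import path

lock_path = os.path.join(tempfile.mkdtemp(), 'lock')
manager = Manager()
lock_held, terminate = manager.Event(), manager.Event()
child = Process(target=hold_lock_until_terminate, args=(lock_path, lock_held, terminate))
child.start()
lock_held.wait()  # Block until the child actually holds the lock.

# The child owns the lock, so a non-blocking acquire from this process fails.
assert not OwnerPrintingInterProcessFileLock(lock_path).acquire(blocking=False)

terminate.set()  # Signal the child to release the lock and exit.
child.join()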
Example #2
 def __init__(
     self,
     options,
     run_tracker,
     target_roots,
     requested_goals=None,
     target_base=None,
     build_graph=None,
     build_file_parser=None,
     address_mapper=None,
     console_outstream=None,
     scm=None,
     workspace=None,
     invalidation_report=None,
 ):
     self._options = options
     self.build_graph = build_graph
     self.build_file_parser = build_file_parser
     self.address_mapper = address_mapper
     self.run_tracker = run_tracker
     self._log = self.Log(run_tracker)
     self._target_base = target_base or Target
     self._products = Products()
     self._buildroot = get_buildroot()
     self._source_roots = SourceRootConfig.global_instance().get_source_roots()
     self._lock = OwnerPrintingInterProcessFileLock(os.path.join(self._buildroot, ".pants.workdir.file_lock"))
     self._java_sysprops = None  # Computed lazily.
     self.requested_goals = requested_goals or []
     self._console_outstream = console_outstream or sys.stdout
     self._scm = scm or get_scm()
     self._workspace = workspace or (ScmWorkspace(self._scm) if self._scm else None)
     self._replace_targets(target_roots)
     self._invalidation_report = invalidation_report
Example #3
 def setUp(self):
   self.lock_dir = tempfile.mkdtemp()
   self.lock_path = os.path.join(self.lock_dir, 'lock')
   self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
   self.manager = Manager()
   self.lock_held = self.manager.Event()
   self.terminate = self.manager.Event()
   self.lock_process = Process(
     target=hold_lock_until_terminate,
     args=(self.lock_path, self.lock_held, self.terminate),
   )
Example #4
  def setup(self, filters=()):
    """Sets up a cache of python interpreters.

    :param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3'``, or just ``['>=2.7','<3']``
      for requirements agnostic to interpreter class.
    :returns: A list of cached interpreters
    :rtype: list of :class:`pex.interpreter.PythonInterpreter`
    """
    # We filter the interpreter cache itself (and not just the interpreters we pull from it)
    # because setting up some python versions (e.g., 3<=python<3.3) crashes, and this gives us
    # an escape hatch.
    filters = filters if any(filters) else self.python_setup.interpreter_constraints
    setup_paths = self.python_setup.interpreter_search_paths
    logger.debug(
      'Initializing Python interpreter cache matching filters `{}` from paths `{}`'.format(
        ':'.join(filters), ':'.join(setup_paths)))

    interpreters = []
    def unsatisfied_filters():
      return [f for f in filters if len(list(self._matching(interpreters, [f]))) == 0]

    with OwnerPrintingInterProcessFileLock(path=os.path.join(self._cache_dir, '.file_lock')):
      interpreters.extend(self._setup_cached(filters=filters))
      if not interpreters or unsatisfied_filters():
        interpreters.extend(self._setup_paths(setup_paths, filters=filters))

    for filt in unsatisfied_filters():
      logger.debug('No valid interpreters found for {}!'.format(filt))

    matches = list(self._matching(interpreters, filters=filters))
    if len(matches) == 0:
      logger.debug('Found no valid interpreters!')

    logger.debug(
      'Initialized Python interpreter cache with {}'.format(', '.join([x.binary for x in matches])))
    return matches
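
The ``with OwnerPrintingInterProcessFileLock(path=...)`` block above is the context-manager form of the lock: it serializes cache initialization across concurrent pants processes. A stripped-down sketch of the same pattern, where cache_dir and rebuild_cache are hypothetical stand-ins:

import os

from pants.process.lock import OwnerPrintingInterProcessFileLock  # assumed import path

def refresh_cache(cache_dir, rebuild_cache):
  # Only one process at a time may mutate the shared cache; any other
  # process blocks here, printing the current owner's message.
  with OwnerPrintingInterProcessFileLock(path=os.path.join(cache_dir, '.file_lock')):
    rebuild_cache(cache_dir)  # Hypothetical cache-population work.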
Example #5
    def __init__(self, build_root, pants_workdir, engine_initializer, log_dir,
                 log_level, pailgun_host, pailgun_port, fs_event_workers,
                 pants_ignore_patterns, build_ignore_patterns,
                 exclude_target_regexp, subproject_roots):
        """
    :param str build_root: The path of the build root.
    :param str pants_workdir: The path of the pants workdir.
    :param class engine_initializer: The class representing the EngineInitializer.
    :param str log_dir: The path for pantsd logs.
    :param str log_level: The log level for pantsd logs (derived from the pants log level).
    :param str pailgun_host: The bind address for the Pailgun server.
    :param int pailgun_port: The bind port for the Pailgun server.
    :param int fs_event_workers: The number of workers to use for processing the fs event queue.
    :param list pants_ignore_patterns: A list of path ignore patterns for filesystem operations.
    :param list build_ignore_patterns: A list of path ignore patterns for BUILD file parsing.
    :param list exclude_target_regexp: A list of target exclude regexps.
    :param list subproject_roots: A list of subproject roots.

    """
        self._build_root = build_root
        self._pants_workdir = pants_workdir
        self._engine_initializer = engine_initializer
        self._log_dir = log_dir
        self._log_level = log_level
        self._pailgun_host = pailgun_host
        self._pailgun_port = pailgun_port
        self._fs_event_workers = fs_event_workers
        self._pants_ignore_patterns = pants_ignore_patterns
        self._build_ignore_patterns = build_ignore_patterns
        self._exclude_target_regexp = exclude_target_regexp
        self._subproject_roots = subproject_roots
        self._native = Native.Factory.global_instance().create()
        # TODO(kwlzn): Thread filesystem path ignores here to Watchman's subscription registration.

        lock_location = os.path.join(self._build_root, '.pantsd.startup')
        self._lock = OwnerPrintingInterProcessFileLock(lock_location)
        self._logger = logging.getLogger(__name__)
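
The snippet above only constructs the lock at `.pantsd.startup`; acquiring it is not shown. A hedged sketch of how a daemon launcher might guard its startup critical section with this lock, using only the acquire/release API demonstrated in the other examples (_guarded_launch and _launch are hypothetical names):

    def _guarded_launch(self):
        # Hypothetical sketch: serialize daemon startup across processes.
        self._lock.acquire()  # Blocks, printing the owning pid if contended.
        try:
            self._launch()  # Hypothetical startup work.
        finally:
            self._lock.release()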
Example #6
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
    def setUp(self):
        self.lock_dir = tempfile.mkdtemp()
        self.lock_path = os.path.join(self.lock_dir, 'lock')
        self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
        self.manager = Manager()
        self.lock_held = self.manager.Event()
        self.terminate = self.manager.Event()
        self.lock_process = Process(
            target=hold_lock_until_terminate,
            args=(self.lock_path, self.lock_held, self.terminate),
        )

    def tearDown(self):
        self.terminate.set()
        try:
            shutil.rmtree(self.lock_dir)
        except OSError:
            pass

    def test_non_blocking_attempt(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertFalse(self.lock.acquire(blocking=False))

    def test_message(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertTrue(os.path.exists(self.lock.message_path))
        with open(self.lock.message_path, 'r') as f:
            message_content = f.read()
        self.assertIn(str(self.lock_process.pid), message_content)

        os.unlink(self.lock.message_path)

        def message_fn(message):
            self.assertIn(self.lock.missing_message_output, message)

        self.lock.acquire(blocking=False, message_fn=message_fn)

    def test_blocking(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertFalse(self.lock.acquire(timeout=.1))

        acquire_is_blocking = self.manager.Event()

        def terminate_subproc(terminate, acquire_is_blocking):
            acquire_is_blocking.wait()
            terminate.set()

        Thread(target=terminate_subproc,
               args=(self.terminate, acquire_is_blocking)).start()

        def message_fn(message):
            self.assertIn(str(self.lock_process.pid), message)
            acquire_is_blocking.set()

        # NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
        # but sometimes CI containers are extremely slow, so we choose a very large
        # value just in case.
        self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))

    def test_reentrant(self):
        self.assertTrue(self.lock.acquire())
        self.assertTrue(self.lock.acquire())

    def test_release(self):
        self.assertTrue(self.lock.acquire())
        self.assertTrue(self.lock.acquired)
        self.lock.release()
        self.assertFalse(self.lock.acquired)
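
Taken together, these tests pin down the acquire contract: acquire(blocking=False) returns False when another process holds the lock, acquire(timeout=t) gives up after roughly t seconds, and message_fn receives the owner's message (or missing_message_output when the message file is gone). A hedged sketch of that contract in use, assuming the lock class is imported and using an illustrative path:

lock = OwnerPrintingInterProcessFileLock('/tmp/demo.lock')  # illustrative path
if not lock.acquire(timeout=0.5, message_fn=print):
    # Contended: message_fn was already invoked with the owner's message.
    print('lock is busy; giving up')
else:
    try:
        pass  # ... critical section ...
    finally:
        lock.release()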
Example #7
class Context(object):
    """Contains the context for a single run of pants.

  Task implementations can access configuration data from pants.ini and any flags they have exposed
  here as well as information about the targets involved in the run.

  Advanced uses of the context include adding new targets to it for upstream or downstream goals to
  operate on and mapping of products a goal creates to the targets the products are associated with.

  :API: public
  """

    # TODO: Figure out a more structured way to construct and use context than this big flat
    # repository of attributes?
    def __init__(self,
                 options,
                 run_tracker,
                 target_roots,
                 requested_goals=None,
                 target_base=None,
                 build_graph=None,
                 build_file_parser=None,
                 build_configuration=None,
                 address_mapper=None,
                 console_outstream=None,
                 scm=None,
                 workspace=None,
                 invalidation_report=None,
                 scheduler=None):
        self._options = options
        self.build_graph = build_graph
        self._build_file_parser = build_file_parser
        self.build_configuration = build_configuration
        self.address_mapper = address_mapper
        self.run_tracker = run_tracker
        self._log = run_tracker.logger
        self._target_base = target_base or Target
        self._products = Products()
        self._buildroot = get_buildroot()
        self._source_roots = SourceRootConfig.global_instance().get_source_roots()
        self._lock = OwnerPrintingInterProcessFileLock(
            os.path.join(self._buildroot, '.pants.workdir.file_lock'))
        self._java_sysprops = None  # Computed lazily.
        self.requested_goals = requested_goals or []
        self._console_outstream = console_outstream or (sys.stdout.buffer
                                                        if PY3 else sys.stdout)
        self._scm = scm or get_scm()
        self._workspace = workspace or (ScmWorkspace(self._scm)
                                        if self._scm else None)
        self._replace_targets(target_roots)
        self._invalidation_report = invalidation_report
        self._scheduler = scheduler

    @property
    @deprecated('1.17.0.dev2',
                hint_message='Use the build_configuration property.')
    def build_file_parser(self):
        return self._build_file_parser

    @property
    def options(self):
        """Returns the new-style options.

    :API: public
    """
        return self._options

    @property
    def log(self):
        """Returns the preferred logger for goals to use.

    :API: public
    """
        return self._log

    @property
    def products(self):
        """Returns the Products manager for the current run.

    :API: public
    """
        return self._products

    @property
    def source_roots(self):
        """Returns the :class:`pants.source.source_root.SourceRoots` instance for the current run.

    :API: public
    """
        return self._source_roots

    @property
    def target_roots(self):
        """Returns the targets specified on the command line.

    This set is strictly a subset of all targets in play for the run as returned by self.targets().
    Note that for a command line invocation that uses wildcard selectors : or ::, the targets
    globbed by the wildcards are considered to be target roots.

    :API: public
    """
        return self._target_roots

    @property
    def console_outstream(self):
        """Returns the output stream to write console messages to.

    :API: public
    """
        return self._console_outstream

    @property
    def scm(self):
        """Returns the current workspace's scm, if any.

    :API: public
    """
        return self._scm

    @property
    def workspace(self):
        """Returns the current workspace, if any."""
        return self._workspace

    @property
    def invalidation_report(self):
        return self._invalidation_report

    def __str__(self):
        ident = Target.identify(self.targets())
        return 'Context(id:{}, targets:{})'.format(ident, self.targets())

    @contextmanager
    def executing(self):
        """A contextmanager that sets metrics in the context of a (v1) engine execution."""
        self._set_target_root_count_in_runtracker()
        yield
        metrics = self._scheduler.metrics()
        self.run_tracker.pantsd_stats.set_scheduler_metrics(metrics)
        engine_workunits = self._scheduler.engine_workunits(metrics)
        if engine_workunits:
            self.run_tracker.report.bulk_record_workunits(engine_workunits)
        self._set_affected_target_count_in_runtracker()

    def _set_target_root_count_in_runtracker(self):
        """Sets the target root count in the run tracker's daemon stats object."""
        # N.B. `self._target_roots` is always an expanded list of `Target` objects as
        # provided by `GoalRunner`.
        target_count = len(self._target_roots)
        self.run_tracker.pantsd_stats.set_target_root_size(target_count)
        return target_count

    def _set_affected_target_count_in_runtracker(self):
        """Sets the realized target count in the run tracker's daemon stats object."""
        target_count = len(self.build_graph)
        self.run_tracker.pantsd_stats.set_affected_targets_size(target_count)
        return target_count

    def submit_background_work_chain(self,
                                     work_chain,
                                     parent_workunit_name=None):
        """
    :API: public
    """
        background_root_workunit = self.run_tracker.get_background_root_workunit()
        if parent_workunit_name:
            # We have to keep this workunit alive until all its child work is done, so
            # we manipulate the context manually instead of using it as a contextmanager.
            # This is slightly funky, but the with-context usage is so pervasive and
            # useful elsewhere that it's worth the funkiness in this one place.
            workunit_parent_ctx = self.run_tracker.new_workunit_under_parent(
                name=parent_workunit_name,
                labels=[WorkUnitLabel.MULTITOOL],
                parent=background_root_workunit)
            workunit_parent = workunit_parent_ctx.__enter__()
            done_hook = lambda: workunit_parent_ctx.__exit__(None, None, None)
        else:
            workunit_parent = background_root_workunit  # Run directly under the root.
            done_hook = None
        self.run_tracker.background_worker_pool().submit_async_work_chain(
            work_chain, workunit_parent=workunit_parent, done_hook=done_hook)

    def background_worker_pool(self):
        """Returns the pool to which tasks can submit background work.

    :API: public
    """
        return self.run_tracker.background_worker_pool()

    def subproc_map(self, f, items):
        """Map function `f` over `items` in subprocesses and return the result.

      :API: public

      :param f: A multiproc-friendly (importable) work function.
      :param items: An iterable of pickleable arguments to f.
    """
        try:
            # Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
            # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
            # Instead, we map_async(...), wait *with a timeout* until ready, then .get()
            # NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
            res = SubprocPool.foreground().map_async(f, items)
            while not res.ready():
                res.wait(60)  # Repeatedly wait for up to a minute.
                if not res.ready():
                    self.log.debug('subproc_map result still not ready...')
            return res.get()
        except KeyboardInterrupt:
            SubprocPool.shutdown(True)
            raise

    @contextmanager
    def new_workunit(self, name, labels=None, cmd='', log_config=None):
        """Create a new workunit under the calling thread's current workunit.

    :API: public
    """
        with self.run_tracker.new_workunit(name=name,
                                           labels=labels,
                                           cmd=cmd,
                                           log_config=log_config) as workunit:
            yield workunit

    def acquire_lock(self):
        """ Acquire the global lock for the root directory associated with this context. When
    a goal requires serialization, it will call this to acquire the lock.

    :API: public
    """
        if self.options.for_global_scope().lock:
            if not self._lock.acquired:
                self._lock.acquire()

    def release_lock(self):
        """Release the global lock if it's held.
    Returns True if the lock was held before this call.

    :API: public
    """
        if not self._lock.acquired:
            return False
        else:
            self._lock.release()
            return True

    def is_unlocked(self):
        """Whether the global lock object is actively holding the lock.

    :API: public
    """
        return not self._lock.acquired

    def _replace_targets(self, target_roots):
        # Replaces all targets in the context with the given roots and their transitive dependencies.
        #
        # If another task has already retrieved the current targets, mutable state may have been
        # initialized somewhere, making it now unsafe to replace targets. Thus callers of this method
        # must know what they're doing!
        #
        # TODO(John Sirois): This currently has only 1 use (outside ContextTest) in pantsbuild/pants and
        # only 1 remaining known use case in the Foursquare codebase that will be able to go away with
        # the post RoundEngine engine - kill the method at that time.
        self._target_roots = list(target_roots)

    def add_new_target(self,
                       address,
                       target_type,
                       target_base=None,
                       dependencies=None,
                       derived_from=None,
                       **kwargs):
        """Creates a new target, adds it to the context and returns it.

    This method ensures the target resolves files against the given target_base, creating the
    directory if needed and registering a source root.

    :API: public
    """
        rel_target_base = target_base or address.spec_path
        abs_target_base = os.path.join(get_buildroot(), rel_target_base)
        if not os.path.exists(abs_target_base):
            os.makedirs(abs_target_base)
            # TODO: Adding source roots on the fly like this is yucky, but hopefully this
            # method will go away entirely under the new engine. It's primarily used for injecting
            # synthetic codegen targets, and that isn't how codegen will work in the future.
        if not self.source_roots.find_by_path(rel_target_base):
            # TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
            self.source_roots.add_source_root(rel_target_base)
        if dependencies:
            dependencies = [dep.address for dep in dependencies]

        self.build_graph.inject_synthetic_target(address=address,
                                                 target_type=target_type,
                                                 dependencies=dependencies,
                                                 derived_from=derived_from,
                                                 **kwargs)
        new_target = self.build_graph.get_target(address)

        return new_target

    def targets(self, predicate=None, **kwargs):
        """Selects targets in-play in this run from the target roots and their transitive dependencies.

    Also includes any new synthetic targets created from the target roots or their transitive
    dependencies during the course of the run.

    See Target.closure_for_targets for remaining parameters.

    :API: public

    :param predicate: If specified, the predicate will be used to narrow the scope of targets
                      returned.
    :param bool postorder: `True` to gather transitive dependencies with a postorder traversal;
                          `False` or preorder by default.
    :returns: A list of matching targets.
    """
        target_set = self._collect_targets(self.target_roots, **kwargs)

        synthetics = OrderedSet()
        for synthetic_address in self.build_graph.synthetic_addresses:
            if self.build_graph.get_concrete_derived_from(
                    synthetic_address) in target_set:
                synthetics.add(self.build_graph.get_target(synthetic_address))
        target_set.update(self._collect_targets(synthetics, **kwargs))

        return list(filter(predicate, target_set))

    def _collect_targets(self, root_targets, **kwargs):
        return Target.closure_for_targets(target_roots=root_targets, **kwargs)

    def dependents(self, on_predicate=None, from_predicate=None):
        """Returns  a map from targets that satisfy the from_predicate to targets they depend on that
      satisfy the on_predicate.

    :API: public
    """
        core = set(self.targets(on_predicate))
        dependees = defaultdict(set)
        for target in self.targets(from_predicate):
            for dependency in target.dependencies:
                if dependency in core:
                    dependees[target].add(dependency)
        return dependees

    def resolve(self, spec):
        """Returns an iterator over the target(s) the given address points to.

    :API: public
    """
        return self.build_graph.resolve(spec)

    def scan(self, root=None):
        """Scans and parses all BUILD files found under ``root``.

    Only BUILD files found under ``root`` are parsed as roots in the graph, but any dependencies of
    targets parsed in the root tree's BUILD files will be followed and this may lead to BUILD files
    outside of ``root`` being parsed and included in the returned build graph.

    :API: public

    :param string root: The path to scan; by default, the build root.
    :returns: A new build graph encapsulating the targets found.
    """
        build_graph = self.build_graph.clone_new()
        for address in self.address_mapper.scan_addresses(root):
            build_graph.inject_address_closure(address)
        return build_graph

    def execute_process_synchronously_without_raising(self,
                                                      execute_process_request,
                                                      name,
                                                      labels=None):
        """Executes a process (possibly remotely), and returns information about its output.

    :param execute_process_request: The ExecuteProcessRequest to run.
    :param name: A descriptive name representing the process being executed.
    :param labels: A tuple of WorkUnitLabels.
    :return: An ExecuteProcessResult with information about the execution.

    Note that this is an unstable, experimental API, which is subject to change with no notice.
    """
        with self.new_workunit(
                name=name,
                labels=labels,
                cmd=' '.join(execute_process_request.argv),
        ) as workunit:
            result = self._scheduler.product_request(
                FallibleExecuteProcessResult, [execute_process_request])[0]
            workunit.output("stdout").write(result.stdout)
            workunit.output("stderr").write(result.stderr)
            workunit.set_outcome(
                WorkUnit.FAILURE if result.exit_code else WorkUnit.SUCCESS)
            return result

    def execute_process_synchronously_or_raise(self,
                                               execute_process_request,
                                               name,
                                               labels=None):
        """Execute process synchronously, and throw if the return code is not 0.

    See execute_process_synchronously for the api docs.
    """
        fallible_result = self.execute_process_synchronously_without_raising(
            execute_process_request, name, labels)
        return fallible_to_exec_result_or_raise(fallible_result,
                                                execute_process_request)
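
Goals that need serialization go through the Context helpers above rather than touching the lock directly. A hedged sketch of a task using them (MyTask and do_serialized_work are hypothetical):

class MyTask(Task):  # hypothetical task subclass
    def execute(self):
        self.context.acquire_lock()  # No-op when locking is disabled or already held.
        try:
            do_serialized_work()  # hypothetical serialized work
        finally:
            self.context.release_lock()  # Returns True iff the lock was actually held.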
Example #8
class Context(object):
  """Contains the context for a single run of pants.

  Task implementations can access configuration data from pants.ini and any flags they have exposed
  here as well as information about the targets involved in the run.

  Advanced uses of the context include adding new targets to it for upstream or downstream goals to
  operate on and mapping of products a goal creates to the targets the products are associated with.

  :API: public
  """

  class Log(object):
    """A logger facade that logs into the pants reporting framework."""

    def __init__(self, run_tracker):
      self._run_tracker = run_tracker

    def debug(self, *msg_elements):
      self._run_tracker.log(Report.DEBUG, *msg_elements)

    def info(self, *msg_elements):
      self._run_tracker.log(Report.INFO, *msg_elements)

    def warn(self, *msg_elements):
      self._run_tracker.log(Report.WARN, *msg_elements)

    def error(self, *msg_elements):
      self._run_tracker.log(Report.ERROR, *msg_elements)

    def fatal(self, *msg_elements):
      self._run_tracker.log(Report.FATAL, *msg_elements)

  # TODO: Figure out a more structured way to construct and use context than this big flat
  # repository of attributes?
  def __init__(self, options, run_tracker, target_roots,
               requested_goals=None, target_base=None, build_graph=None,
               build_file_parser=None, address_mapper=None, console_outstream=None, scm=None,
               workspace=None, invalidation_report=None, scheduler=None):
    self._options = options
    self.build_graph = build_graph
    self.build_file_parser = build_file_parser
    self.address_mapper = address_mapper
    self.run_tracker = run_tracker
    self._log = self.Log(run_tracker)
    self._target_base = target_base or Target
    self._products = Products()
    self._buildroot = get_buildroot()
    self._source_roots = SourceRootConfig.global_instance().get_source_roots()
    self._lock = OwnerPrintingInterProcessFileLock(os.path.join(self._buildroot, '.pants.workdir.file_lock'))
    self._java_sysprops = None  # Computed lazily.
    self.requested_goals = requested_goals or []
    self._console_outstream = console_outstream or sys.stdout
    self._scm = scm or get_scm()
    self._workspace = workspace or (ScmWorkspace(self._scm) if self._scm else None)
    self._replace_targets(target_roots)
    self._invalidation_report = invalidation_report
    self._scheduler = scheduler

  @property
  def options(self):
    """Returns the new-style options.

    :API: public
    """
    return self._options

  @property
  def log(self):
    """Returns the preferred logger for goals to use.

    :API: public
    """
    return self._log

  @property
  def products(self):
    """Returns the Products manager for the current run.

    :API: public
    """
    return self._products

  @property
  def source_roots(self):
    """Returns the :class:`pants.source.source_root.SourceRoots` instance for the current run.

    :API: public
    """
    return self._source_roots

  @property
  def target_roots(self):
    """Returns the targets specified on the command line.

    This set is strictly a subset of all targets in play for the run as returned by self.targets().
    Note that for a command line invocation that uses wildcard selectors : or ::, the targets
    globbed by the wildcards are considered to be target roots.

    :API: public
    """
    return self._target_roots

  @property
  def console_outstream(self):
    """Returns the output stream to write console messages to.

    :API: public
    """
    return self._console_outstream

  @property
  def scm(self):
    """Returns the current workspace's scm, if any.

    :API: public
    """
    return self._scm

  @property
  def workspace(self):
    """Returns the current workspace, if any."""
    return self._workspace

  @property
  def invalidation_report(self):
    return self._invalidation_report

  def __str__(self):
    ident = Target.identify(self.targets())
    return 'Context(id:{}, targets:{})'.format(ident, self.targets())

  @contextmanager
  def executing(self):
    """A contextmanager that sets metrics in the context of a (v1) engine execution."""
    self._set_target_root_count_in_runtracker()
    yield
    self._set_affected_target_count_in_runtracker()
    self._set_affected_target_files_count_in_runtracker()
    self._set_resulting_graph_size_in_runtracker()

  def _set_target_root_count_in_runtracker(self):
    """Sets the target root count in the run tracker's daemon stats object."""
    # N.B. `self._target_roots` is always an expanded list of `Target` objects as
    # provided by `GoalRunner`.
    target_count = len(self._target_roots)
    self.run_tracker.pantsd_stats.set_target_root_size(target_count)
    return target_count

  def _set_affected_target_count_in_runtracker(self):
    """Sets the realized target count in the run tracker's daemon stats object."""
    target_count = len(self.build_graph)
    self.run_tracker.pantsd_stats.set_affected_targets_size(target_count)
    return target_count

  def _set_affected_target_files_count_in_runtracker(self):
    """Sets the realized target file count in the run tracker's daemon stats object."""
    # TODO: Move this file counting into the `ProductGraph`.
    target_file_count = self.build_graph.target_file_count()
    self.run_tracker.pantsd_stats.set_affected_targets_file_count(target_file_count)
    return target_file_count

  def _set_resulting_graph_size_in_runtracker(self):
    """Sets the resulting graph size in the run tracker's daemon stats object."""
    node_count = self._scheduler.graph_len()
    self.run_tracker.pantsd_stats.set_resulting_graph_size(node_count)
    return node_count

  def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
    """
    :API: public
    """
    background_root_workunit = self.run_tracker.get_background_root_workunit()
    if parent_workunit_name:
      # We have to keep this workunit alive until all its child work is done, so
      # we manipulate the context manually instead of using it as a contextmanager.
      # This is slightly funky, but the with-context usage is so pervasive and
      # useful elsewhere that it's worth the funkiness in this one place.
      workunit_parent_ctx = self.run_tracker.new_workunit_under_parent(
        name=parent_workunit_name, labels=[WorkUnitLabel.MULTITOOL], parent=background_root_workunit)
      workunit_parent = workunit_parent_ctx.__enter__()
      done_hook = lambda: workunit_parent_ctx.__exit__(None, None, None)
    else:
      workunit_parent = background_root_workunit  # Run directly under the root.
      done_hook = None
    self.run_tracker.background_worker_pool().submit_async_work_chain(
      work_chain, workunit_parent=workunit_parent, done_hook=done_hook)

  def background_worker_pool(self):
    """Returns the pool to which tasks can submit background work.

    :API: public
    """
    return self.run_tracker.background_worker_pool()

  def subproc_map(self, f, items):
    """Map function `f` over `items` in subprocesses and return the result.

      :API: public

      :param f: A multiproc-friendly (importable) work function.
      :param items: An iterable of pickleable arguments to f.
    """
    try:
      # Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
      # See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
      # Instead, we map_async(...), wait *with a timeout* until ready, then .get()
      # NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
      res = SubprocPool.foreground().map_async(f, items)
      while not res.ready():
        res.wait(60)  # Repeatedly wait for up to a minute.
        if not res.ready():
          self.log.debug('subproc_map result still not ready...')
      return res.get()
    except KeyboardInterrupt:
      SubprocPool.shutdown(True)
      raise

  @contextmanager
  def new_workunit(self, name, labels=None, cmd='', log_config=None):
    """Create a new workunit under the calling thread's current workunit.

    :API: public
    """
    with self.run_tracker.new_workunit(name=name, labels=labels, cmd=cmd, log_config=log_config) as workunit:
      yield workunit

  def acquire_lock(self):
    """ Acquire the global lock for the root directory associated with this context. When
    a goal requires serialization, it will call this to acquire the lock.

    :API: public
    """
    if self.options.for_global_scope().lock:
      if not self._lock.acquired:
        self._lock.acquire()

  def release_lock(self):
    """Release the global lock if it's held.
    Returns True if the lock was held before this call.

    :API: public
    """
    if not self._lock.acquired:
      return False
    else:
      self._lock.release()
      return True

  def is_unlocked(self):
    """Whether the global lock object is actively holding the lock.

    :API: public
    """
    return not self._lock.acquired

  def _replace_targets(self, target_roots):
    # Replaces all targets in the context with the given roots and their transitive dependencies.
    #
    # If another task has already retrieved the current targets, mutable state may have been
    # initialized somewhere, making it now unsafe to replace targets. Thus callers of this method
    # must know what they're doing!
    #
    # TODO(John Sirois): This currently has only 1 use (outside ContextTest) in pantsbuild/pants and
    # only 1 remaining known use case in the Foursquare codebase that will be able to go away with
    # the post RoundEngine engine - kill the method at that time.
    self._target_roots = list(target_roots)

  def add_new_target(self, address, target_type, target_base=None, dependencies=None,
                     derived_from=None, **kwargs):
    """Creates a new target, adds it to the context and returns it.

    This method ensures the target resolves files against the given target_base, creating the
    directory if needed and registering a source root.

    :API: public
    """
    rel_target_base = target_base or address.spec_path
    abs_target_base = os.path.join(get_buildroot(), rel_target_base)
    if not os.path.exists(abs_target_base):
      os.makedirs(abs_target_base)
      # TODO: Adding source roots on the fly like this is yucky, but hopefully this
      # method will go away entirely under the new engine. It's primarily used for injecting
      # synthetic codegen targets, and that isn't how codegen will work in the future.
    if not self.source_roots.find_by_path(rel_target_base):
      # TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
      self.source_roots.add_source_root(rel_target_base)
    if dependencies:
      dependencies = [dep.address for dep in dependencies]

    self.build_graph.inject_synthetic_target(address=address,
                                             target_type=target_type,
                                             dependencies=dependencies,
                                             derived_from=derived_from,
                                             **kwargs)
    new_target = self.build_graph.get_target(address)

    return new_target

  def targets(self, predicate=None, **kwargs):
    """Selects targets in-play in this run from the target roots and their transitive dependencies.

    Also includes any new synthetic targets created from the target roots or their transitive
    dependencies during the course of the run.

    See Target.closure_for_targets for remaining parameters.

    :API: public

    :param predicate: If specified, the predicate will be used to narrow the scope of targets
                      returned.
    :param bool postorder: `True` to gather transitive dependencies with a postorder traversal;
                          `False` or preorder by default.
    :returns: A list of matching targets.
    """
    target_set = self._collect_targets(self.target_roots, **kwargs)

    synthetics = OrderedSet()
    for synthetic_address in self.build_graph.synthetic_addresses:
      if self.build_graph.get_concrete_derived_from(synthetic_address) in target_set:
        synthetics.add(self.build_graph.get_target(synthetic_address))
    target_set.update(self._collect_targets(synthetics, **kwargs))

    return filter(predicate, target_set)

  def _collect_targets(self, root_targets, **kwargs):
    return Target.closure_for_targets(
      target_roots=root_targets,
      **kwargs
    )

  def dependents(self, on_predicate=None, from_predicate=None):
    """Returns  a map from targets that satisfy the from_predicate to targets they depend on that
      satisfy the on_predicate.

    :API: public
    """
    core = set(self.targets(on_predicate))
    dependees = defaultdict(set)
    for target in self.targets(from_predicate):
      for dependency in target.dependencies:
        if dependency in core:
          dependees[target].add(dependency)
    return dependees

  def resolve(self, spec):
    """Returns an iterator over the target(s) the given address points to.

    :API: public
    """
    return self.build_graph.resolve(spec)

  def scan(self, root=None):
    """Scans and parses all BUILD files found under ``root``.

    Only BUILD files found under ``root`` are parsed as roots in the graph, but any dependencies of
    targets parsed in the root tree's BUILD files will be followed and this may lead to BUILD files
    outside of ``root`` being parsed and included in the returned build graph.

    :API: public

    :param string root: The path to scan; by default, the build root.
    :returns: A new build graph encapsulating the targets found.
    """
    build_graph = self.build_graph.clone_new()
    for address in self.address_mapper.scan_addresses(root):
      build_graph.inject_address_closure(address)
    return build_graph
Example #9
    def _isolation(self, all_targets):
        run_dir = '_runs'
        output_dir = os.path.join(self.workdir, run_dir,
                                  Target.identify(all_targets))
        safe_mkdir(output_dir, clean=True)

        coverage = None
        options = self.get_options()
        if options.coverage or options.is_flagged('coverage_open'):
            coverage_processor = options.coverage_processor
            if coverage_processor == 'cobertura':
                settings = CoberturaTaskSettings.from_task(self,
                                                           workdir=output_dir)
                coverage = Cobertura(settings)
            else:
                raise TaskError('unknown coverage processor {0}'.format(
                    coverage_processor))

        self.context.release_lock()
        if coverage:
            coverage.instrument(
                targets=all_targets,
                compute_junit_classpath=lambda: self.classpath(all_targets),
                execute_java_for_targets=self.execute_java_for_coverage)

        def do_report(exc=None):
            if coverage:
                coverage.report(all_targets,
                                self.execute_java_for_coverage,
                                tests_failed_exception=exc)
            if self._html_report:
                self.context.log.debug('Generating JUnit HTML report...')
                html_file_path = JUnitHtmlReport().report(
                    output_dir, os.path.join(output_dir, 'reports'))
                self.context.log.debug(
                    'JUnit HTML report generated to {}'.format(html_file_path))
                if self._open:
                    desktop.ui_open(html_file_path)

        try:
            yield output_dir, do_report, coverage
        finally:
            # NB: Deposit of the "current" test output in the root workdir (.pants.d/test/junit) is a
            # defacto public API and so we implement that behavior here to maintain backwards
            # compatibility for non-pants report file consumers.
            # TODO(John Sirois): Deprecate this ~API and provide a stable directory solution for test
            # output: https://github.com/pantsbuild/pants/issues/3879
            lock_file = '.file_lock'
            with OwnerPrintingInterProcessFileLock(
                    os.path.join(self.workdir, lock_file)):
                # Kill everything except the isolated runs/ dir.
                for name in os.listdir(self.workdir):
                    path = os.path.join(self.workdir, name)
                    if name not in (run_dir, lock_file):
                        if os.path.isdir(path):
                            safe_rmtree(path)
                        else:
                            os.unlink(path)

                # Link all the isolated run/ dir contents back up to the stable workdir
                for name in os.listdir(output_dir):
                    path = os.path.join(output_dir, name)
                    os.symlink(path, os.path.join(self.workdir, name))
Example #10
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
  def setUp(self):
    self.lock_dir = tempfile.mkdtemp()
    self.lock_path = os.path.join(self.lock_dir, 'lock')
    self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
    self.manager = Manager()
    self.lock_held = self.manager.Event()
    self.terminate = self.manager.Event()
    self.lock_process = Process(
      target=hold_lock_until_terminate,
      args=(self.lock_path, self.lock_held, self.terminate),
    )

  def tearDown(self):
    self.terminate.set()
    try:
      shutil.rmtree(self.lock_dir)
    except OSError:
      pass

  def test_non_blocking_attempt(self):
    self.lock_process.start()
    self.lock_held.wait()
    self.assertFalse(self.lock.acquire(blocking=False))

  def test_message(self):
    self.lock_process.start()
    self.lock_held.wait()
    self.assertTrue(os.path.exists(self.lock.message_path))
    with open(self.lock.message_path, 'r') as f:
      message_content = f.read()
    self.assertIn(str(self.lock_process.pid), message_content)

    os.unlink(self.lock.message_path)

    def message_fn(message):
      self.assertIn(self.lock.missing_message_output, message)

    self.lock.acquire(blocking=False, message_fn=message_fn)

  def test_blocking(self):
    self.lock_process.start()
    self.lock_held.wait()
    self.assertFalse(self.lock.acquire(timeout=.1))

    acquire_is_blocking = self.manager.Event()

    def terminate_subproc(terminate, acquire_is_blocking):
      acquire_is_blocking.wait()
      terminate.set()

    Thread(target=terminate_subproc, args=(self.terminate, acquire_is_blocking)).start()

    def message_fn(message):
      self.assertIn(str(self.lock_process.pid), message)
      acquire_is_blocking.set()

    # NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
    # but sometimes CI containers are extremely slow, so we choose a very large
    # value just in case.
    self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))

  def test_reentrant(self):
    self.assertTrue(self.lock.acquire())
    self.assertTrue(self.lock.acquire())

  def test_release(self):
    self.assertTrue(self.lock.acquire())
    self.assertTrue(self.lock.acquired)
    self.lock.release()
    self.assertFalse(self.lock.acquired)
Example #11
 def _lock(self):
     """An identity-keyed inter-process lock around the cookie file."""
     lockfile = "{}.lock".format(self._get_cookie_file())
     safe_mkdir_for(lockfile)
     return OwnerPrintingInterProcessFileLock(lockfile)
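
The returned lock is presumably entered as a context manager around reads and writes of the cookie file, in the same way Examples #4 and #9 use it. A hedged sketch, where _update_cookie and write_cookie are hypothetical:

 def _update_cookie(self, write_cookie):
     with self._lock():
         # Hypothetical: mutate the cookie file while no other process can.
         write_cookie(self._get_cookie_file())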