Example #1
0
    def wait_for_pending_cleanups(self,
                                  initial_active_threads=None,
                                  initial_main_greenlets=None):
        """Sleep until background cleanup appears to have finished.

        Polls until the pending-cleanup queue is empty, the active thread
        count has dropped back to *initial_active_threads*, and (unless a
        greenlet leak is expected) the main-greenlet count has dropped back
        to *initial_main_greenlets*.  Gives up with a diagnostic printout
        after ``self.cleanup_max_sleep_seconds``.  Both baselines default
        to the values recorded in ``setUp``.
        """
        # Use ``is None`` rather than ``or`` so an explicit baseline of 0
        # is honoured instead of being silently replaced by the instance
        # attribute (falsy-value pitfall of the ``x = x or default`` idiom).
        if initial_active_threads is None:
            initial_active_threads = self.threads_before_test
        if initial_main_greenlets is None:
            initial_main_greenlets = self.main_greenlets_before_test
        sleep_time = self.cleanup_attempt_sleep_duration
        # NOTE: This is racy! A Python-level thread object may be dead
        # and gone, but the C thread may not yet have fired its
        # destructors and added to the queue. There's no particular
        # way to know that's about to happen. We try to watch the
        # Python threads to make sure they, at least, have gone away.
        # Counting the main greenlets, which we can easily do deterministically,
        # also helps.

        # Always sleep at least once to let other threads run
        sleep(sleep_time)
        quit_after = time() + self.cleanup_max_sleep_seconds
        # TODO: We could add an API that calls us back when a particular main greenlet is deleted?
        # It would have to drop the GIL
        while (get_pending_cleanup_count()
               or active_thread_count() > initial_active_threads
               or (not self.expect_greenlet_leak
                   and get_total_main_greenlets() > initial_main_greenlets)):
            sleep(sleep_time)
            if time() > quit_after:
                print("Time limit exceeded.")
                print("Threads: Waiting for only", initial_active_threads,
                      "-->", active_thread_count())
                print("MGlets : Waiting for only", initial_main_greenlets,
                      "-->", get_total_main_greenlets())
                break
        collect()
def manage_workers(flags, collection_filter, responses):
    """Monitor a response dict printing new entries and spawning new workers as
    needed.

    Waits for all worker threads to finish, prints any responses whose keys
    were not seen on the previous pass (sorted by their ``'time'`` field),
    then either respawns workers after ``flags.interval`` seconds (when
    ``flags.follow`` is set) or returns.
    """
    old_responses, login_sequence = {}, []
    while True:
        # Only the main thread left => every worker has finished.
        # (Original used ``is 1`` — int identity, not equality; must be ==.)
        if active_thread_count() == 1:
            if responses.keys() != old_responses.keys():
                logging.critical("New entries found.")
                old_sequence = login_sequence
                login_sequence = sorted(responses,
                                        key=lambda x: responses[x]['time'])
                print(u"\n".join([fmt_response(responses[k])
                                  for k in login_sequence
                                  if k not in old_sequence]))
            else:
                logging.info("No new entries.")

            if flags.follow:
                logging.info("Sleeping for %s seconds.", flags.interval)
                time.sleep(flags.interval)
                # Snapshot the dict rather than alias it: workers mutate
                # ``responses`` in place, and with ``old_responses = responses``
                # the keys comparison above could never detect new entries.
                old_responses = copy(responses)
                selectors = copy(flags.selectors)
                spawn_workers(flags.jobs, selectors,
                              collection_filter, responses)
            else:
                return
Example #3
0
def manage_workers(flags, collection_filter, responses):
    """Monitor a response dict printing new entries and spawning new workers as
    needed.

    Waits for all worker threads to finish, prints any responses whose keys
    were not seen on the previous pass (sorted by their ``'time'`` field),
    then either respawns workers after ``flags.interval`` seconds (when
    ``flags.follow`` is set) or returns.
    """
    old_responses, login_sequence = {}, []
    while True:
        # Only the main thread left => every worker has finished.
        # (Original used ``is 1`` — int identity, not equality; must be ==.)
        if active_thread_count() == 1:
            if responses.keys() != old_responses.keys():
                logging.critical("New entries found.")
                old_sequence = login_sequence
                login_sequence = sorted(responses,
                                        key=lambda x: responses[x]['time'])
                print(u"\n".join([
                    fmt_response(responses[k]) for k in login_sequence
                    if k not in old_sequence
                ]))
            else:
                logging.info("No new entries.")

            if flags.follow:
                logging.info("Sleeping for %s seconds.", flags.interval)
                time.sleep(flags.interval)
                # Snapshot the dict rather than alias it: workers mutate
                # ``responses`` in place, and with ``old_responses = responses``
                # the keys comparison above could never detect new entries.
                old_responses = copy(responses)
                selectors = copy(flags.selectors)
                spawn_workers(flags.jobs, selectors, collection_filter,
                              responses)
            else:
                return
def spawn_workers(jobs, selectors, collection_filter, responses):
    """Spawn a pool of workers using taking selectors, collection_filter and
    responses dicts and constrained to a maximum number of jobs.

    Pops one selector per thread; each worker runs ``query_worker`` and
    writes into the shared ``responses`` dict.  At most ``jobs`` threads
    (including the main thread) run at once.
    """
    logging.info("Spawning %s threads", min(jobs, len(selectors)))
    # Truthiness instead of ``len(selectors) is not 0``, which only worked
    # by accident via CPython's small-int identity caching.
    while selectors:
        # NOTE(review): busy-spins without sleeping while the pool is full —
        # preserved as-is, but worth confirming this is intentional.
        if active_thread_count() < jobs:
            selector = selectors.pop()
            worker = Thread(target=query_worker, name=selector,
                            args=(collection_filter, selector, responses))
            worker.start()
Example #5
0
 def setUp(self):
     """Record per-test baselines (thread, main-greenlet, and greenlet
     counts) after waiting out any cleanup left over from earlier tests,
     so tearDown-style leak checks have stable reference values."""
     # Ensure the main greenlet exists, otherwise the first test
     # gets a false positive leak
     super(TestCase, self).setUp()
     getcurrent()
     # Baselines must be captured *before* wait_for_pending_cleanups,
     # which uses them as its targets.
     self.threads_before_test = active_thread_count()
     self.main_greenlets_before_test = get_total_main_greenlets()
     self.wait_for_pending_cleanups(self.threads_before_test,
                                    self.main_greenlets_before_test)
     # Greenlet count is sampled last, after cleanup has settled.
     self.greenlets_before_test = self.count_greenlets()
Example #6
0
def spawn_workers(jobs, selectors, collection_filter, responses):
    """Spawn a pool of workers using taking selectors, collection_filter and
    responses dicts and constrained to a maximum number of jobs.

    Pops one selector per thread; each worker runs ``query_worker`` and
    writes into the shared ``responses`` dict.  At most ``jobs`` threads
    (including the main thread) run at once.
    """
    logging.info("Spawning %s threads", min(jobs, len(selectors)))
    # Truthiness instead of ``len(selectors) is not 0``, which only worked
    # by accident via CPython's small-int identity caching.
    while selectors:
        # NOTE(review): busy-spins without sleeping while the pool is full —
        # preserved as-is, but worth confirming this is intentional.
        if active_thread_count() < jobs:
            selector = selectors.pop()
            worker = Thread(target=query_worker,
                            name=selector,
                            args=(collection_filter, selector, responses))
            worker.start()
def get_kafka_consumer_group_desc(kcg_cmd, groups):
    """Fan out one ``kcg_cmd.describe_t`` dispatch thread per consumer group
    and collect the results.

    :param kcg_cmd: command object whose ``describe_t(group)`` returns a
        joinable thread-like handle producing the group description.
    :param groups: iterable of consumer-group names.
    :return: dict mapping group name -> description, omitting groups whose
        dispatch thread joined empty (timed out or returned ``None``).
    """
    kcg_threads = {}
    for group in groups:
        kcg_threads[group] = kcg_cmd.describe_t(group)
    log.debug("kcg java processes initiated - %d dispatch threads:\n%s" %
              (active_thread_count(), pformat(kcg_threads, indent=4)))

    kcg_desc = {}
    thread_timeout = 120  # seconds allowed for each dispatch thread to join
    for group in groups:
        g_desc = kcg_threads[group].join(thread_timeout)
        if g_desc is not None:
            kcg_desc[group] = g_desc
        else:
            log.warning("kcg request '%s' joined empty" % group)

    # Sample the count once so the check and the warning report agree even
    # if threads exit between the two (original called it twice).
    remaining = active_thread_count()
    if remaining != 1:  # only the main thread => all dispatch threads done
        log.warning("%d threads unreturned after timeout (%.2fs)" %
                    (remaining, thread_timeout))
        log.debug("Active threads:\n%s" %
                  pformat(thread_enumerate(), indent=4))

    return kcg_desc