Example #1
    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently."""
        self.completed = False
        with utils.tempdir() as tmpdir:

            def locka(wait):
                a = utils.InterProcessLock(os.path.join(tmpdir, 'a'))
                with a:
                    wait.wait()
                self.completed = True

            def lockb(wait):
                b = utils.InterProcessLock(os.path.join(tmpdir, 'b'))
                with b:
                    wait.wait()

            wait1 = eventlet.event.Event()
            wait2 = eventlet.event.Event()
            pool = greenpool.GreenPool()
            pool.spawn_n(locka, wait1)
            pool.spawn_n(lockb, wait2)
            wait2.send()
            eventlet.sleep(0)
            wait1.send()
            pool.waitall()
        self.assertTrue(self.completed)
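A minimal standalone sketch of the same handshake, assuming only that eventlet is installed (names here are illustrative, not from the test suite above):

import eventlet
from eventlet import event, greenpool

order = []

def worker(name, gate):
    order.append(name + ':enter')
    gate.wait()        # cooperatively yields until someone calls send()
    order.append(name + ':exit')

gate_a, gate_b = event.Event(), event.Event()
pool = greenpool.GreenPool()
pool.spawn_n(worker, 'a', gate_a)
pool.spawn_n(worker, 'b', gate_b)
gate_b.send()          # wake 'b' first
eventlet.sleep(0)      # let both workers run up to their wait points
gate_a.send()          # then wake 'a'
pool.waitall()
print(order)           # 'b' exits while 'a' is still inside: they overlapped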
Example #2
 def __init__(self, connection=None, topic='broadcast', proxy=None):
     LOG.debug(_('Initializing the Adapter Consumer for %s') % topic)
     self.proxy = proxy
     self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
     super(AdapterConsumer, self).__init__(connection=connection,
                                           topic=topic)
     self.register_callback(self.process_data)
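The sized pool here is what bounds RPC concurrency: once rpc_thread_pool_size handlers are in flight, spawn_n blocks cooperatively instead of spawning more. A hedged sketch of that idea (illustrative names, not the nova API):

from eventlet import greenpool

class BoundedConsumer(object):
    def __init__(self, handler, pool_size=64):
        self.handler = handler
        self.pool = greenpool.GreenPool(pool_size)

    def dispatch(self, message):
        # blocks once pool_size handlers are already running
        self.pool.spawn_n(self.handler, message)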
Example #3
    def test_spawn_n_2(self):
        p = greenpool.GreenPool(2)
        self.assertEqual(p.free(), 2)
        r = []

        def foo(a):
            r.append(a)

        gt = p.spawn(foo, 1)
        self.assertEqual(p.free(), 1)
        gt.wait()
        self.assertEqual(r, [1])
        eventlet.sleep(0)
        self.assertEqual(p.free(), 2)

        # Once the pool is exhausted, spawning forces a yield.
        p.spawn_n(foo, 2)
        self.assertEqual(1, p.free())
        self.assertEqual(r, [1])

        p.spawn_n(foo, 3)
        self.assertEqual(0, p.free())
        self.assertEqual(r, [1])

        p.spawn_n(foo, 4)
        self.assertEqual(set(r), set([1, 2, 3]))
        eventlet.sleep(0)
        self.assertEqual(set(r), set([1, 2, 3, 4]))
Example #4
    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently."""
        tmpdir = tempfile.mkdtemp()
        try:
            self.completed = False

            def locka(wait):
                a = lockutils.InterProcessLock(os.path.join(tmpdir, 'a'))
                with a:
                    wait.wait()
                self.completed = True

            def lockb(wait):
                b = lockutils.InterProcessLock(os.path.join(tmpdir, 'b'))
                with b:
                    wait.wait()

            wait1 = eventlet.event.Event()
            wait2 = eventlet.event.Event()
            pool = greenpool.GreenPool()
            pool.spawn_n(locka, wait1)
            pool.spawn_n(lockb, wait2)
            wait2.send()
            eventlet.sleep(0)
            wait1.send()
            pool.waitall()

            self.assertTrue(self.completed)

        finally:
            if os.path.exists(tmpdir):
                shutil.rmtree(tmpdir)
Example #5
    def test_lock_internally(self):
        """We can lock across multiple green threads."""
        saved_sem_num = len(lockutils._semaphores)
        seen_threads = list()

        def f(_id):
            with lockutils.lock('testlock2', 'test-', external=False):
                for x in range(10):
                    seen_threads.append(_id)
                    greenthread.sleep(0)

        threads = []
        pool = greenpool.GreenPool(10)
        for i in range(10):
            threads.append(pool.spawn(f, i))

        for thread in threads:
            thread.wait()

        self.assertEqual(len(seen_threads), 100)
        # Looking at the seen threads, split it into chunks of 10, and verify
        # that the last 9 match the first in each chunk.
        for i in range(10):
            for j in range(9):
                self.assertEqual(seen_threads[i * 10],
                                 seen_threads[i * 10 + 1 + j])

        self.assertEqual(saved_sem_num, len(lockutils._semaphores),
                         "Semaphore leak detected")
Example #6
    def init_host(self):
        self.dbapi = dbapi.get_instance()

        self.driver_factory = driver_factory.DriverFactory()
        self.drivers = self.driver_factory.names
        """List of driver names which this conductor supports."""

        try:
            self.dbapi.register_conductor({'hostname': self.host,
                                           'drivers': self.drivers})
        except exception.ConductorAlreadyRegistered:
            LOG.warn(_("A conductor with hostname %(hostname)s "
                       "was previously registered. Updating registration")
                       % {'hostname': self.host})
            self.dbapi.unregister_conductor(self.host)
            self.dbapi.register_conductor({'hostname': self.host,
                                           'drivers': self.drivers})

        self.ring_manager = hash.HashRingManager()
        """Consistent hash ring which maps drivers to conductors."""

        self._worker_pool = greenpool.GreenPool(
                                size=CONF.conductor.workers_pool_size)
        """GreenPool of background workers for performing tasks async."""

        # Spawn a dedicated greenthread for the keepalive
        try:
            self._keepalive_evt = threading.Event()
            self._spawn_worker(self._conductor_service_record_keepalive)
        except exception.NoFreeConductorWorker:
            with excutils.save_and_reraise_exception():
                LOG.critical(_('Failed to start keepalive'))
                self.del_host()
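_spawn_worker above can raise NoFreeConductorWorker; a plausible sketch of such a helper is a spawn that refuses work when the bounded pool is full (illustrative names, not the ironic source):

class NoFreeWorker(Exception):
    pass

def spawn_worker(pool, func, *args, **kwargs):
    # fail fast instead of blocking when all workers are busy
    if pool.free() > 0:
        return pool.spawn(func, *args, **kwargs)
    raise NoFreeWorker()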
Example #7
    def start(self):
        super(ConductorManager, self).start()
        self.dbapi = dbapi.get_instance()

        # create a DriverFactory instance, which initializes the stevedore
        # extension manager, when the service starts.
        # TODO(deva): Enable re-loading of the DriverFactory to load new
        #             extensions without restarting the whole service.
        df = driver_factory.DriverFactory()
        self.drivers = df.names
        """List of driver names which this conductor supports."""

        try:
            self.dbapi.register_conductor({
                'hostname': self.host,
                'drivers': self.drivers
            })
        except exception.ConductorAlreadyRegistered:
            LOG.warn(
                _("A conductor with hostname %(hostname)s "
                  "was previously registered. Updating registration") %
                {'hostname': self.host})
            self.dbapi.unregister_conductor(self.host)
            self.dbapi.register_conductor({
                'hostname': self.host,
                'drivers': self.drivers
            })

        self.driver_rings = self._get_current_driver_rings()
        """Consistent hash ring which maps drivers to conductors."""

        self._worker_pool = greenpool.GreenPool(size=CONF.rpc_thread_pool_size)
        """GreenPool of background workers for performing tasks async."""
Example #8
    def test_waiting(self):
        pool = greenpool.GreenPool(1)
        done = event.Event()

        def consume():
            done.wait()

        def waiter(pool):
            gt = pool.spawn(consume)
            gt.wait()

        waiters = []
        self.assertEqual(pool.running(), 0)
        waiters.append(eventlet.spawn(waiter, pool))
        eventlet.sleep(0)
        self.assertEqual(pool.waiting(), 0)
        waiters.append(eventlet.spawn(waiter, pool))
        eventlet.sleep(0)
        self.assertEqual(pool.waiting(), 1)
        waiters.append(eventlet.spawn(waiter, pool))
        eventlet.sleep(0)
        self.assertEqual(pool.waiting(), 2)
        self.assertEqual(pool.running(), 1)
        done.send(None)
        for w in waiters:
            w.wait()
        self.assertEqual(pool.waiting(), 0)
        self.assertEqual(pool.running(), 0)
Example #9
 def __init__(self, thread_pool_size=1000):
     self.tg = greenpool.GreenPool(size=thread_pool_size)
     self.exc = None
     self.exc_stacktrace = None
     self.failed_thread = None
     self.threads = 0
     self.cv = threading.Condition()
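A hedged sketch of how a wrapper like this might use the Condition to track outstanding greenthreads: increment on spawn, decrement in a link callback, and let wait() block until the count drains (illustrative; assumes eventlet monkey patching makes threading.Condition green):

import threading
from eventlet import greenpool

class TrackedPool(object):
    def __init__(self, size=1000):
        self.tg = greenpool.GreenPool(size=size)
        self.threads = 0
        self.cv = threading.Condition()

    def _on_done(self, gt):
        with self.cv:
            self.threads -= 1
            self.cv.notify_all()

    def spawn(self, func, *args):
        with self.cv:
            self.threads += 1
        gt = self.tg.spawn(func, *args)
        gt.link(self._on_done)   # called with the greenthread when done
        return gt

    def wait(self):
        with self.cv:
            while self.threads:
                self.cv.wait()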
Example #10
    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently.

        This succeeds with spawn but fails with spawn_n because lockfile
        gets the same thread id for both spawn_n threads. Our workaround
        of using the GreenLockFile will work even if the issue is fixed.
        """
        self.completed = False
        with utils.tempdir() as tmpdir:

            def locka(wait):
                a = utils.GreenLockFile(os.path.join(tmpdir, 'a'))
                a.acquire()
                wait.wait()
                a.release()
                self.completed = True

            def lockb(wait):
                b = utils.GreenLockFile(os.path.join(tmpdir, 'b'))
                b.acquire()
                wait.wait()
                b.release()

            wait1 = eventlet.event.Event()
            wait2 = eventlet.event.Event()
            pool = greenpool.GreenPool()
            pool.spawn_n(locka, wait1)
            pool.spawn_n(lockb, wait2)
            wait2.send()
            eventlet.sleep(0)
            wait1.send()
            pool.waitall()
        self.assertTrue(self.completed)
Example #11
    def test_resize(self):
        pool = greenpool.GreenPool(2)
        evt = event.Event()

        def wait_long_time(e):
            e.wait()

        pool.spawn(wait_long_time, evt)
        pool.spawn(wait_long_time, evt)
        self.assertEqual(pool.free(), 0)
        self.assertEqual(pool.running(), 2)
        self.assert_pool_has_free(pool, 0)

        # verify that the pool discards excess items put into it
        pool.resize(1)

        # cause the wait_long_time functions to return, which will
        # trigger puts to the pool
        evt.send(None)
        eventlet.sleep(0)
        eventlet.sleep(0)

        self.assertEqual(pool.free(), 1)
        self.assertEqual(pool.running(), 0)
        self.assert_pool_has_free(pool, 1)

        # resize larger and assert that there are more free items
        pool.resize(2)
        self.assertEqual(pool.free(), 2)
        self.assertEqual(pool.running(), 0)
        self.assert_pool_has_free(pool, 2)
Example #12
    def init_host(self):
        self.dbapi = dbapi.get_instance()

        self._keepalive_evt = threading.Event()
        """Event for the keepalive thread."""

        self._worker_pool = greenpool.GreenPool(
            size=CONF.conductor.workers_pool_size)
        """GreenPool of background workers for performing tasks async."""

        try:
            # Register this conductor with the cluster
            cdr = self.dbapi.register_conductor({'hostname': self.host})
        except exception.ConductorAlreadyRegistered:
            LOG.warn(
                _LW("A conductor with hostname %(hostname)s "
                    "was previously registered. Updating registration"),
                {'hostname': self.host})

            cdr = self.dbapi.register_conductor({'hostname': self.host},
                                                update_existing=True)
        self.conductor = cdr

        # Spawn a dedicated greenthread for the keepalive
        try:
            self._spawn_worker(self._conductor_service_record_keepalive)
            LOG.info(
                _LI('Successfully started conductor with hostname '
                    '%(hostname)s.'), {'hostname': self.host})
        except exception.NoFreeConductorWorker:
            with excutils.save_and_reraise_exception():
                LOG.critical(_LC('Failed to start keepalive'))
                self.del_host()
Example #13
 def __init__(self, max_workers=1000):
     assert int(max_workers) > 0, 'Max workers must be greater than zero'
     self._max_workers = int(max_workers)
     self._pool = greenpool.GreenPool(self._max_workers)
     self._work_queue = queue.LightQueue()
     self._shutdown_lock = threading.RLock()
     self._shutdown = False
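A hedged sketch of what submit() on an executor like this might look like: take the shutdown lock, refuse new work after shutdown, and spawn from the bounded pool (a sketch of the pattern, not the real API):

def submit(self, fn, *args, **kwargs):
    with self._shutdown_lock:
        if self._shutdown:
            raise RuntimeError('cannot submit after shutdown')
        return self._pool.spawn(fn, *args, **kwargs)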
Example #14
    def imap_memory_check(self, concurrency):
        # checks that imap is strictly
        # ordered and consumes a constant amount of memory
        p = greenpool.GreenPool(concurrency)
        count = 1000
        it = p.imap(passthru, six.moves.range(count))
        latest = -1
        while True:
            try:
                i = six.next(it)
            except StopIteration:
                break

            if latest == -1:
                gc.collect()
                initial_obj_count = len(gc.get_objects())
            assert i > latest
            latest = i
            if latest % 5 == 0:
                eventlet.sleep(0.001)
            if latest % 10 == 0:
                gc.collect()
                objs_created = len(gc.get_objects()) - initial_obj_count
                assert objs_created < 25 * concurrency, objs_created
        # make sure we got to the end
        self.assertEqual(latest, count - 1)
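The properties this test checks are the whole point of imap: results come back lazily, in input order, with at most `concurrency` items in flight. A minimal usage sketch:

from eventlet import greenpool

pool = greenpool.GreenPool(4)
for result in pool.imap(lambda x: x * x, range(10)):
    print(result)   # 0, 1, 4, 9, ... strictly in input order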
Example #15
 def test_spawn(self):
     p = greenpool.GreenPool(4)
     waiters = []
     for i in range(10):
         waiters.append(p.spawn(passthru, i))
     results = [waiter.wait() for waiter in waiters]
     self.assertEqual(results, list(range(10)))
Example #16
 def execute(self, context, murano_class):
     if not self.code_block:
         return
     limit = helpers.evaluate(self._limit, context)
     gpool = greenpool.GreenPool(limit)
     for expr in self.code_block:
         gpool.spawn_n(expr.execute, context, murano_class)
     gpool.waitall()
Example #17
 def test_gt_cancel_doesnt_run_thread(self):
     pool = greenpool.GreenPool()
     func = mock.Mock()
     thread = pool.spawn(func)
     thread.link(lambda t: None)
     thread.cancel()
     pool.waitall()
     self.assertFalse(func.called)
Example #18
    def test_gt_link_callback_added_after_execution(self):
        pool = greenpool.GreenPool()
        link_callback = mock.Mock()

        thread = pool.spawn(lambda: None)
        pool.waitall()
        thread.link(link_callback)
        link_callback.assert_called_once_with(thread)
Example #19
 def __init__(self, max_workers=1000):
     assert EVENTLET_AVAILABLE, 'eventlet is needed to use GreenExecutor'
     assert int(max_workers) > 0, 'Max workers must be greater than zero'
     self._max_workers = int(max_workers)
     self._pool = greenpool.GreenPool(self._max_workers)
     self._work_queue = queue.LightQueue()
     self._shutdown_lock = threading.RLock()
     self._shutdown = False
Example #20
def gthread():
    pool = greenpool.GreenPool(2)
    pool.spawn_n(func)
    pool.spawn_n(func)
    pool.spawn_n(func)

    while True:
        greenthread.sleep(10)
Example #21
def download_images(images):
    failed_images = []
    threads = min(MAX_DOWNLOAD_THREAD, len(images))

    pool = greenpool.GreenPool(size=threads)
    for img, success in pool.imap(download_an_image, images):
        if not success:
            failed_images.append(img)
    return failed_images
Example #22
    def __init__(self, thread_pool_size=10):
        """Create a ThreadGroup with a pool of greenthreads.

        :param thread_pool_size: the maximum number of threads allowed to run
                                 concurrently.
        """
        self.pool = greenpool.GreenPool(thread_pool_size)
        self.threads = []
        self.timers = []
Example #23
def map_function(images, function):
    failed_images = []
    threads = min(MAX_DOWNLOAD_THREAD, len(images))

    pool = greenpool.GreenPool(size=threads)
    for image, success in pool.imap(function, images):
        if not success:
            failed_images.append(image)
    return failed_images
Example #24
    def test_contention(self):
        from tests import saranwrap_test
        prox = saranwrap.wrap(saranwrap_test)

        pool = greenpool.GreenPool(4)
        pool.spawn_n(lambda: self.assertEqual(prox.one, 1))
        pool.spawn_n(lambda: self.assertEqual(prox.two, 2))
        pool.spawn_n(lambda: self.assertEqual(prox.three, 3))
        pool.waitall()
Example #25
 def __init__(self, max_workers=1000):
     assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green executor'
     if max_workers <= 0:
         raise ValueError("Max workers must be greater than zero")
     self._max_workers = max_workers
     self._pool = greenpool.GreenPool(self._max_workers)
     self._delayed_work = greenqueue.Queue()
     self._shutdown_lock = greenthreading.Lock()
     self._shutdown = False
Example #26
    def _init_endpoints(self, providers, min_conns_per_pool,
                        max_conns_per_pool):
        LOG.debug("Initializing API endpoints")

        def _create_conn(p):
            def _conn():
                # called when a pool needs to create a new connection
                try:
                    return self._http_provider.new_connection(self, p)
                except Exception as e:
                    if self._http_provider.is_conn_open_exception(e):
                        LOG.warning(
                            "Timeout while trying to open a "
                            "connection with %s", p)

            return _conn

        self._endpoints = {}
        for provider in providers:
            pool = pools.Pool(min_size=min_conns_per_pool,
                              max_size=max_conns_per_pool,
                              order_as_stack=True,
                              create=_create_conn(provider))

            endpoint = Endpoint(provider, pool)
            self._endpoints[provider.id] = endpoint

        # service requests using round robin
        self._endpoint_schedule = itertools.cycle(self._endpoints.values())

        # duck type to proxy http invocations
        for method in ClusteredAPI._HTTP_VERBS:
            setattr(self, method, self._proxy_stub(method))

        conns = greenpool.GreenPool()
        for endpoint in self._endpoints.values():
            conns.spawn(self._validate, endpoint)
        eventlet.sleep(0)
        while conns.running():
            if (self.health == ClusterHealth.GREEN
                    or self.health == ClusterHealth.ORANGE):
                # only wait for 1 or more endpoints to reduce init time
                break
            eventlet.sleep(0.5)

        for endpoint in self._endpoints.values():
            # dynamic loop for each endpoint to ensure connectivity
            loop = loopingcall.DynamicLoopingCall(self._endpoint_keepalive,
                                                  endpoint)
            loop.start(initial_delay=self._keepalive_interval,
                       periodic_interval_max=self._keepalive_interval,
                       stop_on_exception=False)

        LOG.debug(
            "Done initializing API endpoint(s). "
            "API cluster health: %s", self.health)
Example #27
 def test_spawn_n(self):
     p = greenpool.GreenPool(4)
     results_closure = []

     def do_something(a):
         eventlet.sleep(0.01)
         results_closure.append(a)

     for i in range(10):
         p.spawn(do_something, i)
     p.waitall()
     self.assertEqual(results_closure, list(range(10)))
Example #28
        def _do_evacuate(context,
                         host_name,
                         instance_list,
                         reserved_host=None):
            failed_evacuation_instances = []
            if reserved_host:
                if CONF.host_failure.add_reserved_host_to_aggregate:
                    # Assign reserved_host to an aggregate to which the failed
                    # compute host belongs to.
                    aggregates = self.novaclient.get_aggregate_list(context)
                    for aggregate in aggregates:
                        if host_name in aggregate.hosts:
                            try:
                                self.novaclient.add_host_to_aggregate(
                                    context, reserved_host.name, aggregate)
                            except exception.Conflict:
                                msg = ("Host '%(reserved_host)s' already has "
                                       "been added to aggregate "
                                       "'%(aggregate)s'.")
                                LOG.info(
                                    msg, {
                                        'reserved_host': reserved_host.name,
                                        'aggregate': aggregate.name
                                    })

                            # A failed compute host can be associated with
                            # multiple aggregates but operators will not
                            # associate it with multiple aggregates in real
                            # deployment so adding reserved_host to the very
                            # first aggregate from the list.
                            break

                self.novaclient.enable_disable_service(context,
                                                       reserved_host.name,
                                                       enable=True)

                # Set reserved property of reserved_host to False
                reserved_host.reserved = False
                reserved_host.save()

            thread_pool = greenpool.GreenPool(
                CONF.host_failure_recovery_threads)
            for instance in instance_list:
                thread_pool.spawn_n(self._evacuate_and_confirm, context,
                                    instance, host_name,
                                    failed_evacuation_instances, reserved_host)
            thread_pool.waitall()

            if failed_evacuation_instances:
                msg = _("Failed to evacuate instances %(instances)s from "
                        "host %(host_name)s.") % {
                            'instances': failed_evacuation_instances,
                            'host_name': host_name
                        }
                raise exception.HostRecoveryFailureException(message=msg)
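The evacuation fan-out above is an instance of a common GreenPool pattern: bounded spawn_n workers that record failures into a shared list, then waitall(). Distilled into a sketch (assumes eventlet only):

from eventlet import greenpool

def run_all(items, work, concurrency=10):
    failed = []

    def _one(item):
        try:
            work(item)
        except Exception:
            # greenthreads only switch at yield points, so this
            # append needs no extra locking
            failed.append(item)

    pool = greenpool.GreenPool(concurrency)
    for item in items:
        pool.spawn_n(_one, item)
    pool.waitall()
    return failed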
Example #29
def start_collective():
    global follower
    global retrythread
    global initting
    initting = True
    retrythread = None
    try:
        cfm.membership_callback = schedule_rebalance
        if follower is not None:
            initting = False
            return
        try:
            if cfm.cfgstreams:
                cfm.check_quorum()
                # Do not start if we have quorum and are leader
                return
        except exc.DegradedCollective:
            pass
        if leader_init.active:  # do not start trying to connect if we are
            # xmitting data to a follower
            return
        myname = get_myname()
        connecto = []
        for member in sorted(list(cfm.list_collective())):
            if member == myname:
                continue
            if cfm.cfgleader is None:
                cfm.stop_following(True)
            ldrcandidate = cfm.get_collective_member(member)['address']
            connecto.append(ldrcandidate)
        conpool = greenpool.GreenPool(64)
        connections = conpool.imap(create_connection, connecto)
        for ent in connections:
            member, remote = ent
            if isinstance(remote, Exception):
                continue
            if follower is None:
                log.log({
                    'info':
                    'Performing startup attempt to {0}'.format(member),
                    'subsystem':
                    'collective'
                })
                if not connect_to_leader(
                        name=myname, leader=member, remote=remote):
                    remote.close()
            else:
                remote.close()
    except Exception:
        # Swallow any failure from a connection attempt; the finally
        # block below schedules a retry, so an error here must not
        # kill start_collective.
        pass
    finally:
        if retrythread is None and follower is None:
            retrythread = eventlet.spawn_after(5 + random.random(),
                                               start_collective)
        initting = False
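The imap loop above treats an Exception instance as a failed connection. That convention implies create_connection catches its own errors and returns them as values, so one bad peer cannot abort the whole sweep. A hedged sketch (open_socket is a hypothetical connect helper, not the confluent one):

def create_connection(address):
    try:
        remote = open_socket(address)   # hypothetical connect helper
    except Exception as exc:
        return address, exc             # report the failure as a value
    return address, remote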
Example #30
def child_zone_helper(zone_list, func):
    """Fire off a command to each zone in the list.
    The return is [novaclient return objects] from each child zone.
    For example, if you are calling server.pause(), the list will
    be whatever the response from server.pause() is. One entry
    per child zone called."""
    green_pool = greenpool.GreenPool()
    return [
        result
        for result in green_pool.imap(_wrap_method(_process, func), zone_list)
    ]
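_wrap_method presumably curries the per-zone callable so imap can feed it one zone at a time; plain functools gives the same effect (illustrative, not the nova helper):

import functools

def _wrap_method(function, *args):
    return functools.partial(function, *args)

# green_pool.imap(_wrap_method(_process, func), zone_list) then calls
# _process(func, zone) for each zone concurrently, yielding in order.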