def test_resize(self):
    # Shrinking a pool below its current occupancy must discard the
    # excess slots when they are released; growing it must add free slots.
    pool = GreenPool(2)
    evt = Event()

    def wait_long_time(e):
        e.wait()

    # occupy both slots with greenthreads blocked on evt
    pool.spawn(wait_long_time, evt)
    pool.spawn(wait_long_time, evt)
    self.assertEquals(pool.free(), 0)
    self.assert_pool_has_free(pool, 0)

    # verify that the pool discards excess items put into it
    pool.resize(1)

    # cause the wait_long_time functions to return, which will
    # trigger puts to the pool
    evt.send(None)
    # two yields: one per blocked greenthread, so both finish and
    # release their slots before we assert
    sleep(0)
    sleep(0)
    # only 1 free despite two releases: the resize dropped the other slot
    self.assertEquals(pool.free(), 1)
    self.assert_pool_has_free(pool, 1)

    # resize larger and assert that there are more free items
    pool.resize(2)
    self.assertEquals(pool.free(), 2)
    self.assert_pool_has_free(pool, 2)
def test_execute_async(self):
    # A task handed to the pool must actually run; we block on an Event
    # that the task fires when it executes.
    finished = Event()

    def task():
        finished.send()

    pool = GreenPool(2)
    pool.spawn(task)
    finished.wait()
def subtest(intpool_size, pool_size, num_executes):
    # Exercise an IntPool from many concurrent greenthreads: each worker
    # checks a token out, yields briefly, then returns it.
    def worker(tokens):
        t = tokens.get()
        sleep(0.0001)
        tokens.put(t)
        return t

    tokens = IntPool(intpool_size)
    runners = GreenPool(pool_size)
    for _ in xrange(num_executes):
        runners.spawn(worker, tokens)
    runners.waitall()
def test_spawn_n(self):
    # All ten tasks must run to completion even though the pool only has
    # four slots; waitall() blocks until every one has recorded its value.
    pool = GreenPool(4)
    collected = []

    def record(value):
        sleep(0.01)
        collected.append(value)

    for n in xrange(10):
        pool.spawn(record, n)
    pool.waitall()
    self.assertEquals(sorted(collected), range(10))
def test_spawn_n_2(self):
    # Verifies the free-slot accounting of spawn()/spawn_n(), including
    # the implicit yield that happens when the pool is exhausted.
    p = GreenPool(2)
    self.assertEqual(p.free(), 2)
    r = []

    def foo(a):
        r.append(a)

    gt = p.spawn(foo, 1)
    self.assertEqual(p.free(), 1)
    gt.wait()
    self.assertEqual(r, [1])
    sleep(0)
    # the slot is returned only after a hub iteration
    self.assertEqual(p.free(), 2)

    # Once the pool is exhausted, spawning forces a yield.
    p.spawn_n(foo, 2)
    self.assertEqual(1, p.free())
    self.assertEqual(r, [1])

    p.spawn_n(foo, 3)
    self.assertEqual(0, p.free())
    self.assertEqual(r, [1])

    # pool is full here: this spawn_n blocks for a slot, which yields
    # and lets the pending foo(2)/foo(3) run before returning
    p.spawn_n(foo, 4)
    self.assertEqual(set(r), set([1, 2, 3]))
    sleep(0)
    self.assertEqual(set(r), set([1, 2, 3, 4]))
def test_spawn(self):
    # Each spawned greenthread's wait() must yield the value passthru
    # was given, in spawn order.
    pool = GreenPool(4)
    threads = [pool.spawn(passthru, n) for n in xrange(10)]
    self.assertEquals([t.wait() for t in threads], list(xrange(10)))
def test_spawn_n_2(self):
    # Verifies the free-slot accounting of spawn()/spawn_n(), including
    # the implicit yield that happens when the pool is exhausted.
    p = GreenPool(2)
    self.assertEqual(p.free(), 2)
    r = []

    def foo(a):
        r.append(a)

    gt = p.spawn(foo, 1)
    self.assertEqual(p.free(), 1)
    gt.wait()
    self.assertEqual(r, [1])
    sleep(0)
    # the slot is returned only after a hub iteration
    self.assertEqual(p.free(), 2)

    # Once the pool is exhausted, spawning forces a yield.
    p.spawn_n(foo, 2)
    self.assertEqual(1, p.free())
    self.assertEqual(r, [1])

    p.spawn_n(foo, 3)
    self.assertEqual(0, p.free())
    self.assertEqual(r, [1])

    # pool is full here: this spawn_n blocks for a slot, which yields
    # and lets the pending foo(2)/foo(3) run before returning
    p.spawn_n(foo, 4)
    self.assertEqual(set(r), set([1, 2, 3]))
    sleep(0)
    self.assertEqual(set(r), set([1, 2, 3, 4]))
def test_execute(self):
    # wait() on the spawned greenthread must hand back the job's
    # return value unchanged.
    expected = 'return value'

    def job():
        return expected

    pool = GreenPool(2)
    gt = pool.spawn(job)
    self.assertEqual(expected, gt.wait())
def test_multiple_coros(self):
    # consumer is spawned first and blocks on evt; producer then runs
    # and wakes it, so all three log entries must appear.
    evt = Event()
    log = []

    def producer():
        log.append('prod')
        evt.send()

    def consumer():
        log.append('cons1')
        evt.wait()
        log.append('cons2')

    pool = GreenPool(2)
    consumer_gt = pool.spawn(consumer)
    pool.spawn(producer)
    consumer_gt.wait()
    self.assertEquals(sorted(['cons1', 'prod', 'cons2']), sorted(log))
def test_execute(self):
    # The worker's return value must come back through wait().
    payload = "return value"

    def work():
        return payload

    result = GreenPool(2).spawn(work).wait()
    self.assertEqual(payload, result)
def test_reentrant(self):
    # Spawning from inside a pooled greenthread must not deadlock,
    # even when the pool has only one slot.
    pool = GreenPool(1)

    def reenter():
        inner = pool.spawn(lambda v: v, 'reenter')
        self.assertEqual('reenter', inner.wait())

    pool.spawn(reenter).wait()

    evt = Event()

    def reenter_async():
        pool.spawn(lambda v: v, 'reenter')
        evt.send('done')

    pool.spawn(reenter_async)
    evt.wait()
def test_multiple_coros(self):
    # The consumer blocks on the event mid-run; the producer fires it.
    # Every marker must have been appended by the time consumer finishes.
    evt = Event()
    seen = []

    def producer():
        seen.append("prod")
        evt.send()

    def consumer():
        seen.append("cons1")
        evt.wait()
        seen.append("cons2")

    pool = GreenPool(2)
    first = pool.spawn(consumer)
    pool.spawn(producer)
    first.wait()
    self.assertEquals(sorted(["cons1", "prod", "cons2"]), sorted(seen))
def test_reentrant(self):
    # A size-1 pool must tolerate spawns issued from within its own
    # greenthread, both waited-on and fire-and-forget.
    pool = GreenPool(1)

    def nested_spawn():
        child = pool.spawn(lambda v: v, "reenter")
        self.assertEqual("reenter", child.wait())

    pool.spawn(nested_spawn).wait()

    evt = Event()

    def nested_spawn_async():
        pool.spawn(lambda v: v, "reenter")
        evt.send("done")

    pool.spawn(nested_spawn_async)
    evt.wait()
def test_pool_smash(self):
    """
    The premise is that a coroutine in a Pool tries to get a token out
    of a token pool but times out before getting the token. We verify
    that neither pool is adversely affected by this situation.
    """
    from evy import pools

    pool = GreenPool(1)
    tp = pools.TokenPool(max_size=1)
    token = tp.get()  # empty out the pool

    def do_receive(tp):
        # zero-second timeout: the RuntimeError fires before get() can
        # ever obtain a token from the emptied pool
        _timer = Timeout(0, RuntimeError())
        try:
            t = tp.get()
            # fixed typo: "recieved" -> "received"
            self.fail("Shouldn't have received anything from the pool")
        except RuntimeError:
            return 'timed out'
        else:
            # NOTE(review): unreachable in practice — self.fail() raises
            # before the else clause can run, so the timer is never
            # cancelled on that path; confirm intent
            _timer.cancel()

    # the spawn makes the token pool expect that coroutine, but then
    # immediately cuts bait
    e1 = pool.spawn(do_receive, tp)
    self.assertEquals(e1.wait(), 'timed out')

    # the pool can get some random item back
    def send_wakeup(tp):
        tp.put('wakeup')

    gt = spawn(send_wakeup, tp)

    # now we ask the pool to run something else, which should not
    # be affected by the previous send at all
    def resume():
        return 'resumed'

    e2 = pool.spawn(resume)
    self.assertEquals(e2.wait(), 'resumed')

    # we should be able to get out the thing we put in there, too
    self.assertEquals(tp.get(), 'wakeup')
    gt.wait()
def test_pool_smash(self):
    """
    The premise is that a coroutine in a Pool tries to get a token out
    of a token pool but times out before getting the token. We verify
    that neither pool is adversely affected by this situation.
    """
    from evy import pools

    pool = GreenPool(1)
    tp = pools.TokenPool(max_size=1)
    token = tp.get()  # empty out the pool

    def do_receive(tp):
        # zero-second timeout: the RuntimeError fires before get() can
        # ever obtain a token from the emptied pool
        _timer = Timeout(0, RuntimeError())
        try:
            t = tp.get()
            # fixed typo: "recieved" -> "received"
            self.fail("Shouldn't have received anything from the pool")
        except RuntimeError:
            return "timed out"
        else:
            # NOTE(review): unreachable in practice — self.fail() raises
            # before the else clause can run, so the timer is never
            # cancelled on that path; confirm intent
            _timer.cancel()

    # the spawn makes the token pool expect that coroutine, but then
    # immediately cuts bait
    e1 = pool.spawn(do_receive, tp)
    self.assertEquals(e1.wait(), "timed out")

    # the pool can get some random item back
    def send_wakeup(tp):
        tp.put("wakeup")

    gt = spawn(send_wakeup, tp)

    # now we ask the pool to run something else, which should not
    # be affected by the previous send at all
    def resume():
        return "resumed"

    e2 = pool.spawn(resume)
    self.assertEquals(e2.wait(), "resumed")

    # we should be able to get out the thing we put in there, too
    self.assertEquals(tp.get(), "wakeup")
    gt.wait()
def test_no_leaking(self):
    # Fixed non-PEP8 spacing (`def test_no_leaking (self)`) to match the
    # identically-named, correctly-formatted duplicate of this test.
    #
    # Run many greenthreads that each stash an object both in a weak
    # dict and in a corolocal; once the pool and its coros are gone the
    # weak dict must not keep them all alive.
    refs = weakref.WeakKeyDictionary()
    my_local = corolocal.local()

    class X(object):
        pass

    def do_something(i):
        o = X()
        refs[o] = True
        my_local.foo = o

    p = GreenPool()
    for i in xrange(100):
        p.spawn(do_something, i)
    p.waitall()
    del p
    # at this point all our coros have terminated
    # NOTE(review): exactly one entry is expected to survive collection;
    # presumably the last greenthread's local — confirm against the
    # corolocal implementation
    self.assertEqual(len(refs), 1)
def test_no_leaking(self):
    # Each greenthread stashes an object in a weak dict and a corolocal;
    # after the pool is gone the weak dict must have shrunk.
    refs = weakref.WeakKeyDictionary()
    my_local = corolocal.local()

    class X(object):
        pass

    def stash(unused_index):
        obj = X()
        refs[obj] = True
        my_local.foo = obj

    pool = GreenPool()
    for idx in xrange(100):
        pool.spawn(stash, idx)
    pool.waitall()
    del pool
    # all coros have terminated by now; only a single entry should
    # remain reachable
    self.assertEqual(len(refs), 1)
def test_stderr_raising(self):
    # testing that really egregious errors in the error handling code
    # (that prints tracebacks to stderr) don't cause the pool to lose
    # any members
    import sys

    pool = GreenPool(1)

    def crash(*args, **kw):
        raise RuntimeError("Whoa")

    class FakeFile(object):
        # any write to this fake stderr raises, simulating a broken
        # traceback printer
        write = crash

    # we're going to do this by causing the traceback.print_exc in
    # safe_apply to raise an exception and thus exit _main_loop
    normal_err = sys.stderr
    try:
        sys.stderr = FakeFile()
        waiter = pool.spawn(crash)
        self.assertRaises(RuntimeError, waiter.wait)
        # the pool should have something free at this point since the
        # waiter returned
        # GreenPool change: if an exception is raised during execution of a link,
        # the rest of the links are scheduled to be executed on the next hub iteration
        # this introduces a delay in updating pool.sem which makes pool.free() report 0
        # therefore, sleep:
        sleep(0)
        self.assertEqual(pool.free(), 1)
        # shouldn't block when trying to get
        t = Timeout(0.1)
        try:
            pool.spawn(sleep, 1)
        finally:
            t.cancel()
    finally:
        # always restore the real stderr, even if the test failed
        sys.stderr = normal_err
def test_timer_cancel(self):
    # this test verifies that local timers are not fired
    # outside of the context of the spawn method
    fired = []

    def fire_timer():
        fired.append(True)

    def schedule_only():
        hubs.get_hub().schedule_call_local(0, fire_timer)

    pool = GreenPool(2)
    pool.spawn(schedule_only).wait()
    sleep(0)
    self.assertEquals(fired, [])
def serve(sock, handle, concurrency=1000):
    """
    Runs a server on the supplied socket. Calls the function *handle* in a
    separate greenthread for every incoming client connection. *handle* takes
    two arguments: the client socket object, and the client address::

        def myhandle(client_sock, client_addr):
            print "client connected", client_addr

        evy.serve(evy.listen(('127.0.0.1', 9999)), myhandle)

    Returning from *handle* closes the client socket.

    :func:`serve` blocks the calling greenthread; it won't return until
    the server completes. If you desire an immediate return,
    spawn a new greenthread for :func:`serve`.

    Any uncaught exceptions raised in *handle* are raised as exceptions
    from :func:`serve`, terminating the server, so be sure to be aware of
    the exceptions your application can raise. The return value of *handle*
    is ignored.

    Raise a :class:`~evy.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the server() function to return
    rather than raise.

    The value in *concurrency* controls the maximum number of
    greenthreads that will be open at any time handling requests. When
    the server hits the concurrency limit, it stops accepting new
    connections until the existing ones complete.
    """
    # NOTE: signature formatting normalized to PEP 8
    # (`def serve(sock, handle, concurrency=1000)`) for consistency with
    # the rest of the module; behavior is unchanged.
    pool = GreenPool(concurrency)
    server_gt = getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            gt.link(_stop_checker, server_gt, conn)
            # drop local references so the connection objects can be
            # reclaimed promptly between accepts
            conn, addr, gt = None, None, None
        except StopServe:
            return
def serve(sock, handle, concurrency=1000):
    """
    Runs a server on the supplied socket. Calls the function *handle* in a
    separate greenthread for every incoming client connection. *handle* takes
    two arguments: the client socket object, and the client address::

        def myhandle(client_sock, client_addr):
            print "client connected", client_addr

        evy.serve(evy.listen(('127.0.0.1', 9999)), myhandle)

    Returning from *handle* closes the client socket.

    :func:`serve` blocks the calling greenthread; it won't return until
    the server completes. If you desire an immediate return,
    spawn a new greenthread for :func:`serve`.

    Any uncaught exceptions raised in *handle* are raised as exceptions
    from :func:`serve`, terminating the server, so be sure to be aware of
    the exceptions your application can raise. The return value of *handle*
    is ignored.

    Raise a :class:`~evy.StopServe` exception to gracefully terminate the
    server -- that's the only way to get the server() function to return
    rather than raise.

    The value in *concurrency* controls the maximum number of
    greenthreads that will be open at any time handling requests. When
    the server hits the concurrency limit, it stops accepting new
    connections until the existing ones complete.
    """
    pool = GreenPool(concurrency)
    server_gt = getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            # spawning blocks here once `concurrency` handlers are live
            gt = pool.spawn(handle, conn, addr)
            # _stop_checker propagates handler failures to this greenthread
            gt.link(_stop_checker, server_gt, conn)
            # drop local references so the connection objects can be
            # reclaimed promptly between accepts
            conn, addr, gt = None, None, None
        except StopServe:
            return
def test_execute(self):
    # A spawned callable's arguments and return value round-trip
    # through wait().
    def tag(value):
        return ('foo', value)

    pool = GreenPool()
    result = pool.spawn(tag, 1).wait()
    self.assertEqual(result, ('foo', 1))
def test_execute(self):
    # wait() must return exactly what the spawned callable produced.
    def tag(value):
        return ("foo", value)

    pool = GreenPool()
    greenthread = pool.spawn(tag, 1)
    self.assertEqual(greenthread.wait(), ("foo", 1))
def test_recursive_waitall(self):
    # Calling waitall() from inside the pool would deadlock, so it must
    # raise AssertionError instead.
    pool = GreenPool()
    waiter = pool.spawn(pool.waitall)
    self.assertRaises(AssertionError, waiter.wait)