def test_coroutine1(self):
    """Yield a summing coroutine fed by a deferred result."""
    event_loop = get_event_loop()
    deferred = Future()
    # resolve the deferred shortly after the coroutine starts waiting
    event_loop.call_later(0.2, deferred.set_result, 1)
    total = yield c_summation(deferred)
    self.assertEqual(total, 3)
    self.assertEqual(deferred.result(), 1)
def shutdown(self, wait=True):
    """Begin pool shutdown.

    A ``None`` sentinel is queued so workers terminate.  When ``wait``
    is true, a future resolved once the pool has drained is stored on
    the instance and returned.
    """
    with self._shutdown_lock:
        self._shutdown = True
        self._put(None)
        if not wait:
            return
        self._waiter = Future(loop=self._loop)
        return self._waiter
def wait_fd(fd, read=True):
    '''Wait for an event on file descriptor ``fd``.

    :param fd: file descriptor or an object exposing ``fileno()``
    :param read=True: wait for a read event if ``True``, otherwise a wait
        for write event.

    This function must be invoked from a coroutine with parent, therefore
    invoking it from the main greenlet will raise an exception.
    Check how this function is used in the :func:`.psycopg2_wait_callback`
    function.
    '''
    current = greenlet.getcurrent()
    parent = current.parent
    # a parent greenlet is required so we have somewhere to switch to
    assert parent, '"wait_fd" must be called by greenlet with a parent'
    try:
        fileno = fd.fileno()
    except AttributeError:
        # ``fd`` is already a raw file descriptor number
        fileno = fd
    loop = get_event_loop()
    future = Future(loop=loop)
    # When the event on fd occurs switch back to the current greenlet.
    # NOTE(review): presumably ``_done_wait_fd`` resolves ``future`` and
    # removes the reader/writer — verify against its definition.
    if read:
        loop.add_reader(fileno, _done_wait_fd, fileno, future, read)
    else:
        loop.add_writer(fileno, _done_wait_fd, fileno, future, read)
    # switch back to parent greenlet until the event fires
    parent.switch(future)
    return future.result()
def test_call_at(self):
    """``call_at`` must fire no later than an equivalent ``call_later``."""
    loop = get_event_loop()
    first, second = Future(), Future()
    c1 = loop.call_at(loop.time() + 1,
                      lambda: first.set_result(loop.time()))
    c2 = loop.call_later(1, lambda: second.set_result(loop.time()))
    t1, t2 = yield pulsar.multi_async((first, second))
    self.assertTrue(t1 <= t2)
async def test_json_with_async_string2(self):
    """Render a Json content holding an asynchronous string."""
    pending = Future()
    astr = wsgi.String(pending)
    content = wsgi.Json({'bla': astr})
    self.assertEqual(len(content.children), 1)
    rendered = content.render()
    # rendering before the string is ready yields a future
    self.assertIsInstance(rendered, Future)
    pending.set_result('ciao')
    value = await rendered
    self.assertEqual(value, json.dumps({'bla': 'ciao'}))
def test_json_with_async_string2(self):
    """Render a Json content wrapping an AsyncString."""
    pending = Future()
    astring = wsgi.AsyncString(pending)
    content = wsgi.Json({"bla": astring})
    self.assertEqual(len(content.children), 1)
    rendered = content.render()
    # not ready yet: render returns a future
    self.assertIsInstance(rendered, Future)
    pending.set_result("ciao")
    value = yield from rendered
    self.assertEqual(value, json.dumps({"bla": "ciao"}))
def test_call_soon(self):
    """``call_soon`` stores the callback and runs it on the loop thread."""
    ioloop = get_event_loop()
    tid = yield loop_thread_id(ioloop)
    d = Future()

    # named function instead of a lambda assignment (PEP 8 / E731)
    def callback():
        d.set_result(current_thread().ident)

    cbk = ioloop.call_soon(callback)
    self.assertEqual(cbk._callback, callback)
    self.assertEqual(cbk._args, ())
    # we should be able to wait less than a second
    result = yield d
    # the callback must run on the event-loop thread
    self.assertEqual(result, tid)
def test_ping_pong_monitor(self):
    """Yield a plain value, a failed future, then ping the monitor."""
    self.assertEqual((yield 3), 3)
    failed = Future()
    failed.set_exception(ValueError('test'))
    try:
        yield failed
    except ValueError:
        pass
    pong = yield send('monitor', 'ping')
    self.assertEqual(pong, 'pong')
def check_twisted(deferred, loop):
    """Binding for twisted.

    Added to pulsar asynchronous engine via the
    :func:`.add_async_binding` function.  Returns ``None`` when
    ``deferred`` is not a twisted :class:`Deferred`.
    """
    if not isinstance(deferred, Deferred):
        return None
    future = Future(loop=loop)

    def errback(failure):
        # unwrap the twisted failure into a plain exception
        future.set_exception(failure.value)

    deferred.addCallbacks(future.set_result, errback)
    return future
def _(*args, **kwargs):
    """Invoke the wrapped callable and adapt its result to a Future."""
    res = callable(*args, **kwargs)
    if isinstance(res, types.GeneratorType):
        # drive the generator through twisted's inlineCallbacks machinery
        res = _inlineCallbacks(None, res, Deferred())
    if not isinstance(res, Deferred):
        raise TypeError(
            "Callable %r should return a generator or a twisted Deferred"
            % callable)
    future = Future()

    def errback(failure):
        future.set_exception(failure.value)

    res.addCallbacks(future.set_result, errback)
    # keep a reference so the deferred is not garbage collected
    future._deferred = res
    return future
def test_periodic(self):
    # Assert that ``call_repeatedly`` fires every ``every`` seconds and
    # that an exception raised by the callback cancels the schedule.
    # NOTE(review): ``test = self`` is unused — confirm it can be removed.
    test = self
    loop = get_event_loop()
    waiter = Future()

    class p:
        # Callable counting its invocations; on the ``loops``-th call it
        # resolves ``waiter`` and re-raises to stop the periodic call.
        def __init__(self, loops):
            self.loops = loops
            self.c = 0

        def __call__(self):
            self.c += 1
            if self.c == self.loops:
                try:
                    raise ValueError('test periodic')
                except Exception:
                    # resolve the waiter before propagating the error
                    waiter.set_result(self.c)
                    raise

    every = 2
    loops = 2
    track = p(loops)
    start = loop.time()
    periodic = call_repeatedly(loop, every, track)
    self.assertIsInstance(periodic, LoopingCall)
    done = yield waiter
    taken = loop.time() - start
    self.assertEqual(done, loops)
    # the callback ran ``loops`` times, ``every`` seconds apart
    self.assertTrue(taken > every * loops)
    self.assertTrue(taken < every * loops + 2)
    # the raised exception must have cancelled the periodic handler
    self.assertTrue(periodic.cancelled)
    self.assertFalse(has_callback(loop, periodic.handler))
def test_chain(self):
    """``chain_future`` applies its callback to the source result."""
    loop = get_event_loop()
    source = Future()
    chained = chain_future(source, callback=lambda r: r + 2)
    loop.call_later(0.2, source.set_result, 1)
    value = yield chained
    self.assertEqual(value, 3)
def test_multi(self):
    """``multi_async`` preserves input order, not completion order."""
    first, second = Future(), Future()
    combined = multi_async([first, second, 'bla'])
    self.assertFalse(combined.done())
    second.set_result('first')
    # one pending future left: still not done
    self.assertFalse(combined.done())
    first.set_result('second')
    outcome = yield from combined
    self.assertEqual(outcome, ['second', 'first', 'bla'])
def _throttle(self, rw):
    """Throttle the connection in the given direction.

    :param rw: 1 to throttle writing, 0 to throttle reading.

    NOTE(review): the method name shadows the ``self._throttle`` mapping
    it mutates — confirm this is intended in the enclosing class.
    """
    self.logger.debug('Throttling %s', self._types[rw])
    if rw:
        assert not self._throttle[rw]
        # BUG FIX: asyncio.Future takes ``loop`` as a keyword-only
        # argument; passing it positionally raises TypeError.
        self._throttle[rw] = Future(loop=self.protocol._loop)
    else:
        # read throttling: simply pause the transport
        self._throttle[rw] = True
        t = self.protocol._transport
        t.pause_reading()
def coro1():
    """Coroutine yielding a value, then a future that fails with ValueError."""
    done = yield 3
    failing = Future()
    # schedule the failure on the future's own loop
    failing._loop.call_soon(failing.set_exception, ValueError('test'))
    try:
        yield failing
    except ValueError:
        done += 1
    coroutine_return(done)
def test_call_at(self):
    """A call_at one second ahead fires no later than call_later(1)."""
    loop = get_event_loop()
    at_done = Future()
    later_done = Future()
    c1 = loop.call_at(loop.time() + 1,
                      lambda: at_done.set_result(loop.time()))
    c2 = loop.call_later(1, lambda: later_done.set_result(loop.time()))
    t1, t2 = yield pulsar.multi_async((at_done, later_done))
    self.assertTrue(t1 <= t2)
def submit(self, func, *args, **kwargs):
    """Schedule ``func(*args, **kwargs)`` on the pool.

    A new task is queued and a :class:`~asyncio.Future` is returned;
    it is called back once the task has finished.

    :raises RuntimeError: if the pool has been shut down.
    """
    with self._shutdown_lock:
        if self._shutdown:
            raise RuntimeError(
                'cannot schedule new futures after shutdown')
        task_future = Future(loop=self._loop)
        self._put((task_future, func, args, kwargs))
    return task_future
async def test_multi(self):
    """multi_async results follow input order regardless of completion."""
    fut_a = Future()
    fut_b = Future()
    gathered = multi_async([fut_a, fut_b, 'bla'])
    self.assertFalse(gathered.done())
    fut_b.set_result('first')
    self.assertFalse(gathered.done())
    fut_a.set_result('second')
    self.assertEqual(await gathered, ['second', 'first', 'bla'])
def test_multi(self):
    """Aggregated results come back in input order."""
    one, two = Future(), Future()
    aggregate = multi_async([one, two, "bla"])
    self.assertFalse(aggregate.done())
    two.set_result("first")
    # still one pending input
    self.assertFalse(aggregate.done())
    one.set_result("second")
    collected = yield from aggregate
    self.assertEqual(collected, ["second", "first", "bla"])
def acquire(self, timeout=None):
    '''Acquires the lock if in the unlocked state otherwise switch
    back to the parent coroutine.
    '''
    # NOTE(review): ``timeout`` is currently unused — confirm intended.
    green = getcurrent()
    parent = green.parent
    if parent is None:
        # must be called from a child greenlet so we can switch out
        raise RuntimeError('acquire in main greenlet')
    if self._local.locked:
        # Lock already held: park a future on the queue and yield
        # control to the parent greenlet until the lock is released.
        future = Future(loop=self._loop)
        self._queue.append(future)
        parent.switch(future)
    # record this greenlet as the lock owner
    self._local.locked = green
    return self.locked()
def acquire(self, timeout=None):
    """Acquires the lock if in the unlocked state otherwise switch
    back to the parent coroutine.
    """
    # NOTE(review): ``timeout`` is currently unused — confirm intended.
    green = getcurrent()
    parent = green.parent
    if parent is None:
        # acquiring must happen inside a child greenlet
        raise MustBeInChildGreenlet('GreenLock.acquire in main greenlet')
    if self._local.locked:
        # Lock held elsewhere: queue a future and switch back to the
        # parent greenlet until released.
        future = Future(loop=self._loop)
        self._queue.append(future)
        parent.switch(future)
    # this greenlet now owns the lock
    self._local.locked = green
    return self.locked()
def test_yield(self):
    '''Yielding a future calling back on separate thread'''
    worker = pulsar.get_actor()
    loop = get_event_loop()
    # thread identifier of the event loop's own thread
    loop_tid = yield pulsar.loop_thread_id(loop)
    self.assertNotEqual(worker.tid, current_thread().ident)
    self.assertEqual(loop_tid, current_thread().ident)
    yield None
    self.assertEqual(loop_tid, current_thread().ident)
    d = Future(loop=worker._loop)

    # We are calling back the future in the event_loop which is on
    # a separate thread
    def _callback():
        d.set_result(current_thread().ident)

    worker._loop.call_soon_threadsafe(
        worker._loop.call_later, 0.2, _callback)
    result = yield d
    # the callback ran on the worker's loop thread
    self.assertEqual(worker.tid, result)
    self.assertNotEqual(worker.tid, current_thread().ident)
    self.assertEqual(loop_tid, current_thread().ident)
def __call__(self, actor=None):
    """Register this application with the (optional) calling ``actor``.

    If an ``actor`` is available (either via the function argument or
    via the :func:`~pulsar.async.actor.get_actor` function) it must be
    ``arbiter``, otherwise this call is no-op.

    If no actor is available, it means this application starts pulsar
    engine by creating the ``arbiter`` with its
    :ref:`global settings <setting-section-global-server-settings>`
    copied to the arbiter :class:`.Config` container.

    :return: the ``start`` one time event fired once this application
        has fired it.
    """
    if actor is None:
        actor = get_actor()
    monitor = None
    if actor and actor.is_arbiter():
        # a monitor for this application may already exist
        monitor = actor.get_actor(self.name)
    if monitor is None and (not actor or actor.is_arbiter()):
        # not yet started: configure and, if needed, create the arbiter
        self.cfg.on_start()
        self.logger = self.cfg.configured_logger()
        if not actor:
            actor = pulsar.arbiter(cfg=self.cfg.clone())
        else:
            self.update_arbiter_params(actor)
        if not self.cfg.exc_id:
            # inherit the execution id from the arbiter configuration
            self.cfg.set('exc_id', actor.cfg.exc_id)
        if self.on_config(actor) is not False:
            # attach the monitor once the arbiter starts; the returned
            # future fires when the application itself has started
            start = Future(loop=actor._loop)
            actor.bind_event('start', partial(self._add_monitor, start))
            return start
        else:
            # on_config vetoed the start
            return
    elif monitor:
        raise ImproperlyConfigured('%s already started ' % monitor.name)
    else:
        raise ImproperlyConfigured('Cannot start application from %s' %
                                   actor)
def switch_to_ssl(self, prev_response):
    '''Wrap the transport for SSL communication.'''
    request = prev_response._request.request
    connection = prev_response._connection
    loop = connection._loop
    sock = connection._transport._sock
    # set a new connection_made event
    connection.events['connection_made'] = OneTime(loop=loop)
    # undo the bookkeeping for the request that triggered the upgrade
    connection._processed -= 1
    connection.producer._requests_processed -= 1
    waiter = Future(loop=loop)
    # wrap the raw socket in an SSL transport; ``waiter`` is resolved
    # once the TLS handshake completes
    loop._make_ssl_transport(sock, connection, request._ssl, waiter,
                             server_side=False,
                             server_hostname=request._netloc)
    yield from waiter
    # re-issue the request over the secured connection
    response = connection.current_consumer()
    response.start(request)
    yield from response.on_finished
    if response.request_again:
        # the consumer asked for a follow-up (e.g. redirect/auth)
        response = response.request_again
    prev_response.request_again = response
def test_call_later(self):
    # Scheduled-then-cancelled callbacks never fire; a rescheduled one
    # runs on the loop thread and is removed after completion.
    ioloop = get_event_loop()
    tid = yield loop_thread_id(ioloop)
    d = Future()
    # schedule two callbacks far in the future
    timeout1 = ioloop.call_later(
        20, lambda: d.set_result(current_thread().ident))
    timeout2 = ioloop.call_later(
        10, lambda: d.set_result(current_thread().ident))
    # lets wake the ioloop
    self.assertTrue(has_callback(ioloop, timeout1))
    self.assertTrue(has_callback(ioloop, timeout2))
    timeout1.cancel()
    timeout2.cancel()
    self.assertTrue(timeout1._cancelled)
    self.assertTrue(timeout2._cancelled)
    # reschedule with a short delay and wait for the result
    timeout1 = ioloop.call_later(
        0.1, lambda: d.set_result(current_thread().ident))
    yield d
    self.assertTrue(d.done())
    # the callback ran on the event-loop thread
    self.assertEqual(d.result(), tid)
    # a fired handle must no longer be registered
    self.assertFalse(has_callback(ioloop, timeout1))
class GreenPool(AsyncObject):
    '''A pool of running greenlets.

    This pool maintains a group of greenlets to perform asynchronous
    tasks via the :meth:`submit` method.
    '''
    worker_name = 'exec'

    def __init__(self, max_workers=None, loop=None):
        self._loop = loop or get_event_loop()
        # cap the pool size at the module-level maximum
        self._max_workers = min(max_workers or _DEFAULT_WORKERS,
                                _MAX_WORKERS)
        self._greenlets = set()
        self._available = set()
        self._queue = deque()
        self._shutdown = False
        self._waiter = None
        self._logger = logging.getLogger('pulsar.greenpool')
        self._shutdown_lock = threading.Lock()

    @property
    def max_workers(self):
        return self._max_workers

    @max_workers.setter
    def max_workers(self, value):
        value = int(value)
        assert value > 0
        self._max_workers = value

    def submit(self, func, *args, **kwargs):
        '''Equivalent to ``func(*args, **kwargs)``.

        This method create a new task for function ``func`` and adds it
        to the queue.
        Return a :class:`~asyncio.Future` called back once the task has
        finished.
        '''
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')
            future = Future(loop=self._loop)
            self._put((future, func, args, kwargs))
            return future

    def shutdown(self, wait=True):
        # Queue the ``None`` sentinel so workers exit; optionally return
        # a future resolved when all greenlets are gone.
        with self._shutdown_lock:
            self._shutdown = True
            self._put(None)
            if wait:
                self._waiter = Future(loop=self._loop)
                return self._waiter

    # INTERNALS
    def _adjust_greenlet_count(self):
        # spawn a new worker greenlet when none is available and the
        # pool is below capacity
        if (not self._shutdown and not self._available and
                len(self._greenlets) < self._max_workers):
            green = GreenletWorker(self._green_run)
            self._greenlets.add(green)
            self.logger.debug('Num greenlets: %d', len(self._greenlets))
            green.switch()
        return self._available

    def _put(self, task):
        # Run in the main greenlet of the event-loop thread
        self._queue.appendleft(task)
        self._check_queue()

    def _check_queue(self):
        # Run in the main greenlet of the event-loop thread
        if not self._adjust_greenlet_count():
            self.logger.debug('No greenlet available')
            # retry on the next loop iteration
            return self._loop.call_soon(self._check_queue)
        try:
            task = self._queue.pop()
        except IndexError:
            return
        # NOTE(review): ``async`` here is the pre-3.4.4 alias of
        # ``asyncio.ensure_future`` and a reserved keyword in
        # Python >= 3.7 — confirm the target Python version.
        async(self._green_task(self._available.pop(), task),
              loop=self._loop)

    def _green_task(self, green, task):
        # Coroutine executing the in main greenlet
        # This coroutine is executed for every task put into the queue
        while task is not _DONE:
            # switch to the greenlet to start the task
            task = green.switch(task)
            # if an asynchronous result is returned, yield from
            while is_async(task):
                try:
                    task = yield from task
                except Exception as exc:
                    # This call can return an asynchronous component
                    task = green.throw(exc)

    def _green_run(self):
        # The run method of a worker greenlet
        task = True
        while task:
            green = getcurrent()
            parent = green.parent
            assert parent
            # add greenlet in the available greenlets
            self._available.add(green)
            # switch back to the main execution
            task = parent.switch(_DONE)
            if task:
                future, func, args, kwargs = task
                try:
                    result = func(*args, **kwargs)
                except Exception as exc:
                    future.set_exception(exc)
                else:
                    future.set_result(result)
            else:
                # Greenlet cleanup
                self._greenlets.remove(green)
                if self._greenlets:
                    self._put(None)
                elif self._waiter:
                    self._waiter.set_result(None)
                    self._waiter = None
                parent.switch(_DONE)
class StreamReader:
    '''Asynchronous reader for an HTTP message body.'''
    # tracks whether a "100 Continue" reply has already been sent
    _expect_sent = None
    # cached future from a pending :meth:`read`
    _waiting = None

    def __init__(self, headers, parser, transport=None):
        self.headers = headers
        self.parser = parser
        self.transport = transport
        self.buffer = b''
        # resolved when the parser signals message completion
        self.on_message_complete = Future()

    def __repr__(self):
        return repr(self.transport)
    __str__ = __repr__

    def done(self):
        '''``True`` when the full HTTP message has been read.
        '''
        return self.on_message_complete.done()

    def protocol(self):
        # e.g. "HTTP/1.1" built from the parser's version tuple
        version = self.parser.get_version()
        return "HTTP/%s" % ".".join(('%s' % v for v in version))

    def waiting_expect(self):
        '''``True`` when the client is waiting for 100 Continue.
        '''
        if self._expect_sent is None:
            if (not self.parser.is_message_complete() and
                    self.headers.has('expect', '100-continue')):
                return True
            # no expectation: mark as handled
            self._expect_sent = ''
        return False

    def recv(self):
        '''Read bytes in the buffer.
        '''
        if self.waiting_expect():
            if self.parser.get_version() < (1, 1):
                # 100-continue requires at least HTTP/1.1
                raise HttpException(status=417)
            else:
                msg = '%s 100 Continue\r\n\r\n' % self.protocol()
                self._expect_sent = msg
                self.transport.write(msg.encode(DEFAULT_CHARSET))
        return self.parser.recv_body()

    def read(self, maxbuf=None):
        '''Return bytes in the buffer.

        If the stream is not yet ready, return a :class:`asyncio.Future`
        which results in the bytes read.
        '''
        if not self._waiting:
            body = self.recv()
            if self.done():
                return self._getvalue(body, maxbuf)
            else:
                # wait for message completion, then assemble the value
                self._waiting = chain_future(
                    self.on_message_complete,
                    lambda r: self._getvalue(body, maxbuf))
                return self._waiting
        else:
            return self._waiting

    def fail(self):
        # reject a pending 100-continue expectation
        if self.waiting_expect():
            raise HttpException(status=417)

    ## INTERNALS
    def _getvalue(self, body, maxbuf):
        if self.buffer:
            # prepend bytes left over from a previous truncated read
            body = self.buffer + body
        body = body + self.recv()
        if maxbuf and len(body) > maxbuf:
            # keep the excess for the next read
            body, self.buffer = body[:maxbuf], body[maxbuf:]
        return body
class GreenPool(AsyncObject):
    """A pool of running greenlets.

    This pool maintains a group of greenlets to perform asynchronous
    tasks via the :meth:`submit` method.
    """
    worker_name = 'exec'

    def __init__(self, max_workers=None, loop=None):
        self._loop = loop or get_event_loop()
        # cap the pool size at the module-level maximum
        self._max_workers = min(max_workers or _DEFAULT_WORKERS,
                                _MAX_WORKERS)
        self._greenlets = set()
        self._available = set()
        self._queue = deque()
        self._shutdown = False
        self._waiter = None
        self._logger = logging.getLogger('pulsar.greenpool')
        self._shutdown_lock = threading.Lock()
        # expose the module-level ``wait`` helper on the instance
        self.wait = wait

    @property
    def max_workers(self):
        return self._max_workers

    @max_workers.setter
    def max_workers(self, value):
        value = int(value)
        assert value > 0
        self._max_workers = value

    @property
    def in_green_worker(self):
        """True if the current greenlet is a green pool worker
        """
        return isinstance(getcurrent(), GreenletWorker)

    def submit(self, func, *args, **kwargs):
        """Equivalent to ``func(*args, **kwargs)``.

        This method create a new task for function ``func`` and adds it
        to the queue.
        Return a :class:`~asyncio.Future` called back once the task has
        finished.
        """
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')
            if self.in_green_worker:
                # already inside a worker greenlet: execute synchronously
                return wait(func(*args, **kwargs))
            else:
                future = Future(loop=self._loop)
                self._put((future, func, args, kwargs))
                return future

    def shutdown(self, wait=True):
        # Queue the sentinel so workers exit; optionally return a future
        # resolved once all greenlets have finished.
        with self._shutdown_lock:
            self._shutdown = True
            self._put(None)
            if wait:
                self._waiter = Future(loop=self._loop)
            return self._waiter

    def getcurrent(self):
        return getcurrent()

    # INTERNALS
    def _adjust_greenlet_count(self):
        # spawn a new worker greenlet when none is available and the
        # pool is below capacity
        if (not self._shutdown and not self._available and
                len(self._greenlets) < self._max_workers):
            green = GreenletWorker(self._green_run)
            self._greenlets.add(green)
            self.logger.debug('Num greenlets: %d', len(self._greenlets))
            green.switch()
        return self._available

    def _put(self, task):
        # Run in the main greenlet of the event-loop thread
        self._queue.appendleft(task)
        self._check_queue()

    def _check_queue(self):
        # Run in the main greenlet of the event-loop thread
        if not self._adjust_greenlet_count():
            self.logger.debug('No greenlet available')
            # retry on the next loop iteration
            return self._loop.call_soon(self._check_queue)
        try:
            task = self._queue.pop()
        except IndexError:
            return
        ensure_future(self._green_task(self._available.pop(), task),
                      loop=self._loop)

    async def _green_task(self, green, task):
        # Coroutine executing the in main greenlet
        # This coroutine is executed for every task put into the queue
        while task is not _DONE:
            # switch to the greenlet to start the task
            task = green.switch(task)
            # if an asynchronous result is returned, await
            while isawaitable(task):
                try:
                    task = await task
                except Exception as exc:
                    # This call can return an asynchronous component
                    exc_info = sys.exc_info()
                    if not exc_info[0]:
                        exc_info = (exc, None, None)
                    task = green.throw(*exc_info)

    def _green_run(self):
        # The run method of a worker greenlet
        task = True
        while task:
            green = getcurrent()
            parent = green.parent
            assert parent
            # add greenlet in the available greenlets
            self._available.add(green)
            # switch back to the main execution
            task = parent.switch(_DONE)
            if task:
                future, func, args, kwargs = task
                try:
                    try:
                        result = wait(func(*args, **kwargs), True)
                    except StopIteration as exc:  # See PEP 479
                        raise RuntimeError(
                            'Unhandled StopIteration') from exc
                except Exception as exc:
                    future.set_exception(exc)
                else:
                    future.set_result(result)
            else:
                # Greenlet cleanup
                self._greenlets.remove(green)
                if self._greenlets:
                    self._put(None)
                elif self._waiter:
                    self._waiter.set_result(None)
                    self._waiter = None
                parent.switch(_DONE)
def __init__(self, headers, parser, transport=None):
    """Initialise the reader with an empty byte buffer and a future
    fired once the whole message has been parsed."""
    self.on_message_complete = Future()
    self.buffer = b''
    self.transport = transport
    self.parser = parser
    self.headers = headers
class StreamReader:
    '''Reader for an HTTP message body, driven by an external parser.'''
    # whether a "100 Continue" reply has already been written
    _expect_sent = None
    # pending future from an earlier :meth:`read`
    _waiting = None

    def __init__(self, headers, parser, transport=None):
        self.headers = headers
        self.parser = parser
        self.transport = transport
        self.buffer = b''
        # fired once the parser reports the message complete
        self.on_message_complete = Future()

    def __repr__(self):
        return repr(self.transport)
    __str__ = __repr__

    def done(self):
        '''``True`` when the full HTTP message has been read.
        '''
        return self.on_message_complete.done()

    def protocol(self):
        # render e.g. "HTTP/1.1" from the parser version tuple
        version = self.parser.get_version()
        return "HTTP/%s" % ".".join(('%s' % v for v in version))

    def waiting_expect(self):
        '''``True`` when the client is waiting for 100 Continue.
        '''
        if self._expect_sent is None:
            if (not self.parser.is_message_complete() and
                    self.headers.has('expect', '100-continue')):
                return True
            # no expect header: nothing to send
            self._expect_sent = ''
        return False

    def recv(self):
        '''Read bytes in the buffer.
        '''
        if self.waiting_expect():
            if self.parser.get_version() < (1, 1):
                # 100-continue is an HTTP/1.1 feature
                raise HttpException(status=417)
            else:
                msg = '%s 100 Continue\r\n\r\n' % self.protocol()
                self._expect_sent = msg
                self.transport.write(msg.encode(DEFAULT_CHARSET))
        return self.parser.recv_body()

    def read(self, maxbuf=None):
        '''Return bytes in the buffer.

        If the stream is not yet ready, return a :class:`asyncio.Future`
        which results in the bytes read.
        '''
        if not self._waiting:
            body = self.recv()
            if self.done():
                return self._getvalue(body, maxbuf)
            else:
                # collect the value once the message completes
                self._waiting = chain_future(
                    self.on_message_complete,
                    lambda r: self._getvalue(body, maxbuf))
                return self._waiting
        else:
            return self._waiting

    def fail(self):
        # refuse an outstanding 100-continue expectation
        if self.waiting_expect():
            raise HttpException(status=417)

    # INTERNALS
    def _getvalue(self, body, maxbuf):
        if self.buffer:
            # prepend leftovers from a previous truncated read
            body = self.buffer + body
        body = body + self.recv()
        if maxbuf and len(body) > maxbuf:
            # stash the excess for the next read
            body, self.buffer = body[:maxbuf], body[maxbuf:]
        return body
def async_func(loop, value):
    """Return a future resolved with ``value`` after ``DELAY`` seconds."""
    pending = Future(loop=loop)
    loop.call_later(DELAY, pending.set_result, value)
    return pending
def send(self, message):
    """Send ``message`` on the datagram transport and return a future
    resolved with the reply."""
    # only one outstanding request is allowed at a time
    assert self._waiting is None
    waiter = Future(loop=self._loop)
    self._waiting = waiter
    payload = to_bytes(message) + self.separator
    self._transport.sendto(payload)
    return waiter
def AsyncResponseMiddleware(environ, resp):
    """Test helper: return ``resp`` through a future resolved on the
    next event-loop iteration."""
    deferred = Future()
    deferred._loop.call_soon(deferred.set_result, resp)
    return deferred
class GreenPool(AsyncObject):
    '''A pool of running greenlets.

    This pool maintains a group of greenlets to perform asynchronous
    tasks via the :meth:`submit` method.
    '''
    worker_name = 'exec'

    def __init__(self, max_workers=None, loop=None, maxtasks=None):
        self._loop = loop or get_event_loop()
        # cap the pool size at the module-level maximum
        self._max_workers = min(max_workers or _DEFAULT_WORKERS,
                                _MAX_WORKERS)
        self._greenlets = set()
        self._available = set()
        # NOTE(review): ``maxtasks`` is stored but not used in this
        # block — confirm whether enforcement happens elsewhere.
        self._maxtasks = maxtasks
        self._queue = deque()
        self._shutdown = False
        self._waiter = None
        self._shutdown_lock = threading.Lock()

    def submit(self, func, *args, **kwargs):
        '''Equivalent to ``func(*args, **kwargs)``.

        This method create a new task for function ``func`` and adds it
        to the queue.
        Return a :class:`~asyncio.Future` called back once the task has
        finished.
        '''
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError(
                    'cannot schedule new futures after shutdown')
            future = Future(loop=self._loop)
            self._put((future, func, args, kwargs))
            return future

    def shutdown(self, wait=True):
        # Queue the shutdown sentinel; optionally return a future
        # resolved once every greenlet has exited.
        with self._shutdown_lock:
            self._shutdown = True
            self._put()
            if wait:
                self._waiter = Future(loop=self._loop)
                return self._waiter

    # INTERNALS
    def _adjust_greenlet_count(self):
        # spawn a worker when none is available and below capacity
        if not self._available and len(self._greenlets) < self._max_workers:
            greenlet = GreenletWorker(self._green_run)
            self._greenlets.add(greenlet)
            greenlet.switch()

    def _put(self, task=None):
        # Run in the main greenlet of the event-loop thread
        if task:
            self._adjust_greenlet_count()
        self._queue.appendleft(task)
        self._check_queue()

    def _check_queue(self):
        # Run in the main greenlet of the event-loop thread
        if not self._available:
            return
        try:
            task = self._queue.pop()
        except IndexError:
            return
        # NOTE(review): ``async`` here is the pre-3.4.4 alias of
        # ``asyncio.ensure_future`` and a reserved keyword in
        # Python >= 3.7 — confirm the target Python version.
        async(self._green_task(self._available.pop(), task),
              loop=self._loop)

    def _green_task(self, greenlet, task):
        # Run in the main greenlet of the event-loop thread
        while task is not _DONE:
            # switch to the greenlet to start the task
            task = greenlet.switch(task)
            # if an asynchronous result is returned, yield from
            while is_async(task):
                try:
                    task = yield from task
                except Exception as exc:
                    # This call can return an asynchronous component
                    task = greenlet.throw(exc)

    def _green_run(self):
        # The run method of a worker greenlet
        task = True
        while task:
            greenlet = getcurrent()
            parent = greenlet.parent
            assert parent
            self._available.add(greenlet)
            self._loop.call_soon(self._check_queue)
            # switch back to the main execution
            task = parent.switch(_DONE)
            if task:
                # If a new task is available execute it
                # Here we are in the child greenlet
                future, func, args, kwargs = task
                try:
                    result = func(*args, **kwargs)
                except Exception as exc:
                    future.set_exception(exc)
                else:
                    future.set_result(result)
            else:
                # Greenlet cleanup
                self._greenlets.remove(greenlet)
                if self._greenlets:
                    self._put(None)
                elif self._waiter:
                    self._waiter.set_result(None)
                    self._waiter = None
                parent.switch(_DONE)