def _next(self):
    """Pop the next test class from the queue and schedule its tests.

    When the queue is exhausted, print the runner summary and schedule
    the exit callback (exit code 2 on errors/failures, 0 otherwise).
    """
    runner = self.runner
    cfg = self.monitor.cfg
    if self.tests:
        run = True
        tag, testcls = self.tests.pop()
        testcls.tag = tag
        testcls.cfg = cfg
        # ``async`` is a reserved keyword since Python 3.7 - the original
        # ``testcls.async = ...`` no longer compiles; setattr keeps the
        # attribute name (and therefore test-code access) unchanged.
        setattr(testcls, 'async', AsyncAssert(testcls))
        try:
            all_tests = runner.loadTestsFromTestCase(testcls)
        except Exception:
            # logger.exception already records the traceback; the
            # redundant exc_info=True was dropped
            self.logger.exception('Could not load tests')
            run = False
        else:
            run = all_tests.countTestCases()
        if run:
            self.logger.info('Running Tests from %s', testcls)
            runner.startTestClass(testcls)
            ensure_future(self._run_testcls(testcls, all_tests),
                          loop=self._loop)
        else:
            # nothing runnable in this class - move on to the next one
            self._loop.call_soon(self._next)
    else:
        time_taken = self._loop.time() - self._time_start
        runner = self.runner
        runner.on_end()
        runner.printSummary(time_taken)
        # exit code 2 signals failures/errors to the calling process
        if runner.result.errors or runner.result.failures:
            exit_code = 2
        else:
            exit_code = 0
        self._loop.call_soon(self._exit, exit_code)
def on_open(self, websocket):
    """Register a newly opened websocket and subscribe its channel."""
    handshake_qid = websocket.handshake.qid
    client_channel = self.channel % handshake_qid
    new_client = self.client(websocket, client_channel)
    self.pubsub.add_client(new_client)
    ensure_future(self.pubsub.subscribe(client_channel))
    logging.info('New websocket opened. Add client to %s on "%s" channel',
                 self.pubsub, self.channel)
def emit(self, record):
    """Emit record to slack channel using pycurl to avoid recurrence
    event logging (log logged record)
    """
    # record.message only exists once a Formatter has processed the
    # record; getMessage() is always safe inside Handler.emit
    if record.getMessage().startswith(MESSAGE):
        # avoid circular emit
        return
    cfg = self.app.config
    managers = cfg['SLACK_LINK_NAMES']
    text = ''
    data = {}
    if managers:
        # @-mention the configured managers so slack notifies them
        text = ' '.join(('@%s' % m for m in managers))
        text = '%s\n\n' % text
        data['link_names'] = 1
    context_factory = cfg['LOG_CONTEXT_FACTORY']
    data['text'] = text
    if context_factory:
        ctx = context_factory(self)
        data['text'] += "\n" + context_text_formatter(ctx)
    data['text'] += "```\n%s\n```" % self.format(record)
    sessions = self.app.http()
    response = sessions.post(self.webhook_url, data=json.dumps(data))
    if isawaitable(response):
        ensure_future(self._emit(response), loop=sessions._loop)
    else:
        sessions._loop.call_soon(self._raise_error, response)
def _next_class(self, tag, test_classes):
    """Load the next test class from *test_classes* and schedule its tests."""
    try:
        test_cls, test_fun = next(test_classes)
    except StopIteration:
        # iterator exhausted - continue with the next test file
        return self._loop.call_soon(self._next_file)
    test_cls.tag = tag
    test_cls.cfg = self.cfg
    test_cls.wait = AsyncAssert(test_cls)
    try:
        suite = self.runner.loadTestsFromTestCase(test_cls)
    except Exception:
        self.logger.exception('Could not load tests')
        loaded = False
    else:
        loaded = list(suite)
        if test_fun:
            # a single test method was requested - keep only that one
            matching = [t for t in loaded
                        if t._testMethodName == test_fun]
            loaded = matching[:1] or False
    if loaded:
        self.logger.info('Running %d Tests from %s.%s',
                         len(loaded), tag, test_cls.__name__)
        self.runner.startTestClass(test_cls)
        ensure_future(self._run_test_cls(test_cls, test_classes, loaded),
                      loop=self._loop)
    else:
        self._loop.call_soon(self._next_class, tag, test_classes)
def __call__(self, environ, start_response):
    """WSGI entry point for the tunnel.

    Returns the future fulfilled once the tunnelled response is ready.

    Raises:
        HttpException: 404 when the raw uri is missing or is a plain
            path rather than a proper (absolute) uri.
    """
    uri = environ['RAW_URI']
    # lazy %-args: the message is only rendered when DEBUG is enabled
    logger.debug('new request for %r', uri)
    if not uri or uri.startswith('/'):
        # No proper uri, raise 404
        raise HttpException(status=404)
    response = TunnelResponse(self, environ, start_response)
    ensure_future(response.request())
    return response.future
def _poll_tasks(self, worker, next_time=None):
    """Poll for tasks now, or re-schedule the poll *next_time* seconds away."""
    if self.closing() and not self._concurrent_tasks:
        # nothing in flight and we are shutting down
        self.do_close()
    elif worker.is_running() and not next_time:
        ensure_future(self._may_poll_task(worker), loop=worker._loop)
    else:
        delay = next_time if next_time else 0
        worker._loop.call_later(delay, self._poll_tasks, worker)
def tick(self):
    """Periodic callback: when closing, flush twitter accounts then shut down."""
    if not self.closing():
        return
    pending_accounts = self.accounts
    if pending_accounts:
        # hand the accounts over to the close coroutine exactly once
        self.accounts = None
        self.logger.info('Closing twitter accounts')
        ensure_future(close(self, pending_accounts), loop=self._loop)
    else:
        self.do_close()
async def _may_poll_task(self, worker):
    # Called in the ``worker`` event loop.
    #
    # It pools a new task if possible, and add it to the queue of
    # tasks consumed by the ``worker`` CPU-bound thread.'''
    task = None
    next_time = None
    lag = 0
    if worker.is_running():
        loop = worker._loop
        if self.num_concurrent_tasks < self.max_concurrent_tasks:
            max_tasks = self.cfg.max_requests
            if max_tasks and self._processed >= max_tasks:
                # reached the per-worker request cap - stop the backend
                self.backend.close(
                    'Processed %s tasks. Stop polling tasks.'
                    % self._processed)
            if not self.closing():
                try:
                    # measure broker latency (lag) around the fetch
                    t0 = loop.time()
                    task = await self.broker.get_message(*self.queues())
                    lag = loop.time() - t0
                except ConnectionError:
                    # exponential backoff only on repeated failures
                    if self.broker.connection_error:
                        next_time = backoff(self._next_time)
                    else:
                        next_time = RECONNECT_LAG
                    self.broker.connection_error = True
                    if worker.is_running():
                        self.logger.critical(
                            '%s cannot pool messages - '
                            'connection error - try again in %s seconds',
                            self.broker, next_time)
                except CANCELLED_ERRORS:
                    # propagate cancellation so shutdown is not swallowed
                    self.logger.debug('stopped polling messages')
                    raise
                except Exception:
                    if worker.is_running():
                        self.logger.exception('server exception')
                else:
                    self.broker.connection_ok()
            if task:    # Got a new task
                self._processed += 1
                self._concurrent_tasks[task.id] = TaskExecutor(task)
                ensure_future(self._execute_task(task, worker))
        else:
            self.logger.debug('%s concurrent messages. Cannot poll.',
                              self.max_concurrent_tasks)
    # NOTE(review): poll_time presumably scales the delay with current
    # load (concurrency ratio) and broker lag - confirm its contract.
    if next_time is None:
        next_time = poll_time(
            self.cfg.task_pool_timeout,
            self.cfg.task_pool_timeout_max,
            self.num_concurrent_tasks / self.max_concurrent_tasks,
            lag)
    self._next_time = next_time
    # always re-arm the polling loop
    self._poll_tasks(worker, next_time)
def _check_queue(self):
    """Dispatch one queued task onto an available greenlet.

    Runs in the main greenlet of the event-loop thread.
    """
    if not self._adjust_greenlet_count():
        # pool exhausted - retry on the next loop iteration
        self.logger.debug("No greenlet available")
        return self._loop.call_soon(self._check_queue)
    try:
        pending = self._queue.pop()
    except IndexError:
        return
    green = self._available.pop()
    ensure_future(self._green_task(green, pending), loop=self._loop)
def write_body(self, transport):
    """Write the message body to *transport*, streaming when required."""
    assert not self._write_done, 'Body already sent'
    self._write_done = True
    body = self.body
    if not body:
        return
    if is_streamed(body):
        # streamed bodies are written chunk by chunk in a coroutine
        ensure_future(self._write_streamed_data(transport),
                      loop=self._loop)
    else:
        self._write_body_data(transport, body, True)
def write_body(self, transport):
    """Write ``self.data`` to *transport*, streaming when required."""
    assert not self._write_done, 'Body already sent'
    self._write_done = True
    payload = self.data
    if not payload:
        return
    if is_streamed(payload):
        # streamed payloads are drained asynchronously on the transport loop
        ensure_future(self._write_streamed_data(transport),
                      loop=transport._loop)
    else:
        self._write_body_data(transport, payload, True)
def _check_queue(self): # Run in the main greenlet of the event-loop thread if not self._adjust_greenlet_count(): self.logger.debug('No greenlet available') return self._loop.call_soon(self._check_queue) try: task = self._queue.pop() except IndexError: return ensure_future(self._green_task(self._available.pop(), task), loop=self._loop)
def test_blocking_timeout_acquire(self):
    # Acquire a lock, then check that a second lock with a 5 second
    # blocking timeout obtains it after the first is released (~0.5s)
    # and well before the timeout expires.
    key = self.randomkey()
    eq = self.wait.assertEqual
    lock1 = self.client.lock(key)
    lock2 = self.client.lock(key, blocking=5)
    yield from eq(lock1.acquire(), True)
    # release lock1 in the background after 0.5 seconds
    ensure_future(self._release(lock1, 0.5))
    start = lock2._loop.time()
    yield from eq(lock2.acquire(), True)
    # acquired after the release (>0.5s) but before the 5s timeout
    self.assertTrue(5 > lock2._loop.time() - start > 0.5)
    yield from eq(lock2.release(), True)
async def test_blocking_timeout_acquire(self):
    """A blocking lock is acquired once the current holder releases it."""
    key = self.randomkey()
    first = self.client.lock(key)
    second = self.client.lock(key, blocking=5)
    self.assertEqual(await first.acquire(), True)
    # schedule the release of the first lock after half a second
    ensure_future(self._release(first, 0.5))
    started = second._loop.time()
    self.assertEqual(await second.acquire(), True)
    elapsed = second._loop.time() - started
    # acquired after the release but well inside the 5s timeout
    self.assertTrue(5 > elapsed > 0.5)
    self.assertEqual(await second.release(), True)
def connect(self):
    '''Connect to twitter streaming endpoint.

    If the connection is dropped, the :meth:`reconnect` method is
    invoked according to twitter streaming connection policy_.
    '''
    request = self.consumer.http.post(
        self.public_stream,
        data=self.filter,
        pre_request=self.oauth,
        on_headers=self._connected,
        data_processed=self._process_data,
        post_request=self._reconnect)
    ensure_future(request, loop=self._loop)
def worker_start(self, worker, exc=None):
    """Worker startup hook: build the OAuth1 http client and
    :meth:`connect` to the twitter streaming endpoint.
    """
    api_key = self.get_param('twitter_api_key')
    api_secret = self.get_param('twitter_api_secret')
    token = self.get_param('twitter_access_token')
    token_secret = self.get_param('twitter_access_secret')
    oauth1 = OAuth1(api_key,
                    client_secret=api_secret,
                    resource_owner_key=token,
                    resource_owner_secret=token_secret)
    self._http = HttpClient(encode_multipart=False)
    # sign every outgoing request with the OAuth1 credentials
    self._http.bind_event('pre_request', oauth1)
    self.buffer = []
    ensure_future(self.connect())
def setup(self, environ):
    '''Called once only to setup the WSGI application handler.

    Check :ref:`lazy wsgi handler <wsgi-lazy-handler>` section for
    further information.
    '''
    cfg = environ['pulsar.cfg']
    loop = environ['pulsar.connection']._loop
    self.store = create_store(cfg.data_store, loop=loop)
    pubsub = self.store.pubsub(protocol=Protocol())
    channel = '%s_webchat' % self.name
    ensure_future(pubsub.subscribe(channel), loop=loop)
    routes = [Router('/', get=self.home_page),
              WebSocket('/message', Chat(pubsub, channel)),
              Router('/rpc', post=Rpc(pubsub, channel),
                     response_content_types=JSON_CONTENT_TYPES)]
    middleware = [AsyncResponseMiddleware,
                  GZipMiddleware(min_length=20)]
    return WsgiHandler(routes, middleware)
def setup(self, environ):
    '''Called once only by the WSGI server.

    It returns a :class:`.WsgiHandler` with three routes:

    * The base route served by the :meth:`home_page` method
    * The websocket route
    * A route for static files
    '''
    cfg = environ['pulsar.cfg']
    # create the store and the pubsub handler
    self.store = create_store(cfg.data_store)
    pubsub = self.store.pubsub()
    # subscribe to the tweets channel in the background
    ensure_future(self.subscribe(pubsub))
    routes = [Router('/', get=self.home_page),
              MediaRouter('/static', STATIC_DIR),
              WebSocket('/message', TweetsWsHandler(pubsub, self.channel))]
    return WsgiHandler(routes)
def parse(self, mem_limit=None, **kw):
    """Parse the request body; returns a future when the input is async."""
    if self.content_length > self.limit:
        raise_large_body_error(self.limit)
    stream = self.environ.get('wsgi.input') or BytesIO()
    payload = stream.read()
    if isawaitable(payload):
        return ensure_future(self._async(payload))
    return self._ready(payload)
def parse(self, mem_limit=None, **kw):
    """Parse the request body; returns a future when the input is async."""
    if self.content_length > self.limit:
        raise_large_body_error(self.limit)
    stream = self.environ.get("wsgi.input") or BytesIO()
    payload = stream.read()
    if isawaitable(payload):
        return ensure_future(self._async(payload))
    return self._ready(payload)
def parse(self, mem_limit=None, **kw):
    """Parse the request body, enforcing *mem_limit* (defaults to
    ``DEFAULT_MAXSIZE``).

    Returns a future when the wsgi input is asynchronous.

    Raises:
        HttpException: with ``LARGE_BODY_CODE`` when the declared
            content length exceeds the memory limit.
    """
    mem_limit = mem_limit or DEFAULT_MAXSIZE
    if self.content_length > mem_limit:
        # fixed typo in the user-facing message ("to big" -> "too big")
        raise HttpException("Request too big. Increase MAXMEM.",
                            status=LARGE_BODY_CODE)
    inp = self.environ.get('wsgi.input') or BytesIO()
    data = inp.read()
    if isawaitable(data):
        return ensure_future(self._async(data))
    else:
        return self._ready(data)
def parse(self):
    """Consume the multipart/form-data input stream.

    Raises:
        HttpException: 422 when the boundary option is missing or invalid.
    """
    boundary = self.options.get("boundary", "")
    if not valid_boundary(boundary):
        raise HttpException("Invalid boundary for multipart/form-data",
                            status=422)
    stream = self.environ.get("wsgi.input") or BytesIO()
    self.buffer = bytearray()
    if isinstance(stream, HttpBodyReader):
        # asynchronous body - consume it on the reader's event loop
        return ensure_future(self._consume(stream, boundary),
                             loop=stream.reader._loop)
    return BytesProducer(stream)(self._consume, boundary)
def parse(self):
    """Consume the multipart/form-data input stream.

    Raises:
        HttpException: 422 when the boundary option is missing or invalid.
    """
    boundary = self.options.get('boundary', '')
    if not valid_boundary(boundary):
        raise HttpException("Invalid boundary for multipart/form-data",
                            status=422)
    stream = self.environ.get('wsgi.input') or BytesIO()
    self.buffer = bytearray()
    if isinstance(stream, HttpBodyReader):
        # asynchronous body - consume it on the reader's event loop
        return ensure_future(self._consume(stream, boundary),
                             loop=stream.reader._loop)
    return BytesProducer(stream)(self._consume, boundary)
def __init__(self, consumer, api_key, api_secret, account):
    # Per-account twitter stream worker: builds OAuth1 credentials from
    # the ``account`` mapping and starts the queue-consuming coroutine.
    secret = account.get("access_token_secret")
    self.processed = 0
    self.consumer = consumer
    self.logger = consumer.logger
    self.token = account.get("access_token")
    self.filter = account.get("stream_filter")
    self.oauth = OAuth1(api_key,
                        client_secret=api_secret,
                        resource_owner_key=self.token,
                        resource_owner_secret=secret)
    # NOTE(review): ``self._loop`` is never assigned in this __init__ -
    # presumably provided by the class (property/base class); verify.
    # Also note asyncio.Queue(loop=...) was removed in Python 3.10.
    self._queue = Queue(loop=self._loop)
    self._buffer = []
    self._worker = ensure_future(self._queue_worker(), loop=self._loop)
def __iter__(self):
    """Yield bytes chunks, or Futures resolving to bytes.

    The stream may only be iterated once.
    """
    if self._streamed:
        raise StreamConsumedError
    self._streamed = True
    self(self._response)
    # while the stream is live, hand out futures for the next chunk
    while not self.done:
        yield ensure_future(self._queue.get())
    # stream finished - drain whatever is left in the queue
    while True:
        try:
            yield self._queue.get_nowait()
        except asyncio.QueueEmpty:
            break
def _next_class(self, tag, test_classes):
    """Schedule the tests of the next class yielded by *test_classes*."""
    try:
        test_cls = next(test_classes)
    except StopIteration:
        # no more classes in this file - move to the next file
        return self._loop.call_soon(self._next_file)
    test_cls.tag = tag
    test_cls.cfg = self.cfg
    test_cls.wait = AsyncAssert(test_cls)
    try:
        all_tests = self.runner.loadTestsFromTestCase(test_cls)
    except Exception:
        self.logger.exception('Could not load tests')
        count = 0
    else:
        count = all_tests.countTestCases()
    if count:
        self.logger.info('Running Tests from %s', test_cls)
        self.runner.startTestClass(test_cls)
        ensure_future(self._run_test_cls(test_cls, test_classes, all_tests),
                      loop=self._loop)
    else:
        # empty class - try the next one from the same iterator
        self._loop.call_soon(self._next_class, tag, test_classes)
def _next_class(self, tag, test_classes):
    """Load the next test class from *test_classes* and schedule its tests.

    When a class yields no runnable tests, continue with the next
    CLASS from the same iterator rather than jumping to the next
    file - jumping would silently skip the remaining classes of the
    current file.
    """
    try:
        test_cls = next(test_classes)
    except StopIteration:
        return self._loop.call_soon(self._next_file)
    test_cls.tag = tag
    test_cls.cfg = self.cfg
    test_cls.wait = AsyncAssert(test_cls)
    try:
        all_tests = self.runner.loadTestsFromTestCase(test_cls)
    except Exception:
        self.logger.exception('Could not load tests')
        run = False
    else:
        run = all_tests.countTestCases()
    if run:
        self.logger.info('Running Tests from %s', test_cls)
        self.runner.startTestClass(test_cls)
        coro = self._run_test_cls(test_cls, test_classes, all_tests)
        ensure_future(coro, loop=self._loop)
    else:
        # was: call_soon(self._next_file), which dropped the remaining
        # classes of this file
        self._loop.call_soon(self._next_class, tag, test_classes)
async def worker_start(self, worker, exc=None):
    """Create the executor at worker startup and run the requested
    command, scheduling the exit callback once it completes.

    Does nothing when the worker failed to start (``exc`` is set).
    """
    if exc:
        return
    try:
        # removed leftover debug output: print(self.script)
        executor = await self.executor(loop=worker._loop)
    except Exception:
        self.logger.exception('Could not initialise')
        # exit code 2 signals the initialisation failure
        worker._loop.call_soon(self.done, 2)
    else:
        if executor.cfg.list_commands:
            executed = executor.list_commands()
        elif executor.cfg.environ:
            executed = executor.show_environ()
        else:
            executed = executor.run()
        fut = ensure_future(executed, loop=worker._loop)
        fut.add_done_callback(self._exit)
def close(self, msg=None):
    '''Close this :class:`.TaskBackend`.

    Invoked by the :class:`.Actor` when stopping.
    '''
    if not self._closing_waiter:
        if msg:
            self.logger.warning(msg)
        # collect the consumer close operations still in flight
        pending = [fut for fut in
                   (consumer.close() for consumer in self.consumers)
                   if not fut.done()]
        self._closing_waiter = ensure_future(
            _close(self, pending, self._loop), loop=self._loop)
    return self._closing_waiter
def __call__(self, environ, start_response):
    """The WSGI callable.

    Dispatches to the async path when the handler is asynchronous,
    otherwise walks the middleware chain and starts the response.
    """
    # ``async`` became a reserved keyword in Python 3.7, so the original
    # ``self.async`` no longer compiles; getattr keeps the attribute
    # name (and any external writers of it) unchanged.
    if getattr(self, 'async'):
        return ensure_future(self._async(environ, start_response))
    response = None
    try:
        for middleware in self.middleware:
            response = middleware(environ, start_response)
            if response is not None:
                break
        if response is None:
            raise Http404
    except Exception as exc:
        response = handle_wsgi_error(environ, exc)
    if isinstance(response, WsgiResponse) and not response.started:
        for middleware in self.response_middleware:
            # a response middleware may return None to keep the current one
            response = middleware(environ, response) or response
        response.start(start_response)
    return response
def __call__(self, twitter, messages):
    """Forward twitter *messages* to the pub/sub channel."""
    if not self.store:
        # lazily create the data store and pub/sub handler on first call
        store = create_store(twitter.cfg.data_store)
        self.store = store
        self.pubsub = store.pubsub()
    ensure_future(self._publish(messages))
async def _may_poll_task(self, worker):
    # Called in the ``worker`` event loop.
    #
    # It pools a new task if possible, and add it to the queue of
    # tasks consumed by the ``worker`` CPU-bound thread.'''
    task = None
    next_time = None
    lag = 0
    if worker.is_running():
        loop = worker._loop
        if self.num_concurrent_tasks < self.max_concurrent_tasks:
            max_tasks = self.cfg.max_requests
            if max_tasks and self._processed >= max_tasks:
                # reached the per-worker request cap - stop the backend
                self.backend.close(
                    'Processed %s tasks. Stop polling tasks.'
                    % self._processed
                )
            if not self.closing():
                try:
                    # measure broker latency (lag) around the fetch
                    t0 = loop.time()
                    task = await self.broker.get_message(*self.queues())
                    lag = loop.time() - t0
                except ConnectionError:
                    # exponential backoff only on repeated failures
                    if self.broker.connection_error:
                        next_time = backoff(self._next_time)
                    else:
                        next_time = RECONNECT_LAG
                    self.broker.connection_error = True
                    if worker.is_running():
                        self.logger.critical(
                            '%s cannot pool messages - '
                            'connection error - try again in %s seconds',
                            self.broker, next_time
                        )
                except CANCELLED_ERRORS:
                    # propagate cancellation so shutdown is not swallowed
                    self.logger.debug('stopped polling messages')
                    raise
                except Exception:
                    if worker.is_running():
                        self.logger.exception('server exception')
                else:
                    self.broker.connection_ok()
            if task:    # Got a new task
                self._processed += 1
                self._concurrent_tasks[task.id] = TaskExecutor(task)
                ensure_future(self._execute_task(task, worker))
        else:
            self.logger.debug('%s concurrent messages. Cannot poll.',
                              self.max_concurrent_tasks)
    # NOTE(review): poll_time presumably scales the delay with current
    # load (concurrency ratio) and broker lag - confirm its contract.
    if next_time is None:
        next_time = poll_time(
            self.cfg.task_pool_timeout,
            self.cfg.task_pool_timeout_max,
            self.num_concurrent_tasks/self.max_concurrent_tasks,
            lag
        )
    self._next_time = next_time
    # always re-arm the polling loop
    self._poll_tasks(worker, next_time)
def __call__(self, request):
    """Schedule execution of *request*; return the resulting future."""
    coro = self._execute_request(request)
    return ensure_future(coro)
def put(self, request):
    """Queue *request* asynchronously; return the scheduled future."""
    coro = self._async_put(request)
    return ensure_future(coro)
def test_coroutine(self):
    """``main(loop, 1)`` completes and resolves to 9."""
    task = ensure_future(main(self.loop, 1), loop=self.loop)
    self.loop.run_until_complete(task)
    self.assertEqual(task.result(), 9)
def start_working(self, worker):
    """Kick off the worker's run coroutine immediately."""
    run_coro = self.run(worker)
    pulsar.ensure_future(run_coro)
def start(arbiter, **kw):
    """Arbiter start hook: schedule the application coroutine."""
    coro = app(arbiter)
    ensure_future(coro)