def decorator(*args, **kwargs):
    with self.sync_semaphore:
        # Wait out the remainder of the rate-limit window, then call
        # through to the wrapped function while still holding the semaphore.
        wait_time = self.get_wait_time()
        if wait_time > 0:
            sync_sleep(wait_time)
        return self.fn(*args, **kwargs)
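# The fragment above references self.sync_semaphore, self.get_wait_time(),
# and self.fn, so it presumably lives inside a throttling wrapper class.
# A minimal sketch of such a class, assuming a fixed minimum interval
# between calls (Throttle, interval, and _last_call are hypothetical
# names, not taken from the source):
import threading
from time import monotonic, sleep as sync_sleep  # sync_sleep used by the fragment above


class Throttle:
    def __init__(self, fn, interval):
        self.fn = fn
        self.interval = interval  # minimum seconds between calls
        self.sync_semaphore = threading.Semaphore(1)
        self._last_call = 0.0

    def get_wait_time(self):
        # Seconds left until the next call is allowed; <= 0 means go now.
        # A full implementation would also update self._last_call after
        # each call to self.fn.
        return self._last_call + self.interval - monotonic()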
async def main():
    from time import sleep as sync_sleep
    queue_size = await fill_queue()
    logger.info("%i tokens added to ticker queue", queue_size)
    while True:
        try:
            token = tokens_queue.get_nowait()
        except QueueEmpty:
            # Queue drained: prune stale rows, then refill.
            async with App().db.acquire_connection() as conn:
                await conn.execute("""
                    DELETE FROM tickers
                    WHERE "updated" < NOW() - '1 month'::INTERVAL;
                """)
            queue_size = await fill_queue()
            if queue_size == 0:
                logger.warning(
                    "No tokens added to ticker queue: pausing for 5 minutes")
                # Prevent busy loops; note this blocks the event loop.
                sync_sleep(300)
        else:
            try:
                await update_ticker(token)
            except Exception:
                logger.exception("Exception while processing token addr=%s",
                                 token)
        await asyncio.sleep(0.1)
def check_exception(*args, **kwargs):
    while True:
        try:
            return func(*args, **kwargs)
        except TooManyRequests as e:
            # Rate limited: honour the server's retry-after hint.
            sync_sleep(e.retry_after)
        except RequestException:
            # Transient transport error: retry immediately.
            continue
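# check_exception closes over func, so it is presumably the inner function
# of a retry decorator. A hedged sketch of what the enclosing decorator
# might look like; retry_requests is a hypothetical name, and the two
# exception classes are stand-ins for whatever the HTTP client in use
# actually raises:
import functools
from time import sleep as sync_sleep


class RequestException(Exception):
    pass


class TooManyRequests(RequestException):
    def __init__(self, retry_after=1.0):
        super().__init__()
        self.retry_after = retry_after


def retry_requests(func):
    @functools.wraps(func)
    def check_exception(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
            except TooManyRequests as e:
                sync_sleep(e.retry_after)  # back off as instructed
            except RequestException:
                continue  # transient failure: retry immediately
    return check_exception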
def test_raise_on_disconnect(app: App):
    caught = Value("i", 0)

    @app.route("/inf")
    async def infinity(req, res):
        @res.stream(raise_on_disconnect=True)
        async def stream():
            nonlocal caught
            try:
                while True:
                    yield "∞"
            except ClientDisconnect:
                caught.value = 1

    with LiveServer(app) as server:
        r = requests.get(f"{server.url}/inf", stream=True)
        assert r.status_code == 200
        r.close()
        sync_sleep(0.1)
        assert caught.value
def terminate_children(run_helper_task: Task[None]) -> None:
    """Send SIGTERM to all children of this process"""
    signal_counter = 0
    pids_sigtermed = set()

    def signal_all_children() -> None:
        nonlocal signal_counter
        signal_counter += 1
        # Send SIGTERM to all our children, enumerated via procfs.
        for task_dir in Path('/proc/self/task/').iterdir():
            with open(task_dir / 'children') as children_file:
                children_file_pids = children_file.read().split()
            for pid in (int(pid_str) for pid_str in children_file_pids):
                if signal_counter > 20:
                    # SIGTERM was sent 20 times and the child still has
                    # not exited: escalate to SIGKILL.
                    kill(pid, SIGKILL)
                    continue
                if pid not in pids_sigtermed:
                    kill(pid, SIGTERM)
                    pids_sigtermed.add(pid)

    signal_all_children()
    while True:
        # Reap the remaining children. WNOHANG keeps wait3() from
        # blocking, but the polling loop may stall shutdown and can
        # race with SIGCHLD handling.
        try:
            pid_reaped, _, _ = wait3(WNOHANG)
            if pid_reaped == 0:
                sync_sleep(0.5)
                signal_all_children()
        except ChildProcessError:
            break
    run_helper_task.cancel()
async def graceful_exit(restart=False):
    """Kill the bot gracefully"""
    print("I breathe my last breath...")
    if restart:
        # Relaunch the same interpreter with the same arguments.
        cmd = ' '.join((sys.executable, *sys.argv))
        os.system(cmd)
    # Clean up the asyncio loop: cancel every task except this one.
    loop = get_event_loop()
    tasks = [t for t in loop.all_tasks() if t is not loop.current_task()]
    for task in tasks:
        task.cancel()
    await bot.close()
    loop.stop()
    from time import sleep as sync_sleep
    sync_sleep(1)
    loop.close()
    # Exit the process; swallow the resulting SystemExit if something
    # upstream catches it first.
    try:
        sys.exit(0)
    except (RuntimeError, SystemExit):
        pass
def _run(
    cls,
    worker_kwargs,
    worker_start_args,
    silence_logs,
    init_result_q,
    child_stop_q,
    uid,
    env,
    config,
    Worker,
):  # pragma: no cover
    try:
        os.environ.update(env)
        dask.config.set(config)
        try:
            from dask.multiprocessing import initialize_worker_process
        except ImportError:  # old Dask version
            pass
        else:
            initialize_worker_process()

        if silence_logs:
            logger.setLevel(silence_logs)

        IOLoop.clear_instance()
        loop = IOLoop()
        loop.make_current()
        worker = Worker(**worker_kwargs)

        async def do_stop(timeout=5, executor_wait=True):
            try:
                await worker.close(
                    report=True,
                    nanny=False,
                    safe=True,  # TODO: Graceful or not?
                    executor_wait=executor_wait,
                    timeout=timeout,
                )
            finally:
                loop.stop()

        def watch_stop_q():
            """
            Wait for an incoming stop message and then stop the
            worker cleanly.
            """
            msg = child_stop_q.get()
            child_stop_q.close()
            assert msg.pop("op") == "stop"
            loop.add_callback(do_stop, **msg)

        t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
        t.daemon = True
        t.start()

        async def run():
            """
            Try to start worker and inform parent of outcome.
            """
            try:
                await worker
            except Exception as e:
                logger.exception("Failed to start worker")
                init_result_q.put({"uid": uid, "exception": e})
                init_result_q.close()
                # If we hit an exception here we need to wait for at least
                # one interval for the outside to pick up this message.
                # Otherwise we arrive in a race condition where the process
                # cleanup wipes the queue before the exception can be
                # properly handled. See also
                # WorkerProcess._wait_until_connected (the 2 is for good
                # measure)
                sync_sleep(cls._init_msg_interval * 2)
            else:
                try:
                    assert worker.address
                except ValueError:
                    pass
                else:
                    init_result_q.put({
                        "address": worker.address,
                        "dir": worker.local_directory,
                        "uid": uid,
                    })
                    init_result_q.close()
                    await worker.finished()
                    logger.info("Worker closed")
    except Exception as e:
        logger.exception("Failed to initialize Worker")
        init_result_q.put({"uid": uid, "exception": e})
        init_result_q.close()
        # If we hit an exception here we need to wait for at least one
        # interval for the outside to pick up this message. Otherwise we
        # arrive in a race condition where the process cleanup wipes the
        # queue before the exception can be properly handled. See also
        # WorkerProcess._wait_until_connected (the 2 is for good measure)
        sync_sleep(cls._init_msg_interval * 2)
    else:
        try:
            loop.run_sync(run)
        except (TimeoutError, gen.TimeoutError):
            # Loop was stopped before wait_until_closed() returned; ignore
            pass
        except KeyboardInterrupt:
            # At this point the loop is not running, thus we have to run
            # do_stop() explicitly.
            loop.run_sync(do_stop)
def __exit__(self, exc_type, exc_val, exc_tb):
    if isinstance(exc_val, self._exception):
        sync_sleep(self._delay)
        return True
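# Returning True from __exit__ suppresses the matched exception, so this
# is presumably a "pause, then let the caller retry" context manager. A
# minimal sketch of the enclosing class under that assumption
# (RetryContext is a hypothetical name, not from the source):
from time import sleep as sync_sleep


class RetryContext:
    def __init__(self, exception=Exception, delay=1.0):
        self._exception = exception
        self._delay = delay

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if isinstance(exc_val, self._exception):
            sync_sleep(self._delay)
            return True  # suppress the exception; the caller may retry


# Usage sketch: swallow ConnectionError and pause before the next attempt.
# for attempt in range(5):
#     with RetryContext(ConnectionError, delay=2.0):
#         connect()
#         break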
def get(self, **kwargs):
    a1 = ''
    a2 = ''
    u1 = ''
    u2 = ''
    text = ''
    # self.params uses zero-based names; the kwargs are one-based.
    if 'avatar0' in self.params:
        a1 = kwargs.pop('avatar1', None)
        if not a1:
            raise MissingParameterError('avatar1')
    if 'avatar1' in self.params:
        a2 = kwargs.pop('avatar2', None)
        if not a2:
            raise MissingParameterError('avatar2')
    if 'username0' in self.params:
        u1 = kwargs.pop('username1', None)
        if not u1:
            raise MissingParameterError('username1')
    if 'username1' in self.params:
        u2 = kwargs.pop('username2', None)
        if not u2:
            raise MissingParameterError('username2')
    if 'text' in self.params:
        text = kwargs.pop('text', None)
        if not text:
            raise MissingParameterError('text')
    params = {
        "text": text,
        "username1": u1,
        "username2": u2,
        "avatar1": a1,
        "avatar2": a2,
    }
    params.update(kwargs)
    for tries in range(5):
        r = make_get_request('%s/api/%s' % (self.BASE, self.name),
                             headers=self.headers, params=params)
        if not self._global_over:
            continue
        if r.status_code == 429:
            # The retry-after header appears to be in milliseconds.
            retry_after = int(r.headers['retry-after']) / 1000
            data = r.json()
            is_global = data.get('global', False)
            if is_global:
                self._global_over = False
            sync_sleep(retry_after)
            if is_global:
                self._global_over = True
            continue
        if r.status_code == 200:
            result = r.content
            return BytesIO(result), r.headers['Content-Type'].split('/')[1]
        if r.status_code in (500, 502):
            # Server error: back off with a growing delay and retry.
            sync_sleep(1 + tries * 2)
            continue
        result = r.text
        if r.status_code == 401:
            raise IncorrectTokenError
        elif r.status_code == 403:
            raise Forbidden
        elif r.status_code == 400:
            raise BadRequest(result)
        elif r.status_code == 404:
            raise NotFound(result)
        raise HTTPError(result)
def handle_interrupt_signal(server):
    server.close()
    # Poll until the server has finished shutting down.
    while server.is_serving():
        sync_sleep(0.1)
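# Wiring sketch: how such a handler might be registered for SIGINT. The
# server variable and this registration are assumptions, not from the
# source; the lambda adapts the (signum, frame) handler signature.
import signal

signal.signal(signal.SIGINT,
              lambda signum, frame: handle_interrupt_signal(server))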