def broadcast_games():
    """Publish a summary of every active game to the "lobbyinfo" channel.

    On first invocation, installs a module-level PeriodicCallback so this
    broadcast repeats every 6 seconds.

    Returns:
        dict: the data dictionary that was published (also returned when
        there are no games, in which case it may be empty of game entries).
    """
    global pcb
    data_dict = {}
    games_dict = {}

    # Lazily schedule this function to re-run every 6000 ms.
    if pcb is None:
        pcb = PeriodicCallback(broadcast_games, 6000)
        pcb.start()

    g_list = ww_redis_db.keys("g_list:*")
    # BUG FIX: removed a dead loop that decoded every key into a throwaway
    # loop variable; keys are decoded below where they are actually used.
    if len(g_list) > 0:
        for g_key in g_list:
            # Keys look like b"g_list:<game-id>"; extract the id part.
            g_id = str(g_key.decode("utf-8")).split(":")[1]
            game = Game(g_id)
            games_dict["game:" + g_id] = game.as_JSON()

    data_dict["game"] = games_dict
    data_dict["channel"] = "lobbyinfo"
    publish_data("lobbyinfo", data_dict)

    return data_dict
class WebSocketChatHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint that relays chat messages between web clients.

    NOTE(review): this handler reads ``self.clients``/``self.messages``/
    ``self.messages_log``/``self.player_manager``/``self.factory`` —
    ``messages``, ``messages_log``, ``player_manager`` and ``factory`` are
    not defined in this block; presumably they are class-level attributes
    set elsewhere in the file — confirm. Also note ``initialize`` resets
    ``self.clients`` per connection, so the client list appears to be
    per-handler rather than shared — verify against intended behaviour.
    """

    def initialize(self):
        self.clients = []
        # Flush pending messages to connected clients every 500 ms.
        self.callback = PeriodicCallback(self.update_chat, 500)
        self.web_gui_user = self.player_manager.get_by_name(self.get_secure_cookie("player"))

    def open(self, *args):
        # Register this connection and replay the chat history to it.
        self.clients.append(self)
        for msg in self.messages_log:
            self.write_message(msg)
        self.callback.start()

    def on_message(self, message):
        # Incoming payload is JSON; the raw text is queued for broadcast and
        # also appended to the persistent log.
        messagejson = json.loads(message)
        self.messages.append(message)
        self.messages_log.append(message)
        # Forward to the game server with in-band colour codes.
        self.factory.broadcast("^yellow;<{d}> <^red;{u}^yellow;> {m}".format(
            d=datetime.now().strftime("%H:%M"),
            u=self.web_gui_user.name,
            m=messagejson["message"]), 0, "")

    def update_chat(self):
        # Periodic tick: drain the pending-message buffer to every client.
        if len(self.messages) > 0:
            for message in sorted(self.messages):
                for client in self.clients:
                    client.write_message(message)
            # Clear the buffer in place.
            del self.messages[0:len(self.messages)]

    def on_close(self):
        # Deregister and stop the periodic flush for this connection.
        self.clients.remove(self)
        self.callback.stop()
class LoLAPI(object):
    """Pushes periodic database statistics to a connected RPC client.

    A 1-second PeriodicCallback drives :meth:`status`; :meth:`detach`
    stops it.
    """

    def __init__(self, client):
        # Re-send the status snapshot once per second on the main IOLoop.
        self.timer = PeriodicCallback(self.status, 1000, IOLoop.instance())
        self.client = client
        self.timer.start()

    def status(self):
        """Build a snapshot of collection counts and push it to the client."""
        snapshot = {
            'last_updated': datetime.now().strftime("%H:%M:%S %d-%m-%y"),
            'game_stats': db.games_data.count(),
            'players': db.users.count(),
            'full_games': db.games.count(),
            'invalid_games': db.invalid_games.count(),
        }
        self.client.one.update_status(snapshot)

    def set_user(self, name):
        """Look up a user by name and push their game list to the client."""
        self.user = User.by_name(name)
        records = GameStats.find({'summoner': self.user.get_dbref()})
        games = [Game.find_one(record['game_id']) for record in records]
        # Placeholder payload for now; the real list is commented out below.
        self.client.one.update_games([1, 2, 3, 4, 5, 6, 7])
        # self.client.one.update_games(list(stats))

    def detach(self):
        """Stop the periodic status broadcast."""
        self.timer.stop()
class Service(Scaffold):
    """Scaffold subclass that re-runs its work on a fixed interval."""

    def __init__(self, interval=1):
        '''interval is in seconds'''
        super(Service, self).__init__()
        # PeriodicCallback expects milliseconds.
        self.interval = interval * 1000
        self.periodicalCb = None

    def stop(self):
        # Stop the periodic re-run, if it was ever started.
        if self.periodicalCb:
            self.periodicalCb.stop()

    def run(self, *args, **kwargs):
        # Run once immediately, then schedule the parent's run() to repeat.
        super(Service, self).run(*args, **kwargs)
        # NOTE: the callback re-invokes the *parent* run, not this override,
        # so it does not recursively reschedule itself.
        self.periodicalCb = PeriodicCallback(
            partial(super(Service, self).run, *args, **kwargs),
            self.interval, IOLoop.instance())
        self.periodicalCb.start()
        # Blocks here until the IOLoop is stopped.
        IOLoop.instance().start()

    def main(self):
        ''' Subclass this method '''
        logging.error('Subclass main method... %s' % time.clock())
class EventedStatsCollector(StatsCollector):
    """
    Stats Collector which allows to subscribe to value changes.
    Update notifications are throttled: interval between updates is no
    shorter than ``accumulate_time``.

    It is assumed that stat keys are never deleted.
    """
    accumulate_time = 0.1  # value is in seconds

    def __init__(self, crawler):
        super(EventedStatsCollector, self).__init__(crawler)
        self.signals = SignalManager(self)
        # Pending changes accumulated since the last emit.
        self._changes = {}
        # PeriodicCallback expects milliseconds.
        self._task = PeriodicCallback(self.emit_changes, self.accumulate_time*1000)
        self._task.start()

        # FIXME: this is ugly
        self.crawler = crawler  # used by ArachnadoCrawlerProcess

    def emit_changes(self):
        # Flush everything accumulated so far as a single signal, atomically
        # swapping in a fresh dict so new changes are not lost.
        if self._changes:
            changes, self._changes = self._changes, {}
            self.signals.send_catch_log(stats_changed, changes=changes)

    def open_spider(self, spider):
        super(EventedStatsCollector, self).open_spider(spider)
        # NOTE(review): the task was already started in __init__ and is
        # started again here — confirm the double start() is intentional.
        self._task.start()

    def close_spider(self, spider, reason):
        super(EventedStatsCollector, self).close_spider(spider, reason)
        self._task.stop()
class TempSocketHandler(websocket.WebSocketHandler):
    """Websocket handler that pushes one random JSON sample per second.

    Class attributes are shared across all connections.
    """
    waiters = set()    # all currently-connected handler instances
    cache = []         # shared message history
    cache_size = 200   # bound on the history length
    actual_flag = ''

    def allow_draft76(self):
        # Permit clients using the legacy draft-76 websocket protocol.
        return True

    def open(self):
        TempSocketHandler.waiters.add(self)
        # Emit one sample every 1000 ms for this connection.
        self.callback = PeriodicCallback(self.send_flag, 1000)
        self.callback.start()

    def on_close(self):
        # BUG FIX: the periodic callback was never stopped, so it kept firing
        # (and trying to write) after the socket had closed.
        if getattr(self, 'callback', None) is not None:
            self.callback.stop()
        # discard() instead of remove(): closing a socket that never finished
        # open() must not raise KeyError.
        TempSocketHandler.waiters.discard(self)

    @classmethod
    def update_cache(cls, chat):
        """Append a message to the shared history, trimming to cache_size."""
        cls.cache.append(chat)
        if len(cls.cache) > cls.cache_size:
            cls.cache = cls.cache[-cls.cache_size:]

    def send_flag(self):
        """Periodic tick: send a timestamped random value to this client."""
        if len(self.waiters) > 0:
            logging.info("sending message to %d waiters", len(self.waiters))
            data = ('{"time":"' + str(int(time.time())) + '","data":"'
                    + str(random.randrange(0, 100)) + '"}')
            self.write_message(data)
        else:
            logging.info("No one is waiting...")
def start(self): periodic_task = PeriodicCallback( self.update, self.application.config.UPDATE_PERIOD * 1000, io_loop=self.main_loop ) periodic_task.start()
def broadcast_match_info():
    """Publish per-match status (colour, countdown, stored bet, odds) to the
    'matchinfo' channel; installs a 500 ms PeriodicCallback on first call."""
    global pcb
    if pcb is None:
        pcb = PeriodicCallback(broadcast_match_info, 500)
        pcb.start()
    matches = MatchModel.objects.all()
    match_status = {}
    for match in matches:
        fixture = FixtureModel.objects.get(id=match.match_id)
        r = ResultModel.objects.get(id=match.match_id)
        # A decided match is shown as 'info' and needs no countdown.
        # NOTE: the result field stores the literal string 'None' when unset.
        if(r.result != 'None'):
            match_status['color'+str(match.match_id)] = 'info'
            continue
        # Time remaining until kick-off (negative once the match has started).
        dd = fixture.match_day - timezone.now()
        dd_str = None
        if(timezone.now() > fixture.match_day):
            # Betting window closed.
            dd_str = "Locked"
            match_status['color'+str(match.match_id)] = 'danger'
        else:
            # Render the remaining timedelta as d:h:m:s.
            dd_str = "%sd:%sh:%sm:%ss" %(str(dd.days),str((dd.seconds//3600)%24),str((dd.seconds%3600)//60), str((dd.seconds%3600)%60),)
            match_status['color'+str(match.match_id)] = 'success'
        match_status['time_left'+str(match.match_id)] = dd_str
        # NOTE(review): this key uses match.id while every other key uses
        # match.match_id — confirm whether that asymmetry is intentional.
        match_status['storedbet'+str(match.id)] = "%s %s" %(match.betting_side, match.betting_points,)
        match_status['odds'+str(match.match_id)] = "%s:%s" %(fixture.home_odds, fixture.away_odds,)
    publish_data('matchinfo', {
        'match_status': match_status,
    })
class WebSocketHandler(websocket.WebSocketHandler): def initialize(self, queue): self.clients = dict() self.queue = queue self.callback = PeriodicCallback(self.message_clients, 120) self.callback.start() def open(self, *args): self.id = self.get_argument("id") self.stream.set_nodelay(True) self.clients[self.id] = {"id": self.id, "object": self} def on_message(self, message): """ when we receive some message we want some message handler.. for this example i will just print message to console """ print "Client %s received a message : %s" % (self.id, message) def on_close(self): if self.id in self.clients: del self.clients[self.id] print "Removed client " + self.id def message_clients(self): message = self.queue.get() for client in self.clients: try: self.write_message(message) except: print "Message could not be written"
class WSHandler(tornado.websocket.WebSocketHandler):
    """Streams (signal, value) pairs from the shared q_live queue as JSON."""

    def check_origin(self, origin):
        # Accept connections from any origin.
        return True

    def open(self):
        # Drop any stale readings queued before this client connected.
        # (Reaches into Queue internals, guarded by its mutex.)
        with q_live.mutex:
            q_live.queue.clear()
        # Poll the queue every 1 ms.
        self.callback = PeriodicCallback(self.send_werte, 1)
        self.callback.start()
        print ('Connection open')

    def send_werte(self):
        # Periodic tick: if a reading is available, send it as a JSON object
        # mapping signal name -> value.  ("werte"/"senden" = values/send.)
        if not q_live.empty():
            signals, values = q_live.get()
            senden = dict(zip(signals,values))
            print(senden)
            json_send = json.dumps(senden)
            self.write_message(json_send)
            print(q_live.qsize())
            # If the consumer falls behind, discard the backlog.
            if q_live.qsize() >15:
                with q_live.mutex:
                    q_live.queue.clear()

    def on_message(self, empf):
        # Incoming data is ignored; just log the event.
        print('Daten recievied: ')

    def on_close(self):
        print('Connection closed!')
        self.callback.stop()
class ThroughputTracker(object):
    """Logs emission throughput over a sliding window of timestamped samples.

    Every 30 seconds a new sample is taken and the rate against each older
    sample in the window is reported via the supplied logger.
    """

    def __init__(self, logger, loop, num_samples=3):
        self.logger = logger
        self.loop = loop
        # callback_time is in milliseconds
        self.throughput_pc = PeriodicCallback(self.onThroughput, 30 * 1000, self.loop)
        self.throughput_pc.start()
        # Bounded window of past samples, newest first.
        self.samples = deque(maxlen=num_samples)
        self.samples.appendleft(ThroughputSample(timestamp=datetime.utcnow(), num_emitted=0))
        # Incremented externally as items are emitted.
        self.num_emitted = 0

    def onThroughput(self):
        """Periodic tick: compute and log emit rates vs. each stored sample."""
        now = datetime.utcnow()
        current = ThroughputSample(timestamp=now, num_emitted=self.num_emitted)
        rendered = []
        for previous in self.samples:
            elapsed = current.timestamp - previous.timestamp
            rate = (current.num_emitted - previous.num_emitted) / elapsed.total_seconds()
            rendered.append('%s|%0.1f' % (elapsed, rate))
        self.samples.appendleft(current)
        self.logger.info('Throughput samples: %s', ', '.join(rendered))
def start_game():
    ''' Main entry point for the application '''
    cache_actions()
    sockets = netutil.bind_sockets(8888)
    #if process.task_id() == None:
    #    tornado.process.fork_processes(-1, max_restarts = 10)
    server = HTTPServer(application)
    server.add_sockets(sockets)
    io_loop = IOLoop.instance()
    session_manager = SessionManager.Instance()
    # Only the parent process (task_id() is None before forking) owns the
    # scoring and session-cleanup timers.
    if process.task_id() == None:
        scoring = PeriodicCallback(scoring_round, application.settings['ticks'], io_loop = io_loop)
        session_clean_up = PeriodicCallback(session_manager.clean_up, application.settings['clean_up_timeout'], io_loop = io_loop)
        scoring.start()
        session_clean_up.start()
    try:
        # Short countdown before the loop starts serving.
        for count in range(3, 0, -1):
            logging.info("The game will begin in ... %d" % (count,))
            sleep(1)
        logging.info("Good hunting!")
        io_loop.start()
    except KeyboardInterrupt:
        # NOTE(review): shutdown path checks task_id() == 0 while startup
        # checked None — confirm which process is expected to clean up.
        if process.task_id() == 0:
            print '\r[!] Shutdown Everything!'
            session_clean_up.stop()
            io_loop.stop()
def broadcast_sys_info():
    """Read one direction token (UP/DOWN/LEFT/RIGHT) from the Arduino serial
    line, bump the matching counters, and publish a one-shot event.

    Installs a 100 ms PeriodicCallback on first call so the poll repeats.
    Publishes nothing when the serial line yields no recognizable token.
    """
    global upCount, downCount, leftCount, rightCount
    global pcb, ser

    # Lazily schedule this poller to re-run every 100 ms.
    if pcb is None:
        pcb = PeriodicCallback(broadcast_sys_info, 100)
        pcb.start()

    valueRead = serialArduino.readline()
    choiceSearch = re.search("UP|DOWN|LEFT|RIGHT", str(valueRead))
    # BUG FIX: the original wrapped everything in try/except AttributeError to
    # catch choiceSearch being None, which also silently swallowed unrelated
    # AttributeErrors.  An explicit guard is narrower and clearer.
    if choiceSearch is None:
        return

    # Per-tick deltas (the *_sent values) vs. running totals (the *Count globals).
    left_sent = 0
    right_sent = 0
    up_sent = 0
    down_sent = 0
    choice = choiceSearch.group(0)
    print(choice)
    if choice == "UP":
        up_sent += 1
        upCount += 1
    elif choice == "DOWN":
        down_sent += 1
        downCount += 1
    elif choice == "LEFT":
        left_sent += 1
        leftCount += 1
    elif choice == "RIGHT":
        right_sent += 1
        rightCount += 1

    publish_data("sysinfo", {"left_t": left_sent, "right_t": right_sent,
                             "top": up_sent, "down": down_sent})
class SendWebSocket(tornado.websocket.WebSocketHandler): #on_message -> receive data #write_message -> send data def __init__(self, *args, **keys): self.i = 0 super(SendWebSocket, self).__init__(*args, **keys) def open(self): self.callback = PeriodicCallback(self._send_message, 1) self.callback.start() print "WebSocket opend" def on_message(self, message): print message def _send_message(self): self.i += 1 self.write_message(str(self.i)) if self.i % 20 == 0: self.write_message("\n") def on_close(self): self.callback.stop() print "WebSocket closed"
def broadcast_players(g_id):
    """Publish the player count of every game whose redis key matches g_id.

    On first invocation installs a 4-second PeriodicCallback; when called
    with a different g_id than the one currently scheduled, the old
    callback is stopped and a new one is started for the new id.

    Returns:
        dict: the published data (redis key -> player count, plus channel).
    """
    global pcb2
    global cur_g_id
    data_dict = {}

    if pcb2 is None:
        cur_g_id = g_id
        pcb2 = PeriodicCallback(lambda: broadcast_players(g_id), 4000)
        pcb2.start()
    elif cur_g_id != g_id:
        # Switched to a different game: restart the broadcast for the new id.
        cur_g_id = g_id
        pcb2.stop()
        pcb2 = PeriodicCallback(lambda: broadcast_players(g_id), 4000)
        pcb2.start()

    g_list = ww_redis_db.keys(g_id + "*")
    # BUG FIX: removed a dead loop that decoded every key into a throwaway
    # loop variable; keys are decoded below where they are actually used.
    if len(g_list) > 0:
        for g_key in g_list:
            game = ww_redis_db.hgetall(g_key)
            # redis returns bytes; convert keys/values to str
            game = {k.decode('utf8'): v.decode('utf8') for k, v in game.items()}
            # Players are stored as a "|"-separated list of uuids.
            players = game['players'].split("|")
            data_dict[g_key.decode("utf-8")] = str(len(players))

    data_dict["channel"] = "player_list"
    publish_data("player_list:" + g_id, data_dict)

    return data_dict
def main():
    """CLI entry point: load a JSON config file, serve the car HTTP app, and
    drive the simulated car state on a periodic callback until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help='config file')
    args = parser.parse_args()
    logger.warn('Reading config from {}'.format(args.config))
    config = {}
    with open(args.config, 'r') as infile:
        config = json.load(infile)
    # An empty config is unusable; exit quietly.
    if config == {}:
        sys.exit()
    serve_config = config.get('car_serve', {})
    logger.warn(serve_config)
    app = CarServer(config)
    try:
        logger.info('Opening HTTP server.')
        http_server = HTTPServer(app)
        http_server.listen(serve_config.get('port', 9001),
                           address=serve_config.get('ip_address', '127.0.0.1'))
        update_ms = serve_config.get('update_ms', 100)
        logger.debug('Registering periodic callback. Every {} ms'.format(update_ms))
        # Advance the physical car model on a fixed cadence.
        i = PeriodicCallback(app.car_state.update_physical_state, update_ms)
        i.start()
        # Blocks until the loop is stopped or interrupted.
        IOLoop.current().start()
    except (SystemExit, KeyboardInterrupt):
        pass
    logger.info('Stopping server.')
    http_server.stop()
    IOLoop.current().stop()
    sys.exit(0)
class cpustatus(tornado.websocket.WebSocketHandler):
    """Pushes the last value received via on_message back out whenever it
    changes, polled twice a second.

    on_message -> receive data; write_message -> send data; used by index.html.
    """
    def open(self):
        #self.i = readData()
        self.i = 0      # most recent value received from the client
        self.last = 0   # last value actually sent, to suppress duplicates
        self.cpu = PeriodicCallback(self._send_cpu, 500)
        #
        self.cpu.start()

    def on_message(self, message):
        # Incoming message is an integer setpoint; mirror it into the
        # module-level MainMotorMax.
        global MainMotorMax
        self.i = int(message)
        MainMotorMax = self.i
        print message

    def _send_cpu(self):
        # Periodic tick: only send when the value changed since last send.
        #self.write_message(str(vmstat()[15]))
        #self.write_message(str(time.time()))
        #self.i = readData()
        if self.i != self.last:
            self.write_message(str(self.i))
            self.last = self.i
            print self.i
    #
    def on_close(self):
        self.cpu.stop()
class WSHandler(tornado.websocket.WebSocketHandler): # track clients: # simplest method is just to keep a list or dict of WSHandler instances: clients = [] def open(self): self.clients.append(self) # print 'New connection was opened' # self.write_message("Welcome to my websocket!") # http://tornado.readthedocs.org/en/latest/ioloop.html # The callback is called every callback_time milliseconds. # class tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop=None) self.callback = PeriodicCallback(self.send_hello, 5000) self.callback.start() def send_hello(self): self.write_message('hello') def msg(self,message): self.write_message(message) threading.Timer(10, self.msg('in timer')).start() print 'in msg'+message # def on_message(self, message): # pass def on_message(self, message): print 'Incoming message:', message self.write_message("You said: " + message) def on_close(self): self.clients.remove(self) print 'Connection was closed...'
def run():
    """Parse CLI flags, build the missile-launcher web application, and serve
    it on port 7777 while driving the launcher's control loop periodically."""
    parser = ArgumentParser()
    parser.add_argument("-f", "--fake", action="store_true",
                        help="Use a fake connection for development")
    parser.add_argument("-i", "--id", default=socket.gethostname(),
                        help="ID of this site")
    args = parser.parse_args()
    # --fake substitutes a development stub for the real USB connection.
    if args.fake:
        m = MissileLauncher(FakeMissileLauncherConnection())
    else:
        m = MissileLauncher(MissileLauncherConnection(0))
    # Shared state handed to every handler.
    config = {
        'launcher': m,
        'id': args.id
    }
    application = Application([
        (r"/position", PositionHandler, config),
        (r"/move/(-?[01])/(-?[01])", PositionHandler, config),
        (r"/move_to/([-0-9.]*)/([-0-9.]*)", MoveHandler, config),
        (r"/fire_at/([-0-9.]*)/([-0-9.]*)", FireHandler, config),
        (r"/calibrate", CalibrateHandler, config),
        (r"/", IndexHandler),
        (r"/static/(.*)", StaticFileHandler, {'path': 'static/'})
    ], debug=True)
    application.listen(7777)
    # Step the launcher's state machine every 100 ms.
    periodic = PeriodicCallback(m.timestep, 100)
    periodic.start()
    print('Site {} listening at http://{}:7777'.format(args.id, socket.gethostname()))
    IOLoop.instance().start()
class WebSocket(tornado.websocket.WebSocketHandler):
    """Broadcast websocket: every incoming message is fanned out to all
    connected clients; a 30 s keep-alive poll prevents idle timeouts."""

    waiters = set()  # multi clients connect OK (shared across instances)
    wdata = ""       # last message received, kept at class level

    def open(self):
        print("open websocket connection")
        WebSocket.waiters.add(self)  # client add
        # Keep-alive poll so idle connections are not timed out.
        self.callback = PeriodicCallback(self._send_message, 30000)
        self.callback.start()

    def on_close(self):
        WebSocket.waiters.remove(self)  # client remove
        self.callback.stop()
        print("close websocket connection")

    def on_message(self, message):
        # Remember the payload and broadcast it to everyone.
        WebSocket.wdata = message
        WebSocket.send_updates(message)

    @classmethod
    def send_updates(cls, message):
        """Send *message* to every connected waiter."""
        print(message + ":connection=" + str(len(cls.waiters)))
        for waiter in cls.waiters:
            try:
                waiter.write_message(message)
            except Exception as e:
                # BUG FIX: the original used print(..., exc_info=True);
                # exc_info is a logging kwarg, so print() raised a TypeError
                # whenever a send actually failed.
                print("Error sending message", e)

    # Keep-alive callback (every 30 s).
    def _send_message(self):
        self.write_message("C:POLLING")
def start(self): periodic_task = PeriodicCallback( self.update, 20, io_loop=self.main_loop ) periodic_task.start()
class WSHandler(tornado.websocket.WebSocketHandler):
    """Demo handler that maintains two rolling channels of random values and
    pushes an update to the client on every timer tick."""

    def initialize(self):
        # One list of points per channel.
        self.values = [[], []]

    def check_origin(self, origin):
        # Accept connections from any origin.
        return True

    def open(self):
        # Reset channel buffers, then push values on a fixed interval.
        self.initialize()
        self.callback = PeriodicCallback(self.send_values, timeInterval)
        self.callback.start()

    def send_values(self):
        """Periodic tick: extend each channel (bounded) and send a message."""
        MAX_POINTS = 30
        # Append one fresh random point per channel, keeping at most
        # MAX_POINTS by dropping the oldest when full.
        for channel in self.values:
            if len(channel) >= MAX_POINTS:
                channel.pop(0)
            channel.append(randint(1, 10))
        # The channel payload is built but immediately replaced below, so
        # only the DataInfo message is actually sent (mirrors the original).
        message = {"Channel0": self.values[0], "Channel1": self.values[1]}
        message = {"DataInfo": [{"id": 40, "sname": "SOG"}]}
        self.write_message(message)

    def on_message(self, message):
        pass

    def on_close(self):
        self.callback.stop()
def start_server():
    ''' Main entry point for the application '''
    sockets = netutil.bind_sockets(config.listen_port)
    server = HTTPServer(app)
    server.add_sockets(sockets)
    io_loop = IOLoop.instance()
    # Score the game every 5 minutes (PeriodicCallback takes milliseconds).
    scoring = PeriodicCallback(
        scoring_round,
        int(5 * 60 * 1000),
        io_loop=io_loop
    )
    scoring.start()
    try:
        sys.stdout.write("\r" + INFO + "The game has begun, good hunting!\n")
        if config.debug:
            sys.stdout.write(WARN + "WARNING: Debug mode is enabled.\n")
        sys.stdout.flush()
        game_history = GameHistory.Instance()
        # Snapshot the game history once a minute.
        history_callback = PeriodicCallback(
            game_history.take_snapshot,
            int(60 * 1000),
            io_loop=io_loop
        )
        history_callback.start()
        # Blocks until stopped or interrupted.
        io_loop.start()
    except KeyboardInterrupt:
        print('\r' + WARN + 'Shutdown Everything!')
    except:
        logging.exception("Main i/o loop threw exception")
    finally:
        io_loop.stop()
        # In debug builds, offer to flush the file cache before exiting.
        if config.debug and \
                raw_input(PROMPT + "Flush Memcache? [Y/n]: ").lower() == 'y':
            print(INFO + 'Flushing cache ...'),
            FileCache.flush()
            print('OK')
        _exit(0)
class SocketHandler(WebSocketHandler):
    """Status-monitor websocket: registers itself in the shared socket list
    and pushes the current data_json snapshot to the client once a second."""

    def check_origin(self, origin):
        """
        Overrides the parent method to return True for any request, since we
        are working without names

        :returns: bool True
        """
        return True

    def open(self):
        logging.info("Connection open from " + self.request.remote_ip)
        # Track this connection in the module-level registry.
        if self not in statusmonitor_open_sockets:
            statusmonitor_open_sockets.append(self)
        # http://stackoverflow.com/a/19571205
        self.callback = PeriodicCallback(self.send_data, 1000)
        self.callback.start()
        start_callback()

    def send_data(self):
        """Periodic tick: push the latest JSON snapshot to this client."""
        self.write_message(data_json)

    def on_close(self):
        self.callback.stop()
        # Deregister and let the data source pause if nobody is listening.
        if self in statusmonitor_open_sockets:
            statusmonitor_open_sockets.remove(self)
        stop_callback()

    def send_update(self):
        # Intentionally a no-op.
        pass
class WebSocketconnectionsHandler(tornado.websocket.WebSocketHandler):
    """Reports the host's inet connection count (via psutil) to the client,
    re-checking once a second and pushing only when the count changes."""

    def __init__(self, *args, **kwargs):
        logger.debug("Creating WebSocket connections handler")
        super(WebSocketconnectionsHandler, self).__init__(*args, **kwargs)
        # No WebSocket connection yet
        self.connected = False
        # We have not counted the connections yet
        self.connections = 0
        # Update the connection count
        self.update()
        # Setup periodic callback via Tornado (started in open()).
        self.periodic_callback = PeriodicCallback(getattr(self, 'update'), 1000)

    def get_connections(self):
        """Count inet connections, filtered by the configured port list.

        Also caches the result on self.connections as a side effect.
        """
        self.connections = 0
        # Get all connections using psutil
        conn = psutil.net_connections('inet')
        if ws.config.CONFIG['PORT'][0] == 'all':
            # If we need the count for all ports we've got it.
            for connection in conn:
                self.connections += 1
        else:
            # Isolate date for the requested ports.
            for port in ws.config.CONFIG['PORT']:
                for connection in conn:
                    if connection.laddr[1] == int(port):
                        self.connections += 1
        return(self.connections)

    def update(self):
        """Periodic tick: push the count to the client iff it changed."""
        # Save the old number of connections
        old = self.connections
        self.get_connections()
        # Check if the number of connections has changed
        if old != self.connections:
            # Send the new data.
            if self.connected:
                # NOTE(review): get_connections() is re-run for the log line
                # and again for the payload — three scans per tick; confirm
                # that's acceptable.
                logger.debug(json.dumps({
                    "connections": self.get_connections()
                }))
                self.write_message(json.dumps({
                    "connections": self.get_connections()
                }))

    def open(self):
        # Send an initial count immediately on connect.
        logger.debug(json.dumps({
            "connections": self.get_connections()
        }))
        self.write_message(json.dumps({
            "connections": self.get_connections()
        }))
        # We have a WebSocket connection
        self.connected = True
        self.periodic_callback.start()

    def on_message(self, message):
        # Any incoming message is answered with a fresh count.
        logger.debug(json.dumps({
            "connections": self.get_connections()
        }))
        self.write_message(json.dumps({
            "connections": self.get_connections()
        }))

    def on_close(self):
        logger.debug("Connection closed")
        # We no longer have a WebSocket connection.
        self.connected = False
        self.periodic_callback.stop()
class AsyncPopenFixed(seesaw.externalprocess.AsyncPopen):
    """
    Start the wait_callback after setting self.pipe, to prevent an infinite
    spew of "AttributeError: 'AsyncPopen' object has no attribute 'pipe'"
    """
    def run(self):
        self.ioloop = IOLoop.instance()
        # Allocate a pseudo-terminal; the child writes to the slave end and
        # we read its combined stdout/stderr from the master end.
        (master_fd, slave_fd) = pty.openpty()

        # make stdout, stderr non-blocking
        fcntl.fcntl(master_fd, fcntl.F_SETFL,
                    fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

        self.master_fd = master_fd
        self.master = os.fdopen(master_fd)

        # listen to stdout, stderr
        self.ioloop.add_handler(master_fd, self._handle_subprocess_stdout,
                                self.ioloop.READ)

        slave = os.fdopen(slave_fd)
        self.kwargs["stdout"] = slave
        self.kwargs["stderr"] = slave
        self.kwargs["close_fds"] = True
        # The ordering fix: self.pipe must exist before the periodic
        # _wait_for_end callback can run.
        self.pipe = subprocess.Popen(*self.args, **self.kwargs)

        self.stdin = self.pipe.stdin

        # check for process exit every 250 ms
        self.wait_callback = PeriodicCallback(self._wait_for_end, 250)
        self.wait_callback.start()
class WebSocket(tornado.websocket.WebSocketHandler):
    """Streams base64-encoded JPEG camera frames to the client on request.

    NOTE(review): relies on module globals ``args`` (CLI flags) and
    ``camera`` (USB capture or PiCamera, depending on args.use_usb) —
    defined elsewhere in the file; confirm.
    """

    def check_origin(self, origin):
        # Accept connections from any origin.
        return True

    def on_message(self, message):
        """Evaluates the function pointed to by json-rpc."""
        # Start an infinite loop when this is called
        if message == "read_camera":
            # Grab-and-send a frame every 10 ms until the socket closes.
            self.camera_loop = PeriodicCallback(self.loop, 10)
            self.camera_loop.start()
        # Extensibility for other methods
        else:
            print("Unsupported function: " + message)

    def loop(self):
        """Sends camera images in an infinite loop."""
        bio = io.BytesIO()
        if args.use_usb:
            # USB webcam path: OpenCV frame -> RGB -> JPEG bytes.
            _, frame = camera.read()
            img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            img.save(bio, "JPEG")
        else:
            # PiCamera path: capture straight into the buffer.
            camera.capture(bio, "jpeg", use_video_port=True)
        try:
            self.write_message(base64.b64encode(bio.getvalue()))
        except tornado.websocket.WebSocketClosedError:
            # Client went away: stop the capture loop.
            self.camera_loop.stop()
def add_periodic(self, callback, secs, name): if name not in self._timers: pcb = PeriodicCallback(callback, 1000*secs, io_loop=self.io_loop) self._timers[name] = (True, pcb) pcb.start() else: logging.warning("periodic timer with name %s already exists!", name)
def start_wanikani_updater():
    """Request a WaniKani update now, then keep requesting one every 30 s."""
    # Fire the first request immediately.
    request_update_wanikani()
    # Then repeat on a 30-second cadence (PeriodicCallback wants milliseconds).
    interval_ms = 30 * 1000
    updater = PeriodicCallback(wrap_traceback(request_update_wanikani), interval_ms)
    updater.start()
class SendWebSocketHandler(tornado.websocket.WebSocketHandler):
    """Pushes the most recent LM35DZ temperature row every 10 seconds.

    on_message receives data; write_message sends data.
    """

    def open(self):
        # Send the latest reading every 10 000 ms while connected.
        self.callback = PeriodicCallback(self._send_message, 10000)
        self.callback.start()
        print("[START] WebSocket")

    def on_message(self, message):
        # Incoming data is only logged.
        print("[START] WebSocket on_message")
        print(message)

    def _send_message(self):
        """Periodic tick: fetch the newest row and send "<timestamp> <value>"."""
        # Newest row first; only the top row is used.
        cur = DB.execute("SELECT * FROM lm35dz ORDER BY YMDHHMM DESC")
        rec = cur.fetchone()
        # IDIOM FIX: compare to None with `is`, not `==`.
        if rec is None:
            send_value = "Data Nothing"
        else:
            send_value = "%s %s" % (rec[0], rec[1])
        self.write_message(send_value)

    def on_close(self):
        self.callback.stop()
        print("[ENDED] WebSocket")
def test__lifecycle_hooks(ManagedServerLoop) -> None:
    """End-to-end check that every server/session lifecycle hook fires, in
    order, and that session-destroyed callbacks become no-ops."""
    application = Application()
    handler = HookTestHandler()
    application.add(handler)
    with ManagedServerLoop(application, check_unused_sessions_milliseconds=30) as server:
        # wait for server callbacks to run before we mix in the
        # session, this keeps the test deterministic
        def check_done():
            if len(handler.hooks) == 4:
                server.io_loop.stop()
        server_load_checker = PeriodicCallback(check_done, 1)
        server_load_checker.start()
        server.io_loop.start()
        server_load_checker.stop()

        # now we create a session
        client_session = pull_session(session_id='test__lifecycle_hooks',
                                      url=url(server),
                                      io_loop=server.io_loop)
        client_doc = client_session.document
        assert len(client_doc.roots) == 1

        server_session = server.get_session('/', client_session.id)
        server_doc = server_session.document
        assert len(server_doc.roots) == 1

        # we have to capture these here for examination later, since after
        # the session is closed, doc.roots will be emptied
        client_hook_list = client_doc.roots[0]
        server_hook_list = server_doc.roots[0]

        client_session.close()

        # expire the session quickly rather than after the
        # usual timeout
        server_session.request_expiration()

        def on_done():
            server.io_loop.stop()
        # Give expiration a moment to run before stopping the loop.
        server.io_loop.call_later(0.1, on_done)
        server.io_loop.start()

    # The full hook sequence, as observed by the handler itself.
    assert handler.hooks == ["server_loaded",
                             "next_tick_server",
                             "timeout_server",
                             "periodic_server",
                             "session_created",
                             "modify",
                             "next_tick_session",
                             "timeout_session",
                             "periodic_session",
                             "session_destroyed",
                             "server_unloaded"]

    assert handler.load_count == 1
    assert handler.unload_count == 1
    # this is 3 instead of 6 because locked callbacks on destroyed sessions
    # are turned into no-ops
    assert handler.session_creation_async_value == 3
    assert client_doc.title == "Modified"
    assert server_doc.title == "Modified"

    # only the handler sees the event that adds "session_destroyed" since
    # the session is shut down at that point.
    assert client_hook_list.hooks == ["session_created", "modify"]
    assert server_hook_list.hooks == ["session_created", "modify"]
class WebSocketChannelsHandler(WebSocketHandler, IPythonHandler):
    """Proxies notebook-client websocket traffic to a kernel gateway, keeping
    the connection alive with periodic pings."""

    session = None        # Jupyter Session for this connection
    gateway = None        # GatewayWebSocketClient proxying to the gateway
    kernel_id = None      # kernel this socket is bound to
    ping_callback = None  # PeriodicCallback sending keep-alive pings

    def check_origin(self, origin=None):
        # Accept connections from any origin.
        return True

    def set_default_headers(self):
        """Undo the set_default_headers in IPythonHandler which doesn't make sense for websockets"""
        pass

    def get_compression_options(self):
        # use deflate compress websocket
        return {}

    def authenticate(self):
        """Run before finishing the GET request

        Extend this method to add logic that should fire before
        the websocket finishes completing.
        """
        # authenticate the request before opening the websocket
        if self.get_current_user() is None:
            self.log.warning("Couldn't authenticate WebSocket connection")
            raise web.HTTPError(403)

        if self.get_argument('session_id', False):
            self.session.session = cast_unicode(
                self.get_argument('session_id'))
        else:
            self.log.warning("No session ID specified")

    def initialize(self):
        self.log.debug("Initializing websocket connection %s",
                       self.request.path)
        self.session = Session(config=self.config)
        self.gateway = GatewayWebSocketClient(
            gateway_url=GatewayClient.instance().url)

    @gen.coroutine
    def get(self, kernel_id, *args, **kwargs):
        # Authenticate, remember the kernel, then let the websocket
        # handshake proceed.
        self.authenticate()
        self.kernel_id = cast_unicode(kernel_id, 'ascii')
        yield super(WebSocketChannelsHandler, self).get(kernel_id=kernel_id,
                                                        *args, **kwargs)

    def send_ping(self):
        # If the connection is gone, stop pinging instead of erroring out.
        if self.ws_connection is None and self.ping_callback is not None:
            self.ping_callback.stop()
            return

        self.ping(b'')

    def open(self, kernel_id, *args, **kwargs):
        """Handle web socket connection open to notebook server and delegate to gateway web socket handler """
        # Keep the socket alive across idle proxies/load balancers.
        self.ping_callback = PeriodicCallback(
            self.send_ping, GATEWAY_WS_PING_INTERVAL_SECS * 1000)
        self.ping_callback.start()

        self.gateway.on_open(
            kernel_id=kernel_id,
            message_callback=self.write_message,
            compression_options=self.get_compression_options())

    def on_message(self, message):
        """Forward message to gateway web socket handler."""
        self.gateway.on_message(message)

    def write_message(self, message, binary=False):
        """Send message back to notebook client.  This is called via callback from self.gateway._read_messages."""
        if self.ws_connection:  # prevent WebSocketClosedError
            if isinstance(message, bytes):
                binary = True
            super(WebSocketChannelsHandler, self).write_message(message, binary=binary)
        elif self.log.isEnabledFor(logging.DEBUG):
            # Connection already closed: log a summary rather than the raw
            # message (which may be large or sensitive).
            msg_summary = WebSocketChannelsHandler._get_message_summary(
                json_decode(utf8(message)))
            self.log.debug(
                "Notebook client closed websocket connection - message dropped: {}"
                .format(msg_summary))

    def on_close(self):
        self.log.debug("Closing websocket connection %s", self.request.path)
        self.gateway.on_close()
        super(WebSocketChannelsHandler, self).on_close()

    @staticmethod
    def _get_message_summary(message):
        """Render a short, non-sensitive description of a kernel message."""
        summary = []
        message_type = message['msg_type']
        summary.append('type: {}'.format(message_type))

        if message_type == 'status':
            summary.append(', state: {}'.format(
                message['content']['execution_state']))
        elif message_type == 'error':
            summary.append(', {}:{}:{}'.format(
                message['content']['ename'],
                message['content']['evalue'],
                message['content']['traceback']))
        else:
            summary.append(', ...')  # don't display potentially sensitive data

        return ''.join(summary)
class UpdateManager:
    """Coordinates software updates (Moonraker, Klipper, system packages and
    web clients) and tracks GitHub API rate-limit state.

    Fixes relative to the previous revision:
      * the "rate limit reached" error formatted ``gh_limit_remaining``
        (always 0 at that point) as a timestamp; it now formats
        ``gh_limit_reset_time``.
      * ``logging.warn`` (deprecated alias) replaced with ``logging.warning``.
      * ``notify_update_response`` decodes bytes before stripping.
    """

    def __init__(self, config):
        self.server = config.get_server()
        self.config = config
        self.config.read_supplemental_config(SUPPLEMENTAL_CFG_PATH)
        self.repo_debug = config.getboolean('enable_repo_debug', False)
        auto_refresh_enabled = config.getboolean('enable_auto_refresh', False)
        self.distro = config.get('distro', "debian").lower()
        if self.distro not in SUPPORTED_DISTROS:
            raise config.error(f"Unsupported distro: {self.distro}")
        if self.repo_debug:
            # logging.warn() is a deprecated alias of logging.warning()
            logging.warning("UPDATE MANAGER: REPO DEBUG ENABLED")
        env = sys.executable
        mooncfg = self.config[f"update_manager static {self.distro} moonraker"]
        self.updaters = {
            "system": PackageUpdater(self),
            "moonraker": GitUpdater(self, mooncfg, MOONRAKER_PATH, env)
        }
        # (app_name, request_id) of the update in progress, or None
        self.current_update = None
        # TODO: Check for client config in [update_manager].  This is
        # deprecated and will be removed.
        client_repo = config.get("client_repo", None)
        if client_repo is not None:
            client_path = config.get("client_path")
            name = client_repo.split("/")[-1]
            self.updaters[name] = WebUpdater(
                self, {'repo': client_repo, 'path': client_path})
        client_sections = self.config.get_prefix_sections(
            "update_manager client")
        for section in client_sections:
            cfg = self.config[section]
            name = section.split()[-1]
            if name in self.updaters:
                raise config.error("Client repo named %s already added"
                                   % (name,))
            client_type = cfg.get("type")
            if client_type == "git_repo":
                self.updaters[name] = GitUpdater(self, cfg)
            elif client_type == "web":
                self.updaters[name] = WebUpdater(self, cfg)
            else:
                raise config.error("Invalid type '%s' for section [%s]"
                                   % (client_type, section))

        # GitHub API Rate Limit Tracking
        self.gh_rate_limit = None
        self.gh_limit_remaining = None
        self.gh_limit_reset_time = None
        self.gh_init_evt = Event()
        self.cmd_request_lock = Lock()
        self.is_refreshing = False

        # Auto Status Refresh
        self.last_auto_update_time = 0
        self.refresh_cb = None
        if auto_refresh_enabled:
            self.refresh_cb = PeriodicCallback(
                self._handle_auto_refresh, UPDATE_REFRESH_INTERVAL_MS)
            self.refresh_cb.start()

        AsyncHTTPClient.configure(None, defaults=dict(user_agent="Moonraker"))
        self.http_client = AsyncHTTPClient()

        self.server.register_endpoint(
            "/machine/update/moonraker", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/klipper", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/system", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/client", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/status", ["GET"],
            self._handle_status_request)

        # Register Ready Event
        self.server.register_event_handler(
            "server:klippy_identified", self._set_klipper_repo)

        # Initialize GitHub API Rate Limits and configured updaters
        IOLoop.current().spawn_callback(
            self._initalize_updaters, list(self.updaters.values()))

    async def _initalize_updaters(self, initial_updaters):
        """Prime rate-limit state, then refresh every configured updater."""
        self.is_refreshing = True
        await self._init_api_rate_limit()
        for updater in initial_updaters:
            if isinstance(updater, PackageUpdater):
                # Skip the package-list fetch on startup
                ret = updater.refresh(False)
            else:
                ret = updater.refresh()
            # refresh() may be sync or async depending on the updater type
            if asyncio.iscoroutine(ret):
                await ret
        self.is_refreshing = False

    async def _set_klipper_repo(self):
        """(Re)create the Klipper updater once klippy identifies itself."""
        kinfo = self.server.get_klippy_info()
        if not kinfo:
            logging.info("No valid klippy info received")
            return
        kpath = kinfo['klipper_path']
        env = kinfo['python_path']
        kupdater = self.updaters.get('klipper', None)
        if kupdater is not None and kupdater.repo_path == kpath and \
                kupdater.env == env:
            # Current Klipper Updater is valid
            return
        kcfg = self.config[f"update_manager static {self.distro} klipper"]
        self.updaters['klipper'] = GitUpdater(self, kcfg, kpath, env)
        await self.updaters['klipper'].refresh()

    async def _check_klippy_printing(self):
        """Return True if Klipper reports an active print."""
        klippy_apis = self.server.lookup_plugin('klippy_apis')
        result = await klippy_apis.query_objects(
            {'print_stats': None}, default={})
        pstate = result.get('print_stats', {}).get('state', "")
        return pstate.lower() == "printing"

    async def _handle_auto_refresh(self):
        """Periodic refresh of update status, throttled to a nightly window."""
        if await self._check_klippy_printing():
            # Don't Refresh during a print
            logging.info("Klippy is printing, auto refresh aborted")
            return
        cur_time = time.time()
        cur_hour = time.localtime(cur_time).tm_hour
        time_diff = cur_time - self.last_auto_update_time
        # Update packages if it has been more than 12 hours
        # and the local time is between 12AM and 5AM
        if time_diff < MIN_REFRESH_TIME or cur_hour >= MAX_PKG_UPDATE_HOUR:
            # Not within the update time window
            return
        self.last_auto_update_time = cur_time
        vinfo = {}
        # If another request is already refreshing, just collect status
        need_refresh_all = not self.is_refreshing
        async with self.cmd_request_lock:
            self.is_refreshing = True
            try:
                for name, updater in list(self.updaters.items()):
                    if need_refresh_all:
                        ret = updater.refresh()
                        if asyncio.iscoroutine(ret):
                            await ret
                    if hasattr(updater, "get_update_status"):
                        vinfo[name] = updater.get_update_status()
            except Exception:
                logging.exception("Unable to Refresh Status")
                return
            finally:
                self.is_refreshing = False
        uinfo = {
            'version_info': vinfo,
            'github_rate_limit': self.gh_rate_limit,
            'github_requests_remaining': self.gh_limit_remaining,
            'github_limit_reset_time': self.gh_limit_reset_time,
            'busy': self.current_update is not None
        }
        self.server.send_event("update_manager:update_refreshed", uinfo)

    async def _handle_update_request(self, web_request):
        """Run the update for the app named by the endpoint (or 'name' arg)."""
        if await self._check_klippy_printing():
            raise self.server.error("Update Refused: Klippy is printing")
        app = web_request.get_endpoint().split("/")[-1]
        if app == "client":
            app = web_request.get('name')
        inc_deps = web_request.get_boolean('include_deps', False)
        if self.current_update is not None and \
                self.current_update[0] == app:
            return f"Object {app} is currently being updated"
        updater = self.updaters.get(app, None)
        if updater is None:
            raise self.server.error(f"Updater {app} not available")
        async with self.cmd_request_lock:
            self.current_update = (app, id(web_request))
            try:
                await updater.update(inc_deps)
            except Exception as e:
                self.notify_update_response(f"Error updating {app}")
                self.notify_update_response(str(e), is_complete=True)
                raise
            finally:
                self.current_update = None
        return "ok"

    async def _handle_status_request(self, web_request):
        """Report version/rate-limit status, optionally refreshing first."""
        check_refresh = web_request.get_boolean('refresh', False)
        # Don't refresh if a print is currently in progress or
        # if an update is in progress.  Just return the current
        # state
        if self.current_update is not None or \
                await self._check_klippy_printing():
            check_refresh = False
        need_refresh = False
        if check_refresh:
            # If there is an outstanding request processing a
            # refresh, we don't need to do it again.
            need_refresh = not self.is_refreshing
            await self.cmd_request_lock.acquire()
            self.is_refreshing = True
        vinfo = {}
        try:
            for name, updater in list(self.updaters.items()):
                await updater.check_initialized(120.)
                if need_refresh:
                    ret = updater.refresh()
                    if asyncio.iscoroutine(ret):
                        await ret
                if hasattr(updater, "get_update_status"):
                    vinfo[name] = updater.get_update_status()
        except Exception:
            raise
        finally:
            # Only release the lock if this request acquired it above
            if check_refresh:
                self.is_refreshing = False
                self.cmd_request_lock.release()
        return {
            'version_info': vinfo,
            'github_rate_limit': self.gh_rate_limit,
            'github_requests_remaining': self.gh_limit_remaining,
            'github_limit_reset_time': self.gh_limit_reset_time,
            'busy': self.current_update is not None
        }

    async def execute_cmd(self, cmd, timeout=10., notify=False, retries=1):
        """Run a shell command, optionally streaming output notifications."""
        shell_command = self.server.lookup_plugin('shell_command')
        cb = self.notify_update_response if notify else None
        scmd = shell_command.build_shell_command(cmd, callback=cb)
        while retries:
            if await scmd.run(timeout=timeout, verbose=notify):
                break
            retries -= 1
        if not retries:
            raise self.server.error("Shell Command Error")

    async def execute_cmd_with_response(self, cmd, timeout=10.):
        """Run a shell command and return its captured output."""
        shell_command = self.server.lookup_plugin('shell_command')
        scmd = shell_command.build_shell_command(cmd, None)
        result = await scmd.run_with_response(timeout, retries=5)
        if result is None:
            raise self.server.error(f"Error Running Command: {cmd}")
        return result

    async def _init_api_rate_limit(self):
        """Query GitHub for current rate-limit values, retrying until success."""
        url = "https://api.github.com/rate_limit"
        while True:
            try:
                resp = await self.github_api_request(url, is_init=True)
                core = resp['resources']['core']
                self.gh_rate_limit = core['limit']
                self.gh_limit_remaining = core['remaining']
                self.gh_limit_reset_time = core['reset']
            except Exception:
                logging.exception("Error Initializing GitHub API Rate Limit")
                await tornado.gen.sleep(30.)
            else:
                reset_time = time.ctime(self.gh_limit_reset_time)
                logging.info(
                    "GitHub API Rate Limit Initialized\n"
                    f"Rate Limit: {self.gh_rate_limit}\n"
                    f"Rate Limit Remaining: {self.gh_limit_remaining}\n"
                    f"Rate Limit Reset Time: {reset_time}, "
                    f"Seconds Since Epoch: {self.gh_limit_reset_time}")
                break
        # Unblock requests waiting on rate-limit initialization
        self.gh_init_evt.set()

    async def github_api_request(self, url, etag=None, is_init=False):
        """Perform a GitHub API GET with rate-limit and ETag handling.

        Returns the decoded JSON body (with an 'etag' key added), or None
        when the server responds 304 Not Modified.
        """
        if not is_init:
            timeout = time.time() + 30.
            try:
                await self.gh_init_evt.wait(timeout)
            except Exception:
                raise self.server.error(
                    "Timeout while waiting for GitHub "
                    "API Rate Limit initialization")
        if self.gh_limit_remaining == 0:
            curtime = time.time()
            if curtime < self.gh_limit_reset_time:
                # BUGFIX: previously formatted gh_limit_remaining (0) as a
                # timestamp; the reset time is what should be reported.
                raise self.server.error(
                    f"GitHub Rate Limit Reached\nRequest: {url}\n"
                    f"Limit Reset Time: "
                    f"{time.ctime(self.gh_limit_reset_time)}")
        headers = {"Accept": "application/vnd.github.v3+json"}
        if etag is not None:
            headers['If-None-Match'] = etag
        retries = 5
        while retries:
            try:
                timeout = time.time() + 10.
                fut = self.http_client.fetch(
                    url, headers=headers, connect_timeout=5.,
                    request_timeout=5., raise_error=False)
                resp = await tornado.gen.with_timeout(timeout, fut)
            except Exception:
                retries -= 1
                msg = f"Error Processing GitHub API request: {url}"
                if not retries:
                    raise self.server.error(msg)
                logging.exception(msg)
                await tornado.gen.sleep(1.)
                continue
            etag = resp.headers.get('etag', None)
            if etag is not None:
                # Strip the weak-validator prefix
                if etag[:2] == "W/":
                    etag = etag[2:]
            logging.info(
                "GitHub API Request Processed\n"
                f"URL: {url}\n"
                f"Response Code: {resp.code}\n"
                f"Response Reason: {resp.reason}\n"
                f"ETag: {etag}")
            if resp.code == 403:
                raise self.server.error(
                    f"Forbidden GitHub Request: {resp.reason}")
            elif resp.code == 304:
                logging.info(f"Github Request not Modified: {url}")
                return None
            if resp.code != 200:
                retries -= 1
                if not retries:
                    raise self.server.error(
                        f"Github Request failed: {resp.code} {resp.reason}")
                logging.info(
                    f"Github request error, {retries} retries remaining")
                await tornado.gen.sleep(1.)
                continue
            # Update rate limit on return success
            if 'X-Ratelimit-Limit' in resp.headers and not is_init:
                self.gh_rate_limit = int(resp.headers['X-Ratelimit-Limit'])
                self.gh_limit_remaining = int(
                    resp.headers['X-Ratelimit-Remaining'])
                self.gh_limit_reset_time = float(
                    resp.headers['X-Ratelimit-Reset'])
            decoded = json.loads(resp.body)
            decoded['etag'] = etag
            return decoded

    async def http_download_request(self, url):
        """Download a zip archive from `url`, retrying transient failures."""
        retries = 5
        while retries:
            try:
                timeout = time.time() + 130.
                fut = self.http_client.fetch(
                    url, headers={"Accept": "application/zip"},
                    connect_timeout=5., request_timeout=120.)
                resp = await tornado.gen.with_timeout(timeout, fut)
            except Exception:
                retries -= 1
                logging.exception("Error Processing Download")
                if not retries:
                    raise
                await tornado.gen.sleep(1.)
                continue
            return resp.body

    def notify_update_response(self, resp, is_complete=False):
        """Broadcast an update progress/completion message to clients."""
        # Decode before stripping so str and bytes inputs behave identically
        if isinstance(resp, bytes):
            resp = resp.decode()
        resp = resp.strip()
        notification = {
            'message': resp,
            'application': None,
            'proc_id': None,
            'complete': is_complete
        }
        if self.current_update is not None:
            notification['application'] = self.current_update[0]
            notification['proc_id'] = self.current_update[1]
        self.server.send_event(
            "update_manager:update_response", notification)

    def close(self):
        """Release the HTTP client and stop the auto-refresh timer."""
        self.http_client.close()
        if self.refresh_cb is not None:
            self.refresh_cb.stop()
class UpdateManager:
    """Coordinates software updates (Moonraker, Klipper, system packages and
    web clients), delegating command execution, HTTP requests and progress
    notifications to a shared CommandHelper instance.
    """

    def __init__(self, config):
        self.server = config.get_server()
        self.config = config
        self.config.read_supplemental_config(SUPPLEMENTAL_CFG_PATH)
        auto_refresh_enabled = config.getboolean('enable_auto_refresh', False)
        self.distro = config.get('distro', "debian").lower()
        if self.distro not in SUPPORTED_DISTROS:
            raise config.error(f"Unsupported distro: {self.distro}")
        # Shared helper for shell commands, GitHub requests and notifications
        self.cmd_helper = CommandHelper(config)
        env = sys.executable
        mooncfg = self.config[f"update_manager static {self.distro} moonraker"]
        self.updaters = {
            "system": PackageUpdater(self.cmd_helper),
            "moonraker": GitUpdater(mooncfg, self.cmd_helper,
                                    MOONRAKER_PATH, env)
        }
        # TODO: Check for client config in [update_manager].  This is
        # deprecated and will be removed.
        client_repo = config.get("client_repo", None)
        if client_repo is not None:
            client_path = config.get("client_path")
            name = client_repo.split("/")[-1]
            self.updaters[name] = WebUpdater(
                {'repo': client_repo, 'path': client_path}, self.cmd_helper)
        client_sections = self.config.get_prefix_sections(
            "update_manager client")
        for section in client_sections:
            cfg = self.config[section]
            # Section name format: "update_manager client <name>"
            name = section.split()[-1]
            if name in self.updaters:
                raise config.error("Client repo named %s already added"
                                   % (name,))
            client_type = cfg.get("type")
            if client_type == "git_repo":
                self.updaters[name] = GitUpdater(cfg, self.cmd_helper)
            elif client_type == "web":
                self.updaters[name] = WebUpdater(cfg, self.cmd_helper)
            else:
                raise config.error("Invalid type '%s' for section [%s]"
                                   % (client_type, section))

        # Serializes update/refresh requests
        self.cmd_request_lock = Lock()
        self.is_refreshing = False

        # Auto Status Refresh
        self.last_auto_update_time = 0
        self.refresh_cb = None
        if auto_refresh_enabled:
            self.refresh_cb = PeriodicCallback(
                self._handle_auto_refresh, UPDATE_REFRESH_INTERVAL_MS)
            self.refresh_cb.start()

        self.server.register_endpoint(
            "/machine/update/moonraker", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/klipper", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/system", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/client", ["POST"],
            self._handle_update_request)
        self.server.register_endpoint(
            "/machine/update/status", ["GET"],
            self._handle_status_request)
        self.server.register_notification("update_manager:update_response")
        self.server.register_notification("update_manager:update_refreshed")

        # Register Ready Event
        self.server.register_event_handler(
            "server:klippy_identified", self._set_klipper_repo)

        # Initialize GitHub API Rate Limits and configured updaters
        # NOTE(review): "_initalize_updaters" is misspelled (initialize);
        # left unchanged here since both definition and caller agree.
        IOLoop.current().spawn_callback(
            self._initalize_updaters, list(self.updaters.values()))

    async def _initalize_updaters(self, initial_updaters):
        """Prime GitHub rate-limit state, then refresh each updater once."""
        self.is_refreshing = True
        await self.cmd_helper.init_api_rate_limit()
        for updater in initial_updaters:
            if isinstance(updater, PackageUpdater):
                # Skip the package-list fetch on startup
                ret = updater.refresh(False)
            else:
                ret = updater.refresh()
            # refresh() may be sync or async depending on the updater type
            if asyncio.iscoroutine(ret):
                await ret
        self.is_refreshing = False

    async def _set_klipper_repo(self):
        """(Re)create the Klipper updater once klippy identifies itself."""
        kinfo = self.server.get_klippy_info()
        if not kinfo:
            logging.info("No valid klippy info received")
            return
        kpath = kinfo['klipper_path']
        env = kinfo['python_path']
        kupdater = self.updaters.get('klipper', None)
        if kupdater is not None and kupdater.repo_path == kpath and \
                kupdater.env == env:
            # Current Klipper Updater is valid
            return
        kcfg = self.config[f"update_manager static {self.distro} klipper"]
        self.updaters['klipper'] = GitUpdater(kcfg, self.cmd_helper,
                                              kpath, env)
        await self.updaters['klipper'].refresh()

    async def _check_klippy_printing(self):
        """Return True if Klipper reports an active print."""
        klippy_apis = self.server.lookup_plugin('klippy_apis')
        result = await klippy_apis.query_objects(
            {'print_stats': None}, default={})
        pstate = result.get('print_stats', {}).get('state', "")
        return pstate.lower() == "printing"

    async def _handle_auto_refresh(self):
        """Periodic refresh of update status, throttled to a nightly window."""
        if await self._check_klippy_printing():
            # Don't Refresh during a print
            logging.info("Klippy is printing, auto refresh aborted")
            return
        cur_time = time.time()
        cur_hour = time.localtime(cur_time).tm_hour
        time_diff = cur_time - self.last_auto_update_time
        # Update packages if it has been more than 12 hours
        # and the local time is between 12AM and 5AM
        if time_diff < MIN_REFRESH_TIME or cur_hour >= MAX_PKG_UPDATE_HOUR:
            # Not within the update time window
            return
        self.last_auto_update_time = cur_time
        vinfo = {}
        # If another request is already refreshing, just collect status
        need_refresh_all = not self.is_refreshing
        async with self.cmd_request_lock:
            self.is_refreshing = True
            try:
                for name, updater in list(self.updaters.items()):
                    if need_refresh_all:
                        ret = updater.refresh()
                        if asyncio.iscoroutine(ret):
                            await ret
                    if hasattr(updater, "get_update_status"):
                        vinfo[name] = updater.get_update_status()
            except Exception:
                logging.exception("Unable to Refresh Status")
                return
            finally:
                self.is_refreshing = False
        uinfo = self.cmd_helper.get_rate_limit_stats()
        uinfo['version_info'] = vinfo
        uinfo['busy'] = self.cmd_helper.is_update_busy()
        self.server.send_event("update_manager:update_refreshed", uinfo)

    async def _handle_update_request(self, web_request):
        """Run the update for the app named by the endpoint (or 'name' arg)."""
        if await self._check_klippy_printing():
            raise self.server.error("Update Refused: Klippy is printing")
        app = web_request.get_endpoint().split("/")[-1]
        if app == "client":
            app = web_request.get('name')
        inc_deps = web_request.get_boolean('include_deps', False)
        if self.cmd_helper.is_app_updating(app):
            return f"Object {app} is currently being updated"
        updater = self.updaters.get(app, None)
        if updater is None:
            raise self.server.error(f"Updater {app} not available")
        async with self.cmd_request_lock:
            self.cmd_helper.set_update_info(app, id(web_request))
            try:
                await updater.update(inc_deps)
            except Exception as e:
                self.cmd_helper.notify_update_response(
                    f"Error updating {app}")
                self.cmd_helper.notify_update_response(
                    str(e), is_complete=True)
                raise
            finally:
                self.cmd_helper.clear_update_info()
        return "ok"

    async def _handle_status_request(self, web_request):
        """Report version/rate-limit status, optionally refreshing first."""
        check_refresh = web_request.get_boolean('refresh', False)
        # Don't refresh if a print is currently in progress or
        # if an update is in progress.  Just return the current
        # state
        if self.cmd_helper.is_update_busy() or \
                await self._check_klippy_printing():
            check_refresh = False
        need_refresh = False
        if check_refresh:
            # If there is an outstanding request processing a
            # refresh, we don't need to do it again.
            need_refresh = not self.is_refreshing
            await self.cmd_request_lock.acquire()
            self.is_refreshing = True
        vinfo = {}
        try:
            for name, updater in list(self.updaters.items()):
                await updater.check_initialized(120.)
                if need_refresh:
                    ret = updater.refresh()
                    if asyncio.iscoroutine(ret):
                        await ret
                if hasattr(updater, "get_update_status"):
                    vinfo[name] = updater.get_update_status()
        except Exception:
            raise
        finally:
            # Only release the lock if this request acquired it above
            if check_refresh:
                self.is_refreshing = False
                self.cmd_request_lock.release()
        ret = self.cmd_helper.get_rate_limit_stats()
        ret['version_info'] = vinfo
        ret['busy'] = self.cmd_helper.is_update_busy()
        return ret

    def close(self):
        """Release helper resources and stop the auto-refresh timer."""
        self.cmd_helper.close()
        if self.refresh_cb is not None:
            self.refresh_cb.stop()
def _widget(self):
    """ Create IPython widget for display within a notebook """
    # Return the memoized widget when one was already built.
    try:
        return self._cached_widget
    except AttributeError:
        pass

    # ipywidgets is an optional dependency; without it there is no widget.
    try:
        from ipywidgets import Layout, VBox, HBox, IntText, Button, HTML, Accordion
    except ImportError:
        self._cached_widget = None
        return None

    layout = Layout(width="150px")

    link = ""
    if self.dashboard_link:
        link = '<p><b>Dashboard: </b><a href="%s" target="_blank">%s</a></p>\n' % (
            self.dashboard_link,
            self.dashboard_link,
        )

    title = HTML("<h2>%s</h2>" % self._cluster_class_name)
    dashboard = HTML(link)
    status = HTML(self._widget_status(), layout=Layout(min_width="150px"))

    def update():
        # Re-render the status pane from the live cluster state.
        status.value = self._widget_status()

    if not self._supports_scaling:
        accordion = HTML("")
    else:
        request = IntText(0, description="Workers", layout=layout)
        scale = Button(description="Scale", layout=layout)
        minimum = IntText(0, description="Minimum", layout=layout)
        maximum = IntText(0, description="Maximum", layout=layout)
        adapt = Button(description="Adapt", layout=layout)

        accordion = Accordion(
            [HBox([request, scale]), HBox([minimum, maximum, adapt])],
            layout=Layout(min_width="500px"),
        )
        accordion.selected_index = None
        accordion.set_title(0, "Manual Scaling")
        accordion.set_title(1, "Adaptive Scaling")

        def adapt_cb(b):
            self.adapt(minimum=minimum.value, maximum=maximum.value)
            update()

        def scale_cb(b):
            with log_errors():
                n = request.value
                # Manual scaling supersedes any running adaptive policy.
                with suppress(AttributeError):
                    self._adaptive.stop()
                self.scale(n)
                update()

        adapt.on_click(adapt_cb)
        scale.on_click(scale_cb)

    box = VBox([title, HBox([status, accordion]), dashboard])
    self._cached_widget = box

    # parse_timedelta yields seconds; PeriodicCallback expects milliseconds.
    cluster_repr_interval = parse_timedelta(
        dask.config.get("distributed.deploy.cluster-repr-interval", default="ms")
    )
    pc = PeriodicCallback(update, cluster_repr_interval * 1000)
    self.periodic_callbacks["cluster-repr"] = pc
    pc.start()

    return box
class Engine(BaseEngine):
    """
    This is an in-memory engine. This engine allows to run ONLY ONE
    instance of Centrifuge. If you want to scale - you should use Redis
    engine instead of this one. This engine is perfect when you have
    limited amount of users. This engine keeps all data in process memory.
    """

    NAME = 'In memory - single node only'

    HISTORY_EXPIRE_TASK_INTERVAL = 60000  # once in a minute

    def __init__(self, *args, **kwargs):
        super(Engine, self).__init__(*args, **kwargs)
        # subscription_key -> {client_uid: client}
        self.subscriptions = {}
        # history_key -> list of messages (newest first)
        self.history = {}
        # history_key -> absolute expiry timestamp (seconds since epoch)
        self.history_expire_at = {}
        # min-heap of (expire_at, history_key) driving periodic expiry;
        # stale entries are tolerated and skipped in check_history_expire
        self.history_expire_heap = []
        # presence hash_key -> {uid: {'expire_at': ..., 'user_info': ...}}
        self.presence = {}
        self.history_expire_task = PeriodicCallback(
            self.check_history_expire,
            self.HISTORY_EXPIRE_TASK_INTERVAL)

    def initialize(self):
        self.history_expire_task.start()
        logger.info("Memory engine initialized")

    @coroutine
    def publish_message(self, channel, body, method="message"):
        # Single-node engine: publishing is just a local dispatch.
        yield self.handle_message(channel, method, body)
        raise Return((True, None))

    @coroutine
    def publish_control_message(self, message):
        yield self.handle_control_message(message)
        raise Return((True, None))

    @coroutine
    def publish_admin_message(self, message):
        yield self.handle_admin_message(message)
        raise Return((True, None))

    @coroutine
    def handle_admin_message(self, message):
        """Broadcast `message` (JSON-encoded) to all admin connections."""
        message = json_encode(message)
        for uid, connection in six.iteritems(
                self.application.admin_connections):
            # Re-check membership: a connection may have been removed
            # while iterating (e.g. during a send).
            if uid not in self.application.admin_connections:
                continue
            connection.send(message)

        raise Return((True, None))

    @coroutine
    def handle_control_message(self, message):
        """
        Handle control message.
        """
        app_id = message.get("app_id")
        method = message.get("method")
        params = message.get("params")

        if app_id and app_id == self.application.uid:
            # application id must be set when we don't want to do
            # make things twice for the same application. Setting
            # app_id means that we don't want to process control
            # message when it is appear in application instance if
            # application uid matches app_id
            raise Return((True, None))

        # Dispatch to the application's handle_<method> coroutine, if any.
        func = getattr(self.application, 'handle_%s' % method, None)
        if not func:
            raise Return((None, self.application.METHOD_NOT_FOUND))

        result, error = yield func(params)
        raise Return((result, error))

    @coroutine
    def handle_message(self, channel, method, body):
        """Deliver a prepared response to every subscriber of `channel`."""
        if channel not in self.subscriptions:
            raise Return((True, None))

        timer = None
        if self.application.collector:
            # Measure broadcast latency when metrics collection is enabled.
            timer = self.application.collector.get_timer('broadcast')

        response = Response(method=method, body=body)
        prepared_response = response.as_message()
        for uid, client in six.iteritems(self.subscriptions[channel]):
            # Re-check: the subscription map can mutate while we yield on
            # client.send (clients may unsubscribe/disconnect mid-broadcast).
            if channel in self.subscriptions and \
                    uid in self.subscriptions[channel]:
                yield client.send(prepared_response)

        if timer:
            timer.stop()

        raise Return((True, None))

    @coroutine
    def add_subscription(self, project_key, channel, client):
        subscription_key = self.get_subscription_key(project_key, channel)
        if subscription_key not in self.subscriptions:
            self.subscriptions[subscription_key] = {}
        self.subscriptions[subscription_key][client.uid] = client
        raise Return((True, None))

    @coroutine
    def remove_subscription(self, project_key, channel, client):
        subscription_key = self.get_subscription_key(project_key, channel)
        try:
            del self.subscriptions[subscription_key][client.uid]
        except KeyError:
            pass
        # Drop the channel entry entirely once its last subscriber leaves.
        try:
            if not self.subscriptions[subscription_key]:
                del self.subscriptions[subscription_key]
        except KeyError:
            pass
        raise Return((True, None))

    def get_presence_key(self, project_key, channel):
        return "%s:presence:%s:%s" % (self.prefix, project_key, channel)

    @coroutine
    def add_presence(self, project_key, channel, uid, user_info,
                     presence_timeout=None):
        """Record (or refresh) presence of `uid` in a channel."""
        now = int(time.time())
        expire_at = now + (presence_timeout or self.presence_timeout)
        hash_key = self.get_presence_key(project_key, channel)
        if hash_key not in self.presence:
            self.presence[hash_key] = {}
        self.presence[hash_key][uid] = {
            'expire_at': expire_at,
            'user_info': user_info
        }
        raise Return((True, None))

    @coroutine
    def remove_presence(self, project_key, channel, uid):
        hash_key = self.get_presence_key(project_key, channel)
        try:
            del self.presence[hash_key][uid]
        except KeyError:
            pass
        raise Return((True, None))

    @coroutine
    def get_presence(self, project_key, channel):
        """Return live presence entries, lazily expiring stale ones."""
        now = int(time.time())
        hash_key = self.get_presence_key(project_key, channel)
        to_return = {}
        if hash_key in self.presence:
            keys_to_delete = []
            for uid, data in six.iteritems(self.presence[hash_key]):
                expire_at = data['expire_at']
                if expire_at > now:
                    to_return[uid] = data['user_info']
                else:
                    keys_to_delete.append(uid)

            # Purge expired entries after iteration to avoid mutating
            # the dict while iterating it.
            for uid in keys_to_delete:
                try:
                    del self.presence[hash_key][uid]
                except KeyError:
                    pass

            if not self.presence[hash_key]:
                try:
                    del self.presence[hash_key]
                except KeyError:
                    pass

        raise Return((to_return, None))

    def get_history_key(self, project_key, channel):
        return "%s:history:%s:%s" % (self.prefix, project_key, channel)

    @coroutine
    def add_history_message(self, project_key, channel, message,
                            history_size, history_lifetime):
        """Prepend a message to channel history, bounded by `history_size`."""
        history_key = self.get_history_key(project_key, channel)

        # Each message pushes the whole history's expiry forward; old heap
        # entries for this key become stale and are skipped on expiry.
        expire_at = int(time.time()) + history_lifetime
        self.history_expire_at[history_key] = expire_at
        heapq.heappush(self.history_expire_heap, (expire_at, history_key))

        if history_key not in self.history:
            self.history[history_key] = []

        # Newest message first; trim to the configured size.
        self.history[history_key].insert(0, message)
        self.history[history_key] = self.history[history_key][:history_size]

        raise Return((True, None))

    @coroutine
    def get_history(self, project_key, channel):
        """Return the channel history, expiring it lazily when overdue."""
        history_key = self.get_history_key(project_key, channel)

        now = int(time.time())

        if history_key in self.history_expire_at:
            expire_at = self.history_expire_at[history_key]
            if expire_at <= now:
                self.remove_history(history_key)
                raise Return(([], None))

        try:
            data = self.history[history_key]
        except KeyError:
            data = []

        raise Return((data, None))

    def remove_history(self, history_key):
        # Drop both the messages and the expiry bookkeeping for the key.
        try:
            del self.history[history_key]
        except KeyError:
            pass
        try:
            del self.history_expire_at[history_key]
        except KeyError:
            pass

    def check_history_expire(self):
        """Periodic task: pop due heap entries and expire their history.

        Stale heap entries (whose key was refreshed with a later expiry)
        are popped and ignored — the authoritative expiry lives in
        self.history_expire_at.
        """
        now = int(time.time())
        while self.history_expire_heap:
            if self.history_expire_heap[0][0] <= now:
                expire, history_key = heapq.heappop(self.history_expire_heap)
                if history_key in self.history_expire_at and \
                        self.history_expire_at[history_key] <= now:
                    self.remove_history(history_key)
            else:
                break
class MappingKernelManager(MultiKernelManager):
    """A KernelManager that handles:

    - File mapping
    - HTTP error handling
    - Kernel message filtering
    """

    @default('kernel_manager_class')
    def _default_kernel_manager_class(self):
        return "jupyter_client.ioloop.IOLoopKernelManager"

    kernel_argv = List(Unicode())

    root_dir = Unicode(config=True)

    # kernel_id -> number of currently-open websocket connections
    _kernel_connections = Dict()

    # PeriodicCallback that culls idle kernels; created lazily by
    # initialize_culler().
    _culler_callback = None

    _initialized_culler = False

    @default('root_dir')
    def _default_root_dir(self):
        try:
            return self.parent.root_dir
        except AttributeError:
            return getcwd()

    @validate('root_dir')
    def _update_root_dir(self, proposal):
        """Do a bit of validation of the root dir."""
        value = proposal['value']
        if not os.path.isabs(value):
            # If we receive a non-absolute path, make it absolute.
            value = os.path.abspath(value)
        if not exists(value) or not os.path.isdir(value):
            raise TraitError("kernel root dir %r is not a directory" % value)
        return value

    cull_idle_timeout = Integer(
        0,
        config=True,
        help="""Timeout (in seconds) after which a kernel is considered idle and ready to be culled.
        Values of 0 or lower disable culling. Very short timeouts may result in kernels being culled
        for users with poor network connections.""")

    cull_interval_default = 300  # 5 minutes

    cull_interval = Integer(
        cull_interval_default,
        config=True,
        help="""The interval (in seconds) on which to check for idle kernels exceeding the cull timeout value.""")

    cull_connected = Bool(
        False,
        config=True,
        help="""Whether to consider culling kernels which have one or more connections.
        Only effective if cull_idle_timeout > 0.""")

    cull_busy = Bool(
        False,
        config=True,
        help="""Whether to consider culling kernels which are busy.
        Only effective if cull_idle_timeout > 0.""")

    buffer_offline_messages = Bool(
        True,
        config=True,
        help="""Whether messages from kernels whose frontends have disconnected should be buffered in-memory.
        When True (default), messages are buffered and replayed on reconnect,
        avoiding lost messages due to interrupted connectivity.
        Disable if long-running kernels will produce too much output while
        no frontends are connected.
        """)

    kernel_info_timeout = Float(
        60,
        config=True,
        help="""Timeout for giving up on a kernel (in seconds).
        On starting and restarting kernels, we check whether the
        kernel is running and responsive by sending kernel_info_requests.
        This sets the timeout in seconds for how long the kernel can take
        before being presumed dead.
        This affects the MappingKernelManager (which handles kernel restarts)
        and the ZMQChannelsHandler (which handles the startup).
        """)

    _kernel_buffers = Any()

    @default('_kernel_buffers')
    def _default_kernel_buffers(self):
        # Per-kernel buffering state: pending (channel, msg_parts) pairs, the
        # session that owns the buffer, and the zmq streams being captured.
        return defaultdict(lambda: {
            'buffer': [],
            'session_key': '',
            'channels': {}
        })

    last_kernel_activity = Instance(
        datetime,
        help="The last activity on any kernel, including shutting down a kernel"
    )

    def __init__(self, **kwargs):
        # Pin the superclass so subclasses can still reach MultiKernelManager
        # methods directly through self.pinned_superclass.
        self.pinned_superclass = MultiKernelManager
        self.pinned_superclass.__init__(self, **kwargs)
        self.last_kernel_activity = utcnow()

    allowed_message_types = List(
        trait=Unicode(),
        config=True,
        help="""White list of allowed kernel message types.
        When the list is empty, all message types are allowed.
        """)

    allow_tracebacks = Bool(
        True,
        config=True,
        help=('Whether to send tracebacks to clients on exceptions.'))

    traceback_replacement_message = Unicode(
        'An exception occurred at runtime, which is not shown due to security reasons.',
        config=True,
        help=('Message to print when allow_tracebacks is False, and an exception occurs'))

    #-------------------------------------------------------------------------
    # Methods for managing kernels and sessions
    #-------------------------------------------------------------------------

    def _handle_kernel_died(self, kernel_id):
        """notice that a kernel died"""
        self.log.warning("Kernel %s died, removing from map.", kernel_id)
        self.remove_kernel(kernel_id)

    def cwd_for_path(self, path):
        """Turn API path into absolute OS path."""
        os_path = to_os_path(path, self.root_dir)
        # in the case of documents and kernels not being on the same filesystem,
        # walk up to root_dir if the paths don't exist
        while not os.path.isdir(os_path) and os_path != self.root_dir:
            os_path = os.path.dirname(os_path)
        return os_path

    async def start_kernel(self, kernel_id=None, path=None, **kwargs):
        """Start a kernel for a session and return its kernel_id.

        Parameters
        ----------
        kernel_id : uuid
            The uuid to associate the new kernel with. If this
            is not None, this kernel will be persistent whenever it is
            requested.
        path : API path
            The API path (unicode, '/' delimited) for the cwd.
            Will be transformed to an OS path relative to root_dir.
        kernel_name : str
            The name identifying which kernel spec to launch. This is ignored if
            an existing kernel is returned, but it may be checked in the future.
        """
        if kernel_id is None:
            if path is not None:
                kwargs['cwd'] = self.cwd_for_path(path)
            kernel_id = await ensure_async(
                self.pinned_superclass.start_kernel(self, **kwargs))
            self._kernel_connections[kernel_id] = 0
            self.start_watching_activity(kernel_id)
            self.log.info("Kernel started: %s" % kernel_id)
            self.log.debug("Kernel args: %r" % kwargs)
            # register callback for failed auto-restart
            self.add_restart_callback(
                kernel_id,
                lambda: self._handle_kernel_died(kernel_id),
                'dead',
            )
            # Increase the metric of number of kernels running
            # for the relevant kernel type by 1
            KERNEL_CURRENTLY_RUNNING_TOTAL.labels(
                type=self._kernels[kernel_id].kernel_name).inc()
        else:
            self._check_kernel_id(kernel_id)
            self.log.info("Using existing kernel: %s" % kernel_id)
        # Initialize culling if not already
        if not self._initialized_culler:
            self.initialize_culler()
        return kernel_id

    def start_buffering(self, kernel_id, session_key, channels):
        """Start buffering messages for a kernel

        Parameters
        ----------
        kernel_id : str
            The id of the kernel to start buffering.
        session_key : str
            The session_key, if any, that should get the buffer.
            If the session_key matches the current buffered session_key,
            the buffer will be returned.
        channels : dict({'channel': ZMQStream})
            The zmq channels whose messages should be buffered.
        """
        if not self.buffer_offline_messages:
            # Buffering disabled: just close the streams.
            for channel, stream in channels.items():
                stream.close()
            return
        self.log.info("Starting buffering for %s", session_key)
        self._check_kernel_id(kernel_id)
        # clear previous buffering state
        self.stop_buffering(kernel_id)
        buffer_info = self._kernel_buffers[kernel_id]
        # record the session key because only one session can buffer
        buffer_info['session_key'] = session_key
        # TODO: the buffer should likely be a memory bounded queue, we're starting with a list to keep it simple
        buffer_info['buffer'] = []
        buffer_info['channels'] = channels

        # forward any future messages to the internal buffer
        def buffer_msg(channel, msg_parts):
            self.log.debug("Buffering msg on %s:%s", kernel_id, channel)
            buffer_info['buffer'].append((channel, msg_parts))

        for channel, stream in channels.items():
            stream.on_recv(partial(buffer_msg, channel))

    def get_buffer(self, kernel_id, session_key):
        """Get the buffer for a given kernel

        Parameters
        ----------
        kernel_id : str
            The id of the kernel to stop buffering.
        session_key : str, optional
            The session_key, if any, that should get the buffer.
            If the session_key matches the current buffered session_key,
            the buffer will be returned.
        """
        self.log.debug("Getting buffer for %s", kernel_id)
        if kernel_id not in self._kernel_buffers:
            return
        buffer_info = self._kernel_buffers[kernel_id]
        if buffer_info['session_key'] == session_key:
            # remove buffer
            self._kernel_buffers.pop(kernel_id)
            # only return buffer_info if it's a match
            return buffer_info
        else:
            # Another session owns the buffer; discard it.
            self.stop_buffering(kernel_id)

    def stop_buffering(self, kernel_id):
        """Stop buffering kernel messages

        Parameters
        ----------
        kernel_id : str
            The id of the kernel to stop buffering.
        """
        self.log.debug("Clearing buffer for %s", kernel_id)
        self._check_kernel_id(kernel_id)
        if kernel_id not in self._kernel_buffers:
            return
        buffer_info = self._kernel_buffers.pop(kernel_id)
        # close buffering streams
        for stream in buffer_info['channels'].values():
            if not stream.closed():
                stream.on_recv(None)
                stream.close()
        msg_buffer = buffer_info['buffer']
        if msg_buffer:
            self.log.info("Discarding %s buffered messages for %s",
                          len(msg_buffer), buffer_info['session_key'])

    def shutdown_kernel(self, kernel_id, now=False, restart=False):
        """Shutdown a kernel by kernel_id"""
        self._check_kernel_id(kernel_id)
        kernel = self._kernels[kernel_id]
        if kernel._activity_stream:
            kernel._activity_stream.close()
            kernel._activity_stream = None
        self.stop_buffering(kernel_id)
        self._kernel_connections.pop(kernel_id, None)
        # Decrease the metric of number of kernels
        # running for the relevant kernel type by 1
        KERNEL_CURRENTLY_RUNNING_TOTAL.labels(
            type=self._kernels[kernel_id].kernel_name).dec()
        self.pinned_superclass.shutdown_kernel(self, kernel_id, now=now,
                                               restart=restart)
        # Unlike its async sibling method in AsyncMappingKernelManager, removing the kernel_id
        # from the connections dictionary isn't as problematic before the shutdown since the
        # method is synchronous.  However, we'll keep the relative call orders the same from
        # a maintenance perspective.
        self._kernel_connections.pop(kernel_id, None)

    async def restart_kernel(self, kernel_id, now=False):
        """Restart a kernel by kernel_id"""
        self._check_kernel_id(kernel_id)
        await ensure_async(
            self.pinned_superclass.restart_kernel(self, kernel_id, now=now))
        kernel = self.get_kernel(kernel_id)
        # return a Future that will resolve when the kernel has successfully restarted
        channel = kernel.connect_shell()
        future = Future()

        def finish():
            """Common cleanup when restart finishes/fails for any reason."""
            if not channel.closed():
                channel.close()
            loop.remove_timeout(timeout)
            kernel.remove_restart_callback(on_restart_failed, 'dead')

        def on_reply(msg):
            self.log.debug("Kernel info reply received: %s", kernel_id)
            finish()
            if not future.done():
                future.set_result(msg)

        def on_timeout():
            self.log.warning("Timeout waiting for kernel_info_reply: %s",
                             kernel_id)
            finish()
            if not future.done():
                future.set_exception(
                    TimeoutError("Timeout waiting for restart"))

        def on_restart_failed():
            self.log.warning("Restarting kernel failed: %s", kernel_id)
            finish()
            if not future.done():
                future.set_exception(RuntimeError("Restart failed"))

        kernel.add_restart_callback(on_restart_failed, 'dead')
        # Probe the restarted kernel: a kernel_info reply proves liveness.
        kernel.session.send(channel, "kernel_info_request")
        channel.on_recv(on_reply)
        loop = IOLoop.current()
        timeout = loop.add_timeout(loop.time() + self.kernel_info_timeout,
                                   on_timeout)
        return future

    def notify_connect(self, kernel_id):
        """Notice a new connection to a kernel"""
        if kernel_id in self._kernel_connections:
            self._kernel_connections[kernel_id] += 1

    def notify_disconnect(self, kernel_id):
        """Notice a disconnection from a kernel"""
        if kernel_id in self._kernel_connections:
            self._kernel_connections[kernel_id] -= 1

    def kernel_model(self, kernel_id):
        """Return a JSON-safe dict representing a kernel

        For use in representing kernels in the JSON APIs.
        """
        self._check_kernel_id(kernel_id)
        kernel = self._kernels[kernel_id]
        model = {
            "id": kernel_id,
            "name": kernel.kernel_name,
            "last_activity": isoformat(kernel.last_activity),
            "execution_state": kernel.execution_state,
            "connections": self._kernel_connections.get(kernel_id, 0),
        }
        return model

    def list_kernels(self):
        """Returns a list of kernel_id's of kernels running."""
        kernels = []
        kernel_ids = self.pinned_superclass.list_kernel_ids(self)
        for kernel_id in kernel_ids:
            try:
                model = self.kernel_model(kernel_id)
                kernels.append(model)
            except (web.HTTPError, KeyError):
                pass  # Probably due to a (now) non-existent kernel, continue building the list
        return kernels

    # override _check_kernel_id to raise 404 instead of KeyError
    def _check_kernel_id(self, kernel_id):
        """Check a that a kernel_id exists and raise 404 if not."""
        if kernel_id not in self:
            raise web.HTTPError(404, u'Kernel does not exist: %s' % kernel_id)

    # monitoring activity:

    def start_watching_activity(self, kernel_id):
        """Start watching IOPub messages on a kernel for activity.

        - update last_activity on every message
        - record execution_state from status messages
        """
        kernel = self._kernels[kernel_id]
        # add busy/activity markers:
        kernel.execution_state = 'starting'
        kernel.last_activity = utcnow()
        kernel._activity_stream = kernel.connect_iopub()
        session = Session(
            config=kernel.session.config,
            key=kernel.session.key,
        )

        def record_activity(msg_list):
            """Record an IOPub message arriving from a kernel"""
            self.last_kernel_activity = kernel.last_activity = utcnow()
            idents, fed_msg_list = session.feed_identities(msg_list)
            msg = session.deserialize(fed_msg_list)
            msg_type = msg['header']['msg_type']
            if msg_type == 'status':
                kernel.execution_state = msg['content']['execution_state']
                self.log.debug("activity on %s: %s (%s)", kernel_id,
                               msg_type, kernel.execution_state)
            else:
                self.log.debug("activity on %s: %s", kernel_id, msg_type)

        kernel._activity_stream.on_recv(record_activity)

    def initialize_culler(self):
        """Start idle culler if 'cull_idle_timeout' is greater than zero.

        Regardless of that value, set flag that we've been here.
        """
        if not self._initialized_culler and self.cull_idle_timeout > 0:
            if self._culler_callback is None:
                loop = IOLoop.current()
                if self.cull_interval <= 0:  #handle case where user set invalid value
                    self.log.warning(
                        "Invalid value for 'cull_interval' detected (%s) - using default value (%s).",
                        self.cull_interval, self.cull_interval_default)
                    self.cull_interval = self.cull_interval_default
                self._culler_callback = PeriodicCallback(
                    self.cull_kernels, 1000 * self.cull_interval)
                self.log.info(
                    "Culling kernels with idle durations > %s seconds at %s second intervals ...",
                    self.cull_idle_timeout, self.cull_interval)
                if self.cull_busy:
                    self.log.info("Culling kernels even if busy")
                if self.cull_connected:
                    self.log.info(
                        "Culling kernels even with connected clients")
                self._culler_callback.start()
        self._initialized_culler = True

    async def cull_kernels(self):
        self.log.debug(
            "Polling every %s seconds for kernels idle > %s seconds...",
            self.cull_interval, self.cull_idle_timeout)
        """Create a separate list of kernels to avoid conflicting updates while iterating"""
        for kernel_id in list(self._kernels):
            try:
                await self.cull_kernel_if_idle(kernel_id)
            except Exception as e:
                self.log.exception(
                    "The following exception was encountered while checking the idle duration of kernel %s: %s",
                    kernel_id, e)

    async def cull_kernel_if_idle(self, kernel_id):
        kernel = self._kernels[kernel_id]
        if hasattr(
                kernel, 'last_activity'
        ):  # last_activity is monkey-patched, so ensure that has occurred
            self.log.debug("kernel_id=%s, kernel_name=%s, last_activity=%s",
                           kernel_id, kernel.kernel_name,
                           kernel.last_activity)
            dt_now = utcnow()
            dt_idle = dt_now - kernel.last_activity
            # Compute idle properties
            is_idle_time = dt_idle > timedelta(seconds=self.cull_idle_timeout)
            is_idle_execute = self.cull_busy or (kernel.execution_state != 'busy')
            connections = self._kernel_connections.get(kernel_id, 0)
            is_idle_connected = self.cull_connected or not connections
            # Cull the kernel if all three criteria are met
            if (is_idle_time and is_idle_execute and is_idle_connected):
                idle_duration = int(dt_idle.total_seconds())
                self.log.warning(
                    "Culling '%s' kernel '%s' (%s) with %d connections due to %s seconds of inactivity.",
                    kernel.execution_state, kernel.kernel_name, kernel_id,
                    connections, idle_duration)
                await ensure_async(self.shutdown_kernel(kernel_id))
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint that pushes robot telemetry every 100 ms and applies
    incoming setpoint/gain updates to module-level control globals.

    NOTE(review): Python 2 code (print statements); setpoints/gains are shared
    with the control loop through module globals.
    """

    def open(self):
        global socketOK
        print 'connection opened...'
        socketOK = True
        # Push telemetry to the client 10 times per second.
        self.callback = PeriodicCallback(self.sendToSocket, 100)
        self.callback.start()

    def on_message(self, message):
        global x1, x2, Kp2, Ki2, Kpxi2, Kixi2, timeLastReceived, socketOK
        jsonMessage = json.loads(message)
        # Reset the data-reception timeout.
        timeLastReceived = time.time()
        if jsonMessage.get('vref') != None:
            # Longitudinal speed setpoint; incoming value scaled by 100
            # (presumably cm/s -> m/s — confirm against the client).
            x1 = float(jsonMessage.get('vref')) / 100
            #print ("x1: %.2f" % x1)
        if jsonMessage.get('xiref') != None:
            # Rotation speed setpoint, degrees -> radians.
            x2 = (float(jsonMessage.get('xiref'))) * 3.141592 / 180
            #print ("x2: %.2f" % x2)
        if jsonMessage.get('servoref') != None:
            servoref = int(jsonMessage.get('servoref'))
            try:
                uno.servo(servoref)
            except:
                # Best-effort: ignore servo command failures.
                pass
            #print ("servoref: %d" % servoref)
        if jsonMessage.get('Kp2ref') != None:
            Kp2 = float(jsonMessage.get('Kp2ref'))
            #print ("Kp2: %.2f" % Kp2)
        if jsonMessage.get('Ki2ref') != None:
            Ki2 = float(jsonMessage.get('Ki2ref'))
            #print ("Ki2: %.2f" % Ki2)
        if jsonMessage.get('Kpxi2ref') != None:
            Kpxi2 = float(jsonMessage.get('Kpxi2ref'))
            #print ("Kpxi2: %.2f" % Kpxi2)
        if jsonMessage.get('Kixi2ref') != None:
            Kixi2 = float(jsonMessage.get('Kixi2ref'))
            #print ("Kixi2: %.2f" % Kixi2)
        if not socketOK:
            # Connection flagged down: zero the setpoints.
            x1 = 0
            x2 = 0.

    def on_close(self):
        global socketOK, commandeDroit, commandeGauche
        print 'connection closed...'
        socketOK = False
        # Stop both wheel commands when the client disconnects.
        commandeDroit = 0.
        commandeGauche = 0.

    def sendToSocket(self):
        """Serialize the current measurements/commands as JSON (plus a CSV
        'Raw' field) and push them to the client."""
        global started, codeurDroitDeltaPos, codeurGaucheDeltaPos, socketOK, commandeDroit, commandeGauche, vxref, xiref, \
            vxmes, ximes, omega, thetames, T0
        tcourant = time.time() - T0
        aEnvoyer = json.dumps({
            'Temps': ("%.2f" % tcourant),
            'commandeDroit': ("%.2f" % commandeDroit),
            'commandeGauche': ("%.2f" % commandeGauche),
            'omega': ("%.3f" % omega),
            'omegaDroit': ("%.2f" % omegaDroit),
            'omegaGauche': ("%.2f" % omegaGauche),
            'thetames': ("%.3f" % thetames),
            'Consigne vitesse longitudinale': ("%.2f" % x1),
            'Consigne vitesse de rotation': ("%.2f" % (180 * x2/3.141592)),
            'Vitesse longitudinale': ("%.2f" % vxmes),
            'Vitesse de rotation': ("%.2f" % (180 * ximes/3.141592)),
            # Same values again as one comma-separated string.
            'Raw': ("%.2f" % tcourant) + "," +
                   ("%.2f" % commandeDroit) + "," +
                   ("%.2f" % commandeGauche) + "," +
                   ("%.3f" % omega) + "," +
                   ("%.2f" % omegaDroit) + "," +
                   ("%.2f" % omegaGauche) + "," +
                   ("%.3f" % thetames) + "," +
                   ("%.2f" % x1) + "," +
                   ("%.2f" % x2) + "," +
                   ("%.2f" % vxmes) + "," +
                   ("%.2f" % (180 * ximes/3.141592))})
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except:
                # Best-effort: socket may have closed mid-send.
                pass

    def check_origin(self, origin):
        # Accept any origin.
        # See http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        return True
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint that streams motor-bench telemetry every 20 ms and
    applies incoming test-signal parameters and PID gains to module globals.

    NOTE(review): Python 2 code (print statements); parameters are shared with
    the control loop through module globals.
    """

    def open(self):
        global socketOK
        print 'connection opened...'
        socketOK = True
        # Push telemetry to the client 50 times per second.
        self.callback = PeriodicCallback(self.sendToSocket, 20)
        self.callback.start()

    def on_message(self, message):
        global vref, vrefDroit, vrefGauche, typeSignal, offset, amplitude, frequence, Kp, Ki, Kd, moteurint, timeLastReceived, socketOK
        jsonMessage = json.loads(message)
        # Reset the data-reception timeout.
        timeLastReceived = time.time()
        if jsonMessage.get('typeSignal') != None:
            typeSignal = int(jsonMessage.get('typeSignal'))
        if jsonMessage.get('offset') != None:
            offset = float(jsonMessage.get('offset'))
        if jsonMessage.get('amplitude') != None:
            amplitude = float(jsonMessage.get('amplitude'))
        if jsonMessage.get('frequence') != None:
            frequence = float(jsonMessage.get('frequence'))
        if jsonMessage.get('Kp') != None:
            Kp = float(jsonMessage.get('Kp'))
        if jsonMessage.get('Ki') != None:
            Ki = float(jsonMessage.get('Ki'))
        if jsonMessage.get('Kd') != None:
            Kd = float(jsonMessage.get('Kd'))
        if jsonMessage.get('moteurint') != None:
            moteurint = int(jsonMessage.get('moteurint'))
        if not socketOK:
            # Connection flagged down: neutralize the excitation signal.
            typeSignal = 0
            offset = 0.
            amplitude = 0.
            frequence = 0.

    def on_close(self):
        global socketOK, vrefDroit, vrefGauche
        print 'connection closed...'
        socketOK = False
        # Zero both wheel speed setpoints when the client disconnects.
        vrefDroit = 0.
        vrefGauche = 0.

    def sendToSocket(self):
        """Serialize current setpoint/measurements/commands as JSON and push
        them to the client."""
        global socketOK, commandeDroit, commandeGauche, vref, omegaDroit, omegaGauche, tensionAlim
        tcourant = time.time() - T0
        aEnvoyer = json.dumps({
            'Temps': ("%.2f" % tcourant),
            'Consigne': ("%.2f" % vref),
            'omegaDroit': ("%.2f" % omegaDroit),
            'omegaGauche': ("%.2f" % omegaGauche),
            'commandeDroit': ("%.2f" % commandeDroit),
            'commandeGauche': ("%.2f" % commandeGauche),
            'tensionAlim': ("%.2f" % tensionAlim)})
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except:
                # Best-effort: socket may have closed mid-send.
                pass

    def check_origin(self, origin):
        # Accept any origin.
        # See http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        return True
    async def start(self):
        """Launch the configurable-http-proxy subprocess and wait until both
        its public and API endpoints respond, then start a periodic liveness
        check of the process."""
        public_server = Server.from_url(self.public_url)
        api_server = Server.from_url(self.api_url)
        env = os.environ.copy()
        # The proxy reads its auth token from the environment.
        env['CONFIGPROXY_AUTH_TOKEN'] = self.auth_token
        cmd = self.command + [
            '--ip', public_server.ip,
            '--port', str(public_server.port),
            '--api-ip', api_server.ip,
            '--api-port', str(api_server.port),
            '--error-target', url_path_join(self.hub.url, 'error'),
        ]
        if self.app.subdomain_host:
            cmd.append('--host-routing')
        if self.debug:
            cmd.extend(['--log-level', 'debug'])
        if self.ssl_key:
            cmd.extend(['--ssl-key', self.ssl_key])
        if self.ssl_cert:
            cmd.extend(['--ssl-cert', self.ssl_cert])
        if self.app.statsd_host:
            cmd.extend([
                '--statsd-host', self.app.statsd_host,
                '--statsd-port', str(self.app.statsd_port),
                '--statsd-prefix', self.app.statsd_prefix + '.chp'
            ])
        # Warn if SSL is not used
        if ' --ssl' not in ' '.join(cmd):
            self.log.warning("Running JupyterHub without SSL."
                             " I hope there is SSL termination happening somewhere else...")
        self.log.info("Starting proxy @ %s", public_server.bind_url)
        self.log.debug("Proxy cmd: %s", cmd)
        # On Windows (os.name == 'nt') run via the shell; elsewhere exec directly.
        shell = os.name == 'nt'
        try:
            self.proxy_process = Popen(cmd, env=env, start_new_session=True,
                                       shell=shell)
        except FileNotFoundError as e:
            self.log.error(
                "Failed to find proxy %r\n"
                "The proxy can be installed with `npm install -g configurable-http-proxy`."
                "To install `npm`, install nodejs which includes `npm`."
                "If you see an `EACCES` error or permissions error, refer to the `npm` "
                "documentation on How To Prevent Permissions Errors."
                % self.command
            )
            raise

        def _check_process():
            # Raise if the proxy subprocess has already exited.
            status = self.proxy_process.poll()
            if status is not None:
                e = RuntimeError(
                    "Proxy failed to start with exit code %i" % status)
                raise e from None

        # Wait for both endpoints to come up (up to ~10 one-second attempts
        # each), re-checking the process between attempts; the final wait_up
        # propagates a timeout as an error.
        for server in (public_server, api_server):
            for i in range(10):
                _check_process()
                try:
                    await server.wait_up(1)
                except TimeoutError:
                    continue
                else:
                    break
            await server.wait_up(1)
        _check_process()
        self.log.debug("Proxy started and appears to be up")
        # Periodically verify the proxy process is still running.
        pc = PeriodicCallback(self.check_running,
                              1e3 * self.check_running_interval)
        self._check_running_callback = pc
        pc.start()
class ProcStats:
    """Collects periodic Moonraker process statistics (CPU usage, RSS memory,
    CPU temperature and, on Raspberry Pi, throttle flags) and publishes them
    via notifications and the /machine/proc_stats endpoint."""

    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.ioloop = IOLoop.current()
        # Sample process statistics on a fixed timer.
        self.stat_update_cb = PeriodicCallback(
            self._handle_stat_update, STAT_UPDATE_TIME_MS)  # type: ignore
        self.vcgencmd: Optional[shell_command.ShellCommand] = None
        if os.path.exists(VC_GEN_CMD_FILE):
            # Raspberry Pi: vcgencmd reports undervoltage/throttle state.
            logging.info("Detected 'vcgencmd', throttle checking enabled")
            shell_cmd: shell_command.ShellCommandFactory
            shell_cmd = self.server.load_component(config, "shell_command")
            self.vcgencmd = shell_cmd.build_shell_command(
                "vcgencmd get_throttled")
            self.server.register_notification("proc_stats:cpu_throttled")
        else:
            logging.info("Unable to find 'vcgencmd', throttle checking "
                         "disabled")
        self.temp_file = pathlib.Path(TEMPERATURE_PATH)
        # NOTE(review): attribute is named 'smaps' but points at
        # STATM_FILE_PATH — confirm which /proc file is intended.
        self.smaps = pathlib.Path(STATM_FILE_PATH)
        self.server.register_endpoint(
            "/machine/proc_stats", ["GET"], self._handle_stat_request)
        self.server.register_event_handler(
            "server:klippy_shutdown", self._handle_shutdown)
        self.server.register_notification("proc_stats:proc_stat_update")
        # Ring buffer holding the most recent 30 samples.
        self.proc_stat_queue: Deque[Dict[str, Any]] = deque(maxlen=30)
        self.last_update_time = time.time()
        self.last_proc_time = time.process_time()
        self.throttle_check_lock = Lock()
        self.total_throttled: int = 0
        self.last_throttled: int = 0
        self.update_sequence: int = 0
        self.stat_update_cb.start()

    async def _handle_stat_request(self, web_request: WebRequest
                                   ) -> Dict[str, Any]:
        """GET /machine/proc_stats: recent samples, throttle state and
        current CPU temperature."""
        ts: Optional[Dict[str, Any]] = None
        if self.vcgencmd is not None:
            ts = await self._check_throttled_state()
        return {
            'moonraker_stats': list(self.proc_stat_queue),
            'throttled_state': ts,
            'cpu_temp': self._get_cpu_temperature()
        }

    async def _handle_shutdown(self) -> None:
        """Dump collected usage statistics to the log when Klippy shuts down."""
        msg = "\nMoonraker System Usage Statistics:"
        for stats in self.proc_stat_queue:
            msg += f"\n{self._format_stats(stats)}"
        msg += f"\nCPU Temperature: {self._get_cpu_temperature()}"
        logging.info(msg)
        if self.vcgencmd is not None:
            ts = await self._check_throttled_state()
            logging.info(f"Throttled Flags: {' '.join(ts['flags'])}")

    async def _handle_stat_update(self) -> None:
        """Timer callback: sample CPU/memory, publish the update, and poll
        the throttle state every THROTTLE_CHECK_INTERVAL samples."""
        update_time = time.time()
        proc_time = time.process_time()
        time_diff = update_time - self.last_update_time
        # CPU% = process time consumed over the wall-clock interval.
        usage = round((proc_time - self.last_proc_time) / time_diff * 100, 2)
        mem, mem_units = self._get_memory_usage()
        cpu_temp = self._get_cpu_temperature()
        result = {
            "time": update_time,
            "cpu_usage": usage,
            "memory": mem,
            "mem_units": mem_units,
        }
        self.proc_stat_queue.append(result)
        self.server.send_event("proc_stats:proc_stat_update", {
            'moonraker_stats': result,
            'cpu_temp': cpu_temp
        })
        self.last_update_time = update_time
        self.last_proc_time = proc_time
        self.update_sequence += 1
        if self.update_sequence == THROTTLE_CHECK_INTERVAL:
            self.update_sequence = 0
            if self.vcgencmd is not None:
                ts = await self._check_throttled_state()
                cur_throttled = ts['bits']
                # Record flags never seen before in the rollover log.
                if cur_throttled & ~self.total_throttled:
                    self.server.add_log_rollover_item(
                        'throttled',
                        f"CPU Throttled Flags: {ts['flags']}")
                # Notify clients only when the state changes.
                if cur_throttled != self.last_throttled:
                    self.server.send_event("proc_stats:cpu_throttled", ts)
                self.last_throttled = cur_throttled
                self.total_throttled |= cur_throttled

    async def _check_throttled_state(self) -> Dict[str, Any]:
        """Run 'vcgencmd get_throttled' and decode its hex bitmask into
        human-readable flags.  Returns {'bits': 0, 'flags': ["?"]} on error."""
        async with self.throttle_check_lock:
            assert self.vcgencmd is not None
            try:
                resp = await self.vcgencmd.run_with_response(
                    timeout=.5, log_complete=False)
                ts = int(resp.strip().split("=")[-1], 16)
            except Exception:
                return {'bits': 0, 'flags': ["?"]}
            flags = []
            for flag, desc in THROTTLED_FLAGS.items():
                if flag & ts:
                    flags.append(desc)
            return {'bits': ts, 'flags': flags}

    def _get_memory_usage(self) -> Tuple[Optional[int], Optional[str]]:
        """Return (rss, units) parsed from the process memory file, or
        (None, None) on any failure."""
        try:
            mem_data = self.smaps.read_text()
            rss_match = re.search(r"Rss:\s+(\d+)\s+(\w+)", mem_data)
            if rss_match is None:
                return None, None
            mem = int(rss_match.group(1))
            units = rss_match.group(2)
        except Exception:
            return None, None
        return mem, units

    def _get_cpu_temperature(self) -> Optional[float]:
        """Read the SoC temperature (sysfs reports millidegrees; convert to
        degrees Celsius), or None when unavailable/unreadable."""
        temp = None
        if self.temp_file.exists():
            try:
                res = int(self.temp_file.read_text().strip())
                temp = res / 1000.
            except Exception:
                return None
        return temp

    def _format_stats(self, stats: Dict[str, Any]) -> str:
        """Render one sample for the log.

        FIX: format spec was ':2f' (minimum width 2, default 6 decimals,
        e.g. '1.234500'); the intended spec is ':.2f' (two decimal places).
        """
        return f"System Time: {stats['time']:.2f}, " \
               f"Usage: {stats['cpu_usage']}%, " \
               f"Memory: {stats['memory']} {stats['mem_units']}"

    def close(self) -> None:
        """Stop the periodic statistics sampler."""
        self.stat_update_cb.stop()
class PeriodicCallback(param.Parameterized):
    """
    Periodic encapsulates a periodic callback which will run both
    in tornado based notebook environments and on bokeh server. By
    default the callback will run until the stop method is called,
    but count and timeout values can be set to limit the number of
    executions or the maximum length of time for which the callback
    will run. The callback may also be started and stopped by setting
    the running parameter to True or False respectively.
    """

    callback = param.Callable(doc="""
        The callback to execute periodically.""")

    count = param.Integer(default=None, doc="""
        Number of times the callback will be executed, by default
        this is unlimited.""")

    period = param.Integer(default=500, doc="""
        Period in milliseconds at which the callback is executed.""")

    timeout = param.Integer(default=None, doc="""
        Timeout in milliseconds from the start time at which the callback
        expires.""")

    running = param.Boolean(default=False, doc="""
        Toggles whether the periodic callback is currently running.""")

    def __init__(self, **params):
        super().__init__(**params)
        self._counter = 0          # executions since (re)start
        self._start_time = None    # wall-clock start, for timeout checks
        self._cb = None            # underlying bokeh/tornado callback handle
        self._updating = False     # guards against watcher re-entrancy
        self._doc = None           # bokeh Document when running on server

    @param.depends('running', watch=True)
    def _start(self):
        # Watcher: mirror running=True into start(), unless start()/stop()
        # itself is toggling the parameter (the _updating guard).
        if not self.running or self._updating:
            return
        self.start()

    @param.depends('running', watch=True)
    def _stop(self):
        # Watcher: mirror running=False into stop() (same guard as _start).
        if self.running or self._updating:
            return
        self.stop()

    @param.depends('period', watch=True)
    def _update_period(self):
        # Changing the period requires re-registering the callback.
        if self._cb:
            self.stop()
            self.start()

    def _periodic_callback(self):
        # Mark the app busy for the duration of the user callback.
        with edit_readonly(state):
            state.busy = True
        try:
            self.callback()
        finally:
            with edit_readonly(state):
                state.busy = False
        self._counter += 1
        if self.timeout is not None:
            dt = (time.time() - self._start_time) * 1000
            if dt > self.timeout:
                self.stop()
        if self._counter == self.count:
            self.stop()

    @property
    def counter(self):
        """
        Returns the execution count of the periodic callback.
        """
        return self._counter

    def _cleanup(self, session_context):
        # Session-destroyed hook: make sure the callback does not outlive
        # the session.
        self.stop()

    def start(self):
        """
        Starts running the periodic callback.
        """
        if self._cb is not None:
            raise RuntimeError('Periodic callback has already started.')
        if not self.running:
            # Keep the `running` parameter in sync without re-triggering
            # the watchers.
            try:
                self._updating = True
                self.running = True
            finally:
                self._updating = False
        self._start_time = time.time()
        if state.curdoc:
            # Server context: register on the bokeh Document.
            self._doc = state.curdoc
            self._cb = self._doc.add_periodic_callback(
                self._periodic_callback, self.period)
        else:
            # Notebook/standalone context: use tornado directly.
            from tornado.ioloop import PeriodicCallback
            self._cb = PeriodicCallback(self._periodic_callback, self.period)
            self._cb.start()
        try:
            state.on_session_destroyed(self._cleanup)
        except Exception:
            pass

    def stop(self):
        """
        Stops running the periodic callback.
        """
        if self.running:
            # Keep the `running` parameter in sync without re-triggering
            # the watchers.
            try:
                self._updating = True
                self.running = False
            finally:
                self._updating = False
        self._counter = 0
        self._timeout = None
        if self._doc:
            self._doc.remove_periodic_callback(self._cb)
        elif self._cb:
            self._cb.stop()
        self._cb = None
        # Deregister the session-destroyed cleanup hook, if any.
        doc = self._doc or _curdoc()
        if doc:
            doc.session_destroyed_callbacks = {
                cb for cb in doc.session_destroyed_callbacks
                if cb is not self._cleanup
            }
        self._doc = None
class WebSocketProtocol13(WebSocketProtocol):
    """Implementation of the WebSocket protocol from RFC 6455.

    This class supports versions 7 and 8 of the protocol in addition to the
    final version 13.
    """
    # Bit masks for the first byte of a frame.
    FIN = 0x80
    RSV1 = 0x40
    RSV2 = 0x20
    RSV3 = 0x10
    RSV_MASK = RSV1 | RSV2 | RSV3
    OPCODE_MASK = 0x0f

    def __init__(self, handler, mask_outgoing=False,
                 compression_options=None):
        WebSocketProtocol.__init__(self, handler)
        self.mask_outgoing = mask_outgoing
        # Per-frame parser state, filled in as the frame header is read.
        self._final_frame = False
        self._frame_opcode = None
        self._masked_frame = None
        self._frame_mask = None
        self._frame_length = None
        # Accumulator for a fragmented message and the opcode of its
        # first fragment (continuation frames carry opcode 0).
        self._fragmented_message_buffer = None
        self._fragmented_message_opcode = None
        # Timeout handle used to force-close a connection whose client
        # does not complete the closing handshake.
        self._waiting = None
        self._compression_options = compression_options
        self._decompressor = None
        self._compressor = None
        self._frame_compressed = None
        # The total uncompressed size of all messages received or sent.
        # Unicode messages are encoded to utf8.
        # Only for testing; subject to change.
        self._message_bytes_in = 0
        self._message_bytes_out = 0
        # The total size of all packets received or sent.  Includes
        # the effect of compression, frame overhead, and control frames.
        self._wire_bytes_in = 0
        self._wire_bytes_out = 0
        self.ping_callback = None
        self.last_ping = 0
        self.last_pong = 0

    def accept_connection(self):
        """Validate the upgrade request and complete the handshake.

        On invalid headers, responds 400 and finishes; on a malformed
        request during the handshake, aborts the connection.
        """
        try:
            self._handle_websocket_headers()
        except ValueError:
            self.handler.set_status(400)
            log_msg = "Missing/Invalid WebSocket headers"
            self.handler.finish(log_msg)
            gen_log.debug(log_msg)
            return

        try:
            self._accept_connection()
        except ValueError:
            gen_log.debug("Malformed WebSocket request received",
                          exc_info=True)
            self._abort()
            return

    def _handle_websocket_headers(self):
        """Verifies all invariant- and required headers

        If a header is missing or have an incorrect value ValueError will be
        raised
        """
        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
        if not all(map(lambda f: self.request.headers.get(f), fields)):
            raise ValueError("Missing/Invalid WebSocket headers")

    @staticmethod
    def compute_accept_value(key):
        """Computes the value for the Sec-WebSocket-Accept header,
        given the value for Sec-WebSocket-Key.
        """
        sha1 = hashlib.sha1()
        sha1.update(utf8(key))
        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
        return native_str(base64.b64encode(sha1.digest()))

    def _challenge_response(self):
        # Server's accept value derived from the client-supplied key.
        return WebSocketProtocol13.compute_accept_value(
            self.request.headers.get("Sec-Websocket-Key"))

    def _accept_connection(self):
        """Negotiate subprotocol/extensions and switch to websocket mode."""
        subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
        subprotocols = [s.strip() for s in subprotocols.split(',')]
        if subprotocols:
            selected = self.handler.select_subprotocol(subprotocols)
            if selected:
                # It is an error to use a subprotocol the client didn't offer.
                assert selected in subprotocols
                self.handler.set_header("Sec-WebSocket-Protocol", selected)

        extensions = self._parse_extensions_header(self.request.headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                # TODO: negotiate parameters if compression_options
                # specifies limits.
                self._create_compressors('server', ext[1],
                                         self._compression_options)
                if ('client_max_window_bits' in ext[1] and
                        ext[1]['client_max_window_bits'] is None):
                    # Don't echo an offered client_max_window_bits
                    # parameter with no value.
                    del ext[1]['client_max_window_bits']
                self.handler.set_header("Sec-WebSocket-Extensions",
                                        httputil._encode_header(
                                            'permessage-deflate', ext[1]))
                break

        # Switch protocols: 101 response completes the HTTP handshake.
        self.handler.clear_header("Content-Type")
        self.handler.set_status(101)
        self.handler.set_header("Upgrade", "websocket")
        self.handler.set_header("Connection", "Upgrade")
        self.handler.set_header("Sec-WebSocket-Accept",
                                self._challenge_response())
        self.handler.finish()

        # Take over the raw stream from the HTTP handler.
        self.handler._attach_stream()
        self.stream = self.handler.stream

        self.start_pinging()
        self._run_callback(self.handler.open, *self.handler.open_args,
                           **self.handler.open_kwargs)
        self._receive_frame()

    def _parse_extensions_header(self, headers):
        # Returns a list of (extension-name, params-dict) pairs.
        extensions = headers.get("Sec-WebSocket-Extensions", '')
        if extensions:
            return [httputil._parse_header(e.strip())
                    for e in extensions.split(',')]
        return []

    def _process_server_headers(self, key, headers):
        """Process the headers sent by the server to this client connection.

        'key' is the websocket handshake challenge/response key.
        """
        assert headers['Upgrade'].lower() == 'websocket'
        assert headers['Connection'].lower() == 'upgrade'
        accept = self.compute_accept_value(key)
        assert headers['Sec-Websocket-Accept'] == accept

        extensions = self._parse_extensions_header(headers)
        for ext in extensions:
            if (ext[0] == 'permessage-deflate' and
                    self._compression_options is not None):
                self._create_compressors('client', ext[1])
            else:
                # NOTE(review): %r is never interpolated here -- ValueError
                # receives a (format, ext) tuple as-is; confirm against
                # upstream before changing.
                raise ValueError("unsupported extension %r", ext)

    def _get_compressor_options(self, side, agreed_parameters,
                                compression_options=None):
        """Converts a websocket agreed_parameters set to keyword arguments
        for our compressor objects.
        """
        options = dict(
            persistent=(side + '_no_context_takeover') not in
            agreed_parameters)
        wbits_header = agreed_parameters.get(side + '_max_window_bits', None)
        if wbits_header is None:
            options['max_wbits'] = zlib.MAX_WBITS
        else:
            options['max_wbits'] = int(wbits_header)
        options['compression_options'] = compression_options
        return options

    def _create_compressors(self, side, agreed_parameters,
                            compression_options=None):
        # TODO: handle invalid parameters gracefully
        allowed_keys = set(['server_no_context_takeover',
                            'client_no_context_takeover',
                            'server_max_window_bits',
                            'client_max_window_bits'])
        for key in agreed_parameters:
            if key not in allowed_keys:
                raise ValueError("unsupported compression parameter %r" % key)
        other_side = 'client' if (side == 'server') else 'server'
        self._compressor = _PerMessageDeflateCompressor(
            **self._get_compressor_options(side, agreed_parameters,
                                           compression_options))
        self._decompressor = _PerMessageDeflateDecompressor(
            **self._get_compressor_options(other_side, agreed_parameters,
                                           compression_options))

    def _write_frame(self, fin, opcode, data, flags=0):
        """Serialize and write one frame (RFC 6455 section 5.2 layout)."""
        if fin:
            finbit = self.FIN
        else:
            finbit = 0
        frame = struct.pack("B", finbit | opcode | flags)
        data_len = len(data)
        if self.mask_outgoing:
            mask_bit = 0x80
        else:
            mask_bit = 0
        # Payload length is encoded in 7 bits, or 7+16, or 7+64 bits.
        if data_len < 126:
            frame += struct.pack("B", data_len | mask_bit)
        elif data_len <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, data_len)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, data_len)
        if self.mask_outgoing:
            mask = os.urandom(4)
            data = mask + _websocket_mask(mask, data)
        frame += data
        self._wire_bytes_out += len(frame)
        return self.stream.write(frame)

    def write_message(self, message, binary=False):
        """Sends the given message to the client of this Web Socket."""
        if binary:
            opcode = 0x2
        else:
            opcode = 0x1
        message = tornado.escape.utf8(message)
        assert isinstance(message, bytes)
        self._message_bytes_out += len(message)
        flags = 0
        if self._compressor:
            message = self._compressor.compress(message)
            flags |= self.RSV1
        # For historical reasons, write methods in Tornado operate in a semi-synchronous
        # mode in which awaiting the Future they return is optional (But errors can
        # still be raised). This requires us to go through an awkward dance here
        # to transform the errors that may be returned while presenting the same
        # semi-synchronous interface.
        try:
            fut = self._write_frame(True, opcode, message, flags=flags)
        except StreamClosedError:
            raise WebSocketClosedError()

        @gen.coroutine
        def wrapper():
            try:
                yield fut
            except StreamClosedError:
                raise WebSocketClosedError()
        return wrapper()

    def write_ping(self, data):
        """Send ping frame."""
        assert isinstance(data, bytes)
        self._write_frame(True, 0x9, data)

    def _receive_frame(self):
        # Kick off the read of the next frame's 2-byte header; the rest
        # of the frame is parsed by the chained _on_* callbacks.
        try:
            self.stream.read_bytes(2, self._on_frame_start)
        except StreamClosedError:
            self._abort()

    def _on_frame_start(self, data):
        """Parse the first two header bytes and schedule the next read."""
        self._wire_bytes_in += len(data)
        header, payloadlen = struct.unpack("BB", data)
        self._final_frame = header & self.FIN
        reserved_bits = header & self.RSV_MASK
        self._frame_opcode = header & self.OPCODE_MASK
        self._frame_opcode_is_control = self._frame_opcode & 0x8
        if self._decompressor is not None and self._frame_opcode != 0:
            # Compressed frames are flagged via RSV1 on the first fragment.
            self._frame_compressed = bool(reserved_bits & self.RSV1)
            reserved_bits &= ~self.RSV1
        if reserved_bits:
            # client is using as-yet-undefined extensions; abort
            self._abort()
            return
        self._masked_frame = bool(payloadlen & 0x80)
        payloadlen = payloadlen & 0x7f
        if self._frame_opcode_is_control and payloadlen >= 126:
            # control frames must have payload < 126
            self._abort()
            return
        try:
            if payloadlen < 126:
                self._frame_length = payloadlen
                if self._masked_frame:
                    self.stream.read_bytes(4, self._on_masking_key)
                else:
                    self._read_frame_data(False)
            elif payloadlen == 126:
                self.stream.read_bytes(2, self._on_frame_length_16)
            elif payloadlen == 127:
                self.stream.read_bytes(8, self._on_frame_length_64)
        except StreamClosedError:
            self._abort()

    def _read_frame_data(self, masked):
        # Enforce the message size limit across all fragments, not just
        # the current frame.
        new_len = self._frame_length
        if self._fragmented_message_buffer is not None:
            new_len += len(self._fragmented_message_buffer)
        if new_len > (self.handler.max_message_size or 10 * 1024 * 1024):
            self.close(1009, "message too big")
            return
        self.stream.read_bytes(
            self._frame_length,
            self._on_masked_frame_data if masked else self._on_frame_data)

    def _on_frame_length_16(self, data):
        # Extended 16-bit payload length.
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!H", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self._read_frame_data(False)
        except StreamClosedError:
            self._abort()

    def _on_frame_length_64(self, data):
        # Extended 64-bit payload length.
        self._wire_bytes_in += len(data)
        self._frame_length = struct.unpack("!Q", data)[0]
        try:
            if self._masked_frame:
                self.stream.read_bytes(4, self._on_masking_key)
            else:
                self._read_frame_data(False)
        except StreamClosedError:
            self._abort()

    def _on_masking_key(self, data):
        self._wire_bytes_in += len(data)
        self._frame_mask = data
        try:
            self._read_frame_data(True)
        except StreamClosedError:
            self._abort()

    def _on_masked_frame_data(self, data):
        # Don't touch _wire_bytes_in; we'll do it in _on_frame_data.
        self._on_frame_data(_websocket_mask(self._frame_mask, data))

    def _on_frame_data(self, data):
        """Assemble fragments and dispatch complete messages."""
        handled_future = None

        self._wire_bytes_in += len(data)
        if self._frame_opcode_is_control:
            # control frames may be interleaved with a series of fragmented
            # data frames, so control frames must not interact with
            # self._fragmented_*
            if not self._final_frame:
                # control frames must not be fragmented
                self._abort()
                return
            opcode = self._frame_opcode
        elif self._frame_opcode == 0:  # continuation frame
            if self._fragmented_message_buffer is None:
                # nothing to continue
                self._abort()
                return
            self._fragmented_message_buffer += data
            if self._final_frame:
                opcode = self._fragmented_message_opcode
                data = self._fragmented_message_buffer
                self._fragmented_message_buffer = None
        else:  # start of new data message
            if self._fragmented_message_buffer is not None:
                # can't start new message until the old one is finished
                self._abort()
                return
            if self._final_frame:
                opcode = self._frame_opcode
            else:
                self._fragmented_message_opcode = self._frame_opcode
                self._fragmented_message_buffer = data

        if self._final_frame:
            handled_future = self._handle_message(opcode, data)

        if not self.client_terminated:
            if handled_future:
                # on_message is a coroutine, process more frames once it's done.
                handled_future.add_done_callback(
                    lambda future: self._receive_frame())
            else:
                self._receive_frame()

    def _handle_message(self, opcode, data):
        """Execute on_message, returning its Future if it is a coroutine."""
        if self.client_terminated:
            return

        if self._frame_compressed:
            data = self._decompressor.decompress(data)

        if opcode == 0x1:
            # UTF-8 data
            self._message_bytes_in += len(data)
            try:
                decoded = data.decode("utf-8")
            except UnicodeDecodeError:
                self._abort()
                return
            return self._run_callback(self.handler.on_message, decoded)
        elif opcode == 0x2:
            # Binary data
            self._message_bytes_in += len(data)
            return self._run_callback(self.handler.on_message, data)
        elif opcode == 0x8:
            # Close
            self.client_terminated = True
            if len(data) >= 2:
                self.handler.close_code = struct.unpack('>H', data[:2])[0]
            if len(data) > 2:
                self.handler.close_reason = to_unicode(data[2:])
            # Echo the received close code, if any (RFC 6455 section 5.5.1).
            self.close(self.handler.close_code)
        elif opcode == 0x9:
            # Ping
            try:
                # Reply with a pong carrying the same payload.
                self._write_frame(True, 0xA, data)
            except StreamClosedError:
                self._abort()
            self._run_callback(self.handler.on_ping, data)
        elif opcode == 0xA:
            # Pong
            self.last_pong = IOLoop.current().time()
            return self._run_callback(self.handler.on_pong, data)
        else:
            self._abort()

    def close(self, code=None, reason=None):
        """Closes the WebSocket connection."""
        if not self.server_terminated:
            if not self.stream.closed():
                if code is None and reason is not None:
                    code = 1000  # "normal closure" status code
                if code is None:
                    close_data = b''
                else:
                    close_data = struct.pack('>H', code)
                if reason is not None:
                    close_data += utf8(reason)
                try:
                    self._write_frame(True, 0x8, close_data)
                except StreamClosedError:
                    self._abort()
            self.server_terminated = True
        if self.client_terminated:
            if self._waiting is not None:
                self.stream.io_loop.remove_timeout(self._waiting)
                self._waiting = None
            self.stream.close()
        elif self._waiting is None:
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                self.stream.io_loop.time() + 5, self._abort)

    @property
    def ping_interval(self):
        # Handler override wins; 0 disables periodic pings.
        interval = self.handler.ping_interval
        if interval is not None:
            return interval
        return 0

    @property
    def ping_timeout(self):
        # Handler override wins; default scales with the ping interval.
        timeout = self.handler.ping_timeout
        if timeout is not None:
            return timeout
        return max(3 * self.ping_interval, 30)

    def start_pinging(self):
        """Start sending periodic pings to keep the connection alive"""
        if self.ping_interval > 0:
            self.last_ping = self.last_pong = IOLoop.current().time()
            self.ping_callback = PeriodicCallback(
                self.periodic_ping, self.ping_interval * 1000)
            self.ping_callback.start()

    def periodic_ping(self):
        """Send a ping to keep the websocket alive

        Called periodically if the websocket_ping_interval is set and non-zero.
        """
        if self.stream.closed() and self.ping_callback is not None:
            self.ping_callback.stop()
            return

        # Check for timeout on pong. Make sure that we really have
        # sent a recent ping in case the machine with both server and
        # client has been suspended since the last ping.
        now = IOLoop.current().time()
        since_last_pong = now - self.last_pong
        since_last_ping = now - self.last_ping
        if (since_last_ping < 2 * self.ping_interval and
                since_last_pong > self.ping_timeout):
            self.close()
            return

        self.write_ping(b'')
        self.last_ping = now
class PeriodicCallback(param.Parameterized):
    """
    Periodic encapsulates a periodic callback which will run both
    in tornado based notebook environments and on bokeh server. By
    default the callback will run until the stop method is called,
    but count and timeout values can be set to limit the number of
    executions or the maximum length of time for which the callback
    will run.
    """

    callback = param.Callable(doc="""
        The callback to execute periodically.""")

    count = param.Integer(default=None, doc="""
        Number of times the callback will be executed, by default
        this is unlimited.""")

    period = param.Integer(default=500, doc="""
        Period in milliseconds at which the callback is executed.""")

    timeout = param.Integer(default=None, doc="""
        Timeout in seconds from the start time at which the callback
        expires""")

    def __init__(self, **params):
        super(PeriodicCallback, self).__init__(**params)
        self._counter = 0        # executions since the last (re)start
        self._start_time = None  # wall-clock time of the last start()
        self._timeout = None     # active timeout, copied from the param
        self._cb = None          # underlying bokeh/tornado callback handle
        self._doc = None         # bokeh Document, when running on a server

    def start(self):
        """
        Starts running the periodic callback.

        Raises
        ------
        RuntimeError
            If the callback has already been started.
        """
        if self._cb is not None:
            raise RuntimeError('Periodic callback has already started.')
        self._start_time = time.time()
        # Copy the timeout parameter into the active slot. Previously the
        # parameter was never consulted (_timeout stayed None), so the
        # timeout silently never expired.
        self._timeout = self.timeout
        if _curdoc().session_context:
            # Server context: let the bokeh Document drive the timer.
            self._doc = _curdoc()
            self._cb = self._doc.add_periodic_callback(
                self._periodic_callback, self.period)
        else:
            # Notebook/standalone context: use tornado's IOLoop timer.
            from tornado.ioloop import PeriodicCallback
            self._cb = PeriodicCallback(self._periodic_callback, self.period)
            self._cb.start()

    def _periodic_callback(self):
        """Invoke the user callback, then enforce count/timeout limits."""
        self.callback()
        self._counter += 1
        if self._timeout is not None:
            # timeout is expressed in seconds (see the parameter doc).
            dt = (time.time() - self._start_time)
            if dt > self._timeout:
                self.stop()
        if self._counter == self.count:
            self.stop()

    def stop(self):
        """
        Stops running the periodic callback. Safe to call even if the
        callback was never started.
        """
        self._counter = 0
        self._timeout = None
        if self._doc:
            self._doc.remove_periodic_callback(self._cb)
        elif self._cb is not None:
            # Guard: if stop() is called before start(), _cb is still None
            # and calling .stop() on it would raise AttributeError.
            self._cb.stop()
        self._cb = None
        # Reset the Document reference so a subsequent start() in a
        # different context does not operate on a stale Document.
        self._doc = None
class DataStore:
    """Caches printer temperature history and recent gcode traffic.

    Subscribes to Klippy status updates to maintain a rolling window of
    temperature/target/power/speed samples per sensor, and keeps a bounded
    queue of recent gcode commands and responses. Both caches are exposed
    via HTTP endpoints.
    """

    def __init__(self, config):
        self.server = config.get_server()
        self.temp_store_size = config.getint('temperature_store_size', 1200)
        self.gcode_store_size = config.getint('gcode_store_size', 1000)

        # Temperature Store Tracking
        # last_temps maps sensor name -> (temperature, target, power, speed)
        self.last_temps = {}
        self.gcode_queue = deque(maxlen=self.gcode_store_size)
        self.temperature_store = {}
        self.temp_update_cb = PeriodicCallback(
            self._update_temperature_store, TEMPERATURE_UPDATE_MS)

        # Register status update event
        self.server.register_event_handler(
            "server:status_update", self._set_current_temps)
        self.server.register_event_handler(
            "server:gcode_response", self._update_gcode_store)
        self.server.register_event_handler(
            "server:klippy_ready", self._init_sensors)

        # Register endpoints
        self.server.register_endpoint(
            "/server/temperature_store", ['GET'],
            self._handle_temp_store_request)
        self.server.register_endpoint(
            "/server/gcode_store", ['GET'],
            self._handle_gcode_store_request)

    async def _init_sensors(self):
        """Query Klippy for available sensors and (re)build the temp store."""
        klippy_apis = self.server.lookup_component('klippy_apis')
        # Fetch sensors
        try:
            result = await klippy_apis.query_objects({'heaters': None})
        except self.server.error as e:
            logging.info(f"Error Configuring Sensors: {e}")
            return
        sensors = result.get("heaters", {}).get("available_sensors", [])

        if sensors:
            # Add Subscription
            sub = {s: None for s in sensors}
            try:
                status = await klippy_apis.subscribe_objects(sub)
            except self.server.error as e:
                logging.info(f"Error subscribing to sensors: {e}")
                return
            logging.info(f"Configuring available sensors: {sensors}")
            new_store = {}
            for sensor in sensors:
                fields = list(status.get(sensor, {}).keys())
                if sensor in self.temperature_store:
                    # Keep accumulated history across reconnects.
                    new_store[sensor] = self.temperature_store[sensor]
                else:
                    new_store[sensor] = {
                        'temperatures': deque(maxlen=self.temp_store_size)}
                    # Only track the optional series the sensor reports.
                    for item in ["target", "power", "speed"]:
                        if item in fields:
                            new_store[sensor][f"{item}s"] = deque(
                                maxlen=self.temp_store_size)
                if sensor not in self.last_temps:
                    self.last_temps[sensor] = (0., 0., 0., 0.)
            self.temperature_store = new_store
            # Prune unconfigured sensors in self.last_temps
            for sensor in list(self.last_temps.keys()):
                if sensor not in self.temperature_store:
                    del self.last_temps[sensor]
            # Update initial temperatures
            self._set_current_temps(status)
            self.temp_update_cb.start()
        else:
            logging.info("No sensors found")
            self.last_temps = {}
            self.temperature_store = {}
            self.temp_update_cb.stop()

    def _set_current_temps(self, data):
        """Record the most recently reported values for each tracked sensor."""
        for sensor in self.temperature_store:
            if sensor in data:
                last_val = self.last_temps[sensor]
                # Fall back to the previous value for any field a partial
                # status update omits.
                self.last_temps[sensor] = (
                    round(data[sensor].get('temperature', last_val[0]), 2),
                    data[sensor].get('target', last_val[1]),
                    data[sensor].get('power', last_val[2]),
                    data[sensor].get('speed', last_val[3]))

    def _update_temperature_store(self):
        """Append the latest sample of each sensor to its rolling window."""
        # XXX - If klippy is not connected, set values to zero
        # as they are unknown?
        for sensor, vals in self.last_temps.items():
            self.temperature_store[sensor]['temperatures'].append(vals[0])
            for val, item in zip(vals[1:], ["targets", "powers", "speeds"]):
                if item in self.temperature_store[sensor]:
                    self.temperature_store[sensor][item].append(val)

    async def _handle_temp_store_request(self, web_request):
        """Return the temperature history as plain lists (JSON friendly)."""
        store = {}
        for name, sensor in self.temperature_store.items():
            store[name] = {k: list(v) for k, v in sensor.items()}
        return store

    async def close(self):
        self.temp_update_cb.stop()

    def _update_gcode_store(self, response):
        """Queue a gcode response received from Klippy."""
        curtime = time.time()
        self.gcode_queue.append(
            {'message': response, 'time': curtime, 'type': "response"})

    def store_gcode_command(self, script):
        """Queue each non-empty line of a gcode script as a command entry.

        Fix: previously the entire ``script`` was appended once per line,
        duplicating a multi-line script N times in the queue; each entry
        now carries the individual command.
        """
        curtime = time.time()
        for cmd in script.split('\n'):
            cmd = cmd.strip()
            if not cmd:
                continue
            self.gcode_queue.append(
                {'message': cmd, 'time': curtime, 'type': "command"})

    async def _handle_gcode_store_request(self, web_request):
        """Return the most recent gcode entries (optionally limited)."""
        count = web_request.get_int("count", None)
        if count is not None:
            gc_responses = list(self.gcode_queue)[-count:]
        else:
            gc_responses = list(self.gcode_queue)
        return {'gcode_store': gc_responses}
itertools.chain(*(entry[b"peak_list"][b"ss"] for entry in new_data))), "fs": list( itertools.chain(*(entry[b"peak_list"][b"fs"] for entry in new_data))), })) print("Got3") except IndexError: pass # In[ ]: cb = PeriodicCallback(update_hitrate_stream, 2000) cb.start() def reset_plots(): # type: () -> None """Reset all plots to initial state.""" hitratestream.send(pandas.DataFrame({"hitrate": [0.0] * 5000})) peakstream.send( pandas.DataFrame({ "ss": [-1.0, 0.0, 1.0] + [0] * 499997, "fs": [-1.0, 0.0, 1.0] + [0] * 499997, }), ) # In[ ]:
class DRMAACluster(object):
    def __init__(self, template=None, cleanup_interval=1000, hostname=None,
                 script=None, preexec_commands=(), copy_script=True,
                 **kwargs):
        """
        Dask workers launched by a DRMAA-compatible cluster

        Parameters
        ----------
        jobName: string
            Name of the job as known by the DRMAA cluster.
        script: string (optional)
            Path to the dask-worker executable script.
            A temporary file will be made if none is provided (recommended)
        copy_script: bool
            Whether should copy the passed script to the current working
            directory. This is primarily to work around an issue with SGE.
        args: list
            Extra string arguments to pass to dask-worker
        outputPath: string
        errorPath: string
        workingDirectory: string
            Where dask-worker runs, defaults to current directory
        nativeSpecification: string
            Options native to the job scheduler

        Examples
        --------
        >>> from dask_drmaa import DRMAACluster          # doctest: +SKIP
        >>> cluster = DRMAACluster()                     # doctest: +SKIP
        >>> cluster.start_workers(10)                    # doctest: +SKIP

        >>> from distributed import Client               # doctest: +SKIP
        >>> client = Client(cluster)                     # doctest: +SKIP

        >>> future = client.submit(lambda x: x + 1, 10)  # doctest: +SKIP
        >>> future.result()                              # doctest: +SKIP
        11
        """
        self.hostname = hostname or socket.gethostname()
        logger.info("Start local scheduler at %s", self.hostname)
        # Local scheduler with no workers; DRMAA jobs connect to it.
        self.local_cluster = LocalCluster(n_workers=0, ip='', **kwargs)

        if script is None:
            # NOTE(review): tempfile.mktemp is race-prone (the name can be
            # claimed between creation and open); mkstemp would be safer —
            # confirm before changing, SGE needs the file in cwd.
            fn = tempfile.mktemp(suffix='sh',
                                 prefix='dask-worker-script',
                                 dir=os.path.curdir)
            self.script = fn
            self._should_cleanup_script = True

            script_contents = make_job_script(executable=worker_bin_path,
                                              name='%s.%s' % (JOB_ID, TASK_ID),
                                              preexec=preexec_commands)
            with open(fn, 'wt') as f:
                f.write(script_contents)

            # Remove the generated script on interpreter exit.
            @atexit.register
            def remove_script():
                if os.path.exists(fn):
                    os.remove(fn)

            os.chmod(self.script, 0o777)

        else:
            self._should_cleanup_script = False
            if copy_script:
                with ignoring(EnvironmentError):  # may be in the same path
                    shutil.copy(script, os.path.curdir)  # python 2.x returns None
                    script = os.path.join(os.path.curdir,
                                          os.path.basename(script))
                    self._should_cleanup_script = True
            self.script = script
            assert not preexec_commands, "Cannot specify both script and preexec_commands"

        # TODO: check that user-provided script is executable

        self.template = merge(default_template,
                              {'remoteCommand': self.script},
                              template or {})

        # Periodically reap DRMAA jobs that have finished.
        self._cleanup_callback = PeriodicCallback(
            callback=self.cleanup_closed_workers,
            callback_time=cleanup_interval,
            io_loop=self.scheduler.loop)
        self._cleanup_callback.start()

        self.workers = {}  # {job-id: WorkerSpec}

    @gen.coroutine
    def _start(self):
        # No-op; present for Cluster interface compatibility.
        pass

    @property
    def scheduler(self):
        return self.local_cluster.scheduler

    @property
    def scheduler_address(self):
        return self.scheduler.address

    def create_job_template(self, **kwargs):
        """Build a DRMAA job template from self.template plus overrides."""
        template = self.template.copy()
        if kwargs:
            template.update(kwargs)
        # First positional argument is always the scheduler address.
        template['args'] = [self.scheduler_address] + template['args']

        jt = get_session().createJobTemplate()
        valid_attributes = dir(jt)

        for key, value in template.items():
            if key not in valid_attributes:
                raise ValueError("Invalid job template attribute %s" % key)
            setattr(jt, key, value)

        return jt

    def start_workers(self, n=1, **kwargs):
        """Submit ``n`` dask-worker jobs as a DRMAA bulk job."""
        with log_errors():
            with self.create_job_template(**kwargs) as jt:
                ids = get_session().runBulkJobs(jt, 1, n, 1)
                logger.info("Start %d workers. Job ID: %s", len(ids),
                            ids[0].split('.')[0])
                self.workers.update({
                    jid: WorkerSpec(
                        job_id=jid,
                        kwargs=kwargs,
                        stdout=worker_out_path_template % dict(jid=jid,
                                                               kind='out'),
                        stderr=worker_out_path_template % dict(jid=jid,
                                                               kind='err'),
                    )
                    for jid in ids
                })

    @gen.coroutine
    def stop_workers(self, worker_ids, sync=False):
        """Retire workers via the scheduler, then terminate their jobs."""
        if isinstance(worker_ids, str):
            worker_ids = [worker_ids]
        else:
            worker_ids = list(worker_ids)

        # Let the scheduler gracefully retire workers first
        ids_to_ips = {
            v['name']: k
            for k, v in self.scheduler.worker_info.items()
        }
        worker_ips = [
            ids_to_ips[wid]
            for wid in worker_ids
            if wid in ids_to_ips
        ]
        retired = yield self.scheduler.retire_workers(workers=worker_ips,
                                                      close_workers=True)
        logger.info("Retired workers %s", retired)

        for wid in list(worker_ids):
            try:
                get_session().control(wid, drmaa.JobControlAction.TERMINATE)
            except drmaa.errors.InvalidJobException:
                pass
            try:
                self.workers.pop(wid)
            except KeyError:
                # If we have multiple callers at once, it may have already
                # been popped off
                pass

        logger.info("Stop workers %s", worker_ids)
        if sync:
            get_session().synchronize(worker_ids, dispose=True)

    @gen.coroutine
    def scale_up(self, n, **kwargs):
        # Launch enough workers to bring the total up to n.
        yield [
            self.start_workers(**kwargs)
            for _ in range(n - len(self.workers))
        ]

    @gen.coroutine
    def scale_down(self, workers):
        workers = set(workers)
        yield self.scheduler.retire_workers(workers=workers)

    def close(self):
        """Stop all workers, close the local cluster and clean up."""
        logger.info("Closing DRMAA cluster")
        if self.workers:
            self.stop_workers(self.workers, sync=True)

        self.local_cluster.close()
        if self._should_cleanup_script and os.path.exists(self.script):
            os.remove(self.script)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def cleanup_closed_workers(self):
        # Periodic reaper: drop bookkeeping for jobs DRMAA reports done.
        for jid in list(self.workers):
            if get_session().jobStatus(jid) in ('closed', 'done'):
                logger.info("Removing closed worker %s", jid)
                del self.workers[jid]

    def __del__(self):
        # NOTE(review): bare except deliberately swallows all errors during
        # interpreter teardown, when globals may already be gone.
        try:
            self.close()
        except:
            pass

    def __str__(self):
        return "<%s: %d workers>" % (self.__class__.__name__,
                                     len(self.workers))

    __repr__ = __str__
class EventHandler:
    """Receives celery events and dispatches them to per-task listeners."""

    # Interval between enable_events broadcasts, in milliseconds
    # (tornado's PeriodicCallback takes milliseconds).
    events_enable_interval = 5000

    # Maximum number of finished items to keep track of
    max_finished_history = 1000

    # celery events that represent a task finishing
    finished_events = (
        'task-succeeded',
        'task-failed',
        'task-rejected',
        'task-revoked',
    )

    def __init__(self, capp, io_loop):
        """Monitors events that are received from celery.

        capp - The celery app
        io_loop - The event loop to use for dispatch
        """
        super().__init__()

        self.capp = capp
        self.timer = PeriodicCallback(self.on_enable_events,
                                      self.events_enable_interval)
        self.monitor = EventMonitor(self.capp, io_loop)
        # Maps task_id -> callback awaiting that task's events.
        self.listeners = {}
        # Bounded cache of events for already-finished tasks.
        self.finished_tasks = LRUCache(self.max_finished_history)

    @tornado.gen.coroutine
    def start(self):
        """Start event handler.

        Expects to be run as a coroutine.
        """
        self.timer.start()

        logger.debug('Starting celery monitor thread')
        self.monitor.start()

        while True:
            evt = yield self.monitor.events.get()

            # Events without a task id cannot be dispatched; skip them.
            if 'uuid' not in evt:
                continue
            task_id = evt['uuid']

            # Record finished tasks in-case they are requested
            # too late or are re-requested.
            if evt['type'] in self.finished_events:
                self.finished_tasks[task_id] = evt

            if task_id in self.listeners:
                self.listeners[task_id](evt)

    def stop(self):
        """Stop the periodic enable-events broadcast."""
        self.timer.stop()
        # FIXME: can not be stopped gracefully
        # self.monitor.stop()

    def on_enable_events(self):
        """Called periodically to enable events for workers
        launched after the monitor.
        """
        try:
            self.capp.control.enable_events()
        except Exception as e:
            logger.debug('Failed to enable events: %s', e)

    def add_listener(self, task_id, callback):
        """Add event listener for a task with ID `task_id`."""
        if task_id in self.finished_tasks:
            # Task has already finished; fire the callback immediately.
            callback(self.finished_tasks[task_id])
        else:
            self.listeners[task_id] = callback

    def remove_listener(self, task_id):
        """Remove listener for `task_id`."""
        # The listener may never have been registered (e.g. the task had
        # already finished when add_listener ran), so tolerate missing keys.
        self.listeners.pop(task_id, None)
class Reader(Client):
    r"""
    Reader provides high-level functionality for building robust NSQ consumers in Python
    on top of the async module.

    Reader receives messages over the specified ``topic/channel`` and calls
    ``message_handler`` for each message (up to ``max_tries``).

    Multiple readers can be instantiated in a single process (to consume from multiple
    topics/channels at once).

    Supports various hooks to modify behavior when heartbeats are received, to temporarily
    disable the reader, and pre-process/validate messages.

    When supplied a list of ``nsqlookupd`` addresses, it will periodically poll those
    addresses to discover new producers of the specified ``topic``.

    It maintains a sufficient RDY count based on the # of producers and your configured
    ``max_in_flight``.

    Handlers should be defined as shown in the examples below. The ``message_handler``
    callback function receives a :class:`nsq.Message` object that has instance methods
    :meth:`nsq.Message.finish`, :meth:`nsq.Message.requeue`, and :meth:`nsq.Message.touch`
    which can be used to respond to ``nsqd``.

    As an alternative to explicitly calling these response methods, the handler function
    can simply return ``True`` to finish the message, or ``False`` to requeue it. If the
    handler function calls :meth:`nsq.Message.enable_async`, then automatic finish/requeue
    is disabled, allowing the :class:`nsq.Message` to finish or requeue in a later async
    callback or context. The handler function may also be a coroutine, in which case
    Message async handling is enabled automatically, but the coroutine can still return a
    final value of True/False to automatically finish/requeue the message.

    After re-queueing a message, the handler will backoff from processing additional messages
    for an increasing delay (calculated exponentially based on consecutive failures up to
    ``max_backoff_duration``).

    Synchronous example::

        import nsq

        def handler(message):
            print(message)
            return True

        r = nsq.Reader(message_handler=handler,
                       lookupd_http_addresses=['http://127.0.0.1:4161'],
                       topic='nsq_reader', channel='asdf', lookupd_poll_interval=15)
        nsq.run()

    Asynchronous example::

        import nsq

        buf = []

        def process_message(message):
            global buf
            message.enable_async()
            # cache the message for later processing
            buf.append(message)
            if len(buf) >= 3:
                for msg in buf:
                    print(msg)
                    msg.finish()
                buf = []
            else:
                print('deferring processing')

        r = nsq.Reader(message_handler=process_message,
                       lookupd_http_addresses=['http://127.0.0.1:4161'],
                       topic='nsq_reader', channel='async', max_in_flight=9)
        nsq.run()

    :param message_handler: the callable that will be executed for each message received

    :param topic: specifies the desired NSQ topic

    :param channel: specifies the desired NSQ channel

    :param name: a string that is used for logging messages (defaults to 'topic:channel')

    :param nsqd_tcp_addresses: a sequence of string addresses of the nsqd instances this reader
        should connect to

    :param lookupd_http_addresses: a sequence of string addresses of the nsqlookupd instances this
        reader should query for producers of the specified topic

    :param max_tries: the maximum number of attempts the reader will make to process a message
        after which messages will be automatically discarded

    :param max_in_flight: the maximum number of messages this reader will pipeline for processing.
        this value will be divided evenly amongst the configured/discovered nsqd producers

    :param lookupd_poll_interval: the amount of time in seconds between querying all of the
        supplied nsqlookupd instances.  a random amount of time based on this value will be
        initially introduced in order to add jitter when multiple readers are running

    :param lookupd_poll_jitter: The maximum fractional amount of jitter to add to the
        lookupd poll loop. This helps evenly distribute requests even if multiple consumers
        restart at the same time.

    :param lookupd_connect_timeout: the amount of time in seconds to wait for
        a connection to ``nsqlookupd`` to be established

    :param lookupd_request_timeout: the amount of time in seconds to wait for
        a request to ``nsqlookupd`` to complete.

    :param low_rdy_idle_timeout: the amount of time in seconds to wait for a message from a
        producer when in a state where RDY counts are re-distributed
        (ie. max_in_flight < num_producers)

    :param max_backoff_duration: the maximum time we will allow a backoff state to last in seconds

    :param \*\*kwargs: passed to :class:`nsq.AsyncConn` initialization
    """
    def __init__(
            self,
            topic,
            channel,
            message_handler=None,
            name=None,
            nsqd_tcp_addresses=None,
            lookupd_http_addresses=None,
            max_tries=5,
            max_in_flight=1,
            lookupd_poll_interval=60,
            low_rdy_idle_timeout=10,
            max_backoff_duration=128,
            lookupd_poll_jitter=0.3,
            lookupd_connect_timeout=1,
            lookupd_request_timeout=2,
            **kwargs):
        super(Reader, self).__init__(**kwargs)

        assert isinstance(topic, string_types) and len(topic) > 0
        assert isinstance(channel, string_types) and len(channel) > 0
        assert isinstance(max_in_flight, int) and max_in_flight > 0
        assert isinstance(max_backoff_duration, (int, float)) and max_backoff_duration > 0
        assert isinstance(name, string_types + (None.__class__,))
        assert isinstance(lookupd_poll_interval, int)
        assert isinstance(lookupd_poll_jitter, float)
        assert isinstance(lookupd_connect_timeout, int)
        assert isinstance(lookupd_request_timeout, int)
        assert lookupd_poll_jitter >= 0 and lookupd_poll_jitter <= 1

        # Normalize single-address strings to lists.
        if nsqd_tcp_addresses:
            if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
                assert isinstance(nsqd_tcp_addresses, string_types)
                nsqd_tcp_addresses = [nsqd_tcp_addresses]
        else:
            nsqd_tcp_addresses = []

        if lookupd_http_addresses:
            if not isinstance(lookupd_http_addresses, (list, set, tuple)):
                assert isinstance(lookupd_http_addresses, string_types)
                lookupd_http_addresses = [lookupd_http_addresses]
            random.shuffle(lookupd_http_addresses)
        else:
            lookupd_http_addresses = []

        assert nsqd_tcp_addresses or lookupd_http_addresses

        self.name = name or (topic + ':' + channel)
        self.message_handler = None
        if message_handler:
            self.set_message_handler(message_handler)
        self.topic = topic
        self.channel = channel
        self.nsqd_tcp_addresses = nsqd_tcp_addresses
        self.lookupd_http_addresses = lookupd_http_addresses
        self.lookupd_query_index = 0
        self.max_tries = max_tries
        self.max_in_flight = max_in_flight
        self.low_rdy_idle_timeout = low_rdy_idle_timeout
        self.total_rdy = 0
        self.need_rdy_redistributed = False
        self.lookupd_poll_interval = lookupd_poll_interval
        self.lookupd_poll_jitter = lookupd_poll_jitter
        self.lookupd_connect_timeout = lookupd_connect_timeout
        self.lookupd_request_timeout = lookupd_request_timeout
        self.random_rdy_ts = time.time()

        # Verify keyword arguments
        valid_args = func_args(AsyncConn.__init__)
        diff = set(kwargs) - set(valid_args)
        assert len(diff) == 0, 'Invalid keyword argument(s): %s' % list(diff)

        self.conn_kwargs = kwargs
        self.backoff_timer = BackoffTimer(0, max_backoff_duration)
        self.backoff_block = False
        self.backoff_block_completed = True

        self.conns = {}
        self.connection_attempts = {}
        self.http_client = tornado.httpclient.AsyncHTTPClient()

        # will execute when run() is called (for all Reader instances)
        self.io_loop.add_callback(self._run)

        self.redist_periodic = None
        self.query_periodic = None

    def _run(self):
        assert self.message_handler, "you must specify the Reader's message_handler"

        logger.info('[%s] starting reader for %s/%s...', self.name, self.topic, self.channel)

        for addr in self.nsqd_tcp_addresses:
            address, port = addr.split(':')
            self.connect_to_nsqd(address, int(port))

        self.redist_periodic = PeriodicCallback(
            self._redistribute_rdy_state,
            5 * 1000,
        )
        self.redist_periodic.start()

        if not self.lookupd_http_addresses:
            return

        # trigger the first lookup query manually
        self.io_loop.spawn_callback(self.query_lookupd)

        self.query_periodic = PeriodicCallback(
            self.query_lookupd,
            self.lookupd_poll_interval * 1000,
        )

        # randomize the time we start this poll loop so that all
        # consumers don't query at exactly the same time
        delay = random.random() * self.lookupd_poll_interval * self.lookupd_poll_jitter
        self.io_loop.call_later(delay, self.query_periodic.start)

    def close(self):
        """
        Closes all connections and stops all periodic callbacks
        """
        for conn in self.conns.values():
            conn.close()

        self.redist_periodic.stop()
        if self.query_periodic is not None:
            self.query_periodic.stop()

    def set_message_handler(self, message_handler):
        """
        Assigns the callback method to be executed for each message received

        :param message_handler: a callable that takes a single argument
        """
        assert callable(message_handler), 'message_handler must be callable'
        self.message_handler = message_handler

    def _connection_max_in_flight(self):
        # Divide the configured max_in_flight evenly over the current conns
        # (never below 1 per connection).
        return max(1, self.max_in_flight // max(1, len(self.conns)))

    def is_starved(self):
        """
        Used to identify when buffered messages should be processed and responded to.

        When max_in_flight > 1 and you're batching messages together to perform work
        it isn't possible to just compare the len of your list of buffered messages against
        your configured max_in_flight (because max_in_flight may not be evenly divisible
        by the number of producers you're connected to, ie. you might never get that many
        messages... it's a *max*).

        Example::

            def message_handler(self, nsq_msg, reader):
                # buffer messages
                if reader.is_starved():
                    # perform work

            reader = nsq.Reader(...)
            reader.set_message_handler(functools.partial(message_handler, reader=reader))
            nsq.run()
        """
        for conn in itervalues(self.conns):
            if conn.in_flight > 0 and conn.in_flight >= (conn.last_rdy * 0.85):
                return True
        return False

    def _on_message(self, conn, message, **kwargs):
        try:
            self._handle_message(conn, message)
        except Exception:
            logger.exception('[%s:%s] failed to handle_message() %r', conn.id, self.name, message)

    def _handle_message(self, conn, message):
        self._maybe_update_rdy(conn)

        result = False
        try:
            if 0 < self.max_tries < message.attempts:
                self.giving_up(message)
                return message.finish()
            pre_processed_message = self.preprocess_message(message)
            if not self.validate_message(pre_processed_message):
                return message.finish()
            result = self.process_message(message)
        except Exception:
            logger.exception('[%s:%s] uncaught exception while handling message %s body:%r',
                             conn.id, self.name, message.id, message.body)
            if not message.has_responded():
                return message.requeue()

        if result not in (True, False, None):
            # assume handler returned a Future or Coroutine
            message.enable_async()
            fut = tornado.gen.convert_yielded(result)
            fut.add_done_callback(functools.partial(self._maybe_finish, message))
        elif not message.is_async() and not message.has_responded():
            assert result is not None, 'ambiguous return value for synchronous mode'
            if result:
                return message.finish()
            return message.requeue()

    def _maybe_finish(self, message, fut):
        if not message.has_responded():
            try:
                if fut.result():
                    message.finish()
                    return
            except Exception:
                pass
            message.requeue()

    def _maybe_update_rdy(self, conn):
        if self.backoff_timer.get_interval() or self.max_in_flight == 0:
            return

        # Update RDY in 2 cases:
        #     1. On a new connection or in backoff we start with a tentative RDY
        #        count of 1.  After successfully receiving a first message we go to
        #        full throttle.
        #     2. After a change in connection count or max_in_flight we adjust to the new
        #        connection_max_in_flight.
        conn_max_in_flight = self._connection_max_in_flight()
        if conn.rdy == 1 or conn.rdy != conn_max_in_flight:
            self._send_rdy(conn, conn_max_in_flight)

    def _finish_backoff_block(self):
        self.backoff_block = False

        # we must have raced and received a message out of order that resumed
        # so just complete the backoff block
        if not self.backoff_timer.get_interval():
            self._complete_backoff_block()
            return

        # test the waters after finishing a backoff round
        # if we have no connections, this will happen when a new connection gets RDY 1
        if not self.conns or self.max_in_flight == 0:
            return

        conn = random.choice(list(self.conns.values()))
        logger.info('[%s:%s] testing backoff state with RDY 1', conn.id, self.name)
        self._send_rdy(conn, 1)

        # for tests
        return conn

    def _on_backoff_resume(self, success, **kwargs):
        if success:
            self.backoff_timer.success()
        elif success is False and not self.backoff_block:
            self.backoff_timer.failure()

        self._enter_continue_or_exit_backoff()

    def _complete_backoff_block(self):
        self.backoff_block_completed = True
        rdy = self._connection_max_in_flight()
        logger.info('[%s] backoff complete, resuming normal operation (%d connections)',
                    self.name, len(self.conns))
        for c in self.conns.values():
            self._send_rdy(c, rdy)

    def _enter_continue_or_exit_backoff(self):
        # Take care of backoff in the appropriate cases.  When this
        # happens, we set a failure on the backoff timer and set the RDY count to zero.
        # Once the backoff time has expired, we allow *one* of the connections let
        # a single message through to test the water.  This will continue until we
        # reach no backoff in which case we go back to the normal RDY count.

        current_backoff_interval = self.backoff_timer.get_interval()

        # do nothing
        if self.backoff_block:
            return

        # we're out of backoff completely, return to full blast for all conns
        if not self.backoff_block_completed and not current_backoff_interval:
            self._complete_backoff_block()
            return

        # enter or continue a backoff iteration
        if current_backoff_interval:
            self._start_backoff_block()

    def _start_backoff_block(self):
        self.backoff_block = True
        self.backoff_block_completed = False
        backoff_interval = self.backoff_timer.get_interval()

        logger.info('[%s] backing off for %0.2f seconds (%d connections)',
                    self.name, backoff_interval, len(self.conns))
        for c in self.conns.values():
            self._send_rdy(c, 0)

        self.io_loop.call_later(backoff_interval, self._finish_backoff_block)

    def _rdy_retry(self, conn, value):
        conn.rdy_timeout = None
        self._send_rdy(conn, value)

    def _send_rdy(self, conn, value):
        if conn.rdy_timeout:
            self.io_loop.remove_timeout(conn.rdy_timeout)
            conn.rdy_timeout = None

        if value and (self.disabled() or self.max_in_flight == 0):
            logger.info('[%s:%s] disabled, delaying RDY state change', conn.id, self.name)
            rdy_retry_callback = functools.partial(self._rdy_retry, conn, value)
            conn.rdy_timeout = self.io_loop.call_later(15, rdy_retry_callback)
            return

        if value > conn.max_rdy_count:
            value = conn.max_rdy_count

        new_rdy = max(self.total_rdy - conn.rdy + value, 0)
        if conn.send_rdy(value):
            self.total_rdy = new_rdy

    def connect_to_nsqd(self, host, port):
        """
        Adds a connection to ``nsqd`` at the specified address.

        :param host: the address to connect to
        :param port: the port to connect to
        """
        assert isinstance(host, string_types)
        assert isinstance(port, int)

        conn = AsyncConn(host, port, **self.conn_kwargs)
        conn.on('identify', self._on_connection_identify)
        conn.on('identify_response', self._on_connection_identify_response)
        conn.on('auth', self._on_connection_auth)
        conn.on('auth_response', self._on_connection_auth_response)
        conn.on('error', self._on_connection_error)
        conn.on('close', self._on_connection_close)
        conn.on('ready', self._on_connection_ready)
        conn.on('message', self._on_message)
        conn.on('heartbeat', self._on_heartbeat)
        conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
        conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
        conn.on('continue', functools.partial(self._on_backoff_resume, success=None))

        if conn.id in self.conns:
            return

        # only attempt to re-connect once every 10s per destination
        # this throttles reconnects to failed endpoints
        now = time.time()
        last_connect_attempt = self.connection_attempts.get(conn.id)
        if last_connect_attempt and last_connect_attempt > now - 10:
            return
        self.connection_attempts[conn.id] = now

        logger.info('[%s:%s] connecting to nsqd', conn.id, self.name)
        conn.connect()

        return conn

    def _on_connection_ready(self, conn, **kwargs):
        conn.send(protocol.subscribe(self.topic, self.channel))

        # re-check to make sure another connection didn't beat this one done
        if conn.id in self.conns:
            logger.warning(
                '[%s:%s] connected to NSQ but another matching connection already exists',
                conn.id, self.name)
            conn.close()
            return

        if conn.max_rdy_count < self.max_in_flight:
            logger.warning(
                '[%s:%s] max RDY count %d < reader max in flight %d, truncation possible',
                conn.id, self.name, conn.max_rdy_count, self.max_in_flight)

        self.conns[conn.id] = conn

        conn_max_in_flight = self._connection_max_in_flight()
        for c in self.conns.values():
            if c.rdy > conn_max_in_flight:
                self._send_rdy(c, conn_max_in_flight)

        # we send an initial RDY of 1 up to our configured max_in_flight
        # this resolves two cases:
        #    1. `max_in_flight >= num_conns` ensuring that no connections are ever
        #       *initially* starved since redistribute won't apply
        #    2. `max_in_flight < num_conns` ensuring that we never exceed max_in_flight
        #       and rely on the fact that redistribute will handle balancing RDY across conns
        if not self.backoff_timer.get_interval() or len(self.conns) == 1:
            # only send RDY 1 if we're not in backoff (some other conn
            # should be testing the waters)
            # (but always send it if we're the first)
            self._send_rdy(conn, 1)

    def _on_connection_close(self, conn, **kwargs):
        if conn.id in self.conns:
            del self.conns[conn.id]

        self.total_rdy = max(self.total_rdy - conn.rdy, 0)

        logger.warning('[%s:%s] connection closed', conn.id, self.name)

        if (conn.rdy_timeout or conn.rdy) and \
                (len(self.conns) == self.max_in_flight or self.backoff_timer.get_interval()):
            # we're toggling out of (normal) redistribution cases and this conn
            # had a RDY count...
            #
            # trigger RDY redistribution to make sure this RDY is moved
            # to a new connection
            self.need_rdy_redistributed = True

        if conn.rdy_timeout:
            self.io_loop.remove_timeout(conn.rdy_timeout)
            conn.rdy_timeout = None

        if not self.lookupd_http_addresses:
            # automatically reconnect to nsqd addresses when not using lookupd
            logger.info('[%s:%s] attempting to reconnect in 15s', conn.id, self.name)
            reconnect_callback = functools.partial(self.connect_to_nsqd,
                                                   host=conn.host, port=conn.port)
            self.io_loop.call_later(15, reconnect_callback)

    @tornado.gen.coroutine
    def query_lookupd(self):
        """
        Trigger a query of the configured ``nsq_lookupd_http_addresses``.
        """
        endpoint = self.lookupd_http_addresses[self.lookupd_query_index]
        self.lookupd_query_index = (self.lookupd_query_index + 1) % len(self.lookupd_http_addresses)

        # urlsplit() is faulty if scheme not present
        if '://' not in endpoint:
            endpoint = 'http://' + endpoint

        scheme, netloc, path, query, fragment = urlparse.urlsplit(endpoint)
        if not path or path == "/":
            path = "/lookup"

        params = parse_qs(query)
        params['topic'] = self.topic
        query = urlencode(_utf8_params(params), doseq=1)
        lookupd_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))

        req = tornado.httpclient.HTTPRequest(
            lookupd_url, method='GET',
            headers={'Accept': 'application/vnd.nsq; version=1.0'},
            connect_timeout=self.lookupd_connect_timeout,
            request_timeout=self.lookupd_request_timeout)

        try:
            response = yield self.http_client.fetch(req)
        except Exception as e:
            logger.warning('[%s] lookupd %s query error: %s',
                           self.name, lookupd_url, e)
            return

        try:
            lookup_data = json.loads(response.body.decode("utf8"))
        except ValueError:
            logger.warning('[%s] lookupd %s failed to parse JSON: %r',
                           self.name, lookupd_url, response.body)
            return

        for producer in lookup_data['producers']:
            # TODO: this can be dropped for 1.0
            address = producer.get('broadcast_address', producer.get('address'))
            assert address
            self.connect_to_nsqd(address, producer['tcp_port'])

    def set_max_in_flight(self, max_in_flight):
        """Dynamically adjust the reader max_in_flight. Set to 0 to immediately disable a Reader"""
        assert isinstance(max_in_flight, int)
        self.max_in_flight = max_in_flight

        if max_in_flight == 0:
            # set RDY 0 to all connections
            for conn in itervalues(self.conns):
                if conn.rdy > 0:
                    logger.debug('[%s:%s] rdy: %d -> 0', conn.id, self.name, conn.rdy)
                    self._send_rdy(conn, 0)
            self.total_rdy = 0
        else:
            self.need_rdy_redistributed = True
            self._redistribute_rdy_state()

    def _redistribute_rdy_state(self):
        # We redistribute RDY counts in a few cases:
        #
        # 1. our # of connections exceeds our configured max_in_flight
        # 2. we're in backoff mode (but not in a current backoff block)
        # 3. something out-of-band has set the need_rdy_redistributed flag (connection closed
        #    that was about to get RDY during backoff)
        #
        # At a high level, we're trying to mitigate stalls related to low-volume
        # producers when we're unable (by configuration or backoff) to provide a RDY count
        # of (at least) 1 to all of our connections.
        if not self.conns:
            return

        if self.disabled() or self.backoff_block or self.max_in_flight == 0:
            return

        if len(self.conns) > self.max_in_flight:
            self.need_rdy_redistributed = True
            logger.debug('redistributing RDY state (%d conns > %d max_in_flight)',
                         len(self.conns), self.max_in_flight)

        backoff_interval = self.backoff_timer.get_interval()
        if backoff_interval and len(self.conns) > 1:
            self.need_rdy_redistributed = True
            logger.debug('redistributing RDY state (%d backoff interval and %d conns > 1)',
                         backoff_interval, len(self.conns))

        if self.need_rdy_redistributed:
            self.need_rdy_redistributed = False

            # first set RDY 0 to all connections that have not received a message within
            # a configurable timeframe (low_rdy_idle_timeout).
            for conn_id, conn in iteritems(self.conns):
                last_message_duration = time.time() - conn.last_msg_timestamp
                logger.debug('[%s:%s] rdy: %d (last message received %.02fs)',
                             conn.id, self.name, conn.rdy, last_message_duration)
                if conn.rdy > 0 and last_message_duration > self.low_rdy_idle_timeout:
                    logger.info('[%s:%s] idle connection, giving up RDY count',
                                conn.id, self.name)
                    self._send_rdy(conn, 0)

            conns = self.conns.values()

            in_flight_or_rdy = len([c for c in conns if c.in_flight or c.rdy])
            if backoff_interval:
                available_rdy = max(0, 1 - in_flight_or_rdy)
            else:
                available_rdy = max(0, self.max_in_flight - in_flight_or_rdy)

            # if moving any connections from RDY 0 to non-0 would violate in-flight constraints,
            # set RDY 0 on some connection with msgs in flight so that a later redistribution
            # round can proceed and we don't stay pinned to the same connections.
            #
            # if nothing's in flight, then we have connections with RDY 1 that are still
            # waiting to hit the idle timeout, in which case it's ok to do nothing.
            in_flight = [c for c in conns if c.in_flight]
            if in_flight and not available_rdy:
                conn = random.choice(in_flight)
                logger.info('[%s:%s] too many msgs in flight, giving up RDY count',
                            conn.id, self.name)
                self._send_rdy(conn, 0)

            # randomly walk the list of possible connections and send RDY 1 (up to our
            # calculated "max_in_flight").  We only need to send RDY 1 because in both
            # cases described above your per connection RDY count would never be higher.
            #
            # We also don't attempt to avoid the connections who previously might have had RDY 1
            # because it would be overly complicated and not actually worth it (ie. given enough
            # redistribution rounds it doesn't matter).
            possible_conns = [c for c in conns if not (c.in_flight or c.rdy)]
            while possible_conns and available_rdy:
                available_rdy -= 1
                conn = possible_conns.pop(random.randrange(len(possible_conns)))
                logger.info('[%s:%s] redistributing RDY', conn.id, self.name)
                self._send_rdy(conn, 1)

            # for tests
            return conn

    #
    # subclass overwriteable
    #

    def process_message(self, message):
        """
        Called when a message is received in order to execute the configured ``message_handler``

        This is useful to subclass and override if you want to change how your
        message handlers are called.

        :param message: the :class:`nsq.Message` received
        """
        return self.message_handler(message)

    def giving_up(self, message):
        """
        Called when a message has been received where ``msg.attempts > max_tries``

        This is useful to subclass and override to perform a task (such as writing to disk, etc.)

        :param message: the :class:`nsq.Message` received
        """
        logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r',
                       self.name, message.id, message.attempts, self.max_tries, message.body)

    def _on_connection_identify_response(self, conn, data, **kwargs):
        if not hasattr(self, '_disabled_notice'):
            self._disabled_notice = True

            def semver(v):
                def cast(x):
                    try:
                        return int(x)
                    except Exception:
                        return x
                return [cast(x) for x in v.replace('-', '.').split('.')]

            if self.disabled.__code__ != Reader.disabled.__code__ and \
                    semver(data['version']) >= semver('0.3'):
                warnings.warn('disabled() is deprecated and will be removed in a future release, '
                              'use set_max_in_flight(0) instead', DeprecationWarning)
        return super(Reader, self)._on_connection_identify_response(conn, data, **kwargs)

    @classmethod
    def disabled(cls):
        """
        Called as part of RDY handling to identify whether this Reader has been disabled

        This is useful to subclass and override to examine a file on disk or a key in cache
        to identify if this reader should pause execution (during a deploy, etc.).

        Note: deprecated. Use set_max_in_flight(0)
        """
        return False

    def validate_message(self, message):
        return True

    def preprocess_message(self, message):
        return message
class WSHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler that streams robot telemetry to the browser every
    100 ms and applies setpoint / PID-gain updates received from the client.

    State is shared with the control loop through module-level globals
    (x1, x2, Kp2, ... commandeDroit, commandeGauche, socketOK, ...).
    """

    def open(self):
        global socketOK
        print('connection opened...')
        socketOK = True
        # Push a telemetry sample to the client every 100 ms.
        self.callback = PeriodicCallback(self.sendToSocket, 100)
        self.callback.start()

    def on_message(self, message):
        """Update setpoints and controller gains from a JSON message."""
        global x1, x2, Kp2, Ki2, Kd2, Kpxi2, Kixi2, Kdxi2
        jsonMessage = json.loads(message)

        if jsonMessage.get('vref') is not None:
            # longitudinal speed setpoint
            x1 = float(jsonMessage.get('vref'))
        if jsonMessage.get('psidotref') is not None:
            # rotation-rate setpoint: client sends deg/s, controller uses rad/s
            x2 = float(jsonMessage.get('psidotref')) * 3.141592 / 180
        if jsonMessage.get('Kp2ref') is not None:
            Kp2 = float(jsonMessage.get('Kp2ref'))
        if jsonMessage.get('Ki2ref') is not None:
            Ki2 = float(jsonMessage.get('Ki2ref'))
        if jsonMessage.get('Kd2ref') is not None:
            Kd2 = float(jsonMessage.get('Kd2ref'))
        if jsonMessage.get('Kpxi2ref') is not None:
            Kpxi2 = float(jsonMessage.get('Kpxi2ref'))
        if jsonMessage.get('Kixi2ref') is not None:
            Kixi2 = float(jsonMessage.get('Kixi2ref'))
        if jsonMessage.get('Kdxi2ref') is not None:
            Kdxi2 = float(jsonMessage.get('Kdxi2ref'))

    def on_close(self):
        global socketOK, commandeDroit, commandeGauche
        print('connection closed...')
        # Stop streaming telemetry to the closed socket.
        self.callback.stop()
        socketOK = False
        # Stop the motors when the client goes away.
        commandeDroit = 0.
        commandeGauche = 0.

    def sendToSocket(self):
        """Sample the current state and push it to the client as JSON."""
        global started, codeurDroitDeltaPos, codeurGaucheDeltaPos, socketOK, commandeDroit, commandeGauche, vxref, xidotref, vxmes, xidotmes, interruptKO

        if interruptKO:
            # Encoder interrupts were lost; re-arm them.
            noInterrupts()  # disable interrupts while re-attaching
            attachInterrupt(0, GestionInterruptionCodeurDroitPinA, RISING)
            attachInterrupt(1, GestionInterruptionCodeurGauchePinA, RISING)
            interrupts()  # re-enable interrupts
            interruptKO = False

        tcourant = time.time() - T0
        aEnvoyer = json.dumps({
            'Temps': ("%.2f" % tcourant),
            'Consigne vitesse longitudinale': ("%.2f" % x1),
            'Consigne vitesse de rotation': ("%.2f" % x2),
            'Vitesse longitudinale': ("%.2f" % vxmes),
            'Vitesse de rotation': ("%.2f" % (180 * xidotmes / 3.141592)),
            'omegaDroit': ("%.2f" % omegaDroit),
            'omegaGauche': ("%.2f" % omegaGauche),
            'commandeDroit': ("%.2f" % commandeDroit),
            'commandeGauche': ("%.2f" % commandeGauche),
            'Raw': ("%.2f" % tcourant) + "," + ("%.2f" % x1) + "," + ("%.2f" % x2) + "," + ("%.2f" % vxmes) + "," + ("%.2f" % (180 * xidotmes / 3.141592)) + "," + ("%.2f" % omegaDroit) + "," + ("%.2f" % omegaGauche) + "," + ("%.2f" % commandeDroit) + "," + ("%.2f" % commandeGauche)
        })
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except Exception:
                # Client may have disconnected mid-send; drop this sample.
                pass

    def check_origin(self, origin):
        # See http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        # NOTE(review): returning True disables origin checking entirely
        # (acceptable on a LAN-only robot, unsafe on a public host).
        return True
if __name__ == '__main__':
    model = SimulatedPricingData()

    # Tornado web application serving the dashboard page and its data feed.
    app = Application([
        ("/", IndexHandler, dict(html=build_html())),
        ("/data", DataHandler, dict(data=model.data)),
    ])

    # Setup the server with a dynamically chosen port (port 0 lets the OS pick).
    sockets = bind_sockets(0, '')
    server = HTTPServer(app)
    server.add_sockets(sockets)

    # Determine the dynamically chosen address
    host = socket.gethostbyname(socket.gethostname())
    port = sockets[0].getsockname()[1]
    address = 'http://%s:%s' % (host, port)
    print('Listening on %r' % address)

    # Register the page with the Skein Web UI.
    # This is the only Skein-specific bit.
    # Use a distinct name so the tornado `Application` above isn't shadowed.
    skein_app = skein.ApplicationClient.from_current()
    skein_app.ui.add_page('price-dashboard', address, link_name='Price Dashboard')

    # Register a callback to update the plot every INTERVAL milliseconds
    pc = PeriodicCallback(model.update, INTERVAL)
    pc.start()

    # Start the server
    IOLoop.current().start()
class Adaptive(object):
    ''' Adaptively allocate workers based on scheduler load.  A superclass.

    Periodically inspects the scheduler and asks the attached cluster object
    to grow or shrink the worker pool accordingly.

    Parameters
    ----------
    scheduler: distributed.Scheduler

    cluster: object
        Must have scale_up and scale_down methods/coroutines

    Examples
    --------
    >>> class MyCluster(object):
    ...     def scale_up(self, n):
    ...         """ Bring worker count up to n """
    ...     def scale_down(self, workers):
    ...         """ Remove worker addresses from cluster """
    '''
    def __init__(self, scheduler, cluster, interval=1000, startup_cost=1):
        self.scheduler = scheduler
        self.cluster = cluster
        self.startup_cost = startup_cost
        self._adapting = False
        # Re-evaluate cluster size every `interval` ms on the scheduler's loop.
        self._adapt_callback = PeriodicCallback(self._adapt, interval,
                                                self.scheduler.loop)
        self._adapt_callback.start()

    def should_scale_up(self):
        """Return True when the scheduler looks overloaded (tasks stuck with
        no cores, high occupancy per core, or memory pressure)."""
        with log_errors():
            sched = self.scheduler

            # Tasks are queued but there are no cores at all.
            if sched.unrunnable and not sched.ncores:
                return True

            # Work outstanding greatly exceeds what the current cores handle.
            busy = sum(sched.occupancy.values())
            cores = sum(sched.ncores.values())
            if busy / (cores + 1e-9) > self.startup_cost * 2:
                return True

            # Memory pressure: above 60% of the combined worker memory limit.
            memory_limit = sum(sched.worker_info[w]['memory_limit']
                               for w in sched.worker_info)
            memory_used = sum(sched.worker_bytes.values())
            return memory_used > 0.6 * memory_limit

    @gen.coroutine
    def _retire_workers(self):
        """Ask the scheduler which workers can retire, scale them down, then
        remove them from the scheduler."""
        with log_errors():
            doomed = yield self.scheduler.retire_workers(remove=False)
            logger.info("Retiring workers %s", doomed)
            maybe_future = self.cluster.scale_down(doomed)
            if gen.is_future(maybe_future):
                yield maybe_future
            for addr in doomed:
                self.scheduler.remove_worker(address=addr, safe=True)

    @gen.coroutine
    def _adapt(self):
        # Semaphore to avoid overlapping adapt calls
        if self._adapting:
            return
        self._adapting = True
        try:
            if self.should_scale_up():
                target = max(1, len(self.scheduler.ncores) * 2)
                logger.info("Scaling up to %d workers", target)
                maybe_future = self.cluster.scale_up(target)
                if gen.is_future(maybe_future):
                    yield maybe_future
            yield self._retire_workers()
        finally:
            self._adapting = False

    def adapt(self):
        """Schedule one adaptation pass on the scheduler's event loop."""
        self.scheduler.loop.add_callback(self._adapt)
class SemaphoreExtension:
    """ An extension for the scheduler to manage Semaphores

    This adds the following routes to the scheduler

    *  semaphore_acquire
    *  semaphore_release
    *  semaphore_close
    *  semaphore_refresh_leases
    *  semaphore_register
    """

    def __init__(self, scheduler):
        self.scheduler = scheduler

        # {semaphore_name: asyncio.Event}
        # The event for a name is set whenever that semaphore's state changes
        # (a lease released or the semaphore closed), waking pending acquires.
        self.events = defaultdict(asyncio.Event)
        # {semaphore_name: max_leases}
        # Also the source of truth for whether a semaphore exists at all.
        self.max_leases = dict()
        # {semaphore_name: {lease_id: lease_last_seen_timestamp}}
        self.leases = defaultdict(dict)

        self.scheduler.handlers.update({
            "semaphore_register": self.create,
            "semaphore_acquire": self.acquire,
            "semaphore_release": self.release,
            "semaphore_close": self.close,
            "semaphore_refresh_leases": self.refresh_leases,
            "semaphore_value": self.get_value,
        })

        self.scheduler.extensions["semaphores"] = self

        # {metric_name: {semaphore_name: metric}}
        self.metrics = {
            "acquire_total": defaultdict(int),  # counter
            "release_total": defaultdict(int),  # counter
            "average_pending_lease_time": defaultdict(float),  # gauge
            "pending": defaultdict(int),  # gauge
        }

        # Periodically expire leases whose holders stopped refreshing them.
        validation_callback_time = parse_timedelta(
            dask.config.get(
                "distributed.scheduler.locks.lease-validation-interval"),
            default="s",
        )
        self._pc_lease_timeout = PeriodicCallback(
            self._check_lease_timeout, validation_callback_time * 1000)
        self._pc_lease_timeout.start()
        self.lease_timeout = parse_timedelta(
            dask.config.get("distributed.scheduler.locks.lease-timeout"),
            default="s")

    async def get_value(self, comm=None, name=None):
        """Return the number of leases currently held for *name*."""
        return len(self.leases[name])

    # `comm` here is required by the handler interface
    def create(self, comm=None, name=None, max_leases=None):
        """Register a semaphore; idempotent if max_leases matches."""
        # We use `self.max_leases` as the point of truth to find out if a semaphore with a specific
        # `name` has been created.
        if name not in self.max_leases:
            assert isinstance(max_leases, int), max_leases
            self.max_leases[name] = max_leases
        else:
            # Re-registering with a different capacity is a caller error.
            if max_leases != self.max_leases[name]:
                raise ValueError(
                    "Inconsistent max leases: %s, expected: %s" %
                    (max_leases, self.max_leases[name]))

    def refresh_leases(self, comm=None, name=None, lease_ids=None):
        """Reset the last-seen timestamp of the given leases to now."""
        with log_errors():
            now = time()
            logger.debug("Refresh leases for %s with ids %s at %s", name,
                         lease_ids, now)
            for id_ in lease_ids:
                if id_ not in self.leases[name]:
                    # The lease likely timed out already; refreshing it now
                    # re-creates it, which can overbook the semaphore.
                    logger.critical(
                        f"Refreshing an unknown lease ID {id_} for {name}. This might be due to leases "
                        f"timing out and may cause overbooking of the semaphore!"
                        f"This is often caused by long-running GIL-holding in the task which acquired the lease."
                    )
                self.leases[name][id_] = now

    def _get_lease(self, name, lease_id):
        """Try to take one lease; return True on success, False if full."""
        result = True
        if (
                # This allows request idempotency
                lease_id in self.leases[name]
                or len(self.leases[name]) < self.max_leases[name]):
            now = time()
            logger.info("Acquire lease %s for %s at %s", lease_id, name, now)
            self.leases[name][lease_id] = now
            self.metrics["acquire_total"][name] += 1
        else:
            result = False
        return result

    def _semaphore_exists(self, name):
        """True iff a semaphore called *name* has been registered."""
        if name not in self.max_leases:
            return False
        return True

    async def acquire(self, comm=None, name=None, timeout=None, lease_id=None):
        """Block until a lease is obtained or *timeout* elapses.

        Returns True if the lease was granted, False on timeout.
        """
        with log_errors():
            if not self._semaphore_exists(name):
                raise RuntimeError(
                    f"Semaphore `{name}` not known or already closed.")
            if isinstance(name, list):
                name = tuple(name)
            w = _Watch(timeout)
            w.start()

            self.metrics["pending"][name] += 1
            while True:
                logger.info(
                    "Trying to acquire %s for %s with %ss left.",
                    lease_id,
                    name,
                    w.leftover(),
                )
                # Reset the event and try to get a release. The event will be set if the state
                # is changed and helps to identify when it is worth to retry an acquire
                self.events[name].clear()
                result = self._get_lease(name, lease_id)

                # If acquiring fails, we wait for the event to be set, i.e. something has
                # been released and we can try to acquire again (continue loop)
                if not result:
                    future = asyncio.wait_for(self.events[name].wait(),
                                              timeout=w.leftover())
                    try:
                        await future
                        continue
                    except TimeoutError:
                        result = False
                logger.info(
                    "Acquisition of lease %s for %s is %s after waiting for %ss.",
                    lease_id,
                    name,
                    result,
                    w.elapsed(),
                )
                # We're about to return, so the lease is no longer "pending"
                self.metrics["average_pending_lease_time"][name] = (
                    self.metrics["average_pending_lease_time"][name] +
                    w.elapsed()) / 2
                self.metrics["pending"][name] -= 1

                return result

    def release(self, comm=None, name=None, lease_id=None):
        """Give back one lease; logs (does not raise) on unknown state."""
        with log_errors():
            if not self._semaphore_exists(name):
                logger.warning(
                    f"Tried to release semaphore `{name}` but it is not known or already closed."
                )
                return
            if isinstance(name, list):
                name = tuple(name)
            if name in self.leases and lease_id in self.leases[name]:
                self._release_value(name, lease_id)
            else:
                logger.warning(
                    f"Tried to release semaphore but it was already released: "
                    f"name={name}, lease_id={lease_id}. This can happen if the semaphore timed out before."
                )

    def _release_value(self, name, lease_id):
        """Remove a lease and wake any waiters."""
        logger.info("Releasing %s for %s", lease_id, name)
        # Everything needs to be atomic here.
        del self.leases[name][lease_id]
        self.events[name].set()
        self.metrics["release_total"][name] += 1

    def _check_lease_timeout(self):
        """Periodic task: drop leases not refreshed within lease_timeout."""
        now = time()
        semaphore_names = list(self.leases.keys())
        for name in semaphore_names:
            ids = list(self.leases[name])
            logger.debug(
                "Validating leases for %s at time %s. Currently known %s",
                name,
                now,
                self.leases[name],
            )
            for _id in ids:
                time_since_refresh = now - self.leases[name][_id]
                if time_since_refresh > self.lease_timeout:
                    logger.info(
                        "Lease %s for %s timed out after %ss.",
                        _id,
                        name,
                        time_since_refresh,
                    )
                    self._release_value(name=name, lease_id=_id)

    def close(self, comm=None, name=None):
        """Hard close the semaphore without warning clients which still hold a lease."""
        with log_errors():
            if not self._semaphore_exists(name):
                return
            del self.max_leases[name]
            if name in self.events:
                del self.events[name]
            if name in self.leases:
                if self.leases[name]:
                    warnings.warn(
                        f"Closing semaphore {name} but there remain unreleased leases {sorted(self.leases[name])}",
                        RuntimeWarning,
                    )
                del self.leases[name]
            if name in self.metrics["pending"]:
                if self.metrics["pending"][name]:
                    warnings.warn(
                        f"Closing semaphore {name} but there remain pending leases",
                        RuntimeWarning,
                    )
            # Clean-up state of semaphore metrics
            for _, metric_dict in self.metrics.items():
                if name in metric_dict:
                    del metric_dict[name]
# NOTE(review): the statements below look like the tail of a `_clean()`
# periodic task whose `def` line lies outside this chunk — confirm before
# relying on the top-level formatting used here.
current_key = current_timestamp_key()
users = redis_cli.hkeys(USER_LIST)
remove_users = []
changed = False
for u in users:
    # A user present in neither the previous nor the current timestamp bucket
    # has no recent heartbeat — drop them from the global user list.
    if not redis_cli.hexists(past_key, u) and not redis_cli.hexists(
            current_key, u):
        changed = True
        redis_cli.hdel(USER_LIST, u)
        remove_users.append(u)
if changed:
    # Presumably pushes the updated presence list to subscribers — verify.
    publish_online_users()

# Run the cleanup on a fixed schedule (CLEANUP_FREQUENCY is in seconds;
# PeriodicCallback takes milliseconds).
periodic_callback = PeriodicCallback(_clean, CLEANUP_FREQUENCY * 1000)
periodic_callback.start()


def add_user(id):
    """ Check if the user is already in there first. """
    current_key = current_timestamp_key()
    # Check if current key already exists
    hash_exists = redis_cli.exists(current_key) != 0
    # Add user to the current key and the global user list
    redis_cli.hincrby(current_key, id, 1)
    result = redis_cli.hincrby(USER_LIST, id, 1)
    # NOTE(review): the function appears to continue past this chunk boundary
    # (hash_exists is unused and nothing is returned here) — confirm.
class Spawner(LoggingConfigurable):
    """Base class for spawning single-user notebook servers.

    Subclass this, and override the following methods:

    - load_state
    - get_state
    - start
    - stop
    - poll
    """

    db = Any()
    user = Any()
    hub = Any()
    authenticator = Any()
    api_token = Unicode()
    ip = Unicode(
        '127.0.0.1',
        help=
        "The IP address (or hostname) the single-user server should listen on"
    ).tag(config=True)
    port = Integer(
        0,
        help=
        "The port for single-user servers to listen on. New in version 0.7.")
    start_timeout = Integer(
        60,
        help="""Timeout (in seconds) before giving up on the spawner.

        This is the timeout for start to return, not the timeout for the server to respond.
        Callers of spawner.start will assume that startup has failed if it takes longer than this.
        start should return when the server process is started and its location is known.
        """).tag(config=True)

    http_timeout = Integer(
        30,
        help="""Timeout (in seconds) before giving up on a spawned HTTP server

        Once a server has successfully been spawned, this is the amount of time
        we wait before assuming that the server is unable to accept
        connections.
        """).tag(config=True)

    poll_interval = Integer(
        30,
        help="""Interval (in seconds) on which to poll the spawner.""").tag(
            config=True)

    _callbacks = List()
    _poll_callback = Any()

    debug = Bool(
        False,
        help="Enable debug-logging of the single-user server").tag(config=True)

    options_form = Unicode("", help="""
        An HTML form for options a user can specify on launching their server.
        The surrounding `<form>` element and the submit button are already provided.
        For example:

            Set your key:
            <input name="key" val="default_key"></input>
            <br>
            Choose a letter:
            <select name="letter" multiple="true">
              <option value="A">The letter A</option>
              <option value="B">The letter B</option>
            </select>
    """).tag(config=True)

    def options_from_form(self, form_data):
        """Interpret HTTP form data

        Form data will always arrive as a dict of lists of strings.
        Override this function to understand single-values, numbers, etc.

        This should coerce form data into the structure expected by
        self.user_options, which must be a dict.

        Instances will receive this data on self.user_options, after
        passing through this function, prior to `Spawner.start`.
        """
        return form_data

    user_options = Dict(
        help="This is where form-specified options ultimately end up.")

    env_keep = List(
        [
            'PATH',
            'PYTHONPATH',
            'CONDA_ROOT',
            'CONDA_DEFAULT_ENV',
            'VIRTUAL_ENV',
            'LANG',
            'LC_ALL',
        ],
        help="Whitelist of environment variables for the subprocess to inherit"
    ).tag(config=True)

    env = Dict(help="""Deprecated: use Spawner.get_env or Spawner.environment

    - extend Spawner.get_env for adding required env in Spawner subclasses
    - Spawner.environment for config-specified env
    """)

    environment = Dict(help="""Environment variables to load for the Spawner.

        Value could be a string or a callable. If it is a callable, it will be called
        with one parameter, which will be the instance of the spawner in use. It should
        quickly (without doing much blocking operations) return a string that will be
        used as the value for the environment variable.
        """).tag(config=True)

    cmd = Command(
        ['jupyterhub-singleuser'],
        help="""The command used for starting notebooks.""").tag(config=True)
    args = List(
        Unicode(),
        help="""Extra arguments to be passed to the single-user server""").tag(
            config=True)

    notebook_dir = Unicode(
        '',
        help="""The notebook directory for the single-user server

        `~` will be expanded to the user's home directory
        `{username}` will be expanded to the user's username
        """).tag(config=True)

    default_url = Unicode(
        '',
        help="""The default URL for the single-user server.

        Can be used in conjunction with --notebook-dir=/ to enable
        full filesystem traversal, while preserving user's homedir as
        landing page for notebook

        `{username}` will be expanded to the user's username
        """).tag(config=True)

    @validate('notebook_dir', 'default_url')
    def _deprecate_percent_u(self, proposal):
        # FIX: removed a leftover debug `print(proposal)` and replaced the
        # deprecated `log.warn` alias with `log.warning`.
        v = proposal['value']
        if '%U' in v:
            self.log.warning(
                "%%U for username in %s is deprecated in JupyterHub 0.7, use {username}",
                proposal['trait'].name,
            )
            v = v.replace('%U', '{username}')
            self.log.warning("Converting %r to %r", proposal['value'], v)
        return v

    disable_user_config = Bool(
        False,
        help="""Disable per-user configuration of single-user servers.

        This prevents any config in users' $HOME directories
        from having an effect on their server.
        """).tag(config=True)

    def __init__(self, **kwargs):
        super(Spawner, self).__init__(**kwargs)
        # Restore any state persisted for this user's previous server.
        if self.user.state:
            self.load_state(self.user.state)

    def load_state(self, state):
        """load state from the database

        This is the extensible part of state

        Override in a subclass if there is state to load.
        Should call `super`.

        See Also
        --------
        get_state, clear_state
        """
        pass

    def get_state(self):
        """store the state necessary for load_state

        A black box of extra state for custom spawners.
        Subclasses should call `super`.

        Returns
        -------
        state: dict
             a JSONable dict of state
        """
        state = {}
        return state

    def clear_state(self):
        """clear any state that should be cleared when the process stops

        State that should be preserved across server instances should not be cleared.

        Subclasses should call super, to ensure that state is properly cleared.
        """
        self.api_token = ''

    def get_env(self):
        """Return the environment dict to use for the Spawner.

        This applies things like `env_keep`, anything defined in `Spawner.environment`,
        and adds the API token to the env.

        Use this to access the env in Spawner.start to allow extension in subclasses.
        """
        env = {}
        if self.env:
            warnings.warn("Spawner.env is deprecated, found %s" % self.env,
                          DeprecationWarning)
            env.update(self.env)

        for key in self.env_keep:
            if key in os.environ:
                env[key] = os.environ[key]

        # config overrides. If the value is a callable, it will be called with
        # one parameter - the current spawner instance - and the return value
        # will be assigned to the environment variable. This will be called at
        # spawn time.
        for key, value in self.environment.items():
            if callable(value):
                env[key] = value(self)
            else:
                env[key] = value

        env['JPY_API_TOKEN'] = self.api_token
        return env

    def template_namespace(self):
        """Return the template namespace for format-string formatting.

        Currently used on default_url and notebook_dir.

        Subclasses may add items to the available namespace.

        The default implementation includes::

            {
              'username': user.name,
              'base_url': users_base_url,
            }

        Returns:

            ns (dict): namespace for string formatting.
        """
        d = {'username': self.user.name}
        if self.user.server:
            d['base_url'] = self.user.server.base_url
        return d

    def format_string(self, s):
        """Render a Python format string

        Uses :meth:`Spawner.template_namespace` to populate format namespace.

        Args:

            s (str): Python format-string to be formatted.

        Returns:

            str: Formatted string, rendered
        """
        return s.format(**self.template_namespace())

    def get_args(self):
        """Return the arguments to be passed after self.cmd"""
        args = [
            '--user=%s' % self.user.name,
            '--cookie-name=%s' % self.user.server.cookie_name,
            '--base-url=%s' % self.user.server.base_url,
            '--hub-host=%s' % self.hub.host,
            '--hub-prefix=%s' % self.hub.server.base_url,
            '--hub-api-url=%s' % self.hub.api_url,
        ]
        if self.ip:
            args.append('--ip=%s' % self.ip)

        if self.port:
            args.append('--port=%i' % self.port)
        elif self.user.server.port:
            self.log.warning(
                "Setting port from user.server is deprecated as of JupyterHub 0.7."
            )
            args.append('--port=%i' % self.user.server.port)

        if self.notebook_dir:
            notebook_dir = self.format_string(self.notebook_dir)
            args.append('--notebook-dir=%s' % notebook_dir)
        if self.default_url:
            default_url = self.format_string(self.default_url)
            args.append('--NotebookApp.default_url=%s' % default_url)

        if self.debug:
            args.append('--debug')
        if self.disable_user_config:
            args.append('--disable-user-config')
        args.extend(self.args)
        return args

    @gen.coroutine
    def start(self):
        """Start the single-user server

        Returns:

        (ip, port): the ip, port where the Hub can connect to the server.

        .. versionchanged:: 0.7
            Return ip, port instead of setting on self.user.server directly.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def stop(self, now=False):
        """Stop the single-user process"""
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def poll(self):
        """Check if the single-user process is running

        return None if it is, an exit status (0 if unknown) if it is not.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    def add_poll_callback(self, callback, *args, **kwargs):
        """add a callback to fire when the subprocess stops

        as noticed by periodic poll_and_notify()
        """
        if args or kwargs:
            cb = callback
            callback = lambda: cb(*args, **kwargs)
        self._callbacks.append(callback)

    def stop_polling(self):
        """stop the periodic poll"""
        if self._poll_callback:
            self._poll_callback.stop()
            self._poll_callback = None

    def start_polling(self):
        """Start polling periodically

        callbacks registered via `add_poll_callback` will fire
        if/when the process stops.

        Explicit termination via the stop method will not trigger the callbacks.
        """
        if self.poll_interval <= 0:
            self.log.debug("Not polling subprocess")
            return
        else:
            self.log.debug("Polling subprocess every %is", self.poll_interval)

        self.stop_polling()

        # PeriodicCallback takes milliseconds; poll_interval is seconds.
        self._poll_callback = PeriodicCallback(self.poll_and_notify,
                                               1e3 * self.poll_interval)
        self._poll_callback.start()

    @gen.coroutine
    def poll_and_notify(self):
        """Used as a callback to periodically poll the process,
        and notify any watchers
        """
        status = yield self.poll()
        if status is None:
            # still running, nothing to do here
            return

        self.stop_polling()

        for callback in self._callbacks:
            try:
                yield gen.maybe_future(callback())
            except Exception:
                # One failing watcher must not prevent the others from firing.
                self.log.exception("Unhandled error in poll callback for %s",
                                   self)
        return status

    death_interval = Float(0.1)

    @gen.coroutine
    def wait_for_death(self, timeout=10):
        """wait for the process to die, up to timeout seconds"""
        for i in range(int(timeout / self.death_interval)):
            status = yield self.poll()
            if status is not None:
                break
            else:
                yield gen.sleep(self.death_interval)
class TornadoSubscriptionManager(SubscriptionManager):
    """Tornado-based subscribe/heartbeat loop for the PubNub client."""

    def __init__(self, pubnub_instance):
        # Captured by the nested reconnection callback below.
        subscription_manager = self

        self._message_queue = Queue()
        self._consumer_event = Event()
        self._cancellation_event = Event()
        self._subscription_lock = Semaphore(1)
        # self._current_request_key_object = None
        self._heartbeat_periodic_callback = None
        self._reconnection_manager = TornadoReconnectionManager(pubnub_instance)

        super(TornadoSubscriptionManager, self).__init__(pubnub_instance)
        self._start_worker()

        class TornadoReconnectionCallback(ReconnectionCallback):
            def on_reconnect(self):
                subscription_manager.reconnect()

                pn_status = PNStatus()
                pn_status.category = PNStatusCategory.PNReconnectedCategory
                pn_status.error = False

                subscription_manager._subscription_status_announced = True
                subscription_manager._listener_manager.announce_status(pn_status)

        self._reconnection_listener = TornadoReconnectionCallback()
        self._reconnection_manager.set_reconnection_listener(self._reconnection_listener)

    def _set_consumer_event(self):
        self._consumer_event.set()

    def _message_queue_put(self, message):
        self._message_queue.put(message)

    def _start_worker(self):
        """Spawn the message-consumer coroutine on the client's IOLoop."""
        self._consumer = TornadoSubscribeMessageWorker(self._pubnub,
                                                       self._listener_manager,
                                                       self._message_queue,
                                                       self._consumer_event)
        run = stack_context.wrap(self._consumer.run)
        self._pubnub.ioloop.spawn_callback(run)

    def reconnect(self):
        self._should_stop = False
        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
        # self._register_heartbeat_timer()

    def disconnect(self):
        self._should_stop = True
        self._stop_heartbeat_timer()
        self._stop_subscribe_loop()

    @tornado.gen.coroutine
    def _start_subscribe_loop(self):
        self._stop_subscribe_loop()

        yield self._subscription_lock.acquire()

        self._cancellation_event.clear()

        combined_channels = self._subscription_state.prepare_channel_list(True)
        combined_groups = self._subscription_state.prepare_channel_group_list(True)

        if len(combined_channels) == 0 and len(combined_groups) == 0:
            return

        envelope_future = Subscribe(self._pubnub) \
            .channels(combined_channels).channel_groups(combined_groups) \
            .timetoken(self._timetoken).region(self._region) \
            .filter_expression(self._pubnub.config.filter_expression) \
            .cancellation_event(self._cancellation_event) \
            .future()

        canceller_future = self._cancellation_event.wait()

        wi = tornado.gen.WaitIterator(envelope_future, canceller_future)

        # iterates 2 times: one for result one for cancelled
        while not wi.done():
            try:
                result = yield wi.next()
            except Exception as e:
                # TODO: verify the error will not be eaten
                logger.error(e)
                raise
            else:
                if wi.current_future == envelope_future:
                    e = result
                elif wi.current_future == canceller_future:
                    return
                else:
                    raise Exception("Unexpected future resolved: %s" % str(wi.current_future))

                if e.is_error():
                    # 599 error doesn't works - tornado use this status code
                    # for a wide range of errors, for ex:
                    # HTTP Server Error (599): [Errno -2] Name or service not known
                    if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
                        self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
                        return

                    logger.error("Exception in subscribe loop: %s" % str(e))

                    if e.status is not None and e.status.category == PNStatusCategory.PNAccessDeniedCategory:
                        e.status.operation = PNOperationType.PNUnsubscribeOperation

                    self._listener_manager.announce_status(e.status)

                    self._reconnection_manager.start_polling()
                    self.disconnect()
                    return
                else:
                    self._handle_endpoint_call(e.result, e.status)
                    self._pubnub.ioloop.spawn_callback(self._start_subscribe_loop)
            finally:
                self._cancellation_event.set()
                yield tornado.gen.moment
                self._subscription_lock.release()
                self._cancellation_event.clear()
                break

    def _stop_subscribe_loop(self):
        if self._cancellation_event is not None and not self._cancellation_event.is_set():
            self._cancellation_event.set()

    def _stop_heartbeat_timer(self):
        if self._heartbeat_periodic_callback is not None:
            self._heartbeat_periodic_callback.stop()

    def _register_heartbeat_timer(self):
        super(TornadoSubscriptionManager, self)._register_heartbeat_timer()

        self._heartbeat_periodic_callback = PeriodicCallback(
            stack_context.wrap(self._perform_heartbeat_loop),
            self._pubnub.config.heartbeat_interval * TornadoSubscriptionManager.HEARTBEAT_INTERVAL_MULTIPLIER,
            self._pubnub.ioloop)
        self._heartbeat_periodic_callback.start()

    @tornado.gen.coroutine
    def _perform_heartbeat_loop(self):
        if self._heartbeat_call is not None:
            # TODO: cancel call
            pass

        cancellation_event = Event()
        state_payload = self._subscription_state.state_payload()
        presence_channels = self._subscription_state.prepare_channel_list(False)
        presence_groups = self._subscription_state.prepare_channel_group_list(False)

        if len(presence_channels) == 0 and len(presence_groups) == 0:
            return

        try:
            envelope = yield self._pubnub.heartbeat() \
                .channels(presence_channels) \
                .channel_groups(presence_groups) \
                .state(state_payload) \
                .cancellation_event(cancellation_event) \
                .future()

            heartbeat_verbosity = self._pubnub.config.heartbeat_notification_options
            if envelope.status.is_error:
                # FIX: the second comparison previously tested ALL twice, so
                # FAILURES-only verbosity never announced error statuses.
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL or \
                        heartbeat_verbosity == PNHeartbeatNotificationOptions.FAILURES:
                    self._listener_manager.announce_status(envelope.status)
            else:
                if heartbeat_verbosity == PNHeartbeatNotificationOptions.ALL:
                    self._listener_manager.announce_status(envelope.status)
        except PubNubTornadoException:
            pass
            # TODO: check correctness
            # if e.status is not None and e.status.category == PNStatusCategory.PNTimeoutCategory:
            #     self._start_subscribe_loop()
            # else:
            #     self._listener_manager.announce_status(e.status)
        except Exception as e:
            # FIX: was `print(e)` — route through the module logger like the
            # subscribe loop does.
            logger.error("Exception in heartbeat loop: %s" % str(e))
        finally:
            cancellation_event.set()

    @tornado.gen.coroutine
    def _send_leave(self, unsubscribe_operation):
        envelope = yield Leave(self._pubnub) \
            .channels(unsubscribe_operation.channels) \
            .channel_groups(unsubscribe_operation.channel_groups).future()
        self._listener_manager.announce_status(envelope.status)
class Spawner(LoggingConfigurable): """Base class for spawning single-user notebook servers. Subclass this, and override the following methods: - load_state - get_state - start - stop - poll As JupyterHub supports multiple users, an instance of the Spawner subclass is created for each user. If there are 20 JupyterHub users, there will be 20 instances of the subclass. """ # private attributes for tracking status _spawn_pending = False _start_pending = False _stop_pending = False _proxy_pending = False _waiting_for_response = False @property def _log_name(self): """Return username:servername or username Used in logging for consistency with named servers. """ if self.name: return '%s:%s' % (self.user.name, self.name) else: return self.user.name @property def pending(self): """Return the current pending event, if any Return False if nothing is pending. """ if self._spawn_pending: return 'spawn' elif self._stop_pending: return 'stop' return False @property def ready(self): """Is this server ready to use? A server is not ready if an event is pending. """ if self.pending: return False if self.server is None: return False return True @property def active(self): """Return True if the server is active. This includes fully running and ready or any pending start/stop event. """ return bool(self.pending or self.ready) authenticator = Any() hub = Any() orm_spawner = Any() @observe('orm_spawner') def _orm_spawner_changed(self, change): if change.new and change.new.server: self._server = Server(orm_server=change.new.server) else: self._server = None user = Any() def __init_subclass__(cls, **kwargs): super().__init_subclass__() missing = [] for attr in ('start', 'stop', 'poll'): if getattr(Spawner, attr) is getattr(cls, attr): missing.append(attr) if missing: raise NotImplementedError( "class `{}` needs to redefine the `start`," "`stop` and `poll` methods. 
`{}` not redefined.".format( cls.__name__, '`, `'.join(missing))) proxy_spec = Unicode() @property def server(self): if hasattr(self, '_server'): return self._server if self.orm_spawner and self.orm_spawner.server: return Server(orm_server=self.orm_spawner.server) @server.setter def server(self, server): self._server = server if self.orm_spawner: if self.orm_spawner.server is not None: # delete the old value db = inspect(self.orm_spawner.server).session db.delete(self.orm_spawner.server) if server is None: self.orm_spawner.server = None else: self.orm_spawner.server = server.orm_server @property def name(self): if self.orm_spawner: return self.orm_spawner.name return '' admin_access = Bool(False) api_token = Unicode() oauth_client_id = Unicode() will_resume = Bool(False, help="""Whether the Spawner will resume on next start Default is False where each launch of the Spawner will be a new instance. If True, an existing Spawner will resume instead of starting anew (e.g. resuming a Docker container), and API tokens in use when the Spawner stops will not be deleted. """) ip = Unicode('', help=""" The IP address (or hostname) the single-user server should listen on. The JupyterHub proxy implementation should be able to send packets to this interface. """).tag(config=True) port = Integer(0, help=""" The port for single-user servers to listen on. Defaults to `0`, which uses a randomly allocated port number each time. If set to a non-zero value, all Spawners will use the same port, which only makes sense if each server is on a different address, e.g. in containers. New in version 0.7. """).tag(config=True) start_timeout = Integer(60, help=""" Timeout (in seconds) before giving up on starting of single-user server. This is the timeout for start to return, not the timeout for the server to respond. Callers of spawner.start will assume that startup has failed if it takes longer than this. start should return when the server process is started and its location is known. 
""").tag(config=True) http_timeout = Integer(30, help=""" Timeout (in seconds) before giving up on a spawned HTTP server Once a server has successfully been spawned, this is the amount of time we wait before assuming that the server is unable to accept connections. """).tag(config=True) poll_interval = Integer(30, help=""" Interval (in seconds) on which to poll the spawner for single-user server's status. At every poll interval, each spawner's `.poll` method is called, which checks if the single-user server is still running. If it isn't running, then JupyterHub modifies its own state accordingly and removes appropriate routes from the configurable proxy. """).tag(config=True) _callbacks = List() _poll_callback = Any() debug = Bool( False, help="Enable debug-logging of the single-user server").tag(config=True) options_form = Unicode(help=""" An HTML form for options a user can specify on launching their server. The surrounding `<form>` element and the submit button are already provided. For example: .. code:: html Set your key: <input name="key" val="default_key"></input> <br> Choose a letter: <select name="letter" multiple="true"> <option value="A">The letter A</option> <option value="B">The letter B</option> </select> The data from this form submission will be passed on to your spawner in `self.user_options` """).tag(config=True) def options_from_form(self, form_data): """Interpret HTTP form data Form data will always arrive as a dict of lists of strings. Override this function to understand single-values, numbers, etc. This should coerce form data into the structure expected by self.user_options, which must be a dict. Instances will receive this data on self.user_options, after passing through this function, prior to `Spawner.start`. """ return form_data user_options = Dict(help=""" Dict of user specified options for the user's spawned instance of a single-user server. 
    These user options are usually provided by the `options_form` displayed
    to the user when they start their server.
    """)

    # Environment variables copied verbatim from the Hub's own process into
    # the single-user server; everything else is withheld (see help text).
    env_keep = List([
        'PATH',
        'PYTHONPATH',
        'CONDA_ROOT',
        'CONDA_DEFAULT_ENV',
        'VIRTUAL_ENV',
        'LANG',
        'LC_ALL',
    ],
        help="""
        Whitelist of environment variables for the single-user server to
        inherit from the JupyterHub process.

        This whitelist is used to ensure that sensitive information in the
        JupyterHub process's environment (such as `CONFIGPROXY_AUTH_TOKEN`)
        is not passed to the single-user server's process.
        """
    ).tag(config=True)

    # Deprecated alias; consumed by get_env() with a DeprecationWarning.
    env = Dict(help="""Deprecated: use Spawner.get_env or Spawner.environment

    - extend Spawner.get_env for adding required env in Spawner subclasses
    - Spawner.environment for config-specified env
    """)

    # NOTE(review): the 'enviornment' typo below is in the original help
    # text; left untouched because help strings are runtime data.
    environment = Dict(help="""
        Extra environment variables to set for the single-user server's
        process.

        Environment variables that end up in the single-user server's
        process come from 3 sources:
          - This `environment` configurable
          - The JupyterHub process' environment variables that are
            whitelisted in `env_keep`
          - Variables to establish contact between the single-user notebook
            and the hub (such as JUPYTERHUB_API_TOKEN)

        The `enviornment` configurable should be set by JupyterHub
        administrators to add installation specific environment variables.
        It is a dict where the key is the name of the environment variable,
        and the value can be a string or a callable. If it is a callable,
        it will be called with one parameter (the spawner instance), and
        should return a string fairly quickly (no blocking operations
        please!).

        Note that the spawner class' interface is not guaranteed to be
        exactly same across upgrades, so if you are using the callable take
        care to verify it continues to work after upgrades!
        """).tag(config=True)

    cmd = Command(['jupyterhub-singleuser'], allow_none=True, help="""
        The command used for starting the single-user server.

        Provide either a string or a list containing the path to the
        startup script command. Extra arguments, other than this path,
        should be provided via `args`.

        This is usually set if you want to start the single-user server in
        a different python environment (with virtualenv/conda) than
        JupyterHub itself.

        Some spawners allow shell-style expansion here, allowing you to use
        environment variables. Most, including the default, do not. Consult
        the documentation for your spawner to verify!
        """).tag(config=True)

    args = List(Unicode(), help="""
        Extra arguments to be passed to the single-user server.

        Some spawners allow shell-style expansion here, allowing you to use
        environment variables here. Most, including the default, do not.
        Consult the documentation for your spawner to verify!
        """).tag(config=True)

    notebook_dir = Unicode(help="""
        Path to the notebook directory for the single-user server.

        The user sees a file listing of this directory when the notebook
        interface is started. The current interface does not easily allow
        browsing beyond the subdirectories in this directory's tree.

        `~` will be expanded to the home directory of the user, and
        {username} will be replaced with the name of the user.

        Note that this does *not* prevent users from accessing files
        outside of this path! They can do so with many other means.
        """).tag(config=True)

    default_url = Unicode(help="""
        The URL the single-user server should start in.

        `{username}` will be expanded to the user's username

        Example uses:

        - You can set `notebook_dir` to `/` and `default_url` to
          `/tree/home/{username}` to allow people to navigate the whole
          filesystem from their notebook server, but still start in their
          home directory.
        - Start with `/notebooks` instead of `/tree` if `default_url`
          points to a notebook instead of a directory.
        - You can set this to `/lab` to have JupyterLab start by default,
          rather than Jupyter Notebook.
        """).tag(config=True)

    @validate('notebook_dir', 'default_url')
    def _deprecate_percent_u(self, proposal):
        # Rewrite the legacy '%U' username placeholder to '{username}',
        # warning the admin each time the deprecated form is seen.
        v = proposal['value']
        if '%U' in v:
            self.log.warning(
                "%%U for username in %s is deprecated in JupyterHub 0.7, use {username}",
                proposal['trait'].name,
            )
            v = v.replace('%U', '{username}')
            self.log.warning("Converting %r to %r", proposal['value'], v)
        return v

    disable_user_config = Bool(False, help="""
        Disable per-user configuration of single-user servers.

        When starting the user's single-user server, any config file found
        in the user's $HOME directory will be ignored.

        Note: a user could circumvent this if the user modifies their
        Python environment, such as when they have their own conda
        environments / virtualenvs / containers.
        """).tag(config=True)

    mem_limit = ByteSpecification(None, help="""
        Maximum number of bytes a single-user notebook server is allowed
        to use.

        Allows the following suffixes:
          - K -> Kilobytes
          - M -> Megabytes
          - G -> Gigabytes
          - T -> Terabytes

        If the single user server tries to allocate more memory than this,
        it will fail. There is no guarantee that the single-user notebook
        server will be able to allocate this much memory - only that it can
        not allocate more than this.

        This needs to be supported by your spawner for it to work.
        """).tag(config=True)

    cpu_limit = Float(None, allow_none=True, help="""
        Maximum number of cpu-cores a single-user notebook server is
        allowed to use.

        If this value is set to 0.5, allows use of 50% of one CPU.
        If this value is set to 2, allows use of up to 2 CPUs.

        The single-user notebook server will never be scheduled by the
        kernel to use more cpu-cores than this. There is no guarantee that
        it can access this many cpu-cores.

        This needs to be supported by your spawner for it to work.
        """).tag(config=True)

    mem_guarantee = ByteSpecification(None, help="""
        Minimum number of bytes a single-user notebook server is
        guaranteed to have available.

        Allows the following suffixes:
          - K -> Kilobytes
          - M -> Megabytes
          - G -> Gigabytes
          - T -> Terabytes

        This needs to be supported by your spawner for it to work.
        """).tag(config=True)

    cpu_guarantee = Float(None, allow_none=True, help="""
        Minimum number of cpu-cores a single-user notebook server is
        guaranteed to have available.

        If this value is set to 0.5, allows use of 50% of one CPU.
        If this value is set to 2, allows use of up to 2 CPUs.

        Note that this needs to be supported by your spawner for it to
        work.
        """).tag(config=True)

    pre_spawn_hook = Any(help="""
        An optional hook function that you can implement to do some
        bootstrapping work before the spawner starts. For example, create a
        directory for your user or load initial content.

        This can be set independent of any concrete spawner implementation.

        Example::

            from subprocess import check_call
            def my_hook(spawner):
                username = spawner.user.name
                check_call(['./examples/bootstrap-script/bootstrap.sh', username])

            c.Spawner.pre_spawn_hook = my_hook
        """).tag(config=True)

    def load_state(self, state):
        """Restore state of spawner from database.

        Called for each user's spawner after the hub process restarts.

        `state` is a dict that'll contain the value returned by
        `get_state` of the spawner, or {} if the spawner hasn't persisted
        any state yet.

        Override in subclasses to restore any extra state that is needed
        to track the single-user server for that user. Subclasses should
        call super().
        """
        pass

    def get_state(self):
        """Save state of spawner into database.

        A black box of extra state for custom spawners. The returned value
        of this is passed to `load_state`.

        Subclasses should call `super().get_state()`, augment the state
        returned from there, and return that state.

        Returns
        -------
        state: dict
            a JSONable dict of state
        """
        state = {}
        return state

    def clear_state(self):
        """Clear any state that should be cleared when the single-user
        server stops.

        State that should be preserved across single-user server instances
        should not be cleared.
        Subclasses should call super, to ensure that state is properly
        cleared.
        """
        self.api_token = ''

    def get_env(self):
        """Return the environment dict to use for the Spawner.

        This applies things like `env_keep`, anything defined in
        `Spawner.environment`, and adds the API token to the env.

        When overriding in subclasses, subclasses must call
        `super().get_env()`, extend the returned dict and return it.

        Use this to access the env in Spawner.start to allow extension in
        subclasses.
        """
        env = {}
        # Deprecated Spawner.env is still honored, with a warning.
        if self.env:
            warnings.warn("Spawner.env is deprecated, found %s" % self.env, DeprecationWarning)
            env.update(self.env)

        # Inherit only whitelisted variables from the Hub's own process.
        for key in self.env_keep:
            if key in os.environ:
                env[key] = os.environ[key]

        # config overrides. If the value is a callable, it will be called with
        # one parameter - the current spawner instance - and the return value
        # will be assigned to the environment variable. This will be called at
        # spawn time.
        for key, value in self.environment.items():
            if callable(value):
                env[key] = value(self)
            else:
                env[key] = value

        env['JUPYTERHUB_API_TOKEN'] = self.api_token
        # deprecated (as of 0.7.2), for old versions of singleuser
        env['JPY_API_TOKEN'] = self.api_token
        if self.admin_access:
            env['JUPYTERHUB_ADMIN_ACCESS'] = '1'
        # OAuth settings
        env['JUPYTERHUB_CLIENT_ID'] = self.oauth_client_id
        env['JUPYTERHUB_HOST'] = self.hub.public_host
        env['JUPYTERHUB_OAUTH_CALLBACK_URL'] = \
            url_path_join(self.user.url, self.name, 'oauth_callback')

        # Info previously passed on args
        env['JUPYTERHUB_USER'] = self.user.name
        env['JUPYTERHUB_API_URL'] = self.hub.api_url
        env['JUPYTERHUB_BASE_URL'] = self.hub.base_url[:-4]
        if self.server:
            env['JUPYTERHUB_SERVICE_PREFIX'] = self.server.base_url

        # Put in limit and guarantee info if they exist.
        # Note that this is for use by the humans / notebook extensions in the
        # single-user notebook server, and not for direct usage by the spawners
        # themselves. Spawners should just use the traitlets directly.
        if self.mem_limit:
            env['MEM_LIMIT'] = str(self.mem_limit)
        if self.mem_guarantee:
            env['MEM_GUARANTEE'] = str(self.mem_guarantee)
        if self.cpu_limit:
            env['CPU_LIMIT'] = str(self.cpu_limit)
        if self.cpu_guarantee:
            env['CPU_GUARANTEE'] = str(self.cpu_guarantee)

        return env

    def template_namespace(self):
        """Return the template namespace for format-string formatting.

        Currently used on default_url and notebook_dir.

        Subclasses may add items to the available namespace.

        The default implementation includes::

            {
              'username': user.name,
              'base_url': users_base_url,
            }

        Returns:

            ns (dict): namespace for string formatting.
        """
        d = {'username': self.user.name}
        if self.server:
            d['base_url'] = self.server.base_url
        return d

    def format_string(self, s):
        """Render a Python format string

        Uses :meth:`Spawner.template_namespace` to populate format
        namespace.

        Args:

            s (str): Python format-string to be formatted.

        Returns:

            str: Formatted string, rendered
        """
        return s.format(**self.template_namespace())

    def get_args(self):
        """Return the arguments to be passed after self.cmd

        Doesn't expect shell expansion to happen.
        """
        args = []

        if self.ip:
            args.append('--ip="%s"' % self.ip)

        if self.port:
            args.append('--port=%i' % self.port)
        elif self.server.port:
            # Legacy fallback: port stored on user.server instead of the
            # spawner itself.
            self.log.warning(
                "Setting port from user.server is deprecated as of JupyterHub 0.7."
            )
            args.append('--port=%i' % self.server.port)

        if self.notebook_dir:
            notebook_dir = self.format_string(self.notebook_dir)
            args.append('--notebook-dir="%s"' % notebook_dir)
        if self.default_url:
            default_url = self.format_string(self.default_url)
            args.append('--NotebookApp.default_url="%s"' % default_url)

        if self.debug:
            args.append('--debug')
        if self.disable_user_config:
            args.append('--disable-user-config')
        # User-configured extra args always come last.
        args.extend(self.args)
        return args

    def run_pre_spawn_hook(self):
        """Run the pre_spawn_hook if defined"""
        if self.pre_spawn_hook:
            return self.pre_spawn_hook(self)

    @gen.coroutine
    def start(self):
        """Start the single-user server

        Returns:

        (str, int): the (ip, port) where the Hub can connect to the server.

        .. versionchanged:: 0.7
            Return ip, port instead of setting on self.user.server
            directly.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def stop(self, now=False):
        """Stop the single-user server

        If `now` is False (default), shutdown the server as gracefully as
        possible, e.g. starting with SIGINT, then SIGTERM, then SIGKILL.
        If `now` is True, terminate the server immediately.

        The coroutine should return when the single-user server process is
        no longer running.

        Must be a coroutine.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    @gen.coroutine
    def poll(self):
        """Check if the single-user process is running

        Returns:

        None if single-user process is running. Integer exit status (0 if
        unknown), if it is not running.

        State transitions, behavior, and return response:

        - If the Spawner has not been initialized (neither loaded state,
          nor called start), it should behave as if it is not running
          (status=0).
        - If the Spawner has not finished starting, it should behave as if
          it is running (status=None).

        Design assumptions about when `poll` may be called:

        - On Hub launch: `poll` may be called before `start` when state is
          loaded on Hub launch.
          `poll` should return exit status 0 (unknown) if the Spawner has
          not been initialized via `load_state` or `start`.
        - If `.start()` is async: `poll` may be called during any yielded
          portions of the `start` process. `poll` should return None when
          `start` is yielded, indicating that the `start` process has not
          yet completed.
        """
        raise NotImplementedError(
            "Override in subclass. Must be a Tornado gen.coroutine.")

    def add_poll_callback(self, callback, *args, **kwargs):
        """Add a callback to fire when the single-user server stops"""
        if args or kwargs:
            # Freeze the extra arguments into a zero-arg callable so all
            # registered callbacks can be invoked uniformly later.
            cb = callback
            callback = lambda: cb(*args, **kwargs)
        self._callbacks.append(callback)

    def stop_polling(self):
        """Stop polling for single-user server's running state"""
        if self._poll_callback:
            self._poll_callback.stop()
            self._poll_callback = None

    def start_polling(self):
        """Start polling periodically for single-user server's running
        state.

        Callbacks registered via `add_poll_callback` will fire if/when the
        server stops. Explicit termination via the stop method will not
        trigger the callbacks.
        """
        if self.poll_interval <= 0:
            self.log.debug("Not polling subprocess")
            return
        else:
            self.log.debug("Polling subprocess every %is", self.poll_interval)

        # Replace any previous poller before starting a new one.
        self.stop_polling()

        # poll_interval is in seconds; PeriodicCallback wants milliseconds.
        self._poll_callback = PeriodicCallback(
            self.poll_and_notify,
            1e3 * self.poll_interval
        )
        self._poll_callback.start()

    @gen.coroutine
    def poll_and_notify(self):
        """Used as a callback to periodically poll the process and notify
        any watchers"""
        status = yield self.poll()
        if status is None:
            # still running, nothing to do here
            return

        self.stop_polling()

        # clear callbacks list
        self._callbacks, callbacks = ([], self._callbacks)

        for callback in callbacks:
            try:
                yield gen.maybe_future(callback())
            except Exception:
                # One failing watcher must not prevent the others from
                # being notified.
                self.log.exception("Unhandled error in poll callback for %s", self)
        return status

    # Initial wait (seconds) between poll attempts in wait_for_death;
    # exponential_backoff grows it from here.
    death_interval = Float(0.1)

    @gen.coroutine
    def wait_for_death(self, timeout=10):
        """Wait for the single-user server to die, up to timeout seconds"""
        @gen.coroutine
        def _wait_for_death():
            status = yield self.poll()
            return status is not None

        try:
            r = yield exponential_backoff(
                _wait_for_death,
                'Process did not die in {timeout} seconds'.format(timeout=timeout),
                start_wait=self.death_interval,
                timeout=timeout,
            )
            return r
        except TimeoutError:
            # Timed out: report "still alive" rather than raising.
            return False
class RendezvousHandler(PubSubHandler):
    """Websocket signalling handler that introduces peers to each other.

    The first peer to join is elected "router" (recorded in the class-level
    ROUTER dict); later peers are told about it.  Messages addressed to
    other peers are relayed through redis pub/sub channels named
    'peer-<id>', polled by a PeriodicCallback.
    """

    # Shared across all handler instances: description of the current router.
    ROUTER = {}
    MY_ADDRESS = 'mitosis/v1/p000/wss/signal.mitosis.dev/websocket'

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.peer_id = '0'
        self.redis = redis_session()
        self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
        # Dummy subscription so get_message() always has a channel to poll.
        self.pubsub.subscribe(keep_alive=lambda: True)
        # Poll redis for relayed messages every 50 ms (with 10% jitter).
        self.callback = PeriodicCallback(self.pubsub.get_message, 50, 0.1)
        self.callback.start()

    def on_message(self, message):
        """Dispatch an incoming websocket message by its 'subject'."""
        message = json.loads(message)
        app_log.info(message)
        subject = message.get('subject')
        if subject == 'introduction':
            self.on_introduction(message)
        elif subject == 'connection-negotiation':
            self.on_connection_negotiation(message)
        elif subject == 'peer-update':
            self.on_router_reply(message)
        elif subject == 'rejection':
            self.on_router_reply(message)

    def on_introduction(self, message):
        """Register the sender; elect it router if none exists yet."""
        sender = message['sender']
        # Address format appears to be '<proto>/<version>/<peer_id>/...'
        # — TODO confirm against the client.
        self.peer_id = sender.split('/', 3)[2]
        self.pubsub.subscribe(**{'peer-%s' % self.peer_id: self.forward})
        if RendezvousHandler.ROUTER:
            app_log.info('%s (peer) joined', self.peer_id)
            self.send_message(sender, 'role-update', ['peer'])
            self.send_message(sender, 'peer-update', [RendezvousHandler.ROUTER])
        else:
            app_log.info('%s (router) joined', self.peer_id)
            roles = ['router', 'peer']
            RendezvousHandler.ROUTER.update({
                'peerId': self.peer_id,
                'roles': roles,
                'quality': 1.0
            })
            self.send_message(sender, 'role-update', roles)

    def on_connection_negotiation(self, message):
        """Relay a negotiation message to its receiver, falling back to
        the router when the receiver has no live subscription."""
        receiver = message['receiver']
        receiver_id = receiver.split('/', 3)[2]
        ((_, numsub), ) = self.redis.pubsub_numsub('peer-%s' % receiver_id)
        if numsub == 0:
            receiver_id = self.ROUTER.get('peerId')
        self.redis.publish('peer-%s' % receiver_id, self.dumps(message))

    def on_router_reply(self, message):
        """Relay peer-update / rejection messages, but only when they
        really come from the current router."""
        sender = message['sender']
        sender_id = sender.split('/', 3)[2]
        if sender_id == RendezvousHandler.ROUTER.get('peerId'):
            receiver = message['receiver']
            receiver_id = receiver.split('/', 3)[2]
            self.redis.publish('peer-%s' % receiver_id, self.dumps(message))

    def send_message(self, receiver, subject, body):
        """Send a signalling message originating from this server."""
        message = {
            'subject': subject,
            'sender': self.MY_ADDRESS,
            'receiver': receiver,
            'body': body
        }
        self.write_message(self.dumps(message))

    def on_close(self):
        super().on_close()
        # BUGFIX: the PeriodicCallback started in __init__ was never
        # stopped, so every closed connection kept polling redis forever
        # (leaking the callback and the pubsub connection).
        self.callback.stop()
        self.pubsub.close()
        if self.peer_id == RendezvousHandler.ROUTER.get('peerId'):
            RendezvousHandler.ROUTER.clear()
            app_log.info('%s (router) left', self.peer_id)
        else:
            app_log.info('%s left', self.peer_id)
class FileLister: def __init__(self, lister, groups, include_missing=False): self.lister = lister self.groups = groups self.include_missing = include_missing self.has_changed = False self.files = collections.OrderedDict() self.all_file_names = set() self.all_dir_names = set() self.executor = concurrent.futures.ThreadPoolExecutor(20) self.refresh() self.periodic = PeriodicCallback( lambda: self.background_refresh(False), 300000) #refresh file list every x seconds (in ms) self.periodic.start() def is_path_allowed(self, path): return path in self.all_file_names @run_on_executor() def background_refresh(self, initial=False): ''' Non blocking version of refresh (runs in a separate thread) When initial is False, remote directories are refreshed (which is slow) ''' self.refresh(initial) def refresh(self, initial=True): log.debug('refreshing group file listings') self.files = collections.OrderedDict() self.all_dir_names = set() for group, paths in self.groups.items(): files = self.files.setdefault(group, []) for path in paths: log.debug('checking path: {} {}'.format(group, path)) if is_ipaddress(group): if not initial and path.endswith('/'): self.all_dir_names.add(Path(path)) files.extend( self.lister.listdir_netpath(group, path, files_only=True)) #log.debug('ip {} added {}'.format(group, files)) elif not path.endswith('/'): files.append(path) #log.debug('ip {} added {}'.format(group, path)) else: if os.path.isdir(path): self.all_dir_names.add(path) files.extend(self.lister.listdir(path)) else: files.append(path) if is_ipaddress(group): self.files[group] = [('/{}{}'.format( group, '/{}'.format(file) if not file.startswith('/') else file), None, None) for file in files] else: self.files[group] = list( self.lister.statfiles(files, self.include_missing)) afn = (i[0] for values in self.files.values() for i in values) afn = {os.path.abspath(i) for i in afn} self.has_changed = (afn != self.all_file_names) self.all_file_names = afn
class WSHandler(tornado.websocket.WebSocketHandler):
    """Streams robot telemetry to the browser and receives setpoint / PID
    gain updates from it.

    All control state lives in module-level globals shared with the
    control loop (x1, x2, K*, commande*, socketOK, ...).
    """

    def open(self):
        global socketOK
        # BUGFIX: the original used Python-2 print statements, which are
        # syntax errors under the Python 3 this file otherwise targets.
        print('connection opened...')
        socketOK = True
        # Push telemetry to the client every 100 ms.
        self.callback = PeriodicCallback(self.sendToSocket, 100)
        self.callback.start()

    def on_message(self, message):
        """Apply setpoints / PID gains present in a JSON message."""
        global x1, x2, Kpvx, Kivx, Kdvx, Kpxi, Kixi, Kdxi
        jsonMessage = json.loads(message)

        # Only keys present in the message are applied ('is not None'
        # replaces the original '!= None' comparisons).
        if jsonMessage.get('vref') is not None:
            # longitudinal speed setpoint, sent as cm/s -> m/s
            x1 = float(jsonMessage.get('vref')) / 100
        if jsonMessage.get('xiref') is not None:
            # rotation speed setpoint, sent in degrees -> rad
            x2 = (float(jsonMessage.get('xiref'))) * 3.141592 / 180
        if jsonMessage.get('Kpvx') is not None:
            Kpvx = float(jsonMessage.get('Kpvx'))
        if jsonMessage.get('Kivx') is not None:
            Kivx = float(jsonMessage.get('Kivx'))
        if jsonMessage.get('Kdvx') is not None:
            Kdvx = float(jsonMessage.get('Kdvx'))
        if jsonMessage.get('Kpxi') is not None:
            Kpxi = float(jsonMessage.get('Kpxi'))
        if jsonMessage.get('Kixi') is not None:
            Kixi = float(jsonMessage.get('Kixi'))
        if jsonMessage.get('Kdxi') is not None:
            Kdxi = float(jsonMessage.get('Kdxi'))

        if not socketOK:
            # Connection already closed: zero the setpoints for safety.
            x1 = 0
            x2 = 0.

    def on_close(self):
        global socketOK, commandeDroit, commandeGauche
        print('connection closed...')
        socketOK = False
        # Stop the motors when the UI disconnects.
        commandeDroit = 0.
        commandeGauche = 0.

    def sendToSocket(self):
        """Serialize the current telemetry to JSON and push it to the
        client (called by the PeriodicCallback started in open())."""
        global started, codeurDroitDeltaPos, codeurGaucheDeltaPos, socketOK, \
            commandeDroit, commandeGauche, vxref, xiref, vxmes, ximes, \
            tensionAlim, distance, omegaDroit, omegaGauche
        tcourant = time.time() - T0
        aEnvoyer = json.dumps({
            'Temps': ("%.2f" % tcourant),
            'Consigne vitesse longitudinale': ("%.2f" % x1),
            'Consigne vitesse de rotation': ("%.2f" % (180 * x2 / 3.141592)),
            'Vitesse longitudinale': ("%.2f" % vxmes),
            'Vitesse de rotation': ("%.2f" % (180 * ximes / 3.141592)),
            'omegaDroit': ("%.2f" % omegaDroit),
            'omegaGauche': ("%.2f" % omegaGauche),
            'commandeDroit': ("%.2f" % commandeDroit),
            'commandeGauche': ("%.2f" % commandeGauche),
            'TensionAlim': ("%.2f" % tensionAlim),
            'distance': ("%.2f" % distance),
            # CSV rendering of the same values (identical bytes to the
            # original hand-built concatenation).
            'Raw': ",".join("%.2f" % val for val in (
                tcourant, x1, x2, vxmes, 180 * ximes / 3.141592,
                omegaDroit, omegaGauche, commandeDroit, commandeGauche,
                tensionAlim, distance))})
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except Exception:
                # Best effort (was a bare 'except:'): the socket may close
                # between the check and the write; dropping one telemetry
                # frame is harmless.
                pass

    def check_origin(self, origin):
        # Accept cross-origin websocket connections; see
        # http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        return True