def delete_module_request(handler):
    """Delete the modules named in the request's 'name' arguments.

    Queues a 'delete/module' command for every named module so the
    deletion propagates beyond the executing host, then performs the
    local delete and returns its result.
    """
    module_names = handler.get_arguments('name')
    log.debug('names: {0}'.format(module_names))
    command_queue = InternalCommandQueue()
    # Note: delete and unload from all slaves not just the executing one
    for module_name in module_names:
        command_queue.add(handler.track.host,
                          'delete/module?name={0}'.format(module_name))
    return delete_module(handler.request, module_names)
def delete_modules_request(handler):
    """Delete every currently-listed module.

    Looks up all known module names via ``list_module``, queues a
    'delete/module' command for each so the deletion reaches every
    host, then performs the local delete and returns its result.
    """
    listing = list_module(handler, None)
    all_names = listing['data']['info'].keys()
    log.debug('names: {0}'.format(all_names))
    command_queue = InternalCommandQueue()
    # Note: delete and unload from all slaves not just the executing one
    for module_name in all_names:
        command_queue.add(handler.track.host,
                          'delete/module?name={0}'.format(module_name))
    return delete_module(handler.request, all_names)
def start_server(self):
    """Make Tornado app, start server and Tornado ioloop.

    Startup sequence:
      1. Build mongo connection params from ``mongo.*`` config keys and
         connect, retrying on failure; exit the process if mongo never
         comes up.
      2. Start redis and mark the config as a cluster when the slave and
         master endpoints differ; initialise the external cache.
      3. Create the Tornado application and HTTP server, bind the port,
         fork worker processes, attach a ProcessPoolExecutor, schedule
         the internal command queue poller, and enter the ioloop.

    Blocks until the ioloop stops. May call ``sys.exit(1)`` on mongo
    connection failure.
    """
    # Strip the 'mongo.' prefix from config keys and coerce values to the
    # types pymongo expects, layered over the defaults.
    dbenv = default_env.copy()
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v)) for k, v in \
        self.cfg.iteritems() if k.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))
    retry_count = int(self.cfg.get('retry_count', 10))
    retry_interval = int(self.cfg.get('retry_interval', 10))
    # getting database
    mongo_client = None
    for i in range(retry_count):
        try:
            mongo_client = init_mongo(dbenv)
            break
        except Exception as ex:
            # Mongo may still be starting up; wait and retry.
            log.warn('mongo not available, try again in {0} secs. Error: {1}'.format(retry_interval, ex))
            time.sleep(retry_interval)
    if not mongo_client:
        # All retries exhausted — the server cannot run without mongo.
        log.critical('Unable to connect to mongo, exiting ...')
        sys.exit(1)
    log.info('mongo server_info: {0}'.format(
        mongo_client.connection.server_info()))
    slave, master = start_redis(self.cfg)
    self.cfg['is_cluster'] = False
    if slave != master:
        # Distinct master/slave endpoints imply a redis cluster deployment.
        log.info('redis master is not the same as the slave')
        self.cfg['is_cluster'] = True
    self.cfg['ext_cache'] = init_ext_cache(self.cfg)
    tornado_app = self.get_app()
    log.info('Started with "{0}" config'.format(tornado_app.settings))
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.conn_params.decompress = self.cfg['decompress_request']
    tornado_port = self.cfg['tornado.port']
    try:
        server.bind(tornado_port)
    except Exception:
        # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
        server.bind(tornado_port, '0.0.0.0')
    # Fork the configured number of worker processes.
    server.start(self.cfg['num_processes'])
    max_process_workers = self.cfg.get('max_process_workers')
    if max_process_workers:
        max_process_workers = int(max_process_workers)
    # None => ProcessPoolExecutor defaults to the machine's CPU count.
    tornado_app.settings['process_executor'] = ProcessPoolExecutor(max_process_workers)
    log.info('started with {0} worker processes'.format(tornado_app.settings['process_executor']._max_workers))
    # Poll the internal command queue periodically (default every 60s;
    # interval is in milliseconds as PeriodicCallback expects).
    cmd_queue = InternalCommandQueue()
    cmd_queue_poll_interval = self.cfg.get('cmd_queue_poll_interval', 60 * 1000)
    tornado.ioloop.PeriodicCallback(cmd_queue.process, cmd_queue_poll_interval).start()
    tornado.ioloop.IOLoop.instance().start()
def _get_cmdq(self):
    """Build an InternalCommandQueue bound to this object's redis server.

    The import is deferred to call time to avoid a module-level
    import cycle with stubo.utils.command_queue.
    """
    from stubo.utils.command_queue import InternalCommandQueue
    queue = InternalCommandQueue(self.redis_server)
    return queue