def start_server(self):
    """Make Tornado app, start server and Tornado ioloop.

    Reads mongo/redis/tornado settings from ``self.cfg``, retries the
    initial mongo connection up to ``retry_count`` times, ensures the
    tracker and scenario_stub collections are indexed, then binds the
    HTTP server and enters the IOLoop.  Exits the process with status 1
    if mongo never becomes available.
    """
    dbenv = default_env.copy()
    # Strip the "mongo." prefix from config keys and coerce the values
    # to the types pymongo expects.  (.items() instead of the Py2-only
    # .iteritems() -- identical behavior here, Py3-compatible.)
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v))
                 for k, v in self.cfg.items() if k.startswith("mongo."))
    log.debug("mongo params: {0}".format(dbenv))

    retry_count = int(self.cfg.get("retry_count", 10))
    retry_interval = int(self.cfg.get("retry_interval", 10))

    # getting database
    mongo_client = None
    for attempt in range(retry_count):
        try:
            mongo_client = init_mongo(dbenv)
            break
        except Exception as ex:
            log.warning("mongo not available, try again in {0} secs. "
                        "Error: {1}".format(retry_interval, ex))
            # Don't sleep after the final attempt -- there is nothing
            # left to retry, we are about to exit.
            if attempt < retry_count - 1:
                time.sleep(retry_interval)
    if not mongo_client:
        log.critical("Unable to connect to mongo, exiting ...")
        sys.exit(1)
    log.info("mongo server_info: {0}".format(
        mongo_client.connection.server_info()))

    # ensure tracker and scenario_stub indexing
    create_tracker_collection(mongo_client)
    ensure_scenario_stub_indexes(mongo_client)

    slave, master = start_redis(self.cfg)
    self.cfg["is_cluster"] = False
    if slave != master:
        log.info("redis master is not the same as the slave")
        self.cfg["is_cluster"] = True
    self.cfg["ext_cache"] = init_ext_cache(self.cfg)

    tornado_app = self.get_app()
    log.info('Started with "{0}" config'.format(tornado_app.settings))
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.conn_params.decompress = self.cfg["decompress_request"]
    tornado_port = self.cfg["tornado.port"]
    try:
        server.bind(tornado_port)
    except Exception:
        # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
        server.bind(tornado_port, "0.0.0.0")
    server.start(self.cfg["num_processes"])

    max_process_workers = self.cfg.get("max_process_workers")
    if max_process_workers:
        max_process_workers = int(max_process_workers)
    # A falsy value (None) makes ProcessPoolExecutor default to cpu_count().
    tornado_app.settings["process_executor"] = ProcessPoolExecutor(max_process_workers)
    log.info("started with {0} worker processes".format(
        tornado_app.settings["process_executor"]._max_workers))

    cmd_queue = InternalCommandQueue()
    cmd_queue_poll_interval = self.cfg.get("cmd_queue_poll_interval", 60 * 1000)
    tornado.ioloop.PeriodicCallback(cmd_queue.process,
                                    cmd_queue_poll_interval).start()
    tornado.ioloop.IOLoop.instance().start()
def start_server(self):
    """Make Tornado app, start server and Tornado ioloop.

    Reads mongo/redis/tornado settings from ``self.cfg``, retrying the
    initial mongo connection ``retry_count`` times before giving up and
    exiting the process with status 1.
    """
    dbenv = default_env.copy()
    # Strip the "mongo." prefix from config keys and coerce the values
    # to the types pymongo expects.  (.items() instead of the Py2-only
    # .iteritems() -- identical behavior here, Py3-compatible.)
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v))
                 for k, v in self.cfg.items() if k.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))

    retry_count = int(self.cfg.get('retry_count', 10))
    retry_interval = int(self.cfg.get('retry_interval', 10))

    mongo_client = None
    for attempt in range(retry_count):
        try:
            mongo_client = init_mongo(dbenv)
            break
        # Was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt and hid the failure reason; catch Exception
        # and include the error in the log message instead.
        except Exception as ex:
            log.warning('mongo not available, try again in {0} '
                        'secs. Error: {1}'.format(retry_interval, ex))
            # No point sleeping after the final attempt.
            if attempt < retry_count - 1:
                time.sleep(retry_interval)
    if not mongo_client:
        log.critical('Unable to connect to mongo, exiting ...')
        sys.exit(1)
    log.info('mongo server_info: {0}'.format(
        mongo_client.connection.server_info()))

    slave, master = start_redis(self.cfg)
    self.cfg['is_cluster'] = False
    if slave != master:
        log.info('redis master is not the same as the slave')
        self.cfg['is_cluster'] = True
    self.cfg['ext_cache'] = init_ext_cache(self.cfg)

    tornado_app = self.get_app()
    log.info('Started with "{0}" config'.format(tornado_app.settings))
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.conn_params.decompress = self.cfg['decompress_request']
    tornado_port = self.cfg['tornado.port']
    try:
        server.bind(tornado_port)
    except Exception:
        # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
        server.bind(tornado_port, '0.0.0.0')
    server.start(self.cfg['num_processes'])

    max_process_workers = self.cfg.get('max_process_workers')
    if max_process_workers:
        max_process_workers = int(max_process_workers)
    # A falsy value (None) makes ProcessPoolExecutor default to cpu_count().
    tornado_app.settings['process_executor'] = ProcessPoolExecutor(max_process_workers)
    log.info('started with {0} worker processes'.format(
        tornado_app.settings['process_executor']._max_workers))

    cmd_queue = InternalCommandQueue()
    cmd_queue_poll_interval = self.cfg.get('cmd_queue_poll_interval', 60 * 1000)
    tornado.ioloop.PeriodicCallback(cmd_queue.process,
                                    cmd_queue_poll_interval).start()
    tornado.ioloop.IOLoop.instance().start()
def get_app(self):
    """Build a stubo Tornado application backed by a throw-away test database.

    Points redis config at a local instance, seeds a capped ``tracker``
    collection in a fresh mongo test db, attaches a motor client, and
    returns the app created by ``TornadoManager``.
    """
    from tornado.ioloop import IOLoop
    from stubo.service.run_stubo import TornadoManager
    from stubo.utils import init_mongo, start_redis, init_ext_cache
    import motor

    self.testdb = testdb_name()
    test_settings = {
        'redis.host': '127.0.0.1',
        'redis.port': 6379,
        'redis.db': 9,
        'redis_master.host': '127.0.0.1',
        'redis_master.port': 6379,
        'redis_master.db': 9,
        'request_cache_limit': 10,
    }
    self.cfg.update(test_settings)

    self.db = init_mongo({'tz_aware': True, 'db': self.testdb})
    # "tracker" is created as a fixed-size capped collection.
    self.db.create_collection("tracker", capped=True, size=100000)
    self.db.tracker.create_index('start_time', -1)

    # add motor driver
    motor_client = motor.MotorClient()
    self.mdb = motor_client[self.testdb]
    self.cfg.update({'mdb': self.mdb})

    # install() asserts that its not been initialised so setting it directly
    # self.io_loop.install()
    IOLoop._instance = self.io_loop

    manager = TornadoManager(os.environ.get('STUBO_CONFIG_FILE_PATH'))
    self.redis_server, _ = start_redis(self.cfg)
    manager.cfg['ext_cache'] = init_ext_cache(self.cfg)
    manager.cfg['mongo.db'] = self.testdb
    manager.cfg.update(self.cfg)

    self.app = manager.get_app()
    from concurrent.futures import ProcessPoolExecutor
    self.app.settings['process_executor'] = ProcessPoolExecutor()
    return self.app
def get_app(self):
    """Return a stubo Tornado app wired to an isolated test environment.

    Uses a local redis, a fresh mongo test database with a capped
    ``tracker`` collection, and a motor client, then delegates app
    construction to ``TornadoManager``.
    """
    from tornado.ioloop import IOLoop
    from stubo.service.run_stubo import TornadoManager
    from stubo.utils import init_mongo, start_redis, init_ext_cache
    import motor

    self.testdb = testdb_name()
    self.cfg.update({
        'redis.host': '127.0.0.1',
        'redis.port': 6379,
        'redis.db': 9,
        'redis_master.host': '127.0.0.1',
        'redis_master.port': 6379,
        'redis_master.db': 9,
        'request_cache_limit': 10,
    })

    mongo_opts = {'tz_aware': True, 'db': self.testdb}
    self.db = init_mongo(mongo_opts)
    # Fixed-size capped collection for tracker records.
    self.db.create_collection("tracker", capped=True, size=100000)
    self.db.tracker.create_index('start_time', -1)

    # add motor driver
    self.mdb = motor.MotorClient()[self.testdb]
    self.cfg.update({'mdb': self.mdb})

    # install() asserts that its not been initialised so setting it directly
    # self.io_loop.install()
    IOLoop._instance = self.io_loop

    tornado_mgr = TornadoManager(os.environ.get('STUBO_CONFIG_FILE_PATH'))
    self.redis_server, _ = start_redis(self.cfg)
    tornado_mgr.cfg['ext_cache'] = init_ext_cache(self.cfg)
    tornado_mgr.cfg['mongo.db'] = self.testdb
    tornado_mgr.cfg.update(self.cfg)

    app = tornado_mgr.get_app()
    self.app = app
    from concurrent.futures import ProcessPoolExecutor
    self.app.settings['process_executor'] = ProcessPoolExecutor()
    return app
def start_server(self):
    """Build the Tornado application and run it on the IOLoop.

    Pulls mongo/redis/tornado settings out of ``self.cfg``, waits for
    mongo to come up (exiting with status 1 if it never does), makes
    sure the tracker and scenario_stub collections are indexed, then
    binds the HTTP server and enters the IOLoop.
    """
    dbenv = default_env.copy()
    # config keys prefixed "mongo." become mongo connection parameters
    dbenv.update((key[6:], coerce_mongo_param(key[6:], value))
                 for key, value in self.cfg.iteritems()
                 if key.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))

    attempts = int(self.cfg.get('retry_count', 10))
    wait_secs = int(self.cfg.get('retry_interval', 10))

    # getting database
    mongo_client = None
    for _ in range(attempts):
        try:
            mongo_client = init_mongo(dbenv)
        except Exception as ex:
            log.warn('mongo not available, try again in {0} secs. '
                     'Error: {1}'.format(wait_secs, ex))
            time.sleep(wait_secs)
        else:
            break
    if mongo_client is None:
        log.critical('Unable to connect to mongo, exiting ...')
        sys.exit(1)
    log.info('mongo server_info: {0}'.format(
        mongo_client.connection.server_info()))

    # ensure tracker and scenario_stub indexing
    create_tracker_collection(mongo_client)
    ensure_scenario_stub_indexes(mongo_client)

    redis_slave, redis_master = start_redis(self.cfg)
    self.cfg['is_cluster'] = redis_slave != redis_master
    if self.cfg['is_cluster']:
        log.info('redis master is not the same as the slave')
    self.cfg['ext_cache'] = init_ext_cache(self.cfg)

    app = self.get_app()
    log.info('Started with "{0}" config'.format(app.settings))
    http_server = tornado.httpserver.HTTPServer(app)
    http_server.conn_params.decompress = self.cfg['decompress_request']
    port = self.cfg['tornado.port']
    try:
        http_server.bind(port)
    except Exception:
        # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
        http_server.bind(port, '0.0.0.0')
    http_server.start(self.cfg['num_processes'])

    workers = self.cfg.get('max_process_workers')
    workers = int(workers) if workers else workers
    executor = ProcessPoolExecutor(workers)
    app.settings['process_executor'] = executor
    log.info('started with {0} worker processes'.format(executor._max_workers))

    poll_interval = self.cfg.get('cmd_queue_poll_interval', 60 * 1000)
    tornado.ioloop.PeriodicCallback(InternalCommandQueue().process,
                                    poll_interval).start()
    tornado.ioloop.IOLoop.instance().start()