def start_server(self):
    """Make Tornado app, start server and Tornado ioloop."""
    # collect the 'mongo.*' settings, strip the prefix and coerce the
    # values to their expected types
    dbenv = default_env.copy()
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v))
                 for k, v in self.cfg.iteritems() if k.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))
    retry_count = int(self.cfg.get('retry_count', 10))
    retry_interval = int(self.cfg.get('retry_interval', 10))
    # getting database
    mongo_client = None
    for i in range(retry_count):
        try:
            mongo_client = init_mongo(dbenv)
            break
        except Exception as ex:
            log.warn('mongo not available, try again in {0} secs. '
                     'Error: {1}'.format(retry_interval, ex))
            time.sleep(retry_interval)
    if not mongo_client:
        log.critical('Unable to connect to mongo, exiting ...')
        sys.exit(1)
    log.info('mongo server_info: {0}'.format(
        mongo_client.connection.server_info()))
    # ensure tracker and scenario_stub indexing
    create_tracker_collection(mongo_client)
    ensure_scenario_stub_indexes(mongo_client)

    # a differing slave/master pair means redis is running as a cluster
    slave, master = start_redis(self.cfg)
    self.cfg['is_cluster'] = False
    if slave != master:
        log.info('redis master is not the same as the slave')
        self.cfg['is_cluster'] = True
    self.cfg['ext_cache'] = init_ext_cache(self.cfg)

    tornado_app = self.get_app()
    log.info('Started with "{0}" config'.format(tornado_app.settings))
    server = tornado.httpserver.HTTPServer(tornado_app)
    server.conn_params.decompress = self.cfg['decompress_request']
    tornado_port = self.cfg['tornado.port']
    try:
        server.bind(tornado_port)
    except Exception:
        # see http://stackoverflow.com/questions/16153804/tornado-socket-error-on-arm
        server.bind(tornado_port, '0.0.0.0')
    server.start(self.cfg['num_processes'])
    max_process_workers = self.cfg.get('max_process_workers')
    if max_process_workers:
        max_process_workers = int(max_process_workers)
    tornado_app.settings['process_executor'] = ProcessPoolExecutor(
        max_process_workers)
    log.info('started with {0} worker processes'.format(
        tornado_app.settings['process_executor']._max_workers))
    cmd_queue = InternalCommandQueue()
    cmd_queue_poll_interval = self.cfg.get('cmd_queue_poll_interval',
                                           60 * 1000)
    tornado.ioloop.PeriodicCallback(cmd_queue.process,
                                    cmd_queue_poll_interval).start()
    tornado.ioloop.IOLoop.instance().start()
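start_server pulls everything it needs out of self.cfg. For orientation, here is a hypothetical config mapping covering the keys the function reads; the key names come from the code above, but every value is illustrative, and real deployments load them from an .ini file.

# Illustrative only: a config mapping with the keys start_server consumes.
# Key names are taken from the function above; all values are made up.
example_cfg = {
    'mongo.host': 'localhost',             # 'mongo.' prefix stripped, value coerced
    'mongo.port': '27017',                 # coerce_mongo_param turns this into an int
    'retry_count': 10,                     # mongo connection attempts
    'retry_interval': 10,                  # seconds between attempts
    'decompress_request': True,
    'tornado.port': 8001,
    'num_processes': 0,                    # 0 tells Tornado to fork one process per CPU
    'max_process_workers': 4,              # size of the ProcessPoolExecutor
    'cmd_queue_poll_interval': 60 * 1000,  # ms between InternalCommandQueue polls
}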
def test_coerce_mongo_param(self):
    from stubo.model.db import coerce_mongo_param
    self.assertEqual(8001, coerce_mongo_param('port', '8001'))
    self.assertEqual(8001, coerce_mongo_param('port', 8001))
    self.assertEqual(10, coerce_mongo_param('max_pool_size', '10'))
    self.assertEqual(True, coerce_mongo_param('tz_aware', 'true'))
    self.assertEqual(True, coerce_mongo_param('tz_aware', True))
    self.assertEqual(False, coerce_mongo_param('tz_aware', 0))
    self.assertEqual(0, coerce_mongo_param('bogus', 0))
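The assertions above pin down the contract of coerce_mongo_param: numeric connection parameters are coerced to int, tz_aware to bool (accepting the string 'true'), and unknown keys pass through untouched. A minimal sketch consistent with those assertions follows; the real implementation lives in stubo.model.db and may handle more parameters.

# A minimal sketch of coerce_mongo_param, inferred from the test above.
def coerce_mongo_param(key, value):
    int_params = ('port', 'max_pool_size')   # numeric connection params
    bool_params = ('tz_aware',)              # boolean connection params
    if key in int_params:
        return int(value)
    if key in bool_params:
        # accept 'true'/'false' strings as well as real booleans and ints
        if isinstance(value, basestring):
            return value.strip().lower() == 'true'
        return bool(value)
    return value                             # unknown keys pass through unchanged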
def purge_stubs():
    # import the helper handler from the testing deps
    from stubo.testing import DummyRequestHandler
    parser = ArgumentParser(
        description="Purge stubs older than given expiry date.")
    parser.add_argument('-l', '--list', action='store_const', const=True,
                        dest='list_only',
                        help="Just list the stubs to delete.")
    parser.add_argument('-e', '--expiry', default=14, dest='expiry',
                        help="expiry is number of days from now "
                             "(default is 14).")
    parser.add_argument('--host', default='all', dest='host',
                        help="specify the host uri to use (defaults to all)")
    parser.add_argument('-c', '--config', dest='config',
                        help='Path to configuration file '
                             '(defaults to $CWD/etc/dev.ini)',
                        metavar='FILE')
    args = parser.parse_args()
    list_only = args.list_only or False
    expiry_days = args.expiry
    expiry = datetime.today().date() - timedelta(int(expiry_days))
    host = args.host
    config = args.config or get_default_config()
    logging.config.fileConfig(config)
    settings = read_config(config)
    dbenv = default_env.copy()
    dbenv.update((k[6:], coerce_mongo_param(k[6:], v))
                 for k, v in settings.iteritems() if k.startswith('mongo.'))
    log.debug('mongo params: {0}'.format(dbenv))
    log.info('purge stubs whereby all sessions in the scenario were last '
             'used before {0}'.format(expiry))
    db_conn = init_mongo(dbenv).connection
    slave, master = start_redis(settings)
    response = list_scenarios(host)
    if 'error' in response:
        print response['error']
        sys.exit(-1)
    handler = DummyRequestHandler()
    session_handler = DummyRequestHandler()
    for scenario_key in response['data']['scenarios']:
        log.debug("*** scenario '{0}' ***".format(scenario_key))
        hostname, scenario = scenario_key.split(':')
        if host != 'all' and host != hostname:
            continue
        handler.host = hostname
        handler.request.host = '{0}:8001'.format(hostname)
        session_handler.host = hostname
        session_handler.request.host = '{0}:8001'.format(hostname)
        handler.request.arguments['scenario'] = [scenario]
        status = get_status(handler)
        if 'error' in status:
            log.warn('get_status error: {0}'.format(status['error']))
        else:
            # find the most recent 'last_used' across the scenario's sessions
            scenario_last_used = []
            sessions = status['data']['sessions']
            for session in zip(*sessions)[0]:
                log.debug("*** -> session '{0}' ***".format(session))
                session_handler.request.arguments['session'] = [session]
                session_status = get_status(session_handler)
                if 'error' in session_status:
                    log.warn('get_status error: {0}'.format(
                        session_status['error']))
                else:
                    last_used = session_status['data']['session'].get(
                        'last_used', '-')
                    if last_used != '-':
                        # keep just the date prefix of the timestamp
                        scenario_last_used.append(as_date(last_used[0:10]))
            if scenario_last_used and max(scenario_last_used) < expiry:
                log.info("sessions in scenario '{0}' were last used '{1}' "
                         "which is before expiry date '{2}'".format(
                             scenario_key, max(scenario_last_used), expiry))
                if not list_only:
                    response = delete_stubs(handler, scenario_name=scenario,
                                            force=True)
                    if 'error' in response:
                        log.error('delete stubs error: {0}'.format(
                            response['error']))
                    else:
                        log.info('deleted stubs: {0}'.format(
                            response['data']))
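purge_stubs leans on an as_date helper to turn the first ten characters of a 'last_used' timestamp into a date for the expiry comparison. A minimal sketch, assuming the timestamp carries an ISO 'YYYY-MM-DD' prefix (the real helper may differ):

# A minimal sketch of the as_date helper used above, assuming 'last_used'
# starts with an ISO 'YYYY-MM-DD' date; hypothetical, not the real helper.
from datetime import datetime

def as_date(date_str):
    # parse 'YYYY-MM-DD' into a datetime.date for comparison with expiry
    return datetime.strptime(date_str, '%Y-%m-%d').date()

With a console-script entry point wired up, a dry run over stubs idle for 30 days might look like: purge_stubs --list -e 30 -c etc/dev.ini (invocation hypothetical).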