def standalone(): """ Initializes Tornado and our application. Forks worker processes to handle requests. Does not return until all child processes exit normally. """ # Parse arguments parser = argparse.ArgumentParser(description="Ellis web server") parser.add_argument("--background", action="store_true", help="Detach and run server in background") args = parser.parse_args() # We don't initialize logging until we fork because we want each child to # have its own logging and it's awkward to reconfigure logging that is # defined by the parent. application = create_application() if args.background: # Get a new logfile, rotating the old one if present. err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log") try: os.rename(err_log_name, err_log_name + ".old") except OSError: pass # Fork into background. utils.daemonize(err_log_name) utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX) # Drop a pidfile. pid = os.getpid() with open(settings.PID_FILE, "w") as pidfile: pidfile.write(str(pid) + "\n") # Fork off a child process per core. In the parent process, the # fork_processes call blocks until the children exit. num_processes = settings.TORNADO_PROCESSES_PER_CORE * tornado.process.cpu_count() task_id = tornado.process.fork_processes(num_processes) if task_id is not None: logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, task_id) # We're a child process, start up. _log.info("Process %s starting up", task_id) connection.init_connection() http_server = httpserver.HTTPServer(application) unix_socket = bind_unix_socket(settings.HTTP_UNIX + "-" + str(task_id), 0666); http_server.add_socket(unix_socket) homestead.ping() background.start_background_worker_io_loop() io_loop = tornado.ioloop.IOLoop.instance() io_loop.start() else: # This shouldn't happen since the children should run their IOLoops # forever. _log.critical("Children all exited")
def standalone(start, num, pstn, realm): connection.init_connection() s = connection.Session() create_count = 0 if not start: start = 5108580271 if pstn else 6505550000 for x in xrange(num): if pstn: public_id = "sip:+1%d@%s" % (start + x, realm) else: public_id = "sip:%d@%s" % (start + x, realm) try: numbers.add_number_to_pool(s, public_id, pstn, False) except IntegrityError: # Entry already exists, not creating in db pass else: create_count += 1 s.commit() print "Created %d numbers, %d already present in database" % (create_count, num - create_count)
import json

from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient

from metaswitch.common import ifcs, utils
from metaswitch.ellis import logging_config
from metaswitch.ellis.data import numbers, connection
from metaswitch.ellis.remote import homestead, xdm
from metaswitch.ellis import settings

# NOTE(review): `logging` is used on the next line but is not imported in this
# chunk - confirm it is imported earlier in the file.
_log = logging.getLogger("ellis.create_numbers")

# Use the cURL-based HTTP client so many requests can be in flight at once.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient", max_clients=100)

connection.init_connection()
db_session = connection.Session()

# Module-level bookkeeping shared by the async handlers below.
pending_requests = 0       # count of outstanding HTTP requests
inconsistent_uris = set()  # SIP URIs found to be inconsistent between stores
stats = {"Assigned numbers in Ellis": 0,
         "Unassigned numbers in Ellis": 0,
         "Credentials & associations deleted": 0,
         "Simservs & iFCs deleted": 0,
         "Missing IFCs re-created": 0,
         "Errors": 0}


def create_get_handler(sip_uri, on_found=None, on_not_found=None):
    """
    Handler that asserts that a resource exists, executing the on_not_found
    handler if not
    """
def standalone():
    """
    Initializes Tornado and our application.

    Forks worker processes to handle requests. Does not return until all
    child processes exit normally.
    """
    # Parse arguments
    parser = argparse.ArgumentParser(description="Ellis web server")
    parser.add_argument("--background", action="store_true", help="Detach and run server in background")
    args = parser.parse_args()

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    # Bind the listening sockets before forking so every child shares them.
    listening_on_some_port = False
    http_sockets = None
    https_sockets = None

    if settings.ALLOW_HTTP:
        http_sockets = bind_sockets(settings.HTTP_PORT, address=settings.LOCAL_IP)
        listening_on_some_port = True

    # Only enable HTTPS if the certificate and key files are both present.
    if (os.path.exists(settings.TLS_CERTIFICATE) and
        os.path.exists(settings.TLS_PRIVATE_KEY)):
        https_sockets = bind_sockets(settings.HTTPS_PORT, address=settings.LOCAL_IP)
        listening_on_some_port = True

    if not listening_on_some_port:
        # We usually don't configure logging until after we fork but since
        # we're about to crash...
        logging_config.configure_logging("parent")
        _log.critical("Failed to listen on any ports.")
        raise Exception("Failed to listen on any ports")

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            # Best-effort rotation: absence of an old logfile is not an error.
            pass

        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Drop a pidfile.
    pid = os.getpid()
    with open(settings.PID_FILE, "w") as pidfile:
        pidfile.write(str(pid) + "\n")

    # Fork off a child process per core. In the parent process, the
    # fork_processes call blocks until the children exit.
    num_processes = settings.TORNADO_PROCESSES_PER_CORE * tornado.process.cpu_count()
    task_id = tornado.process.fork_processes(num_processes)
    if task_id is not None:
        # Non-None task_id means we are one of the forked children; configure
        # per-child logging keyed on the task id.
        logging_config.configure_logging(task_id)

        # We're a child process, start up.
        _log.info("Process %s starting up", task_id)

        connection.init_connection()

        if http_sockets:
            _log.info("Going to listen for HTTP on port %s", settings.HTTP_PORT)
            http_server = httpserver.HTTPServer(application)
            http_server.add_sockets(http_sockets)
        else:
            _log.info("Not starting HTTP, set ALLOW_HTTP in local_settings.py to enable HTTP.")

        if https_sockets:
            _log.info("Going to listen for HTTPS on port %s", settings.HTTPS_PORT)
            https_server = httpserver.HTTPServer(application, ssl_options={
                "certfile": settings.TLS_CERTIFICATE,
                "keyfile": settings.TLS_PRIVATE_KEY,
            })
            https_server.add_sockets(https_sockets)
        else:
            _log.critical("Not starting HTTPS")

        homestead.ping()
        background.start_background_worker_io_loop()

        # Runs forever; this child never returns from here in normal operation.
        io_loop = tornado.ioloop.IOLoop.instance()
        io_loop.start()
    else:
        # This shouldn't happen since the children should run their IOLoops
        # forever.
        _log.critical("Children all exited")
# of the source code repository by which you are accessing this code, then # the license outlined in that COPYING file applies to your use. # Otherwise no rights are granted except for those provided to you by # Metaswitch Networks in a separate written agreement. from metaswitch.ellis.data import connection def standalone(): db_sess = connection.Session() c = db_sess.execute("SELECT count(*) FROM numbers WHERE NOT pstn AND owner_id IS NULL;") non_pstn_avail_count = c.fetchone()[0] c = db_sess.execute("SELECT count(*) FROM numbers WHERE NOT pstn;") non_pstn_total_count = c.fetchone()[0] c = db_sess.execute("SELECT count(*) FROM numbers WHERE pstn AND owner_id IS NULL;") pstn_avail_count = c.fetchone()[0] c = db_sess.execute("SELECT count(*) FROM numbers WHERE pstn;") pstn_total_count = c.fetchone()[0] print "Available non-PSTN numbers %s" % non_pstn_avail_count print "Taken non-PSTN numbers %s" % (non_pstn_total_count - non_pstn_avail_count) print "Total non-PSTN numbers %s" % non_pstn_total_count print "" print "Available PSTN numbers %s" % pstn_avail_count print "Taken PSTN numbers %s" % (pstn_total_count - pstn_avail_count) print "Total PSTN numbers %s" % pstn_total_count if __name__ == '__main__': connection.init_connection() standalone()
def standalone(): """ Initializes Tornado and our application. Forks worker processes to handle requests. Does not return until all child processes exit normally. """ # Parse arguments parser = argparse.ArgumentParser(description="Ellis web server") parser.add_argument("--background", action="store_true", help="Detach and run server in background") parser.add_argument("--log-level", default=2, type=int) args = parser.parse_args() prctl.prctl(prctl.NAME, "ellis") # We don't initialize logging until we fork because we want each child to # have its own logging and it's awkward to reconfigure logging that is # defined by the parent. application = create_application() if args.background: # Get a new logfile, rotating the old one if present. err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log") try: os.rename(err_log_name, err_log_name + ".old") except OSError: pass # Fork into background. utils.daemonize(err_log_name) utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX) # Drop a pidfile. We must keep a reference to the file object here, as this keeps # the file locked and provides extra protection against two processes running at # once. pidfile_lock = None try: pidfile_lock = utils.lock_and_write_pid_file(settings.PID_FILE) # noqa except IOError: # We failed to take the lock - another process is already running exit(1) # Only run one process, not one per core - we don't need the performance # and this keeps everything in one log file prctl.prctl(prctl.NAME, "ellis") logging_config.configure_logging( utils.map_clearwater_log_level(args.log_level), settings.LOGS_DIR, settings.LOG_FILE_PREFIX) _log.info("Ellis process starting up") connection.init_connection() http_server = httpserver.HTTPServer(application) unix_socket = bind_unix_socket(settings.HTTP_UNIX, 0666) http_server.add_socket(unix_socket) homestead.ping() background.start_background_worker_io_loop() io_loop = tornado.ioloop.IOLoop.instance() io_loop.start()