Example #1
def standalone():
    """
    Entry point to script
    """
    logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, "sync_databases")
    check_existing_uris()
    remove_nonexisting_uris()
    IOLoop.instance().start()
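
Every example on this page calls configure_logging from metaswitch.common.logging_config with a log level, a logs directory, a file prefix and, optionally, a task id and a show_thread flag. The helper itself is not shown here; the sketch below is only a plausible reconstruction inferred from the call sites (and from the filename pattern checked in Example #7), not the project's actual implementation.

import logging
import os
from datetime import datetime

def configure_logging(level, log_dir, log_prefix, task_id=None, show_thread=False):
    # Rough sketch only - the real helper lives in metaswitch.common.logging_config.
    name = log_prefix if task_id is None else "%s-%s" % (log_prefix, task_id)
    timestamp = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
    fmt = "%(asctime)s %(levelname)s %(message)s"
    if show_thread:
        fmt = "%(asctime)s %(levelname)s [%(threadName)s] %(message)s"
    handler = logging.FileHandler(os.path.join(log_dir, "%s_%s.txt" % (name, timestamp)))
    handler.setFormatter(logging.Formatter(fmt))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(handler)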
Example #2
def main(args):
    syslog.openlog("config-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "config-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    logging_config.configure_logging(log_level, log_dir, "config-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("config-manager")

    # Drop a pidfile.
    pid = os.getpid()
    with open(arguments['--pidfile'], "w") as pidfile:
        pidfile.write(str(pid) + "\n")

    plugins_dir = "/usr/share/clearwater/clearwater-config-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir)
    plugins.sort(key=lambda x: x.key())
    threads = []

    files = [p.file() for p in plugins]
    alarm = ConfigAlarm(files)

    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, alarm)
        thread = Thread(target=syncer.main, name=plugin.__class__.__name__)
        thread.start()

        threads.append(thread)
        _log.info("Loaded plugin %s" % plugin)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("Clearwater Configuration Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()
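
This example (and the other docopt-based managers below) looks the --log-level argument up in a LOG_LEVELS dictionary, falling back to logging.DEBUG for unrecognised values. The dictionary itself does not appear on this page; since docopt yields string values, a mapping along the following lines is assumed purely for illustration:

import logging

# Assumed mapping from the textual --log-level values to Python logging
# levels; unknown values fall back to logging.DEBUG via LOG_LEVELS.get().
LOG_LEVELS = {"0": logging.ERROR,
              "1": logging.WARNING,
              "2": logging.INFO,
              "3": logging.INFO,
              "4": logging.DEBUG}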
Example #3
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """

    # Parse arguments
    parser = argparse.ArgumentParser(description="Ellis web server")
    parser.add_argument("--background", action="store_true", help="Detach and run server in background")
    args = parser.parse_args()

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Drop a pidfile.
    pid = os.getpid()
    with open(settings.PID_FILE, "w") as pidfile:
        pidfile.write(str(pid) + "\n")

    # Fork off a child process per core.  In the parent process, the
    # fork_processes call blocks until the children exit.
    num_processes = settings.TORNADO_PROCESSES_PER_CORE * tornado.process.cpu_count()
    task_id = tornado.process.fork_processes(num_processes)
    if task_id is not None:
        logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, task_id)
        # We're a child process, start up.
        _log.info("Process %s starting up", task_id)
        connection.init_connection()

        http_server = httpserver.HTTPServer(application)
        unix_socket = bind_unix_socket(settings.HTTP_UNIX + "-" + str(task_id),
                                       0666)
        http_server.add_socket(unix_socket)

        homestead.ping()
        background.start_background_worker_io_loop()
        io_loop = tornado.ioloop.IOLoop.instance()
        io_loop.start()
    else:
        # This shouldn't happen since the children should run their IOLoops
        # forever.
        _log.critical("Children all exited")
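
Several of these examples detach from the terminal with utils.daemonize(err_log_name) before configuring logging. The real helper lives in metaswitch.common.utils and is not shown on this page; the sketch below illustrates the standard double-fork pattern it presumably follows, with stdout and stderr redirected to the given log file.

import os
import sys

def daemonize(log_file):
    # Sketch of classic double-fork daemonization: detach from the
    # controlling terminal and send stdout/stderr to log_file.
    if os.fork() > 0:
        os._exit(0)          # parent of the first fork exits
    os.setsid()              # become session leader
    if os.fork() > 0:
        os._exit(0)          # first child exits, grandchild carries on
    out = open(log_file, "a+")
    os.dup2(out.fileno(), sys.stdout.fileno())
    os.dup2(out.fileno(), sys.stderr.fileno())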
Example #4
def standalone(args):
    global io_loop
    global server_prefix
    global get_rate
    configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, "stresstool")
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger("crest").setLevel(logging.DEBUG)
    _log.info("Starting stress against %s", args.server)
    server_prefix = "http://%s" % args.server
    get_rate = args.rate
    io_loop = IOLoop.instance()
    io_loop.add_callback(simulate_xdm_gets)
    io_loop.add_callback(print_histograms)
    io_loop.start()
Example #5
    def testLogging(self, mock_open):
        """Test that configure_logging() will result in writing to file."""
        configure_logging(logging.DEBUG,
                          ".",
                          "test_log_prefix",
                          task_id="Fred",
                          show_thread=True)

        args, kwargs = mock_open.call_args

        filename_re = r"\./test_log_prefix-Fred_\d*T\d*Z\.txt"
        match = re.compile(filename_re).match(args[0])
        self.assertIsNotNone(match, msg="Unexpected log file name")
        self.assertEqual(args[1], 65, msg="Unexpected log file open flags")
        self.assertEqual(args[2], 420, msg="Unexpected log file open mode")
Example #6
def standalone(args):
    global io_loop
    global server_prefix
    global get_rate
    configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR,
                      settings.LOG_FILE_PREFIX, "stresstool")
    logging.getLogger().setLevel(logging.INFO)
    logging.getLogger("crest").setLevel(logging.DEBUG)
    _log.info("Starting stress against %s", args.server)
    server_prefix = "http://%s" % args.server
    get_rate = args.rate
    io_loop = IOLoop.instance()
    io_loop.add_callback(simulate_xdm_gets)
    io_loop.add_callback(print_histograms)
    io_loop.start()
Example #7
    def testLogging(self, mock_open):
        """Test that configure_logging() will result in writing to file."""
        configure_logging(logging.DEBUG,
                          ".",
                          "test_log_prefix",
                          task_id="Fred",
                          show_thread=True)

        args, kwargs = mock_open.call_args

        filename_re = r"\./test_log_prefix-Fred_\d*T\d*Z\.txt"
        match = re.compile(filename_re).match(args[0])
        self.assertIsNotNone(match, msg="Unexpected log file name")
        self.assertEqual(args[1], 65, msg="Unexpected log file open flags")
        self.assertEqual(args[2], 420, msg="Unexpected log file open mode")
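
The two magic numbers asserted above are easier to read once decoded: on Linux, 65 is os.O_WRONLY | os.O_CREAT (1 + 64), i.e. the log file is opened write-only and created if missing, and 420 is the decimal form of the permission mode 0o644 (rw-r--r--):

import os

# Linux flag values assumed; O_CREAT differs on some other platforms.
print(os.O_WRONLY | os.O_CREAT)   # 65
print(0o644)                      # 420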
Example #8
# Special Exception
# Metaswitch Networks Ltd  grants you permission to copy, modify,
# propagate, and distribute a work formed by combining OpenSSL with The
# Software, or a work derivative of such a combination, even if such
# copying, modification, propagation, or distribution would otherwise
# violate the terms of the GPL. You must comply with the GPL in all
# respects for all of the code used other than OpenSSL.
# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
# Project and licensed under the OpenSSL Licenses, or a work based on such
# software and licensed under the OpenSSL Licenses.
# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
# under which the OpenSSL Project distributes the OpenSSL toolkit software,
# as those licenses appear in the file LICENSE-OPENSSL.


import logging

from metaswitch.crest.tools import db
from metaswitch.common import logging_config
from metaswitch.crest import settings

_log = logging.getLogger("crest.create_db")

def standalone():
    db.create_tables(_log)

if __name__ == '__main__':
    logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, "create_db")

    standalone()
Example #9
def main(args):
    syslog.openlog("cluster-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    mgmt_ip = arguments['--mgmt-local-ip']
    sig_ip = arguments['--sig-local-ip']
    local_site_name = arguments['--local-site']
    remote_site_name = arguments['--remote-site']
    remote_cassandra_seeds = arguments['--remote-cassandra-seeds']
    if remote_cassandra_seeds:
        remote_cassandra_seeds = remote_cassandra_seeds.split(',')
    else:
        remote_cassandra_seeds = []
    signaling_namespace = arguments.get('--signaling-namespace')
    local_uuid = UUID(arguments['--uuid'])
    etcd_key = arguments.get('--etcd-key')
    etcd_cluster_key = arguments.get('--etcd-cluster-key')
    cluster_manager_enabled = arguments['--cluster-manager-enabled']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "cluster-manager.output.log")

    # Check that there's an etcd_cluster_key value passed to the cluster
    # manager
    if etcd_cluster_key == "":
        # The etcd_cluster_key isn't valid, and we could end up with weird
        # entries in the etcd database if we allowed the cluster manager to
        # start
        pdlogs.EXITING_MISSING_ETCD_CLUSTER_KEY.log()
        exit(1)

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-cluster-mgr")

    logging_config.configure_logging(log_level, log_dir, "cluster-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("cluster-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-cluster-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir,
                                  PluginParams(ip=sig_ip,
                                               mgmt_ip=mgmt_ip,
                                               local_site=local_site_name,
                                               remote_site=remote_site_name,
                                               remote_cassandra_seeds=remote_cassandra_seeds,
                                               signaling_namespace=signaling_namespace,
                                               uuid=local_uuid,
                                               etcd_key=etcd_key,
                                               etcd_cluster_key=etcd_cluster_key))
    plugins.sort(key=lambda x: x.key())
    plugins_to_use = []
    files = []
    for plugin in plugins:
        # Reset per plugin so one skipped plugin doesn't suppress the rest
        skip = False
        for plugin_file in plugin.files():
            if plugin_file in files:
                _log.info("Skipping plugin {} because {} "
                          "is already managed by another plugin"
                          .format(plugin, plugin_file))
                skip = True

        if not skip:
            plugins_to_use.append(plugin)
            files.extend(plugin.files())

    synchronizers = []
    threads = []

    if cluster_manager_enabled == "N":
        # Don't start any threads as we don't want the cluster manager to run
        pdlogs.DO_NOT_START.log()
    elif etcd_cluster_key == "DO_NOT_CLUSTER":
        # Don't start any threads as we don't want this box to cluster
        pdlogs.DO_NOT_CLUSTER.log()
    else:
        for plugin in plugins_to_use:
            syncer = EtcdSynchronizer(plugin, sig_ip, etcd_ip=mgmt_ip)
            syncer.start_thread()

            synchronizers.append(syncer)
            threads.append(syncer.thread)
            _log.info("Loaded plugin %s" % plugin)


    install_sigquit_handler(synchronizers)
    utils.install_sigterm_handler(synchronizers)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("No plugin threads running, waiting for a SIGTERM or SIGQUIT")
    while not utils.should_quit and not should_quit:
        sleep(1)
    _log.info("Quitting")
    _log.debug("%d threads outstanding at exit" % activeCount())
    pdlogs.EXITING.log()
    syslog.closelog()
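
Unlike Example #2, this version takes the pidfile through utils.lock_and_write_pid_file and keeps the returned object alive so the file stays locked. The helper is not shown on this page; a minimal sketch of the idea, assuming a POSIX advisory lock via fcntl, might look like this:

import fcntl
import os

def lock_and_write_pid_file(filename):
    # Illustrative sketch: take an exclusive, non-blocking lock on the
    # pidfile, so a second instance fails with IOError immediately.
    pidfile = open(filename, "a+")
    fcntl.lockf(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
    pidfile.truncate(0)
    pidfile.write(str(os.getpid()) + "\n")
    pidfile.flush()
    # Keep this object alive in the caller - closing it releases the lock.
    return pidfile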
Example #10
def main(args):
    syslog.openlog("queue-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    node_type = arguments['--node-type']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)
    wait_plugin_complete = arguments['--wait-plugin-complete']

    stdout_err_log = os.path.join(log_dir, "queue-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-queue-mgr")

    logging_config.configure_logging(log_level, log_dir, "queue-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("queue-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-queue-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir,
                                  PluginParams(wait_plugin_complete=wait_plugin_complete))
    plugins.sort(key=lambda x: x.key())
    synchronizers = []
    threads = []

    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, etcd_key, node_type)
        syncer.start_thread()

        synchronizers.append(syncer)
        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    utils.install_sigterm_handler(synchronizers)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    while not utils.should_quit:
        sleep(1)

    _log.info("Clearwater Queue Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()
Example #11
def main(args):
    syslog.openlog("cluster-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    mgmt_ip = arguments['--mgmt-local-ip']
    sig_ip = arguments['--sig-local-ip']
    local_site_name = arguments['--local-site']
    remote_site_name = arguments['--remote-site']
    remote_cassandra_seeds = arguments['--remote-cassandra-seeds']
    if remote_cassandra_seeds:
        remote_cassandra_seeds = remote_cassandra_seeds.split(',')
    else:
        remote_cassandra_seeds = []
    signaling_namespace = arguments.get('--signaling-namespace')
    local_uuid = UUID(arguments['--uuid'])
    etcd_key = arguments.get('--etcd-key')
    etcd_cluster_key = arguments.get('--etcd-cluster-key')
    cluster_manager_enabled = arguments['--cluster-manager-enabled']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "cluster-manager.output.log")

    # Check that there's an etcd_cluster_key value passed to the cluster
    # manager
    if etcd_cluster_key == "":
        # The etcd_cluster_key isn't valid, and we could end up with weird
        # entries in the etcd database if we allowed the cluster manager to
        # start
        pdlogs.EXITING_MISSING_ETCD_CLUSTER_KEY.log()
        exit(1)

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-cluster-mgr")

    logging_config.configure_logging(log_level,
                                     log_dir,
                                     "cluster-manager",
                                     show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("cluster-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(
            arguments['--pidfile'])  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-cluster-manager/plugins/"
    plugins = load_plugins_in_dir(
        plugins_dir,
        PluginParams(ip=sig_ip,
                     mgmt_ip=mgmt_ip,
                     local_site=local_site_name,
                     remote_site=remote_site_name,
                     remote_cassandra_seeds=remote_cassandra_seeds,
                     signaling_namespace=signaling_namespace,
                     uuid=local_uuid,
                     etcd_key=etcd_key,
                     etcd_cluster_key=etcd_cluster_key))
    plugins.sort(key=lambda x: x.key())
    plugins_to_use = []
    files = []
    for plugin in plugins:
        # Reset per plugin so one skipped plugin doesn't suppress the rest
        skip = False
        for plugin_file in plugin.files():
            if plugin_file in files:
                _log.info("Skipping plugin {} because {} "
                          "is already managed by another plugin".format(
                              plugin, plugin_file))
                skip = True

        if not skip:
            plugins_to_use.append(plugin)
            files.extend(plugin.files())

    synchronizers = []
    threads = []

    if cluster_manager_enabled == "N":
        # Don't start any threads as we don't want the cluster manager to run
        pdlogs.DO_NOT_START.log()
    elif etcd_cluster_key == "DO_NOT_CLUSTER":
        # Don't start any threads as we don't want this box to cluster
        pdlogs.DO_NOT_CLUSTER.log()
    else:
        for plugin in plugins_to_use:
            syncer = EtcdSynchronizer(plugin, sig_ip, etcd_ip=mgmt_ip)
            syncer.start_thread()

            synchronizers.append(syncer)
            threads.append(syncer.thread)
            _log.info("Loaded plugin %s" % plugin)

    install_sigquit_handler(synchronizers)
    install_sigterm_handler(synchronizers)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("No plugin threads running, waiting for a SIGTERM or SIGQUIT")
    while not should_quit:
        sleep(1)
    _log.info("Quitting")
    _log.debug("%d threads outstanding at exit" % activeCount())
    pdlogs.EXITING.log()
    syslog.closelog()
Example #12
def main(args):
    syslog.openlog("cluster-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    mgmt_ip = arguments['--mgmt-local-ip']
    sig_ip = arguments['--sig-local-ip']
    local_site_name = arguments['--local-site']
    remote_site_name = arguments['--remote-site']
    signaling_namespace = arguments.get('--signaling-namespace')
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "cluster-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    logging_config.configure_logging(log_level, log_dir, "cluster-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("cluster-manager")

    # Drop a pidfile.
    pid = os.getpid()
    with open(arguments['--pidfile'], "w") as pidfile:
        pidfile.write(str(pid) + "\n")

    plugins_dir = "/usr/share/clearwater/clearwater-cluster-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir,
                                  PluginParams(ip=sig_ip,
                                               mgmt_ip=mgmt_ip,
                                               local_site=local_site_name,
                                               remote_site=remote_site_name,
                                               signaling_namespace=signaling_namespace))
    plugins.sort(key=lambda x: x.key())
    plugins_to_use = []
    files = []
    for plugin in plugins:
        # Reset per plugin so one skipped plugin doesn't suppress the rest
        skip = False
        for plugin_file in plugin.files():
            if plugin_file in files:
                _log.info("Skipping plugin {} because {} "
                          "is already managed by another plugin"
                          .format(plugin, plugin_file))
                skip = True

        if not skip:
            plugins_to_use.append(plugin)
            files.extend(plugin.files())

    synchronizers = []
    threads = []
    for plugin in plugins_to_use:
        syncer = EtcdSynchronizer(plugin, sig_ip, etcd_ip=mgmt_ip)
        syncer.start_thread()

        synchronizers.append(syncer)
        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    install_sigquit_handler(synchronizers)
    install_sigterm_handler(synchronizers)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("No plugin threads running, waiting for a SIGTERM or SIGQUIT")
    while not should_quit:
        sleep(1)
    _log.info("Quitting")
    _log.debug("%d threads outstanding at exit" % activeCount())
    pdlogs.EXITING.log()
    syslog.closelog()
    # Use os._exit to skip exit handlers - otherwise the concurrent.futures
    # exit handler waits indefinitely and the process never exits
    os._exit(0)
Example #13
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """

    # Parse arguments
    parser = argparse.ArgumentParser(description="Ellis web server")
    parser.add_argument("--background",
                        action="store_true",
                        help="Detach and run server in background")
    parser.add_argument("--log-level", default=2, type=int)
    args = parser.parse_args()

    prctl.prctl(prctl.NAME, "ellis")

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR,
                                    settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(settings.PID_FILE)  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    # Only run one process, not one per core - we don't need the performance
    # and this keeps everything in one log file
    prctl.prctl(prctl.NAME, "ellis")
    logging_config.configure_logging(
        utils.map_clearwater_log_level(args.log_level), settings.LOGS_DIR,
        settings.LOG_FILE_PREFIX)
    _log.info("Ellis process starting up")
    connection.init_connection()

    http_server = httpserver.HTTPServer(application)
    unix_socket = bind_unix_socket(settings.HTTP_UNIX, 0666)
    http_server.add_socket(unix_socket)

    homestead.ping()
    background.start_background_worker_io_loop()
    io_loop = tornado.ioloop.IOLoop.instance()
    io_loop.start()
Example #14
    # from the line delete count
    stats["Simservs & iFCs deleted"] -= stats["Unassigned numbers in Ellis"]
    print "\nSummary:"
    table_format = "{:<40}{}"
    for s in stats:
        print table_format.format(s, stats[s])


def standalone():
    """
    Entry point to script
    """
    check_existing_uris()
    remove_nonexisting_uris()
    IOLoop.instance().start()


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("--log-level", dest="log_level", default=2, type="int")
    (options, args) = parser.parse_args()

    if args:
        parser.print_help()
    else:
        logging_config.configure_logging(
            utils.map_clearwater_log_level(options.log_level),
            settings.LOGS_DIR, settings.LOG_FILE_PREFIX, "sync_databases")

        standalone()
Example #15
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """
    # Hack to work around an issue with Cyclone and UNIX domain sockets
    twisted.internet.address.UNIXAddress.host = "localhost"

    # Parse arguments
    parser = argparse.ArgumentParser(description="Crest web server")
    parser.add_argument("--background", action="store_true", help="Detach and run server in background")
    parser.add_argument("--signaling-namespace", action="store_true", help="Server running in signaling namespace")
    parser.add_argument("--worker-processes", default=1, type=int)
    parser.add_argument("--shared-http-tcp-fd", default=None, type=int)
    parser.add_argument("--process-id", default=0, type=int)
    args = parser.parse_args()

    # Set process name.
    prctl.prctl(prctl.NAME, settings.PROCESS_NAME)

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Setup logging
    syslog.openlog(settings.LOG_FILE_PREFIX, syslog.LOG_PID)
    logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, args.process_id)
    twisted.python.log.addObserver(on_twisted_log)

    pdlogs.CREST_STARTING.log()

    # setup accumulators and counters for statistics gathering
    api.base.setupStats(args.process_id, args.worker_processes)

    # Initialize reactor ports and create worker sub-processes
    if args.process_id == 0:
        # Main process startup, create pidfile.

        # We must keep a reference to the file object here, as this keeps
        # the file locked and provides extra protection against two processes running at
        # once.
        pidfile_lock = None
        try:
            pidfile_lock = utils.lock_and_write_pid_file(settings.PID_FILE) # noqa
        except IOError:
            # We failed to take the lock - another process is already running
            exit(1)

        # Create UNIX domain socket for nginx front-end (used for
        # normal operation and as a bridge from the default namespace to the signaling
        # namespace in a multiple interface configuration).
        bind_safely(reactor, args.process_id, application)
        pdlogs.CREST_UP.log()

        if args.signaling_namespace and settings.PROCESS_NAME == "homer":
            # Running in signaling namespace as Homer, create TCP socket for XDMS requests
            # from signaling interface
            _log.info("Going to listen for HTTP on TCP port %s", settings.HTTP_PORT)
            http_tcp_port = reactor.listenTCP(settings.HTTP_PORT, application, interface=settings.LOCAL_IP)

            # Spin up worker sub-processes, passing TCP file descriptor
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--shared-http-tcp-fd", str(http_tcp_port.fileno()),
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2, http_tcp_port.fileno(): http_tcp_port.fileno()},
                                     env = os.environ)
        else:
            # Spin up worker sub-processes
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2},
                                     env = os.environ)
    else:
        # Sub-process startup, ensure we die if our parent does.
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)

        # Create UNIX domain socket for nginx front-end based on process ID.
        bind_safely(reactor, args.process_id, application)

        # Create TCP socket if file descriptor was passed.
        if args.shared_http_tcp_fd:
            reactor.adoptStreamPort(args.shared_http_tcp_fd, AF_INET, application)

    # We need to catch the shutdown request so that we can properly stop
    # the ZMQ interface; otherwise the reactor won't shut down on a SIGTERM
    # and will be SIGKILLed when the service is stopped.
    reactor.addSystemEventTrigger('before', 'shutdown', on_before_shutdown)

    # Kick off the reactor to start listening on configured ports
    reactor.run()
Example #16
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """
    # Hack to work around an issue with Cyclone and UNIX domain sockets
    twisted.internet.address.UNIXAddress.host = "localhost"

    # Parse arguments
    parser = argparse.ArgumentParser(description="Homer web server")
    parser.add_argument("--background", action="store_true", help="Detach and run server in background")
    parser.add_argument("--signaling-namespace", action="store_true", help="Server running in signaling namespace")
    parser.add_argument("--worker-processes", default=1, type=int)
    parser.add_argument("--shared-http-tcp-fd", default=None, type=int)
    parser.add_argument("--process-id", default=0, type=int)
    args = parser.parse_args()

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    # Drop a pidfile.
    pid = os.getpid()
    with open(settings.PID_FILE, "w") as pidfile:
        pidfile.write(str(pid) + "\n")

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Setup logging
    logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, args.process_id)

    # setup accumulators and counters for statistics gathering
    api.base.setupStats(args.process_id, args.worker_processes)

    # Initialize reactor ports and create worker sub-processes
    if args.process_id == 0:
        # Main process startup, create UNIX domain socket for nginx front-end (used for
        # normal operation and as a bridge from the default namespace to the signaling
        # namespace in a multiple interface configuration).
        _lock_fd = bind_safely(reactor, args.process_id, application)

        if args.signaling_namespace and settings.PROCESS_NAME == "homer":
            # Running in signaling namespace as Homer, create TCP socket for XDMS requests
            # from signaling interface
            _log.info("Going to listen for HTTP on TCP port %s", settings.HTTP_PORT)
            http_tcp_port = reactor.listenTCP(settings.HTTP_PORT, application, interface=settings.LOCAL_IP)

            # Spin up worker sub-processes, passing TCP file descriptor
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--shared-http-tcp-fd", str(http_tcp_port.fileno()),
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2, http_tcp_port.fileno(): http_tcp_port.fileno()},
                                     env = os.environ)
        else:
            # Spin up worker sub-processes
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2},
                                     env = os.environ)
    else:
        # Sub-process startup, create UNIX domain socket for nginx front-end based on
        # process ID.
        _lock_fd = bind_safely(reactor, args.process_id, application)

        # Create TCP socket if file descriptor was passed.
        if args.shared_http_tcp_fd:
            reactor.adoptStreamPort(args.shared_http_tcp_fd, AF_INET, application)

    # Kick off the reactor to start listening on configured ports
    reactor.run()
Example #17
def main(args):
    syslog.openlog("queue-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    node_type = arguments['--node-type']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)
    wait_plugin_complete = arguments['--wait-plugin-complete']

    stdout_err_log = os.path.join(log_dir, "queue-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-queue-mgr")

    logging_config.configure_logging(log_level,
                                     log_dir,
                                     "queue-manager",
                                     show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("queue-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(
            arguments['--pidfile'])  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-queue-manager/plugins/"
    plugins = load_plugins_in_dir(
        plugins_dir, PluginParams(wait_plugin_complete=wait_plugin_complete))
    plugins.sort(key=lambda x: x.key())
    synchronizers = []
    threads = []

    # Load the plugins, but don't start them until we've installed the SIGTERM
    # handler, as that handler will gracefully shut down any running
    # synchronizers on receiving a SIGTERM
    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, etcd_key,
                                  node_type)
        synchronizers.append(syncer)
        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    utils.install_sigterm_handler(synchronizers)

    # Now start the plugin threads
    for syncer in synchronizers:
        syncer.start_thread()
        _log.info("Started thread for plugin %s" % syncer._plugin)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    while not utils.should_quit:
        sleep(1)

    _log.info("Clearwater Queue Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()
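
This variant of the queue manager deliberately installs the SIGTERM handler before starting any synchronizer thread, so a signal arriving during startup is still delivered to every plugin. utils.install_sigterm_handler itself is not shown here; the sketch below is only an illustration of the idea, and the terminate() method name is assumed:

import signal

should_quit = False

def install_sigterm_handler(synchronizers):
    # Sketch only: the real helper lives in metaswitch.common.utils.
    def _on_sigterm(signum, frame):
        global should_quit
        # Ask each synchronizer to stop (method name assumed for illustration)
        # and flag the main loop, which polls should_quit once the plugin
        # threads have exited.
        for syncer in synchronizers:
            syncer.terminate()
        should_quit = True
    signal.signal(signal.SIGTERM, _on_sigterm)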
Example #18
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """
    # Hack to work around an issue with Cyclone and UNIX domain sockets
    twisted.internet.address.UNIXAddress.host = "localhost"

    # Parse arguments
    parser = argparse.ArgumentParser(description="Crest web server")
    parser.add_argument("--background",
                        action="store_true",
                        help="Detach and run server in background")
    parser.add_argument("--signaling-namespace",
                        action="store_true",
                        help="Server running in signaling namespace")
    parser.add_argument("--worker-processes", default=1, type=int)
    parser.add_argument("--shared-http-tcp-fd", default=None, type=int)
    parser.add_argument("--process-id", default=0, type=int)
    parser.add_argument("--log-level", default=2, type=int)
    args = parser.parse_args()

    # Set process name.
    prctl.prctl(prctl.NAME, settings.PROCESS_NAME)

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR,
                                    settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Setup logging
    syslog.openlog(settings.LOG_FILE_PREFIX, syslog.LOG_PID)

    logging_config.configure_logging(
        utils.map_clearwater_log_level(args.log_level), settings.LOGS_DIR,
        settings.LOG_FILE_PREFIX, args.process_id)

    twisted.python.log.addObserver(on_twisted_log)

    pdlogs.CREST_STARTING.log()

    # setup accumulators and counters for statistics gathering
    api.base.setupStats(args.process_id, args.worker_processes)

    # Initialize reactor ports and create worker sub-processes
    if args.process_id == 0:
        # Main process startup, create pidfile.

        # We must keep a reference to the file object here, as this keeps
        # the file locked and provides extra protection against two processes running at
        # once.
        pidfile_lock = None
        try:
            pidfile_lock = utils.lock_and_write_pid_file(
                settings.PID_FILE)  # noqa
        except IOError:
            # We failed to take the lock - another process is already running
            exit(1)

        # Create UNIX domain socket for nginx front-end (used for
        # normal operation and as a bridge from the default namespace to the signaling
        # namespace in a multiple interface configuration).
        bind_safely(reactor, args.process_id, application)
        pdlogs.CREST_UP.log()

        if args.signaling_namespace and settings.PROCESS_NAME == "homer":
            # Running in signaling namespace as Homer, create TCP socket for XDMS requests
            # from signaling interface
            _log.info("Going to listen for HTTP on TCP port %s",
                      settings.HTTP_PORT)
            http_tcp_port = reactor.listenTCP(settings.HTTP_PORT,
                                              application,
                                              interface=settings.LOCAL_IP)

            # Spin up worker sub-processes, passing TCP file descriptor
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(
                    None,
                    executable, [
                        executable, __file__, "--shared-http-tcp-fd",
                        str(http_tcp_port.fileno()), "--process-id",
                        str(process_id)
                    ],
                    childFDs={
                        0: 0,
                        1: 1,
                        2: 2,
                        http_tcp_port.fileno(): http_tcp_port.fileno()
                    },
                    env=os.environ)
        else:
            # Spin up worker sub-processes
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(
                    None,
                    executable,
                    [executable, __file__, "--process-id",
                     str(process_id)],
                    childFDs={
                        0: 0,
                        1: 1,
                        2: 2
                    },
                    env=os.environ)
    else:
        # Sub-process startup, ensure we die if our parent does.
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)

        # Create UNIX domain socket for nginx front-end based on process ID.
        bind_safely(reactor, args.process_id, application)

        # Create TCP socket if file descriptor was passed.
        if args.shared_http_tcp_fd:
            reactor.adoptStreamPort(args.shared_http_tcp_fd, AF_INET,
                                    application)

    # We need to catch the shutdown request so that we can properly stop
    # the ZMQ interface; otherwise the reactor won't shut down on a SIGTERM
    # and will be SIGKILLed when the service is stopped.
    reactor.addSystemEventTrigger('before', 'shutdown', on_before_shutdown)

    # Kick off the reactor to start listening on configured ports
    reactor.run()
Example #19
                      default=1,
                      help="Create this many numbers, if not specified, only one number will be created")
    parser.add_option("-p",
                      "--pstn",
                      action="store_true",
                      dest="pstn",
                      default=False,
                      help="If --pstn is specified, a PSTN-enabled number will be created")
    parser.add_option("-r",
                      "--realm",
                      dest="realm",
                      type="string",
                      default=settings.SIP_DIGEST_REALM,
                      help="Create numbers in this digest realm - if not specified, the home domain will be used")
    parser.add_option("--log-level",
                      dest="log_level",
                      default=2,
                      type="int")
    (options, args) = parser.parse_args()

    if args:
        parser.print_help()
    else:
        logging_config.configure_logging(
                utils.map_clearwater_log_level(options.log_level),
                settings.LOGS_DIR,
                settings.LOG_FILE_PREFIX,
                "create_db")

        standalone(options.start, options.num, options.pstn, options.realm)
Example #20
def main(args):
    syslog.openlog("config-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "config-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-config-mgr")

    logging_config.configure_logging(log_level, log_dir, "config-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("config-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-config-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir)
    plugins.sort(key=lambda x: x.key())
    threads = []

    files = [p.file() for p in plugins]
    alarm = ConfigAlarm(files)

    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, alarm, etcd_key)
        syncer.start_thread()

        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    _log.info("Clearwater Configuration Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()