Example #1
def launch(command):
    # terminate the child process when the parent dies
    try:
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)
    except:
        pass
    args = shlex.split(command)
    os.execv(args[0], args)
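A minimal, self-contained sketch of how a helper like this is typically driven, assuming only the standard library plus the python-prctl package (the /bin/sleep command is purely illustrative): the parent forks, and the child installs the death signal before replacing itself with the target command.

import os
import shlex
import signal

import prctl


def launch(command):
    # Ask the kernel to SIGTERM this process when its parent dies, then
    # replace the process image with the target command.
    try:
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)
    except Exception:
        pass
    args = shlex.split(command)
    os.execv(args[0], args)


if os.fork() == 0:
    # Child: becomes the target command and dies when the parent exits.
    launch("/bin/sleep 60")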
Example #2
File: caoe.py Project: XuYong/CaoE
def exit_when_parent_or_child_dies(sig):
    gid = os.getpgrp()
    signal(SIGCHLD, make_child_die_signal_handler(gid))

    try:
        from prctl import prctl, PDEATHSIG
        signal(SIGHUP, make_quit_signal_handler(gid))
        # give me SIGHUP if my parent dies
        prctl(PDEATHSIG, SIGHUP)
        pause()

    except ImportError:
        # fallback to polling status of parent
        while True:
            if os.getppid() == 1:
                # parent died, suicide
                signal(SIGTERM, SIG_DFL)
                os.killpg(gid, sig)
                sys.exit()
            time.sleep(5)
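make_quit_signal_handler and make_child_die_signal_handler are not shown in the excerpt above, so the wiring below is only an illustrative guess at the prctl branch: the watcher asks the kernel for SIGHUP when its parent dies and, on receiving it, forwards a chosen signal to its whole process group.

import os
import signal
import sys

from prctl import prctl, PDEATHSIG


def make_quit_signal_handler(gid, sig=signal.SIGTERM):
    def handler(signum, frame):
        # Parent died: restore the default action, signal the whole group
        # (which includes this process), then exit.
        signal.signal(sig, signal.SIG_DFL)
        os.killpg(gid, sig)
        sys.exit(0)
    return handler


gid = os.getpgrp()
signal.signal(signal.SIGHUP, make_quit_signal_handler(gid))
prctl(PDEATHSIG, signal.SIGHUP)  # give me SIGHUP if my parent dies
signal.pause()                   # sleep until a signal arrives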
Example #3
def main(args):
    syslog.openlog("queue-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    node_type = arguments['--node-type']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)
    wait_plugin_complete = arguments['--wait-plugin-complete']

    stdout_err_log = os.path.join(log_dir, "queue-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-queue-mgr")

    logging_config.configure_logging(log_level, log_dir, "queue-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("queue-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-queue-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir,
                                  PluginParams(wait_plugin_complete=wait_plugin_complete))
    plugins.sort(key=lambda x: x.key())
    synchronizers = []
    threads = []

    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, etcd_key, node_type)
        syncer.start_thread()

        synchronizers.append(syncer)
        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    utils.install_sigterm_handler(synchronizers)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    while not utils.should_quit:
        sleep(1)

    _log.info("Clearwater Queue Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()
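The prctl.prctl(prctl.NAME, ...) call sets the kernel "comm" name that ps and top display; it is truncated to 15 characters, which is why these daemons abbreviate. A minimal sketch, assuming python-prctl on Linux:

import prctl

prctl.prctl(prctl.NAME, "cw-queue-mgr")

# The name can be read back from procfs to confirm it took effect.
with open("/proc/self/comm") as f:
    print(f.read().strip())   # -> cw-queue-mgr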
Example #4
def main(args):
    syslog.openlog("cluster-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    mgmt_ip = arguments['--mgmt-local-ip']
    sig_ip = arguments['--sig-local-ip']
    local_site_name = arguments['--local-site']
    remote_site_name = arguments['--remote-site']
    remote_cassandra_seeds = arguments['--remote-cassandra-seeds']
    if remote_cassandra_seeds:
        remote_cassandra_seeds = remote_cassandra_seeds.split(',')
    else:
        remote_cassandra_seeds = []
    signaling_namespace = arguments.get('--signaling-namespace')
    local_uuid = UUID(arguments['--uuid'])
    etcd_key = arguments.get('--etcd-key')
    etcd_cluster_key = arguments.get('--etcd-cluster-key')
    cluster_manager_enabled = arguments['--cluster-manager-enabled']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "cluster-manager.output.log")

    # Check that there's an etcd_cluster_key value passed to the cluster
    # manager
    if etcd_cluster_key == "":
        # The etcd_cluster_key isn't valid, and we could end up with weird
        # entries in the etcd database if we allow the cluster manager to start
        pdlogs.EXITING_MISSING_ETCD_CLUSTER_KEY.log()
        exit(1)

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-cluster-mgr")

    logging_config.configure_logging(log_level,
                                     log_dir,
                                     "cluster-manager",
                                     show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("cluster-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(
            arguments['--pidfile'])  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-cluster-manager/plugins/"
    plugins = load_plugins_in_dir(
        plugins_dir,
        PluginParams(ip=sig_ip,
                     mgmt_ip=mgmt_ip,
                     local_site=local_site_name,
                     remote_site=remote_site_name,
                     remote_cassandra_seeds=remote_cassandra_seeds,
                     signaling_namespace=signaling_namespace,
                     uuid=local_uuid,
                     etcd_key=etcd_key,
                     etcd_cluster_key=etcd_cluster_key))
    plugins.sort(key=lambda x: x.key())
    plugins_to_use = []
    files = []
    for plugin in plugins:
        # Reset the flag for each plugin, otherwise one clash would skip
        # every plugin that follows it.
        skip = False
        for plugin_file in plugin.files():
            if plugin_file in files:
                _log.info("Skipping plugin {} because {} "
                          "is already managed by another plugin".format(
                              plugin, plugin_file))
                skip = True

        if not skip:
            plugins_to_use.append(plugin)
            files.extend(plugin.files())

    synchronizers = []
    threads = []

    if cluster_manager_enabled == "N":
        # Don't start any threads as we don't want the cluster manager to run
        pdlogs.DO_NOT_START.log()
    elif etcd_cluster_key == "DO_NOT_CLUSTER":
        # Don't start any threads as we don't want this box to cluster
        pdlogs.DO_NOT_CLUSTER.log()
    else:
        for plugin in plugins_to_use:
            syncer = EtcdSynchronizer(plugin, sig_ip, etcd_ip=mgmt_ip)
            syncer.start_thread()

            synchronizers.append(syncer)
            threads.append(syncer.thread)
            _log.info("Loaded plugin %s" % plugin)

    install_sigquit_handler(synchronizers)
    install_sigterm_handler(synchronizers)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("No plugin threads running, waiting for a SIGTERM or SIGQUIT")
    while not should_quit:
        sleep(1)
    _log.info("Quitting")
    _log.debug("%d threads outstanding at exit" % activeCount())
    pdlogs.EXITING.log()
    syslog.closelog()
Example #5
def bind():
    # terminate the child process when the parent dies
    try:
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)
    except:
        pass
Example #6
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """

    # Parse arguments
    parser = argparse.ArgumentParser(description="Ellis web server")
    parser.add_argument("--background",
                        action="store_true",
                        help="Detach and run server in background")
    parser.add_argument("--log-level", default=2, type=int)
    args = parser.parse_args()

    prctl.prctl(prctl.NAME, "ellis")

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR,
                                    settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(settings.PID_FILE)  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    # Only run one process, not one per core - we don't need the performance
    # and this keeps everything in one log file
    prctl.prctl(prctl.NAME, "ellis")
    logging_config.configure_logging(
        utils.map_clearwater_log_level(args.log_level), settings.LOGS_DIR,
        settings.LOG_FILE_PREFIX)
    _log.info("Ellis process starting up")
    connection.init_connection()

    http_server = httpserver.HTTPServer(application)
    unix_socket = bind_unix_socket(settings.HTTP_UNIX, 0666)
    http_server.add_socket(unix_socket)

    homestead.ping()
    background.start_background_worker_io_loop()
    io_loop = tornado.ioloop.IOLoop.instance()
    io_loop.start()
Example #7
	# file/open call
	#
	fileobject.open  = _fo_open
	fileobject.close = _fo_close

def nowait():
	'''nowait

	NOTE: GLOBAL SIGNAL CHANGE!

	Do not wait for the terminated/exiting fileobject, since this can
	block. To prevent the processes from becoming unreaped zombies we
	disable the SIGCHLD signal. (see man wait(2))
	'''
	global NOWAIT
	NOWAIT = True

	signal.signal(signal.SIGCHLD, signal.SIG_IGN)

	
if __name__ == '__main__':
	import prctl

	prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)

	handler = FileObjectHandler(sys.stdin, sys.stdout)
	value   = handler.run()
	sys.exit(value)
#
# end..
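The nowait() trick above relies on documented POSIX behaviour: with SIGCHLD set to SIG_IGN, the kernel reaps exited children itself, so they never linger as zombies (at the cost of the parent no longer being able to collect their exit status). A minimal sketch:

import os
import signal
import time

# GLOBAL SIGNAL CHANGE: children are now reaped automatically and their
# exit status is discarded.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)

if os.fork() == 0:
    os._exit(0)      # child exits immediately
time.sleep(0.1)      # parent never calls wait(); no zombie is left behind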
Example #8
    try:
        resource.setrlimit(resource.RLIMIT_NOFILE, (max_fd, max_fd))
    except ValueError, e:
        if not os.getuid():
            print 'MAX FD error: %s, %d' % (e.args[0], max_fd)
    try:
        resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
    except ValueError, e:
        print 'CORE size error:', e

    if not os.getuid():
        os.setgid(1)
        os.setuid(1)

    try:
        prctl.prctl(prctl.DUMPABLE, 1)
    except (AttributeError, ValueError, prctl.PrctlError), e:
        print 'PRCTL DUMPABLE error:', e

    if logdir and pidfile:
        pidfile = logdir + '/' + pidfile
        try:
            fd = open(pidfile, 'w')
        except IOError, e:
            print 'IO error: %s' % (e.args[1])
            return None
        else:
            fd.write('%d' % os.getpid())
            fd.close()

    if logdir and logfile:
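The DUMPABLE call in this daemonizer is the counterpart of the prctl.prctl(prctl.DUMPABLE, 0) hardening seen later on this page: the kernel clears the dumpable flag when a process changes its UID/GID, so after dropping root the daemon switches the flag back on to keep the core dumps enabled via RLIMIT_CORE above usable for debugging. A minimal sketch of that sequence, assuming python-prctl and a process started as root:

import os
import resource

import prctl

# Allow unlimited core dumps, then drop root privileges.
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
if os.getuid() == 0:
    os.setgid(1)
    os.setuid(1)                     # changing UID/GID clears the dumpable flag...
    prctl.prctl(prctl.DUMPABLE, 1)   # ...so turn it back on to allow core dumps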
Example #9
import datetime
import StringIO
import aespckfile
from PIL import Image
try:
    import simplejson as json
except:
    import json

sys.path.append((os.path.dirname(__file__) or ".") + "/../")

import config

try:
    import prctl
    prctl.prctl(prctl.DUMPABLE, 0)
except ImportError:
    pass

locale.setlocale(locale.LC_ALL, '')

parsedate = lambda x: datetime.datetime.strptime(x,"%Y-%m-%d").date()
prettyformat = lambda x: locale.currency(float(x)/100, grouping=True)

digitre = re.compile("^#?\d{2,}$")

def imgtrim(img):
    """This will trim any whitespace around the image
    http://stackoverflow.com/questions/9396312/use-python-pil-or-similar-to-shrink-whitespace"""
    im = Image.open(StringIO.StringIO(img))
    pix = numpy.asarray(im)
Example #10
def main():
    """Main entry point for Glances.

    Select the mode (standalone, client or server)
    Run it...
    """
    # Log Glances and PSutil version
    logger.info('Start Glances {0}'.format(__version__))
    logger.info('{0} {1} and PSutil {2} detected'.format(
        platform.python_implementation(), platform.python_version(),
        __psutil_version))

    # Share global var
    global core, standalone, client, server, webserver

    # Create the Glances main instance
    core = GlancesMain()
    prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)
    # Catch the CTRL-C signal
    signal.signal(signal.SIGINT, __signal_handler)

    # Glances can be run in standalone, client or server mode
    if core.is_standalone():
        logger.info("Start standalone mode")

        # Import the Glances standalone module
        from glances.core.glances_standalone import GlancesStandalone

        # Init the standalone mode
        standalone = GlancesStandalone(config=core.get_config(),
                                       args=core.get_args())

        # Start the standalone (CLI) loop
        standalone.serve_forever()

    elif core.is_client():
        if core.is_client_browser():
            logger.info("Start client mode (browser)")

            # Import the Glances client browser module
            from glances.core.glances_client_browser import GlancesClientBrowser

            # Init the client
            client = GlancesClientBrowser(config=core.get_config(),
                                          args=core.get_args())

        else:
            logger.info("Start client mode")

            # Import the Glances client module
            from glances.core.glances_client import GlancesClient

            # Init the client
            client = GlancesClient(config=core.get_config(),
                                   args=core.get_args())

            # Test if client and server are in the same major version
            if not client.login():
                logger.critical(
                    "The server version is not compatible with the client")
                sys.exit(2)

        # Start the client loop
        client.serve_forever()

        # Shutdown the client
        client.end()

    elif core.is_server():
        logger.info("Start server mode")

        # Import the Glances server module
        from glances.core.glances_server import GlancesServer

        args = core.get_args()

        server = GlancesServer(cached_time=core.cached_time,
                               config=core.get_config(),
                               args=args)
        print('Glances server is running on {0}:{1}'.format(
            args.bind_address, args.port))

        # Set the server login/password (if -P/--password tag)
        if args.password != "":
            server.add_user(args.username, args.password)

        # Start the server loop
        server.serve_forever()

        # Shutdown the server?
        server.server_close()

    elif core.is_webserver():
        logger.info("Start web server mode")

        # Import the Glances web server module
        from glances.core.glances_webserver import GlancesWebServer

        # Init the web server mode
        webserver = GlancesWebServer(config=core.get_config(),
                                     args=core.get_args())

        # Start the web server loop
        webserver.serve_forever()
Example #11
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """
    # Hack to work around an issue with Cyclone and UNIX domain sockets
    twisted.internet.address.UNIXAddress.host = "localhost"

    # Parse arguments
    parser = argparse.ArgumentParser(description="Crest web server")
    parser.add_argument("--background",
                        action="store_true",
                        help="Detach and run server in background")
    parser.add_argument("--signaling-namespace",
                        action="store_true",
                        help="Server running in signaling namespace")
    parser.add_argument("--worker-processes", default=1, type=int)
    parser.add_argument("--shared-http-tcp-fd", default=None, type=int)
    parser.add_argument("--process-id", default=0, type=int)
    parser.add_argument("--log-level", default=2, type=int)
    args = parser.parse_args()

    # Set process name.
    prctl.prctl(prctl.NAME, settings.PROCESS_NAME)

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR,
                                    settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Setup logging
    syslog.openlog(settings.LOG_FILE_PREFIX, syslog.LOG_PID)

    logging_config.configure_logging(
        utils.map_clearwater_log_level(args.log_level), settings.LOGS_DIR,
        settings.LOG_FILE_PREFIX, args.process_id)

    twisted.python.log.addObserver(on_twisted_log)

    pdlogs.CREST_STARTING.log()

    # setup accumulators and counters for statistics gathering
    api.base.setupStats(args.process_id, args.worker_processes)

    # Initialize reactor ports and create worker sub-processes
    if args.process_id == 0:
        # Main process startup, create pidfile.

        # We must keep a reference to the file object here, as this keeps
        # the file locked and provides extra protection against two processes running at
        # once.
        pidfile_lock = None
        try:
            pidfile_lock = utils.lock_and_write_pid_file(
                settings.PID_FILE)  # noqa
        except IOError:
            # We failed to take the lock - another process is already running
            exit(1)

        # Create UNIX domain socket for nginx front-end (used for
        # normal operation and as a bridge from the default namespace to the signaling
        # namespace in a multiple interface configuration).
        bind_safely(reactor, args.process_id, application)
        pdlogs.CREST_UP.log()

        if args.signaling_namespace and settings.PROCESS_NAME == "homer":
            # Running in signaling namespace as Homer, create TCP socket for XDMS requests
            # from signaling interface
            _log.info("Going to listen for HTTP on TCP port %s",
                      settings.HTTP_PORT)
            http_tcp_port = reactor.listenTCP(settings.HTTP_PORT,
                                              application,
                                              interface=settings.LOCAL_IP)

            # Spin up worker sub-processes, passing TCP file descriptor
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(
                    None,
                    executable, [
                        executable, __file__, "--shared-http-tcp-fd",
                        str(http_tcp_port.fileno()), "--process-id",
                        str(process_id)
                    ],
                    childFDs={
                        0: 0,
                        1: 1,
                        2: 2,
                        http_tcp_port.fileno(): http_tcp_port.fileno()
                    },
                    env=os.environ)
        else:
            # Spin up worker sub-processes
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(
                    None,
                    executable,
                    [executable, __file__, "--process-id",
                     str(process_id)],
                    childFDs={
                        0: 0,
                        1: 1,
                        2: 2
                    },
                    env=os.environ)
    else:
        # Sub-process startup, ensure we die if our parent does.
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)

        # Create UNIX domain socket for nginx front-end based on process ID.
        bind_safely(reactor, args.process_id, application)

        # Create TCP socket if file descriptor was passed.
        if args.shared_http_tcp_fd:
            reactor.adoptStreamPort(args.shared_http_tcp_fd, AF_INET,
                                    application)

    # We need to catch the shutdown request so that we can properly stop
    # the ZMQ interface; otherwise the reactor won't shut down on a SIGTERM
    # and will be SIGKILLed when the service is stopped.
    reactor.addSystemEventTrigger('before', 'shutdown', on_before_shutdown)

    # Kick off the reactor to start listening on configured ports
    reactor.run()
Example #12
def main(args):
    syslog.openlog("queue-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    node_type = arguments['--node-type']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)
    wait_plugin_complete = arguments['--wait-plugin-complete']

    stdout_err_log = os.path.join(log_dir, "queue-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-queue-mgr")

    logging_config.configure_logging(log_level,
                                     log_dir,
                                     "queue-manager",
                                     show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("queue-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(
            arguments['--pidfile'])  # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-queue-manager/plugins/"
    plugins = load_plugins_in_dir(
        plugins_dir, PluginParams(wait_plugin_complete=wait_plugin_complete))
    plugins.sort(key=lambda x: x.key())
    synchronizers = []
    threads = []

    # Load the plugins, but don't start them until we've installed the SIGTERM
    # handler, as that handler will gracefully shut down any running
    # synchronizers on receiving a SIGTERM
    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, etcd_key,
                                  node_type)
        synchronizers.append(syncer)
        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    utils.install_sigterm_handler(synchronizers)

    # Now start the plugin threads
    for syncer in synchronizers:
        syncer.start_thread()
        _log.info("Started thread for plugin %s" % syncer._plugin)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    while not utils.should_quit:
        sleep(1)

    _log.info("Clearwater Queue Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()
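The interesting difference in this variant is the ordering spelled out in its comments: create the synchronizers, install the SIGTERM handler, and only then start the threads, so a SIGTERM that arrives during startup still produces a clean shutdown. A stripped-down sketch of that pattern using only the standard library (the Clearwater utils helpers are not shown in the excerpt):

import signal
import threading
import time

should_quit = threading.Event()


def worker():
    # Stand-in for a plugin synchronizer thread.
    while not should_quit.is_set():
        time.sleep(1)


threads = [threading.Thread(target=worker) for _ in range(2)]

# Install the handler before starting any threads.
signal.signal(signal.SIGTERM, lambda signum, frame: should_quit.set())

for thread in threads:
    thread.start()

while any(thread.is_alive() for thread in threads):
    for thread in threads:
        thread.join(1)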
Example #13
def main(args):
    syslog.openlog("cluster-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    mgmt_ip = arguments['--mgmt-local-ip']
    sig_ip = arguments['--sig-local-ip']
    local_site_name = arguments['--local-site']
    remote_site_name = arguments['--remote-site']
    remote_cassandra_seeds = arguments['--remote-cassandra-seeds']
    if remote_cassandra_seeds:
        remote_cassandra_seeds = remote_cassandra_seeds.split(',')
    else:
        remote_cassandra_seeds = []
    signaling_namespace = arguments.get('--signaling-namespace')
    local_uuid = UUID(arguments['--uuid'])
    etcd_key = arguments.get('--etcd-key')
    etcd_cluster_key = arguments.get('--etcd-cluster-key')
    cluster_manager_enabled = arguments['--cluster-manager-enabled']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "cluster-manager.output.log")

    # Check that there's an etcd_cluster_key value passed to the cluster
    # manager
    if etcd_cluster_key == "":
        # The etcd_cluster_key isn't valid, and we could end up with weird
        # entries in the etcd database if we allow the cluster manager to start
        pdlogs.EXITING_MISSING_ETCD_CLUSTER_KEY.log()
        exit(1)

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-cluster-mgr")

    logging_config.configure_logging(log_level, log_dir, "cluster-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("cluster-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-cluster-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir,
                                  PluginParams(ip=sig_ip,
                                               mgmt_ip=mgmt_ip,
                                               local_site=local_site_name,
                                               remote_site=remote_site_name,
                                               remote_cassandra_seeds=remote_cassandra_seeds,
                                               signaling_namespace=signaling_namespace,
                                               uuid=local_uuid,
                                               etcd_key=etcd_key,
                                               etcd_cluster_key=etcd_cluster_key))
    plugins.sort(key=lambda x: x.key())
    plugins_to_use = []
    files = []
    for plugin in plugins:
        # Reset the flag for each plugin, otherwise one clash would skip
        # every plugin that follows it.
        skip = False
        for plugin_file in plugin.files():
            if plugin_file in files:
                _log.info("Skipping plugin {} because {} "
                          "is already managed by another plugin"
                          .format(plugin, plugin_file))
                skip = True

        if not skip:
            plugins_to_use.append(plugin)
            files.extend(plugin.files())

    synchronizers = []
    threads = []

    if cluster_manager_enabled == "N":
        # Don't start any threads as we don't want the cluster manager to run
        pdlogs.DO_NOT_START.log()
    elif etcd_cluster_key == "DO_NOT_CLUSTER":
        # Don't start any threads as we don't want this box to cluster
        pdlogs.DO_NOT_CLUSTER.log()
    else:
        for plugin in plugins_to_use:
            syncer = EtcdSynchronizer(plugin, sig_ip, etcd_ip=mgmt_ip)
            syncer.start_thread()

            synchronizers.append(syncer)
            threads.append(syncer.thread)
            _log.info("Loaded plugin %s" % plugin)


    install_sigquit_handler(synchronizers)
    utils.install_sigterm_handler(synchronizers)

    while any([thread.isAlive() for thread in threads]):
        for thread in threads:
            if thread.isAlive():
                thread.join(1)

    _log.info("No plugin threads running, waiting for a SIGTERM or SIGQUIT")
    while not utils.should_quit and not should_quit:
        sleep(1)
    _log.info("Quitting")
    _log.debug("%d threads outstanding at exit" % activeCount())
    pdlogs.EXITING.log()
    syslog.closelog()
Example #14
def pre_exec():
    """ Used to cause s3 downloads to die when the parent dies"""
    prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)
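A hook like this is typically handed to subprocess.Popen as preexec_fn so that it runs in the forked child just before the new program is executed; the sleep command below is purely illustrative.

import signal
import subprocess

import prctl


def pre_exec():
    """Used to cause child downloads to die when the parent dies."""
    prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)


# preexec_fn runs in the child between fork() and exec().
proc = subprocess.Popen(["sleep", "600"], preexec_fn=pre_exec)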
Example #15
def standalone():
    """
    Initializes Tornado and our application.  Forks worker processes to handle
    requests.  Does not return until all child processes exit normally.
    """
    # Hack to work around an issue with Cyclone and UNIX domain sockets
    twisted.internet.address.UNIXAddress.host = "localhost"

    # Parse arguments
    parser = argparse.ArgumentParser(description="Crest web server")
    parser.add_argument("--background", action="store_true", help="Detach and run server in background")
    parser.add_argument("--signaling-namespace", action="store_true", help="Server running in signaling namespace")
    parser.add_argument("--worker-processes", default=1, type=int)
    parser.add_argument("--shared-http-tcp-fd", default=None, type=int)
    parser.add_argument("--process-id", default=0, type=int)
    args = parser.parse_args()

    # Set process name.
    prctl.prctl(prctl.NAME, settings.PROCESS_NAME)

    # We don't initialize logging until we fork because we want each child to
    # have its own logging and it's awkward to reconfigure logging that is
    # defined by the parent.
    application = create_application()

    if args.background:
        # Get a new logfile, rotating the old one if present.
        err_log_name = os.path.join(settings.LOGS_DIR, settings.LOG_FILE_PREFIX + "-err.log")
        try:
            os.rename(err_log_name, err_log_name + ".old")
        except OSError:
            pass
        # Fork into background.
        utils.daemonize(err_log_name)

    utils.install_sigusr1_handler(settings.LOG_FILE_PREFIX)

    # Setup logging
    syslog.openlog(settings.LOG_FILE_PREFIX, syslog.LOG_PID)
    logging_config.configure_logging(settings.LOG_LEVEL, settings.LOGS_DIR, settings.LOG_FILE_PREFIX, args.process_id)
    twisted.python.log.addObserver(on_twisted_log)

    pdlogs.CREST_STARTING.log()

    # setup accumulators and counters for statistics gathering
    api.base.setupStats(args.process_id, args.worker_processes)

    # Initialize reactor ports and create worker sub-processes
    if args.process_id == 0:
        # Main process startup, create pidfile.

        # We must keep a reference to the file object here, as this keeps
        # the file locked and provides extra protection against two processes running at
        # once.
        pidfile_lock = None
        try:
            pidfile_lock = utils.lock_and_write_pid_file(settings.PID_FILE) # noqa
        except IOError:
            # We failed to take the lock - another process is already running
            exit(1)

        # Create UNIX domain socket for nginx front-end (used for
        # normal operation and as a bridge from the default namespace to the signaling
        # namespace in a multiple interface configuration).
        bind_safely(reactor, args.process_id, application)
        pdlogs.CREST_UP.log()

        if args.signaling_namespace and settings.PROCESS_NAME == "homer":
            # Running in signaling namespace as Homer, create TCP socket for XDMS requests
            # from signaling interface
            _log.info("Going to listen for HTTP on TCP port %s", settings.HTTP_PORT)
            http_tcp_port = reactor.listenTCP(settings.HTTP_PORT, application, interface=settings.LOCAL_IP)

            # Spin up worker sub-processes, passing TCP file descriptor
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--shared-http-tcp-fd", str(http_tcp_port.fileno()),
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2, http_tcp_port.fileno(): http_tcp_port.fileno()},
                                     env = os.environ)
        else:
            # Spin up worker sub-processes
            for process_id in range(1, args.worker_processes):
                reactor.spawnProcess(None, executable, [executable, __file__,
                                     "--process-id", str(process_id)],
                                     childFDs={0: 0, 1: 1, 2: 2},
                                     env = os.environ)
    else:
        # Sub-process startup, ensure we die if our parent does.
        prctl.prctl(prctl.PDEATHSIG, signal.SIGTERM)

        # Create UNIX domain socket for nginx front-end based on process ID.
        bind_safely(reactor, args.process_id, application)

        # Create TCP socket if file descriptor was passed.
        if args.shared_http_tcp_fd:
            reactor.adoptStreamPort(args.shared_http_tcp_fd, AF_INET, application)

    # We need to catch the shutdown request so that we can properly stop
    # the ZMQ interface; otherwise the reactor won't shut down on a SIGTERM
    # and will be SIGKILLed when the service is stopped.
    reactor.addSystemEventTrigger('before', 'shutdown', on_before_shutdown)

    # Kick off the reactor to start listening on configured ports
    reactor.run()
Example #16
def main(args):
    syslog.openlog("config-manager", syslog.LOG_PID)
    pdlogs.STARTUP.log()
    try:
        arguments = docopt(__doc__, argv=args)
    except DocoptExit:
        pdlogs.EXITING_BAD_CONFIG.log()
        raise

    local_ip = arguments['--local-ip']
    local_site = arguments['--local-site']
    etcd_key = arguments['--etcd-key']
    log_dir = arguments['--log-directory']
    log_level = LOG_LEVELS.get(arguments['--log-level'], logging.DEBUG)

    stdout_err_log = os.path.join(log_dir, "config-manager.output.log")

    if not arguments['--foreground']:
        utils.daemonize(stdout_err_log)

    # Process names are limited to 15 characters, so abbreviate
    prctl.prctl(prctl.NAME, "cw-config-mgr")

    logging_config.configure_logging(log_level, log_dir, "config-manager", show_thread=True)

    # urllib3 logs a WARNING log whenever it recreates a connection, but our
    # etcd usage does this frequently (to allow watch timeouts), so deliberately
    # ignore this log
    urllib_logger = logging.getLogger('urllib3')
    urllib_logger.setLevel(logging.ERROR)

    utils.install_sigusr1_handler("config-manager")

    # Drop a pidfile. We must keep a reference to the file object here, as this keeps
    # the file locked and provides extra protection against two processes running at
    # once.
    pidfile_lock = None
    try:
        pidfile_lock = utils.lock_and_write_pid_file(arguments['--pidfile']) # noqa
    except IOError:
        # We failed to take the lock - another process is already running
        exit(1)

    plugins_dir = "/usr/share/clearwater/clearwater-config-manager/plugins/"
    plugins = load_plugins_in_dir(plugins_dir)
    plugins.sort(key=lambda x: x.key())
    threads = []

    files = [p.file() for p in plugins]
    alarm = ConfigAlarm(files)

    for plugin in plugins:
        syncer = EtcdSynchronizer(plugin, local_ip, local_site, alarm, etcd_key)
        syncer.start_thread()

        threads.append(syncer.thread)
        _log.info("Loaded plugin %s" % plugin)

    while any([thr.isAlive() for thr in threads]):
        for thr in threads:
            if thr.isAlive():
                thr.join(1)

    _log.info("Clearwater Configuration Manager shutting down")
    pdlogs.EXITING.log()
    syslog.closelog()