def test_env_section(self):
    conf = get_config(_CONF['env_section'])
    watchers_conf = {}
    for watcher_conf in conf['watchers']:
        watchers_conf[watcher_conf['name']] = watcher_conf
    watcher1 = Watcher.load_from_config(watchers_conf['watcher1'])
    watcher2 = Watcher.load_from_config(watchers_conf['watcher2'])
    self.assertEqual('lie', watcher1.env['CAKE'])
    self.assertEqual('cake', watcher2.env['LIE'])
    for watcher in [watcher1, watcher2]:
        self.assertEqual("%s:/bin" % os.getenv('PATH'),
                         watcher.env['PATH'])
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket))

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets)

    return arbiter
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get("watchers", []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get("sockets", []):
        sockets.append(CircusSocket.load_from_config(socket))

    # creating arbiter
    arbiter = cls(
        watchers,
        cfg["endpoint"],
        cfg["pubsub_endpoint"],
        check_delay=cfg.get("check_delay", 1.0),
        prereload_fn=cfg.get("prereload_fn"),
        stats_endpoint=cfg.get("stats_endpoint"),
        plugins=cfg.get("plugins"),
        sockets=sockets,
    )

    return arbiter
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket))

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=cfg.get('httpd', False),
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080),
                  debug=cfg.get('debug', False),
                  stream_backend=cfg.get('stream_backend', 'thread'),
                  ssh_server=cfg.get('ssh_server', None))

    return arbiter
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket))

    httpd = cfg.get('httpd', False)
    if httpd:
        # controlling that we have what it takes to run the web UI
        # if something is missing this will tell the user
        try:
            import circusweb     # NOQA
        except ImportError:
            logger.error('You need to install circus-web')
            sys.exit(1)

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=httpd,
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080),
                  debug=cfg.get('debug', False),
                  ssh_server=cfg.get('ssh_server', None))

    return arbiter
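# Illustrative usage sketch (not from the original source): how a
# load_from_config classmethod like the variants above is typically
# consumed. The 'circus.ini' filename and the start()/stop() calls are
# assumptions for the example.
from circus.arbiter import Arbiter

arbiter = Arbiter.load_from_config('circus.ini')
try:
    arbiter.start()      # runs the watchers until stopped
finally:
    arbiter.stop()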
def test_issue310(self):
    '''
    https://github.com/mozilla-services/circus/pull/310

    Allow $(circus.sockets.name) to be used in args.
    '''
    conf = get_config(_CONF['issue310'])
    watcher = Watcher.load_from_config(conf['watchers'][0])
    socket = CircusSocket.load_from_config(conf['sockets'][0])
    try:
        watcher.initialize(None, {'web': socket}, None)
        process = Process(watcher._nextwid, watcher.cmd,
                          args=watcher.args,
                          working_dir=watcher.working_dir,
                          shell=watcher.shell, uid=watcher.uid,
                          gid=watcher.gid, env=watcher.env,
                          rlimits=watcher.rlimits, spawn=False,
                          executable=watcher.executable,
                          use_fds=watcher.use_sockets, watcher=watcher)

        sockets_fds = watcher._get_sockets_fds()
        formatted_args = process.format_args(sockets_fds=sockets_fds)

        fd = sockets_fds['web']
        self.assertEqual(formatted_args, ['foo', '--fd', str(fd)])
    finally:
        socket.close()
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket))

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=cfg.get('httpd', False),
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080),
                  debug=cfg.get('debug', False),
                  stream_backend=cfg.get('stream_backend', 'thread'),
                  ssh_server=cfg.get('ssh_server', None))

    return arbiter
def test_issue310(self):
    """
    https://github.com/mozilla-services/circus/pull/310

    Allow $(circus.sockets.name) to be used in args.
    """
    conf = get_config(_CONF["issue310"])
    watcher = Watcher.load_from_config(conf["watchers"][0])
    socket = CircusSocket.load_from_config(conf["sockets"][0])
    watcher.initialize(None, {"web": socket}, None)
    process = Process(
        watcher._process_counter,
        watcher.cmd,
        args=watcher.args,
        working_dir=watcher.working_dir,
        shell=watcher.shell,
        uid=watcher.uid,
        gid=watcher.gid,
        env=watcher.env,
        rlimits=watcher.rlimits,
        spawn=False,
        executable=watcher.executable,
        use_fds=watcher.use_sockets,
        watcher=watcher,
    )
    fd = watcher._get_sockets_fds()["web"]
    formatted_args = process.format_args()
    self.assertEqual(formatted_args, ["foo", "--fd", str(fd)])
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket))

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=cfg.get('httpd', False),
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080))

    return arbiter
def test_issue1088(self):
    # #1088 - graceful_timeout should be float
    conf = get_config(_CONF['issue1088'])
    watcher = conf['watchers'][0]
    self.assertEqual(watcher['graceful_timeout'], 25.5)
    watcher = Watcher.load_from_config(conf['watchers'][0])
    watcher.stop()
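# Illustrative only (not from the original source): the same float check as
# test_issue1088, driven from an in-memory watcher config instead of the ini
# file behind _CONF['issue1088']. The command is a placeholder.
from circus.watcher import Watcher

watcher = Watcher.load_from_config({
    'name': 'graceful',
    'cmd': 'sleep 120',            # placeholder command
    'graceful_timeout': 25.5,      # must be kept as a float
})
assert watcher.graceful_timeout == 25.5
watcher.stop()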
def load_from_config(cls, config_file, loop=None):
    cfg = get_config(config_file)
    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket_ in cfg.get('sockets', []):
        sockets.append(CircusSocket.load_from_config(socket_))

    httpd = cfg.get('httpd', False)
    if httpd:
        # controlling that we have what it takes to run the web UI
        # if something is missing this will tell the user
        try:
            import circusweb     # NOQA
        except ImportError:
            logger.error('You need to install circus-web')
            sys.exit(1)

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'),
                  statsd=cfg.get('statsd', False),
                  stats_endpoint=cfg.get('stats_endpoint'),
                  papa_endpoint=cfg.get('papa_endpoint'),
                  multicast_endpoint=cfg.get('multicast_endpoint'),
                  plugins=cfg.get('plugins'), sockets=sockets,
                  warmup_delay=cfg.get('warmup_delay', 0),
                  httpd=httpd,
                  loop=loop,
                  httpd_host=cfg.get('httpd_host', 'localhost'),
                  httpd_port=cfg.get('httpd_port', 8080),
                  debug=cfg.get('debug', False),
                  debug_gc=cfg.get('debug_gc', False),
                  ssh_server=cfg.get('ssh_server', None),
                  pidfile=cfg.get('pidfile', None),
                  loglevel=cfg.get('loglevel', None),
                  logoutput=cfg.get('logoutput', None),
                  loggerconfig=cfg.get('loggerconfig', None),
                  fqdn_prefix=cfg.get('fqdn_prefix', None),
                  umask=cfg['umask'],
                  endpoint_owner=cfg.get('endpoint_owner', None))

    # store the cfg which will be used, so it can be used later
    # for checking if the cfg has been changed
    arbiter._cfg = cls.get_arbiter_config(cfg)
    arbiter.config_file = config_file

    return arbiter
def load(watcher_conf):
    watcher = Watcher.load_from_config(watcher_conf.copy())
    process = Process(watcher._nextwid, watcher.cmd,
                      args=watcher.args,
                      working_dir=watcher.working_dir,
                      shell=watcher.shell, uid=watcher.uid,
                      gid=watcher.gid, env=watcher.env,
                      rlimits=watcher.rlimits, spawn=False,
                      executable=watcher.executable,
                      use_fds=watcher.use_sockets, watcher=watcher)
    return process.format_args()
def load_from_config(cls, config_file, loop=None):
    cfg = get_config(config_file)
    watchers = []
    for watcher in cfg.get("watchers", []):
        watchers.append(Watcher.load_from_config(watcher))

    sockets = []
    for socket in cfg.get("sockets", []):
        sockets.append(CircusSocket.load_from_config(socket))

    httpd = cfg.get("httpd", False)
    if httpd:
        # controlling that we have what it takes to run the web UI
        # if something is missing this will tell the user
        try:
            import circusweb  # NOQA
        except ImportError:
            logger.error("You need to install circus-web")
            sys.exit(1)

    # creating arbiter
    arbiter = cls(
        watchers,
        cfg["endpoint"],
        cfg["pubsub_endpoint"],
        check_delay=cfg.get("check_delay", 1.0),
        prereload_fn=cfg.get("prereload_fn"),
        statsd=cfg.get("statsd", False),
        stats_endpoint=cfg.get("stats_endpoint"),
        multicast_endpoint=cfg.get("multicast_endpoint"),
        plugins=cfg.get("plugins"),
        sockets=sockets,
        warmup_delay=cfg.get("warmup_delay", 0),
        httpd=httpd,
        loop=loop,
        httpd_host=cfg.get("httpd_host", "localhost"),
        httpd_port=cfg.get("httpd_port", 8080),
        debug=cfg.get("debug", False),
        ssh_server=cfg.get("ssh_server", None),
        pidfile=cfg.get("pidfile", None),
        loglevel=cfg.get("loglevel", None),
        logoutput=cfg.get("logoutput", None),
        fqdn_prefix=cfg.get("fqdn_prefix", None),
        umask=cfg["umask"],
    )

    # store the cfg which will be used, so it can be used later
    # for checking if the cfg has been changed
    arbiter._cfg = cls.get_arbiter_config(cfg)
    arbiter.config_file = config_file

    return arbiter
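# Illustrative only (not from the original source): the general shape of the
# parsed dict that get_config() hands to the load_from_config variants
# above. Only 'endpoint', 'pubsub_endpoint' and 'umask' are read without a
# default; everything else falls back to the .get() defaults visible in the
# code. The endpoint values mirror the defaults quoted in the get_arbiter
# docstrings further down.
cfg = {
    'endpoint': 'tcp://127.0.0.1:5555',
    'pubsub_endpoint': 'tcp://127.0.0.1:5556',
    'check_delay': 1.0,
    'umask': None,
    'watchers': [{'name': 'web', 'cmd': 'sleep 120'}],
    'sockets': [],
    'plugins': [],
}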
def test_env_section(self):
    conf = get_config(_CONF["env_section"])
    watchers_conf = {}
    for watcher_conf in conf["watchers"]:
        watchers_conf[watcher_conf["name"]] = watcher_conf
    watcher1 = Watcher.load_from_config(watchers_conf["watcher1"])
    watcher2 = Watcher.load_from_config(watchers_conf["watcher2"])

    self.assertEqual("lie", watcher1.env["CAKE"])
    self.assertEqual("cake", watcher2.env["LIE"])

    for watcher in [watcher1, watcher2]:
        self.assertEqual("%s:/bin" % os.getenv("PATH"), watcher.env["PATH"])

    self.assertEqual("test1", watcher1.env["TEST1"])
    self.assertEqual("test1", watcher2.env["TEST1"])
    self.assertEqual("test2", watcher1.env["TEST2"])
    self.assertEqual("test2", watcher2.env["TEST2"])
    self.assertEqual("test3", watcher1.env["TEST3"])
    self.assertEqual("test3", watcher2.env["TEST3"])
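# Illustrative only (not from the original source): the per-watcher
# environment that the assertions in test_env_section imply, assuming the
# ini file sets CAKE/LIE per watcher, extends PATH with ':/bin' and shares
# the TEST1..TEST3 variables between both watchers.
import os

expected_watcher1_env = {
    'CAKE': 'lie',
    'PATH': '%s:/bin' % os.getenv('PATH'),
    'TEST1': 'test1',
    'TEST2': 'test2',
    'TEST3': 'test3',
}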
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get('watchers', []):
        watchers.append(Watcher.load_from_config(watcher))

    # creating arbiter
    arbiter = cls(watchers, cfg['endpoint'], cfg['pubsub_endpoint'],
                  check_delay=cfg.get('check_delay', 1.),
                  prereload_fn=cfg.get('prereload_fn'))

    return arbiter
def load(watcher_conf):
    watcher = Watcher.load_from_config(watcher_conf.copy())
    # Make sure we don't close the sockets as we will be
    # launching the Watcher with IS_WINDOWS=True
    watcher.use_sockets = True
    process = Process('test', watcher._nextwid, watcher.cmd,
                      args=watcher.args,
                      working_dir=watcher.working_dir,
                      shell=watcher.shell, uid=watcher.uid,
                      gid=watcher.gid, env=watcher.env,
                      rlimits=watcher.rlimits, spawn=False,
                      executable=watcher.executable,
                      use_fds=watcher.use_sockets, watcher=watcher)
    return process.format_args()
def load(watcher_conf):
    watcher = Watcher.load_from_config(watcher_conf.copy())
    # Make sure we don't close the sockets as we will be
    # launching the Watcher with IS_WINDOWS=True
    watcher.use_sockets = True
    process = Process(watcher._nextwid, watcher.cmd,
                      args=watcher.args,
                      working_dir=watcher.working_dir,
                      shell=watcher.shell, uid=watcher.uid,
                      gid=watcher.gid, env=watcher.env,
                      rlimits=watcher.rlimits, spawn=False,
                      executable=watcher.executable,
                      use_fds=watcher.use_sockets, watcher=watcher)
    return process.format_args()
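# Illustrative only (not from the original source): calling the load()
# helpers above with a minimal watcher dict. The names and the expected
# token list are assumptions, based on the ['foo', '--fd', str(fd)] shape
# asserted in the test_issue310 variants.
watcher_conf = {'name': 'foo', 'cmd': 'foo', 'args': '--bar baz'}
formatted = load(watcher_conf)
# expected: ['foo', '--bar', 'baz']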
def message(self, *args, **opts):
    if len(args) != 2:
        raise ArgumentError("invalid number of arguments")

    config_file, name = args
    config = get_config(config_file)
    watchers = config.get('watchers', [])
    config = filter(lambda w: w['name'] == name, watchers)
    if not config:
        raise ArgumentError("watcher not found")

    config = config[0]
    config['stdout_stream'] = {'class': 'StdoutStream'}
    config['stderr_stream'] = {'class': 'StdoutStream'}
    config['_exec'] = True
    watcher = Watcher.load_from_config(config)
    # the next call will exec(), replacing the current process
    watcher.start()
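# Note on the lookup above (not from the original source): on Python 3,
# filter() returns a lazy iterator, so the truthiness test and config[0]
# only behave as written on Python 2. A version-agnostic equivalent of that
# lookup, sketched with stand-in data:
watchers = [{'name': 'web', 'cmd': 'sleep 120'},
            {'name': 'worker', 'cmd': 'sleep 120'}]
name = 'worker'

matches = [w for w in watchers if w['name'] == name]
if not matches:
    raise LookupError("watcher not found")
config = matches[0]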
def load_from_config(cls, config_file):
    cfg = get_config(config_file)

    # hack reload ioloop to use the monkey patched version
    reload(ioloop)

    watchers = []
    for watcher in cfg.get("watchers", []):
        watchers.append(Watcher.load_from_config(watcher))

    # creating arbiter
    arbiter = cls(
        watchers,
        cfg["endpoint"],
        cfg["pubsub_endpoint"],
        check_delay=cfg.get("check_delay", 1.0),
        prereload_fn=cfg.get("prereload_fn"),
    )

    return arbiter
def test_issue310(self):
    """
    https://github.com/mozilla-services/circus/pull/310

    Allow $(circus.sockets.name) to be used in args.
    """
    conf = get_config(_CONF["issue310"])
    watcher = Watcher.load_from_config(conf["watchers"][0])
    socket = CircusSocket.load_from_config(conf["sockets"][0])
    try:
        watcher.initialize(None, {"web": socket}, None)

        if IS_WINDOWS:
            # We can't close the sockets on Windows as we
            # are redirecting stdout
            watcher.use_sockets = True

        process = Process(
            watcher._nextwid,
            watcher.cmd,
            args=watcher.args,
            working_dir=watcher.working_dir,
            shell=watcher.shell,
            uid=watcher.uid,
            gid=watcher.gid,
            env=watcher.env,
            rlimits=watcher.rlimits,
            spawn=False,
            executable=watcher.executable,
            use_fds=watcher.use_sockets,
            watcher=watcher,
        )

        sockets_fds = watcher._get_sockets_fds()
        formatted_args = process.format_args(sockets_fds=sockets_fds)

        fd = sockets_fds["web"]
        self.assertEqual(formatted_args, ["foo", "--fd", str(fd)])
    finally:
        socket.close()
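# Illustrative only (not from the original source): in-memory equivalents of
# the watcher and socket definitions that the 'issue310' config file is
# expected to contain. The cmd/args values follow the assertion
# ['foo', '--fd', str(fd)]; the host and port are placeholders.
watcher_conf = {
    'name': 'web-worker',
    'cmd': 'foo',
    'args': '--fd $(circus.sockets.web)',
    'use_sockets': True,
}
socket_conf = {'name': 'web', 'host': 'localhost', 'port': 8080}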
def get_arbiter( watchers, controller=None, pubsub_endpoint=None, stats_endpoint=None, env=None, name=None, context=None, background=False, stream_backend="thread", plugins=None, debug=False, proc_name="circusd", ): """Creates a Arbiter and a single watcher in it. Options: - **watchers** -- a list of watchers. A watcher in that case is a dict containing: - **name** -- the name of the watcher (default: None) - **cmd** -- the command line used to run the Watcher. - **args** -- the args for the command (list or string). - **executable** -- When executable is given, the first item in the args sequence obtained from **cmd** is still treated by most programs as the command name, which can then be different from the actual executable name. It becomes the display name for the executing program in utilities such as **ps**. - **numprocesses** -- the number of processes to spawn (default: 1). - **warmup_delay** -- the delay in seconds between two spawns (default: 0) - **shell** -- if True, the processes are run in the shell (default: False) - **working_dir** - the working dir for the processes (default: None) - **uid** -- the user id used to run the processes (default: None) - **gid** -- the group id used to run the processes (default: None) - **env** -- the environment passed to the processes (default: None) - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. (default: False) - **stdout_stream**: a mapping containing the options for configuring the stdout stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **stderr_stream**: a mapping containing the options for configuring the stderr stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **max_retry**: the number of times we attempt to start a process, before we abandon and stop the whole watcher. (default: 5) - **controller** -- the zmq entry point (default: 'tcp://127.0.0.1:5555') - **pubsub_endpoint** -- the zmq entry point for the pubsub (default: 'tcp://127.0.0.1:5556') - **stats_endpoint** -- the stats endpoint. If not provided, the *circusd-stats* process will not be launched. (default: None) - **context** -- the zmq context (default: None) - **background** -- If True, the arbiter is launched in a thread in the background (default: False) - **stream_backend** -- the backend that will be used for the streaming process. Can be *thread* or *gevent*. When set to *gevent* you need to have *gevent* and *gevent_zmq* installed. (default: thread) - **plugins** -- a list of plugins. 
Each item is a mapping with: - **use** -- Fully qualified name that points to the plugin class - every other value is passed to the plugin in the **config** option - **debug** -- If True the arbiter is launched in debug mode (default: False) - **proc_name** -- the arbiter process name (default: circusd) """ from circus.util import DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB if controller is None: controller = DEFAULT_ENDPOINT_DEALER if pubsub_endpoint is None: pubsub_endpoint = DEFAULT_ENDPOINT_SUB if stream_backend == "gevent": try: import gevent # NOQA from gevent import monkey # NOQA try: import zmq.eventloop as old_io import zmq.green as zmq # NOQA old_io.ioloop.Poller = zmq.Poller except ImportError: # older version try: from gevent_zeromq import monkey_patch, IOLOOP_IS_MONKEYPATCHED # NOQA # NOQA monkey_patch() warnings.warn("gevent_zeromq is deprecated, please " "use PyZMQ >= 2.2.0.1") except ImportError: raise ImportError(_MSG) monkey.patch_all() except ImportError: sys.stderr.write("stream_backend set to gevent, " + "but gevent isn't installed\n") sys.stderr.write("Exiting...") sys.exit(1) from circus.watcher import Watcher if background: from circus.arbiter import ThreadedArbiter as Arbiter # NOQA else: from circus.arbiter import Arbiter # NOQA _watchers = [] for watcher in watchers: cmd = watcher["cmd"] watcher["name"] = watcher.get("name", os.path.basename(cmd.split()[0])) watcher["stream_backend"] = stream_backend _watchers.append(Watcher.load_from_config(watcher)) return Arbiter( _watchers, controller, pubsub_endpoint, stats_endpoint=stats_endpoint, context=context, plugins=plugins, debug=debug, proc_name=proc_name, )
def get_arbiter(watchers, controller='tcp://127.0.0.1:5555', pubsub_endpoint='tcp://127.0.0.1:5556', stats_endpoint=None, env=None, name=None, context=None, background=False, stream_backend="thread", plugins=None): """Creates a Arbiter and a single watcher in it. Options: - **watchers** -- a list of watchers. A watcher in that case is a dict containing: - **name** -- the name of the watcher (default: None) - **cmd** -- the command line used to run the Watcher. - **args** -- the args for the command (list or string). - **executable** -- When executable is given, the first item in the args sequence obtained from **cmd** is still treated by most programs as the command name, which can then be different from the actual executable name. It becomes the display name for the executing program in utilities such as **ps**. - **numprocesses** -- the number of processes to spawn (default: 1). - **warmup_delay** -- the delay in seconds between two spawns (default: 0) - **shell** -- if True, the processes are run in the shell (default: False) - **working_dir** - the working dir for the processes (default: None) - **uid** -- the user id used to run the processes (default: None) - **gid** -- the group id used to run the processes (default: None) - **env** -- the environment passed to the processes (default: None) - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. (default: False) - **stdout_stream**: a mapping containing the options for configuring the stdout stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **stderr_stream**: a mapping containing the options for configuring the stderr stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **max_retry**: the number of times we attempt to start a process, before we abandon and stop the whole watcher. (default: 5) - **controller** -- the zmq entry point (default: 'tcp://127.0.0.1:5555') - **pubsub_endpoint** -- the zmq entry point for the pubsub (default: 'tcp://127.0.0.1:5556') - **stats_endpoint** -- the stats endpoint. If not provided, the *circusd-stats* process will not be launched. (default: None) - **context** -- the zmq context (default: None) - **background** -- If True, the arbiter is launched in a thread in the background (default: False) - **stream_backend** -- the backend that will be used for the streaming process. Can be *thread* or *gevent*. When set to *gevent* you need to have *gevent* and *gevent_zmq* installed. (default: thread) - **plugins** -- a list of plugins. 
Each item is a mapping with: - **use** -- Fully qualified name that points to the plugin class - every other value is passed to the plugin in the **config** option """ if stream_backend == 'gevent': try: import gevent # NOQA import gevent_zeromq # NOQA except ImportError: sys.stderr.write("stream_backend set to gevent, " + "but gevent or gevent_zeromq isn't installed\n") sys.stderr.write("Exiting...") sys.exit(1) from gevent import monkey from gevent_zeromq import monkey_patch monkey.patch_all() monkey_patch() from circus.watcher import Watcher if background: from circus.arbiter import ThreadedArbiter as Arbiter # NOQA else: from circus.arbiter import Arbiter # NOQA _watchers = [] for watcher in watchers: cmd = watcher['cmd'] watcher['name'] = watcher.get('name', os.path.basename(cmd.split()[0])) watcher['stream_backend'] = stream_backend _watchers.append(Watcher.load_from_config(watcher)) return Arbiter(_watchers, controller, pubsub_endpoint, stats_endpoint=stats_endpoint, context=context, plugins=plugins)
def test_env_from_string(self):
    config = {'name': 'foobar', 'cmd': 'foobar', 'env': 'coconuts=migrate'}
    watcher = Watcher.load_from_config(config)
    self.assertEqual(watcher.env, {'coconuts': 'migrate'})
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=.5, prereload_fn=None, context=None, loop=None, statsd=False, stats_endpoint=None, statsd_close_outputs=False, multicast_endpoint=None, plugins=None, sockets=None, warmup_delay=0, httpd=False, httpd_host='localhost', httpd_port=8080, httpd_close_outputs=False, debug=False, ssh_server=None, proc_name='circusd', pidfile=None, loglevel=None, logoutput=None, fqdn_prefix=None): self.watchers = watchers self.endpoint = endpoint self.check_delay = check_delay self.prereload_fn = prereload_fn self.pubsub_endpoint = pubsub_endpoint self.multicast_endpoint = multicast_endpoint self.proc_name = proc_name self.ssh_server = ssh_server self.evpub_socket = None self.pidfile = pidfile self.loglevel = loglevel self.logoutput = logoutput socket_fqdn = socket.getfqdn() if fqdn_prefix is None: fqdn = socket_fqdn else: fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn) self.fqdn = fqdn self.ctrl = self.loop = None self.socket_event = False # initialize zmq context self._init_context(context) self.pid = os.getpid() self._watchers_names = {} self.alive = True self._lock = RLock() self.debug = debug if self.debug: self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'} else: self.stdout_stream = self.stderr_stream = None # initializing circusd-stats as a watcher when configured self.statsd = statsd self.stats_endpoint = stats_endpoint if self.statsd: cmd = "%s -c 'from circus import stats; stats.main()'" % \ sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --pubsub %s' % self.pubsub_endpoint cmd += ' --statspoint %s' % self.stats_endpoint if ssh_server is not None: cmd += ' --ssh %s' % ssh_server if debug: cmd += ' --log-level DEBUG' stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=statsd_close_outputs, close_child_stdout=statsd_close_outputs) self.watchers.append(stats_watcher) # adding the httpd if httpd: cmd = ("%s -c 'from circusweb import circushttpd; " "circushttpd.main()'") % sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --fd $(circus.sockets.circushttpd)' if ssh_server is not None: cmd += ' --ssh %s' % ssh_server httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=httpd_close_outputs, close_child_stdout=httpd_close_outputs) self.watchers.append(httpd_watcher) httpd_socket = CircusSocket(name='circushttpd', host=httpd_host, port=httpd_port) # adding the socket if sockets is None: sockets = [httpd_socket] else: sockets.append(httpd_socket) # adding each plugin as a watcher ch_stderr = self.stderr_stream is None ch_stdout = self.stdout_stream is None if plugins is not None: for plugin in plugins: fqn = plugin['use'] cmd = get_plugin_cmd(plugin, self.endpoint, self.pubsub_endpoint, self.check_delay, ssh_server, debug=self.debug) plugin_cfg = dict(cmd=cmd, priority=1, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=ch_stderr, close_child_stdout=ch_stdout) plugin_cfg.update(plugin) if 'name' not in plugin_cfg: plugin_cfg['name'] = fqn plugin_watcher = Watcher.load_from_config(plugin_cfg) self.watchers.append(plugin_watcher) self.sockets = CircusSockets(sockets) self.warmup_delay = warmup_delay
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=.5, prereload_fn=None, context=None, loop=None, statsd=False, stats_endpoint=None, statsd_close_outputs=False, multicast_endpoint=None, plugins=None, sockets=None, warmup_delay=0, httpd=False, httpd_host='localhost', httpd_port=8080, httpd_close_outputs=False, debug=False, ssh_server=None, proc_name='circusd', pidfile=None, loglevel=None, logoutput=None, fqdn_prefix=None): self.watchers = watchers self.endpoint = endpoint self.check_delay = check_delay self.prereload_fn = prereload_fn self.pubsub_endpoint = pubsub_endpoint self.multicast_endpoint = multicast_endpoint self.proc_name = proc_name self.ssh_server = ssh_server self.pidfile = pidfile self.loglevel = loglevel self.logoutput = logoutput socket_fqdn = socket.getfqdn() if fqdn_prefix is None: fqdn = socket_fqdn else: fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn) self.fqdn = fqdn self.ctrl = self.loop = None self.socket_event = False # initialize zmq context self._init_context(context) self.pid = os.getpid() self._watchers_names = {} self.alive = True self._lock = RLock() self.debug = debug if self.debug: self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'} else: self.stdout_stream = self.stderr_stream = None # initializing circusd-stats as a watcher when configured self.statsd = statsd self.stats_endpoint = stats_endpoint if self.statsd: cmd = "%s -c 'from circus import stats; stats.main()'" % \ sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --pubsub %s' % self.pubsub_endpoint cmd += ' --statspoint %s' % self.stats_endpoint if ssh_server is not None: cmd += ' --ssh %s' % ssh_server if debug: cmd += ' --log-level DEBUG' stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=statsd_close_outputs, close_child_stdout=statsd_close_outputs) self.watchers.append(stats_watcher) # adding the httpd if httpd: cmd = ("%s -c 'from circusweb import circushttpd; " "circushttpd.main()'") % sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --fd $(circus.sockets.circushttpd)' if ssh_server is not None: cmd += ' --ssh %s' % ssh_server httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=httpd_close_outputs, close_child_stdout=httpd_close_outputs) self.watchers.append(httpd_watcher) httpd_socket = CircusSocket(name='circushttpd', host=httpd_host, port=httpd_port) # adding the socket if sockets is None: sockets = [httpd_socket] else: sockets.append(httpd_socket) # adding each plugin as a watcher ch_stderr = self.stderr_stream is None ch_stdout = self.stdout_stream is None if plugins is not None: for plugin in plugins: fqn = plugin['use'] cmd = get_plugin_cmd(plugin, self.endpoint, self.pubsub_endpoint, self.check_delay, ssh_server, debug=self.debug) plugin_cfg = dict(cmd=cmd, priority=1, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=ch_stderr, close_child_stdout=ch_stdout) plugin_cfg.update(plugin) if 'name' not in plugin_cfg: plugin_cfg['name'] = fqn plugin_watcher = Watcher.load_from_config(plugin_cfg) self.watchers.append(plugin_watcher) self.sockets = CircusSockets(sockets) self.warmup_delay = warmup_delay
def test_watcher_env_var(self):
    conf = get_config(_CONF['env_var'])
    watcher = Watcher.load_from_config(conf['watchers'][0])
    self.assertEqual("%s:/bin" % os.getenv('PATH'), watcher.env['PATH'])
    watcher.stop()
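# Illustrative only (not from the original source): the "$PATH:/bin" value
# asserted above is plain environment-variable expansion, which can be
# reproduced outside circus with os.path.expandvars.
import os

assert os.path.expandvars('$PATH:/bin') == '%s:/bin' % os.getenv('PATH')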
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=1.0,
             prereload_fn=None, context=None, loop=None, statsd=False,
             stats_endpoint=None, statsd_close_outputs=False,
             multicast_endpoint=None, plugins=None, sockets=None,
             warmup_delay=0, httpd=False, httpd_host='localhost',
             httpd_port=8080, httpd_close_outputs=False, debug=False,
             debug_gc=False, ssh_server=None, proc_name='circusd',
             pidfile=None, loglevel=None, logoutput=None,
             loggerconfig=None, fqdn_prefix=None, umask=None,
             endpoint_owner=None):
    self.watchers = watchers
    self.endpoint = endpoint
    self.check_delay = check_delay
    self.prereload_fn = prereload_fn
    self.pubsub_endpoint = pubsub_endpoint
    self.multicast_endpoint = multicast_endpoint
    self.proc_name = proc_name
    self.ssh_server = ssh_server
    self.evpub_socket = None
    self.pidfile = pidfile
    self.loglevel = loglevel
    self.logoutput = logoutput
    self.loggerconfig = loggerconfig
    self.umask = umask
    self.endpoint_owner = endpoint_owner
    self._running = False
    try:
        # getfqdn appears to fail in Python3.3 in the unittest
        # framework so fall back to gethostname
        socket_fqdn = socket.getfqdn()
    except KeyError:
        socket_fqdn = socket.gethostname()
    if fqdn_prefix is None:
        fqdn = socket_fqdn
    else:
        fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn)
    self.fqdn = fqdn

    self.ctrl = self.loop = None
    self._provided_loop = False
    self.socket_event = False
    if loop is not None:
        self._provided_loop = True
        self.loop = loop

    # initialize zmq context
    self._init_context(context)
    self.pid = os.getpid()
    self._watchers_names = {}
    self._stopping = False
    self._restarting = False
    self.debug = debug
    self._exclusive_running_command = None
    if self.debug:
        self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'}
    else:
        self.stdout_stream = self.stderr_stream = None
    self.debug_gc = debug_gc
    if debug_gc:
        gc.set_debug(gc.DEBUG_LEAK)

    # initializing circusd-stats as a watcher when configured
    self.statsd = statsd
    self.stats_endpoint = stats_endpoint

    if self.statsd:
        cmd = "%s -c 'from circus import stats; stats.main()'" % \
            sys.executable
        cmd += ' --endpoint %s' % self.endpoint
        cmd += ' --pubsub %s' % self.pubsub_endpoint
        cmd += ' --statspoint %s' % self.stats_endpoint
        if ssh_server is not None:
            cmd += ' --ssh %s' % ssh_server
        if debug:
            cmd += ' --log-level DEBUG'
        elif self.loglevel:
            cmd += ' --log-level ' + self.loglevel
        if self.logoutput:
            cmd += ' --log-output ' + self.logoutput
        stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True,
                                singleton=True,
                                stdout_stream=self.stdout_stream,
                                stderr_stream=self.stderr_stream,
                                copy_env=True, copy_path=True,
                                close_child_stderr=statsd_close_outputs,
                                close_child_stdout=statsd_close_outputs)

        self.watchers.append(stats_watcher)

    # adding the httpd
    if httpd:
        # adding the socket
        httpd_socket = CircusSocket(name='circushttpd', host=httpd_host,
                                    port=httpd_port)
        if sockets is None:
            sockets = [httpd_socket]
        else:
            sockets.append(httpd_socket)

        cmd = ("%s -c 'from circusweb import circushttpd; "
               "circushttpd.main()'") % sys.executable
        cmd += ' --endpoint %s' % self.endpoint
        cmd += ' --fd $(circus.sockets.circushttpd)'
        if ssh_server is not None:
            cmd += ' --ssh %s' % ssh_server

        # Adding the watcher
        httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True,
                                singleton=True,
                                stdout_stream=self.stdout_stream,
                                stderr_stream=self.stderr_stream,
                                copy_env=True, copy_path=True,
                                close_child_stderr=httpd_close_outputs,
                                close_child_stdout=httpd_close_outputs)
        self.watchers.append(httpd_watcher)

    # adding each plugin as a watcher
    ch_stderr = self.stderr_stream is None
    ch_stdout = self.stdout_stream is None

    if plugins is not None:
        for plugin in plugins:
            fqn = plugin['use']
            cmd = get_plugin_cmd(plugin, self.endpoint,
                                 self.pubsub_endpoint, self.check_delay,
                                 ssh_server, debug=self.debug,
                                 loglevel=self.loglevel,
                                 logoutput=self.logoutput)
            plugin_cfg = dict(cmd=cmd, priority=1, singleton=True,
                              stdout_stream=self.stdout_stream,
                              stderr_stream=self.stderr_stream,
                              copy_env=True, copy_path=True,
                              close_child_stderr=ch_stderr,
                              close_child_stdout=ch_stdout)
            plugin_cfg.update(plugin)
            if 'name' not in plugin_cfg:
                plugin_cfg['name'] = fqn
            plugin_watcher = Watcher.load_from_config(plugin_cfg)
            self.watchers.append(plugin_watcher)

    self.sockets = CircusSockets(sockets)
    self.warmup_delay = warmup_delay
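# Illustrative only (not from the original source): the circusd-stats
# command line assembled by the constructor above, rendered with example
# endpoint values; sys.executable and the endpoints are placeholders.
import sys

endpoint = 'tcp://127.0.0.1:5555'
pubsub_endpoint = 'tcp://127.0.0.1:5556'
stats_endpoint = 'tcp://127.0.0.1:5557'

cmd = "%s -c 'from circus import stats; stats.main()'" % sys.executable
cmd += ' --endpoint %s' % endpoint
cmd += ' --pubsub %s' % pubsub_endpoint
cmd += ' --statspoint %s' % stats_endpoint
print(cmd)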
def __call__(self, watchers, controller=None, pubsub_endpoint=None, statsd=False, stats_endpoint=None, statsd_close_outputs=False, multicast_endpoint=None, env=None, name=None, context=None, background=False, stream_backend="thread", plugins=None, debug=False, proc_name="circusd"): """Creates a Arbiter and a single watcher in it. Options: - **watchers** -- a list of watchers. A watcher in that case is a dict containing: - **name** -- the name of the watcher (default: None) - **cmd** -- the command line used to run the Watcher. - **args** -- the args for the command (list or string). - **executable** -- When executable is given, the first item in the args sequence obtained from **cmd** is still treated by most programs as the command name, which can then be different from the actual executable name. It becomes the display name for the executing program in utilities such as **ps**. - **numprocesses** -- the number of processes to spawn (default: 1). - **warmup_delay** -- the delay in seconds between two spawns (default: 0) - **shell** -- if True, the processes are run in the shell (default: False) - **working_dir** - the working dir for the processes (default: None) - **uid** -- the user id used to run the processes (default: None) - **gid** -- the group id used to run the processes (default: None) - **env** -- the environment passed to the processes (default: None) - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. (default: False) - **stdout_stream**: a mapping containing the options for configuring the stdout stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - any other key will be passed the class constructor. - **stderr_stream**: a mapping containing the options for configuring the stderr stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - any other key will be passed the class constructor. - **max_retry**: the number of times we attempt to start a process, before we abandon and stop the whole watcher. (default: 5) - **hooks**: callback functions for hooking into the watcher startup and shutdown process. **hooks** is a dict where each key is the hook name and each value is a 2-tuple with the name of the callable or the callabled itself and a boolean flag indicating if an exception occuring in the hook should not be ignored. Possible values for the hook name: *before_start*, *after_start*, *before_stop*, *after_stop*, *before_signal*, *after_signal* - **controller** -- the zmq entry point (default: 'tcp://127.0.0.1:5555') - **pubsub_endpoint** -- the zmq entry point for the pubsub (default: 'tcp://127.0.0.1:5556') - **stats_endpoint** -- the stats endpoint. If not provided, the *circusd-stats* process will not be launched. (default: None) - **statsd_close_outputs** -- if True sends the circusd-stats stdout/stderr to /dev/null (default: False) - **context** -- the zmq context (default: None) - **background** -- If True, the arbiter is launched in a thread in the background (default: False) - **stream_backend** -- the backend that will be used for the streaming process. Can be *thread* or *gevent*. When set to *gevent* you need to have *gevent* and *gevent_zmq* installed. (default: thread) - **plugins** -- a list of plugins. 
Each item is a mapping with: - **use** -- Fully qualified name that points to the plugin class - every other value is passed to the plugin in the **config** option - **debug** -- If True the arbiter is launched in debug mode (default: False) - **proc_name** -- the arbiter process name (default: circusd) """ from circus.util import (DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB, DEFAULT_ENDPOINT_MULTICAST, DEFAULT_ENDPOINT_STATS) if controller is None: controller = DEFAULT_ENDPOINT_DEALER if pubsub_endpoint is None: pubsub_endpoint = DEFAULT_ENDPOINT_SUB if multicast_endpoint is None: multicast_endpoint = DEFAULT_ENDPOINT_MULTICAST if stats_endpoint is None and statsd: stats_endpoint = DEFAULT_ENDPOINT_STATS elif stats_endpoint is not None and not statsd: warnings.warn( "You defined a stats_endpoint without " "setting up statsd to True.", DeprecationWarning) statsd = True from circus.watcher import Watcher Arbiter = self._get_arbiter_klass(background=background) _watchers = [] for watcher in watchers: cmd = watcher['cmd'] watcher['name'] = watcher.get('name', os.path.basename(cmd.split()[0])) watcher['stream_backend'] = stream_backend _watchers.append(Watcher.load_from_config(watcher)) return Arbiter(_watchers, controller, pubsub_endpoint, statsd=statsd, stats_endpoint=stats_endpoint, statsd_close_outputs=statsd_close_outputs, multicast_endpoint=multicast_endpoint, context=context, plugins=plugins, debug=debug, proc_name=proc_name)
def reload_from_config(self, config_file=None, inside_circusd=False):
    new_cfg = get_config(config_file if config_file else self.config_file)
    # if arbiter is changed, reload everything
    if self.get_arbiter_config(new_cfg) != self._cfg:
        yield self._restart(inside_circusd=inside_circusd)
        return

    ignore_sn = set(['circushttpd'])
    ignore_wn = set(['circushttpd', 'circusd-stats'])

    # Gather socket names.
    current_sn = set([i.name for i in self.sockets.values()]) - ignore_sn
    new_sn = set([i['name'] for i in new_cfg.get('sockets', [])])
    added_sn = new_sn - current_sn
    deleted_sn = current_sn - new_sn
    maybechanged_sn = current_sn - deleted_sn
    changed_sn = set([])
    wn_with_changed_socket = set([])
    wn_with_deleted_socket = set([])

    # get changed sockets
    for n in maybechanged_sn:
        s = self.get_socket(n)
        if self.get_socket_config(new_cfg, n) != s._cfg:
            changed_sn.add(n)

            # just delete the socket and add it again
            deleted_sn.add(n)
            added_sn.add(n)

            # Get the watchers whichs use these, so they could be
            # deleted and added also
            for w in self.iter_watchers():
                if 'circus.sockets.%s' % n.lower() in w.cmd:
                    wn_with_changed_socket.add(w.name)

    # get deleted sockets
    for n in deleted_sn:
        s = self.get_socket(n)
        s.close()

        # Get the watchers whichs use these, these should not be
        # active anymore
        for w in self.iter_watchers():
            if 'circus.sockets.%s' % n.lower() in w.cmd:
                wn_with_deleted_socket.add(w.name)
        del self.sockets[s.name]

    # get added sockets
    for n in added_sn:
        socket_config = self.get_socket_config(new_cfg, n)
        s = CircusSocket.load_from_config(socket_config)
        s.bind_and_listen()
        self.sockets[s.name] = s

    if added_sn or deleted_sn:
        # make sure all existing watchers get the new sockets in
        # their attributes and get the old removed
        # XXX: is this necessary? self.sockets is an mutable
        # object
        for watcher in self.iter_watchers():
            # XXX: What happens as initalize is called on a
            # running watcher?
            watcher.initialize(self.evpub_socket, self.sockets, self)

    # Gather watcher names.
    current_wn = set([i.name for i in self.iter_watchers()]) - ignore_wn
    new_wn = set([i['name'] for i in new_cfg.get('watchers', [])])
    new_wn = new_wn | set([i['name'] for i in new_cfg.get('plugins', [])])
    added_wn = (new_wn - current_wn) | wn_with_changed_socket
    deleted_wn = current_wn - new_wn - wn_with_changed_socket
    maybechanged_wn = current_wn - deleted_wn
    changed_wn = set([])

    if wn_with_deleted_socket and wn_with_deleted_socket not in new_wn:
        raise ValueError('Watchers %s uses a socket which is deleted' %
                         wn_with_deleted_socket)

    # get changed watchers
    for n in maybechanged_wn:
        w = self.get_watcher(n)
        new_watcher_cfg = (self.get_watcher_config(new_cfg, n) or
                           self.get_plugin_config(new_cfg, n))
        old_watcher_cfg = w._cfg.copy()

        if 'env' in new_watcher_cfg:
            new_watcher_cfg['env'] = parse_env_dict(new_watcher_cfg['env'])

        # discarding env exceptions
        for key in _ENV_EXCEPTIONS:
            if 'env' in new_watcher_cfg and key in new_watcher_cfg['env']:
                del new_watcher_cfg['env'][key]

            if 'env' in new_watcher_cfg and key in old_watcher_cfg['env']:
                del old_watcher_cfg['env'][key]

        diff = DictDiffer(new_watcher_cfg, old_watcher_cfg).changed()

        if diff == set(['numprocesses']):
            # if nothing but the number of processes is
            # changed, just changes this
            w.set_numprocesses(int(new_watcher_cfg['numprocesses']))
            changed = False
        else:
            changed = len(diff) > 0

        if changed:
            # Others things are changed. Just delete and add the watcher.
            changed_wn.add(n)
            deleted_wn.add(n)
            added_wn.add(n)

    # delete watchers
    for n in deleted_wn:
        w = self.get_watcher(n)
        yield w._stop()
        del self._watchers_names[w.name.lower()]
        self.watchers.remove(w)

    # add watchers
    for n in added_wn:
        new_watcher_cfg = (self.get_plugin_config(new_cfg, n) or
                           self.get_watcher_config(new_cfg, n))
        w = Watcher.load_from_config(new_watcher_cfg)
        w.initialize(self.evpub_socket, self.sockets, self)
        yield self.start_watcher(w)
        self.watchers.append(w)
        self._watchers_names[w.name.lower()] = w
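# Illustrative only (not from the original source): the added/deleted/
# maybe-changed bookkeeping used by reload_from_config, shown on plain
# watcher-name sets.
current_wn = {'web', 'worker', 'old-task'}
new_wn = {'web', 'worker', 'new-task'}

added_wn = new_wn - current_wn               # {'new-task'}
deleted_wn = current_wn - new_wn             # {'old-task'}
maybechanged_wn = current_wn - deleted_wn    # {'web', 'worker'}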
def reload_from_config(self, config_file=None): new_cfg = get_config(config_file if config_file else self.config_file) # if arbiter is changed, reload everything if self.get_arbiter_config(new_cfg) != self._cfg: raise ReloadArbiterException # Gather socket names. current_sn = set([i.name for i in self.sockets.values()]) new_sn = set([i['name'] for i in new_cfg.get('sockets', [])]) added_sn = new_sn - current_sn deleted_sn = current_sn - new_sn maybechanged_sn = current_sn - deleted_sn changed_sn = set([]) wn_with_changed_socket = set([]) wn_with_deleted_socket = set([]) # get changed sockets for n in maybechanged_sn: s = self.get_socket(n) if self.get_socket_config(new_cfg, n) != s._cfg: changed_sn.add(n) # just delete the socket and add it again deleted_sn.add(n) added_sn.add(n) # Get the watchers whichs use these, so they could be # deleted and added also for w in self.iter_watchers(): if 'circus.sockets.%s' % n.lower() in w.cmd: wn_with_changed_socket.add(w.name) # get deleted sockets for n in deleted_sn: s = self.get_socket(n) s.close() # Get the watchers whichs use these, these should not be # active anymore for w in self.iter_watchers(): if 'circus.sockets.%s' % n.lower() in w.cmd: wn_with_deleted_socket.add(w.name) del self.sockets[s.name] # get added sockets for n in added_sn: socket_config = self.get_socket_config(new_cfg, n) s = CircusSocket.load_from_config(socket_config) s.bind_and_listen() self.sockets[s.name] = s if added_sn or deleted_sn: # make sure all existing watchers get the new sockets in # their attributes and get the old removed # XXX: is this necessary? self.sockets is an mutable # object for watcher in self.iter_watchers(): # XXX: What happens as initalize is called on a # running watcher? watcher.initialize(self.evpub_socket, self.sockets, self) # Gather watcher names. current_wn = set([i.name for i in self.iter_watchers()]) new_wn = set([i['name'] for i in new_cfg.get('watchers', [])]) new_wn = new_wn | set([i['name'] for i in new_cfg.get('plugins', [])]) added_wn = (new_wn - current_wn) | wn_with_changed_socket deleted_wn = current_wn - new_wn - wn_with_changed_socket maybechanged_wn = current_wn - deleted_wn changed_wn = set([]) if wn_with_deleted_socket and wn_with_deleted_socket not in new_wn: raise ValueError('Watchers %s uses a socket which is deleted' % wn_with_deleted_socket) # get changed watchers for n in maybechanged_wn: w = self.get_watcher(n) new_watcher_cfg = (self.get_watcher_config(new_cfg, n) or self.get_plugin_config(new_cfg, n)) old_watcher_cfg = w._cfg.copy() if new_watcher_cfg != old_watcher_cfg: if not w.name.startswith('plugin:'): num_procs = new_watcher_cfg['numprocesses'] old_watcher_cfg['numprocesses'] = num_procs if new_watcher_cfg == old_watcher_cfg: # if nothing but the number of processes is # changed, just changes this w.set_numprocesses(int(num_procs)) continue # Others things are changed. Just delete and add the watcher. changed_wn.add(n) deleted_wn.add(n) added_wn.add(n) # delete watchers for n in deleted_wn: w = self.get_watcher(n) w.stop() del self._watchers_names[w.name.lower()] self.watchers.remove(w) # add watchers for n in added_wn: new_watcher_cfg = (self.get_plugin_config(new_cfg, n) or self.get_watcher_config(new_cfg, n)) w = Watcher.load_from_config(new_watcher_cfg) w.initialize(self.evpub_socket, self.sockets, self) self.start_watcher(w) self.watchers.append(w) self._watchers_names[w.name.lower()] = w return False
def test_watcher_graceful_timeout(self):
    conf = get_config(_CONF["issue210"])
    watcher = Watcher.load_from_config(conf["watchers"][0])
    watcher.stop()
def __call__(self, watchers, controller=None, pubsub_endpoint=None, statsd=False, stats_endpoint=None, statsd_close_outputs=False, multicast_endpoint=None, env=None, name=None, context=None, background=False, stream_backend="thread", httpd=False, plugins=None, debug=False, proc_name="circusd", loop=None, check_delay=1.0, **kw): """Creates a Arbiter and a single watcher in it. Options: - **watchers** -- a list of watchers. A watcher in that case is a dict containing: - **name** -- the name of the watcher (default: None) - **cmd** -- the command line used to run the Watcher. - **args** -- the args for the command (list or string). - **executable** -- When executable is given, the first item in the args sequence obtained from **cmd** is still treated by most programs as the command name, which can then be different from the actual executable name. It becomes the display name for the executing program in utilities such as **ps**. - **numprocesses** -- the number of processes to spawn (default: 1). - **warmup_delay** -- the delay in seconds between two spawns (default: 0) - **shell** -- if True, the processes are run in the shell (default: False) - **working_dir** - the working dir for the processes (default: None) - **uid** -- the user id used to run the processes (default: None) - **gid** -- the group id used to run the processes (default: None) - **env** -- the environment passed to the processes (default: None) - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. (default: False) - **stdout_stream**: a mapping containing the options for configuring the stdout stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - any other key will be passed the class constructor. - **stderr_stream**: a mapping containing the options for configuring the stderr stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - any other key will be passed the class constructor. - **max_retry**: the number of times we attempt to start a process, before we abandon and stop the whole watcher. (default: 5) - **hooks**: callback functions for hooking into the watcher startup and shutdown process. **hooks** is a dict where each key is the hook name and each value is a 2-tuple with the name of the callable or the callabled itself and a boolean flag indicating if an exception occuring in the hook should not be ignored. Possible values for the hook name: *before_start*, *after_start*, *before_spawn*, *after_spawn*, *before_stop*, *after_stop*, *before_signal*, *after_signal*, *extended_stats* - **controller** -- the zmq entry point (default: 'tcp://127.0.0.1:5555') - **pubsub_endpoint** -- the zmq entry point for the pubsub (default: 'tcp://127.0.0.1:5556') - **stats_endpoint** -- the stats endpoint. If not provided, the *circusd-stats* process will not be launched. (default: None) - **statsd_close_outputs** -- if True sends the circusd-stats stdout/stderr to /dev/null (default: False) - **context** -- the zmq context (default: None) - **background** -- If True, the arbiter is launched in a thread in the background (default: False) - **stream_backend** -- the backend that will be used for the streaming process. Can be *thread* or *gevent*. When set to *gevent* you need to have *gevent* and *gevent_zmq* installed. (default: thread) - **plugins** -- a list of plugins. 
Each item is a mapping with: - **use** -- Fully qualified name that points to the plugin class - every other value is passed to the plugin in the **config** option - **debug** -- If True the arbiter is launched in debug mode (default: False) - **proc_name** -- the arbiter process name (default: circusd) - **loop** -- the event loop (default: None) - **check_delay** -- the delay between two controller points (default: 1 s) """ from circus.util import (DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB, DEFAULT_ENDPOINT_MULTICAST, DEFAULT_ENDPOINT_STATS) if controller is None: controller = DEFAULT_ENDPOINT_DEALER if pubsub_endpoint is None: pubsub_endpoint = DEFAULT_ENDPOINT_SUB if multicast_endpoint is None: multicast_endpoint = DEFAULT_ENDPOINT_MULTICAST if stats_endpoint is None and statsd: stats_endpoint = DEFAULT_ENDPOINT_STATS elif stats_endpoint is not None and not statsd: warnings.warn("You defined a stats_endpoint without " "setting up statsd to True.", DeprecationWarning) statsd = True from circus.watcher import Watcher Arbiter = self._get_arbiter_klass(background=background) _watchers = [] for watcher in watchers: cmd = watcher['cmd'] watcher['name'] = watcher.get('name', os.path.basename(cmd.split()[0])) watcher['stream_backend'] = stream_backend _watchers.append(Watcher.load_from_config(watcher)) return Arbiter(_watchers, controller, pubsub_endpoint, httpd=httpd, statsd=statsd, stats_endpoint=stats_endpoint, statsd_close_outputs=statsd_close_outputs, multicast_endpoint=multicast_endpoint, context=context, plugins=plugins, debug=debug, proc_name=proc_name, loop=loop, check_delay=check_delay, **kw)
def test_watcher_graceful_timeout(self):
    conf = get_config(_CONF['issue210'])
    watcher = Watcher.load_from_config(conf['watchers'][0])
    watcher.stop()
def test_hooks(self):
    conf = get_config(_CONF['hooks'])
    watcher = Watcher.load_from_config(conf['watchers'][0])
    self.assertEqual(watcher.hooks['before_start'].__doc__, hook.__doc__)
    self.assertTrue('before_start' not in watcher.ignore_hook_failure)
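# Illustrative only (not from the original source): the kind of callable
# test_hooks loads for 'before_start'. The (watcher, arbiter, hook_name)
# signature is the usual circus hook convention and is an assumption here;
# the test itself only checks the docstring and the failure-handling flag.
def sample_hook(watcher, arbiter, hook_name, **kwargs):
    """Sample before_start hook."""
    return True   # returning a falsy value can abort the watcher start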
def __init__(self, watchers, endpoint, pubsub_endpoint, check_delay=1.0, prereload_fn=None, context=None, loop=None, statsd=False, stats_endpoint=None, statsd_close_outputs=False, multicast_endpoint=None, plugins=None, sockets=None, warmup_delay=0, httpd=False, httpd_host='localhost', httpd_port=8080, httpd_close_outputs=False, debug=False, debug_gc=False, ssh_server=None, proc_name='circusd', pidfile=None, loglevel=None, logoutput=None, fqdn_prefix=None, umask=None, endpoint_owner=None): self.watchers = watchers self.endpoint = endpoint self.check_delay = check_delay self.prereload_fn = prereload_fn self.pubsub_endpoint = pubsub_endpoint self.multicast_endpoint = multicast_endpoint self.proc_name = proc_name self.ssh_server = ssh_server self.evpub_socket = None self.pidfile = pidfile self.loglevel = loglevel self.logoutput = logoutput self.umask = umask self.endpoint_owner = endpoint_owner try: # getfqdn appears to fail in Python3.3 in the unittest # framework so fall back to gethostname socket_fqdn = socket.getfqdn() except KeyError: socket_fqdn = socket.gethostname() if fqdn_prefix is None: fqdn = socket_fqdn else: fqdn = '{}@{}'.format(fqdn_prefix, socket_fqdn) self.fqdn = fqdn self.ctrl = self.loop = None self._provided_loop = False self.socket_event = False if loop is not None: self._provided_loop = True self.loop = loop # initialize zmq context self._init_context(context) self.pid = os.getpid() self._watchers_names = {} self._stopping = False self._restarting = False self.debug = debug self._exclusive_running_command = None if self.debug: self.stdout_stream = self.stderr_stream = {'class': 'StdoutStream'} else: self.stdout_stream = self.stderr_stream = None self.debug_gc = debug_gc if debug_gc: gc.set_debug(gc.DEBUG_LEAK) # initializing circusd-stats as a watcher when configured self.statsd = statsd self.stats_endpoint = stats_endpoint if self.statsd: cmd = "%s -c 'from circus import stats; stats.main()'" % \ sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --pubsub %s' % self.pubsub_endpoint cmd += ' --statspoint %s' % self.stats_endpoint if ssh_server is not None: cmd += ' --ssh %s' % ssh_server if debug: cmd += ' --log-level DEBUG' stats_watcher = Watcher('circusd-stats', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=statsd_close_outputs, close_child_stdout=statsd_close_outputs) self.watchers.append(stats_watcher) # adding the httpd if httpd: # adding the socket httpd_socket = CircusSocket(name='circushttpd', host=httpd_host, port=httpd_port) if sockets is None: sockets = [httpd_socket] else: sockets.append(httpd_socket) cmd = ("%s -c 'from circusweb import circushttpd; " "circushttpd.main()'") % sys.executable cmd += ' --endpoint %s' % self.endpoint cmd += ' --fd $(circus.sockets.circushttpd)' if ssh_server is not None: cmd += ' --ssh %s' % ssh_server # Adding the watcher httpd_watcher = Watcher('circushttpd', cmd, use_sockets=True, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=httpd_close_outputs, close_child_stdout=httpd_close_outputs) self.watchers.append(httpd_watcher) # adding each plugin as a watcher ch_stderr = self.stderr_stream is None ch_stdout = self.stdout_stream is None if plugins is not None: for plugin in plugins: fqn = plugin['use'] cmd = get_plugin_cmd(plugin, self.endpoint, self.pubsub_endpoint, self.check_delay, ssh_server, debug=self.debug) 
plugin_cfg = dict(cmd=cmd, priority=1, singleton=True, stdout_stream=self.stdout_stream, stderr_stream=self.stderr_stream, copy_env=True, copy_path=True, close_child_stderr=ch_stderr, close_child_stdout=ch_stdout) plugin_cfg.update(plugin) if 'name' not in plugin_cfg: plugin_cfg['name'] = fqn plugin_watcher = Watcher.load_from_config(plugin_cfg) self.watchers.append(plugin_watcher) self.sockets = CircusSockets(sockets) self.warmup_delay = warmup_delay
def test_watcher_stop_signal(self):
    conf = get_config(_CONF['issue594'])
    self.assertEqual(conf['watchers'][0]['stop_signal'], signal.SIGINT)
    watcher = Watcher.load_from_config(conf['watchers'][0])
    watcher.stop()
def get_arbiter(watchers, controller=None, pubsub_endpoint=None, stats_endpoint=None, env=None, name=None, context=None, background=False, stream_backend="thread", plugins=None, debug=False, proc_name="circusd"): """Creates a Arbiter and a single watcher in it. Options: - **watchers** -- a list of watchers. A watcher in that case is a dict containing: - **name** -- the name of the watcher (default: None) - **cmd** -- the command line used to run the Watcher. - **args** -- the args for the command (list or string). - **executable** -- When executable is given, the first item in the args sequence obtained from **cmd** is still treated by most programs as the command name, which can then be different from the actual executable name. It becomes the display name for the executing program in utilities such as **ps**. - **numprocesses** -- the number of processes to spawn (default: 1). - **warmup_delay** -- the delay in seconds between two spawns (default: 0) - **shell** -- if True, the processes are run in the shell (default: False) - **working_dir** - the working dir for the processes (default: None) - **uid** -- the user id used to run the processes (default: None) - **gid** -- the group id used to run the processes (default: None) - **env** -- the environment passed to the processes (default: None) - **send_hup**: if True, a process reload will be done by sending the SIGHUP signal. (default: False) - **stdout_stream**: a mapping containing the options for configuring the stdout stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **stderr_stream**: a mapping containing the options for configuring the stderr stream. Default to None. When provided, may contain: - **class**: the fully qualified name of the class to use for streaming. Defaults to circus.stream.FileStream - **refresh_time**: the delay between two stream checks. Defaults to 0.3 seconds. - any other key will be passed the class constructor. - **max_retry**: the number of times we attempt to start a process, before we abandon and stop the whole watcher. (default: 5) - **hooks**: callback functions for hooking into the watcher startup and shutdown process. **hooks** is a dict where each key is the hook name and each value is a 2-tuple with the name of the callable or the callabled itself and a boolean flag indicating if an exception occuring in the hook should not be ignored. Possible values for the hook name: *before_start*, *after_start*, *before_stop*, *after_stop*. - **controller** -- the zmq entry point (default: 'tcp://127.0.0.1:5555') - **pubsub_endpoint** -- the zmq entry point for the pubsub (default: 'tcp://127.0.0.1:5556') - **stats_endpoint** -- the stats endpoint. If not provided, the *circusd-stats* process will not be launched. (default: None) - **context** -- the zmq context (default: None) - **background** -- If True, the arbiter is launched in a thread in the background (default: False) - **stream_backend** -- the backend that will be used for the streaming process. Can be *thread* or *gevent*. When set to *gevent* you need to have *gevent* and *gevent_zmq* installed. (default: thread) - **plugins** -- a list of plugins. 
Each item is a mapping with: - **use** -- Fully qualified name that points to the plugin class - every other value is passed to the plugin in the **config** option - **debug** -- If True the arbiter is launched in debug mode (default: False) - **proc_name** -- the arbiter process name (default: circusd) """ from circus.util import DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB if controller is None: controller = DEFAULT_ENDPOINT_DEALER if pubsub_endpoint is None: pubsub_endpoint = DEFAULT_ENDPOINT_SUB if stream_backend == 'gevent': try: import gevent # NOQA except ImportError: sys.stderr.write("stream_backend set to gevent, " + "but gevent isn't installed\n") sys.stderr.write("Exiting...") sys.exit(1) from circus.watcher import Watcher if background: from circus.arbiter import ThreadedArbiter as Arbiter # NOQA else: from circus.arbiter import Arbiter # NOQA _watchers = [] for watcher in watchers: cmd = watcher['cmd'] watcher['name'] = watcher.get('name', os.path.basename(cmd.split()[0])) watcher['stream_backend'] = stream_backend _watchers.append(Watcher.load_from_config(watcher)) return Arbiter(_watchers, controller, pubsub_endpoint, stats_endpoint=stats_endpoint, context=context, plugins=plugins, debug=debug, proc_name=proc_name)
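# Illustrative only (not from the original source): minimal programmatic use
# of get_arbiter as described in the docstrings above. The sleep command and
# the numprocesses value are placeholders.
from circus import get_arbiter

arbiter = get_arbiter([{"cmd": "sleep 120", "numprocesses": 2}])
try:
    arbiter.start()
finally:
    arbiter.stop()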
def test_watcher_env_var(self):
    conf = get_config(_CONF["env_var"])
    watcher = Watcher.load_from_config(conf["watchers"][0])
    self.assertEqual("%s:/bin" % os.getenv("PATH"), watcher.env["PATH"])
    watcher.stop()