def _wait_child(self):
    try:
        # Don't block if no child processes have exited
        pid, status = os.waitpid(0, os.WNOHANG)
        if not pid:
            return None
    except OSError as exc:
        if exc.errno not in (errno.EINTR, errno.ECHILD):
            raise
        return None

    if os.WIFSIGNALED(status):
        sig = os.WTERMSIG(status)
        LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                 dict(pid=pid, sig=sig))
    else:
        code = os.WEXITSTATUS(status)
        LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                 dict(pid=pid, code=code))

    if pid not in self.children:
        LOG.warning(_LW('pid %d not in child list'), pid)
        return None

    wrap = self.children.pop(pid)
    wrap.children.remove(pid)
    return wrap
def wait(self):
    """Loop waiting on children to die and respawning as necessary."""

    systemd.notify_once()
    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, logging.DEBUG)

    try:
        while True:
            self.handle_signal()
            self._respawn_children()
            # No signal means that stop was called. Don't clean up here.
            if not self.sigcaught:
                return

            signame = _signo_to_signame(self.sigcaught)
            LOG.info(_LI('Caught %s, stopping children'), signame)
            if not _is_sighup_and_daemon(self.sigcaught):
                break

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)

            self.running = True
            self.sigcaught = None
    except eventlet.greenlet.GreenletExit:
        LOG.info(_LI("Wait called after thread killed. Cleaning up."))

    self.stop()
def _start_child(self, wrap):
    if len(wrap.forktimes) > wrap.workers:
        # Limit ourselves to one process a second (over the period of
        # number of workers * 1 second). This will allow workers to
        # start up quickly but ensure we don't fork off children that
        # die instantly too quickly.
        if time.time() - wrap.forktimes[0] < wrap.workers:
            LOG.info(_LI('Forking too fast, sleeping'))
            time.sleep(1)

        wrap.forktimes.pop(0)

    wrap.forktimes.append(time.time())

    pid = os.fork()
    if pid == 0:
        launcher = self._child_process(wrap.service)
        while True:
            self._child_process_handle_signal()
            status, signo = self._child_wait_for_exit_or_signal(launcher)
            if not _is_sighup_and_daemon(signo):
                break
            launcher.restart()

        os._exit(status)

    LOG.info(_LI('Started child %d'), pid)

    wrap.children.add(pid)
    self.children[pid] = wrap

    return pid
def _add_periodic_task(cls, task):
    """Add a periodic task to the list of periodic tasks.

    The task should already be decorated by @periodic_task.

    :return: whether task was actually enabled
    """
    name = task._periodic_name

    if task._periodic_spacing < 0:
        LOG.info(_LI('Skipping periodic task %(task)s because '
                     'its interval is negative'),
                 {'task': name})
        return False
    if not task._periodic_enabled:
        LOG.info(_LI('Skipping periodic task %(task)s because '
                     'it is disabled'),
                 {'task': name})
        return False

    # A periodic spacing of zero indicates that this task should
    # be run on the default interval to avoid running too
    # frequently.
    if task._periodic_spacing == 0:
        task._periodic_spacing = DEFAULT_INTERVAL

    cls._periodic_tasks.append((name, task))
    cls._periodic_spacing[name] = task._periodic_spacing
    return True
def wait(self):
    """Loop waiting on children to die and respawning as necessary."""

    systemd.notify_once()
    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, logging.DEBUG)

    try:
        while True:
            self.handle_signal()
            self._respawn_children()
            # No signal means that stop was called. Don't clean up here.
            if not self.sigcaught:
                return

            signame = _signo_to_signame(self.sigcaught)
            LOG.info(_LI('Caught %s, stopping children'), signame)
            if not _is_sighup_and_daemon(self.sigcaught):
                break

            cfg.CONF.reload_config_files()
            for service in set(
                    [wrap.service for wrap in self.children.values()]):
                service.reset()

            for pid in self.children:
                os.kill(pid, signal.SIGHUP)

            self.running = True
            self.sigcaught = None
    except eventlet.greenlet.GreenletExit:
        LOG.info(_LI("Wait called after thread killed. Cleaning up."))

    self.stop()
def get_filtered_objects(self, filter_classes, objs,
                         filter_properties, index=0):
    """Get objects after filtering.

    :param filter_classes: filters that will be used to filter the
                           objects
    :param objs: objects that will be filtered
    :param filter_properties: client filter properties
    :param index: This value needs to be increased in the caller
                  function of get_filtered_objects when handling
                  each resource.
    """
    list_objs = list(objs)
    LOG.debug("Starting with %d host(s)", len(list_objs))
    for filter_cls in filter_classes:
        cls_name = filter_cls.__name__
        filter_class = filter_cls()

        if filter_class.run_filter_for_index(index):
            objs = filter_class.filter_all(list_objs, filter_properties)
            if objs is None:
                LOG.debug("Filter %(cls_name)s says to stop filtering",
                          {'cls_name': cls_name})
                return
            list_objs = list(objs)
            msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)")
                   % {'cls_name': cls_name, 'obj_len': len(list_objs)})
            if not list_objs:
                LOG.info(msg)
                break
            LOG.debug(msg)
    return list_objs
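
# Hedged usage sketch for get_filtered_objects() above. It relies only on the
# two methods the loop calls on each filter instance, run_filter_for_index()
# and filter_all(); EnoughRamFilter, the 'ram_mb' keys and the `handler`
# object that owns get_filtered_objects() are hypothetical names used purely
# for illustration.
class EnoughRamFilter(object):
    def run_filter_for_index(self, index):
        # Apply this filter for every resource index.
        return True

    def filter_all(self, objs, filter_properties):
        wanted = filter_properties.get('ram_mb', 0)
        return [obj for obj in objs if obj.get('ram_mb', 0) >= wanted]


hosts = [{'name': 'host1', 'ram_mb': 2048},
         {'name': 'host2', 'ram_mb': 512}]
# Only host1 survives the 1024 MB requirement.
survivors = handler.get_filtered_objects([EnoughRamFilter], hosts,
                                         {'ram_mb': 1024})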
def acquire(self):
    basedir = os.path.dirname(self.fname)

    if not os.path.exists(basedir):
        fileutils.ensure_tree(basedir)
        LOG.info(_LI('Created lock path: %s'), basedir)

    self.lockfile = open(self.fname, 'w')

    while True:
        try:
            # Using non-blocking locks since green threads are not
            # patched to deal with blocking locking calls.
            # Also upon reading the MSDN docs for locking(), it seems
            # to have a laughable 10 attempts "blocking" mechanism.
            self.trylock()
            LOG.debug('Got file lock "%s"', self.fname)
            return True
        except IOError as e:
            if e.errno in (errno.EACCES, errno.EAGAIN):
                # external locks synchronise things like iptables
                # updates - give it some time to prevent busy spinning
                time.sleep(0.01)
            else:
                raise threading.ThreadError(_("Unable to acquire lock on"
                                              " `%(filename)s` due to"
                                              " %(exception)s") %
                                            {'filename': self.fname,
                                             'exception': e})
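
# Hedged sketch of driving acquire() above. It assumes the surrounding lock
# class (called _FileLock here only for illustration) stores the lock file
# path as self.fname and provides the platform-specific trylock()/release()
# pair that acquire() depends on but which is not shown in this section; the
# path is illustrative as well.
lock = _FileLock('/var/lock/myservice/external.lock')
if lock.acquire():
    try:
        pass  # critical section protected by the external file lock
    finally:
        lock.release()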
def load_rules(self, force_reload=False):
    """Loads policy_path's rules.

    Policy file is cached and will be reloaded if modified.

    :param force_reload: Whether to reload rules from config file.
    """
    if force_reload:
        self.use_conf = force_reload

    if self.use_conf:
        if not self.policy_path:
            self.policy_path = self._get_policy_path(self.policy_file)

        self._load_policy_file(self.policy_path, force_reload,
                               overwrite=self.overwrite)
        for path in CONF.policy_dirs:
            try:
                path = self._get_policy_path(path)
            except cfg.ConfigFilesNotFoundError:
                LOG.info(_LI("Can not find policy directory: %s"), path)
                continue
            self._walk_through_policy_directory(path,
                                                self._load_policy_file,
                                                force_reload, False)
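
# Hedged usage sketch for load_rules() above. It assumes the surrounding
# Enforcer class of this policy module accepts the policy file name at
# construction time; the file name is illustrative only.
enforcer = Enforcer(policy_file='policy.json')
enforcer.load_rules(force_reload=True)   # re-reads the file plus CONF.policy_dirs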
def _pipe_watcher(self):
    # This will block until the write end is closed when the parent
    # dies unexpectedly
    self.readpipe.read()

    LOG.info(_LI('Parent process has died unexpectedly, exiting'))

    sys.exit(1)
def _pipe_watcher(self):
    # This will block until the write end is closed when the parent
    # dies unexpectedly
    self.readpipe.read(1)

    LOG.info(_LI('Parent process has died unexpectedly, exiting'))

    sys.exit(1)
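
# Hedged sketch of the parent-death detection that the _pipe_watcher()
# variants above rely on. The real wiring lives elsewhere in the launcher;
# this only shows the general pattern, with hypothetical names.
import os

r, w = os.pipe()
pid = os.fork()
if pid == 0:
    # Child: close the write end so the parent holds the only writer.
    os.close(w)
    readpipe = os.fdopen(r, 'r')
    # readpipe.read() (or read(1)) now blocks until the parent exits and the
    # kernel closes the last write end, which is exactly what _pipe_watcher()
    # waits on before calling sys.exit(1).
else:
    # Parent: close the read end and keep the write end open for its lifetime.
    os.close(r)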
def __init__(cls, names, bases, dict_):
    """Metaclass that allows us to collect decorated periodic tasks."""
    super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

    # NOTE(sirp): if the attribute is not present then we must be the base
    # class, so, go ahead and initialize it. If the attribute is present,
    # then we're a subclass so make a copy of it so we don't step on our
    # parent's toes.
    try:
        cls._periodic_tasks = cls._periodic_tasks[:]
    except AttributeError:
        cls._periodic_tasks = []

    try:
        cls._periodic_spacing = cls._periodic_spacing.copy()
    except AttributeError:
        cls._periodic_spacing = {}

    for value in cls.__dict__.values():
        if getattr(value, '_periodic_task', False):
            task = value
            name = task.__name__

            if task._periodic_spacing < 0:
                LOG.info(_LI('Skipping periodic task %(task)s because '
                             'its interval is negative'),
                         {'task': name})
                continue
            if not task._periodic_enabled:
                LOG.info(_LI('Skipping periodic task %(task)s because '
                             'it is disabled'),
                         {'task': name})
                continue

            # A periodic spacing of zero indicates that this task should
            # be run on the default interval to avoid running too
            # frequently.
            if task._periodic_spacing == 0:
                task._periodic_spacing = DEFAULT_INTERVAL

            cls._periodic_tasks.append((name, task))
            cls._periodic_spacing[name] = task._periodic_spacing
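
# Hedged sketch of how _PeriodicTasksMeta.__init__() above collects tasks at
# class-definition time. The @periodic_task decorator normally sets these
# attributes; here they are assigned by hand, and ExampleTasks/heartbeat are
# hypothetical names used only for illustration.
import six


@six.add_metaclass(_PeriodicTasksMeta)
class ExampleTasks(object):
    def heartbeat(self, context):
        pass
    heartbeat._periodic_task = True
    heartbeat._periodic_spacing = 10     # seconds; 0 would become DEFAULT_INTERVAL
    heartbeat._periodic_enabled = True

# After class creation the metaclass has registered the task:
#   ExampleTasks._periodic_tasks   -> [('heartbeat', <function heartbeat>)]
#   ExampleTasks._periodic_spacing -> {'heartbeat': 10}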
def remove_external_lock_file(name, lock_file_prefix=None):
    """Remove an external lock file when it's not used anymore.

    This will be helpful when we have a lot of lock files.
    """
    with internal_lock(name):
        lock_file_path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(lock_file_path)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': lock_file_path})
def stop(self):
    """Terminate child processes and wait on each."""
    self.running = False
    for pid in self.children:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as exc:
            if exc.errno != errno.ESRCH:
                raise

    # Wait for children to die
    if self.children:
        LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
        while self.children:
            self._wait_child()
def _wait_for_exit_or_signal(self, ready_callback=None):
    status = None
    signo = 0

    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, logging.DEBUG)

    try:
        if ready_callback:
            ready_callback()
        super(ServiceLauncher, self).wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    finally:
        self.stop()

    return status, signo
def initialize_if_enabled():
    backdoor_locals = {
        'exit': _dont_use_this,      # So we don't exit the entire process
        'quit': _dont_use_this,      # So we don't exit the entire process
        'fo': _find_objects,
        'pgt': _print_greenthreads,
        'pnt': _print_nativethreads,
    }

    if CONF.backdoor_port is None:
        return None

    start_port, end_port = _parse_port_range(str(CONF.backdoor_port))

    # NOTE(johannes): The standard sys.displayhook will print the value of
    # the last expression and set it to __builtin__._, which overwrites
    # the __builtin__._ that gettext sets. Let's switch to using pprint
    # since it won't interact poorly with gettext, and it's easier to
    # read the output too.
    def displayhook(val):
        if val is not None:
            pprint.pprint(val)
    sys.displayhook = displayhook

    sock = _listen('localhost', start_port, end_port, eventlet.listen)

    # In the case of backdoor port being zero, a port number is assigned by
    # listen(). In any case, pull the port number out here.
    port = sock.getsockname()[1]
    LOG.info(
        _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
        {'port': port, 'pid': os.getpid()})
    eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
                     locals=backdoor_locals)
    return port
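
# Hedged usage sketch for initialize_if_enabled() above. backdoor_port is the
# option the function reads; per the code and comments above it accepts a
# single port, a "start:end" range, or 0 to let listen() pick a free port.
# The set_override() call is only for illustration.
CONF.set_override('backdoor_port', '0')
port = initialize_if_enabled()            # None when backdoor_port is unset
LOG.debug('Eventlet backdoor bound to port %s', port)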
def _child_wait_for_exit_or_signal(self, launcher):
    status = 0
    signo = 0

    # NOTE(johannes): All exceptions are caught to ensure this
    # doesn't fallback into the loop spawning children. It would
    # be bad for a child to spawn more children.
    try:
        launcher.wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Child caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    except BaseException:
        LOG.exception(_LE('Unhandled exception'))
        status = 2
    finally:
        launcher.stop()

    return status, signo
def launch_service(self, service, workers=1):
    wrap = ServiceWrapper(service, workers)

    LOG.info(_LI('Starting %d workers'), wrap.workers)
    while self.running and len(wrap.children) < wrap.workers:
        self._start_child(wrap)
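
# Hedged sketch tying together launch_service(), wait() and stop() from this
# section. MyService stands in for any service object the launcher can fork
# workers for, and ProcessLauncher is assumed to be the class these methods
# belong to; both names are illustrative.
launcher = ProcessLauncher()
launcher.launch_service(MyService(), workers=4)   # fork four worker children
launcher.wait()    # respawn children and handle signals until told to stop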
def link_request_ids(context, source_id, target_id=None, stage=None,
                     target_name=None, notifier=None):
    """Links the Request ID from the Source service to the Request ID
    returned from the Target service.

    Linkages are logged and emitted as INFO notifications.

    :param context: context object
    :param source_id: the Request ID of the source
    :param target_id: the Request ID of the target
    :param stage: optional event name extension to indicate which part of
                  the linkage this is.
    :param target_name: human readable name of the target system you are
                        talking to.
    :param notifier: notifier object

    A typical use case is: System A asking System B to perform some action.
    The linkages might look like this:

    .. code-block:: python

       link_request_ids(sys_A.request_ID, stage="start")

       # send request to System B and get request ID

       link_request_ids(sys_A.request_ID, target_id=sys_B.request.ID)

       # optionally wait for System B to complete

       link_request_ids(sys_A.request_ID, target_id=sys_B.request.ID,
                        stage="end")

    But, it could be as simple as:

    .. code-block:: python

       link_request_ids(sys_A.request_ID, target_id=sys_B.request.ID)
    """
    event_name = "request.link"
    if stage:
        event_name += ".%s" % stage

    rtarget_id = ""
    if target_id:
        rtarget_id = _("TargetId=%(id)s ") % {'id': target_id}

    rtarget_name = ""
    if target_name:
        rtarget_name = _("Target='%(name)s' ") % {'name': target_name}

    arrow = ""
    if target_name or target_id:
        arrow = " -> "

    LOG.info(_LI("Request ID Link: %(event_name)s "
                 "'%(source_id)s'%(arrow)s"
                 "%(target_name)s%(target_id)s") %
             {"event_name": event_name,
              "source_id": source_id,
              "target_name": rtarget_name,
              "arrow": arrow,
              "target_id": rtarget_id})

    if notifier:
        payload = {"source_request_id": source_id,
                   "target_request_id": target_id,
                   "target_name": target_name,
                   "stage": stage}
        notifier.info(context, event_name, payload)