def run_child(self):
    def child_hup(*args):
        """Shuts down child processes, existing requests are handled."""
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        eventlet.wsgi.is_accepting = False
        self.sock.close()

    pid = os.fork()
    if pid == 0:
        signal.signal(signal.SIGHUP, child_hup)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # ignore the interrupt signal to avoid a race whereby
        # a child worker receives the signal before the parent
        # and is respawned unnecessarily as a result
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # The child has no need to stash the unwrapped
        # socket, and the reference prevents a clean
        # exit on sighup
        self._sock = None
        self.run_server()
        LOG.info(_LI('Child %d exiting normally'), os.getpid())
        # self.pool.waitall() is now called in wsgi's server so
        # it's safe to exit here
        sys.exit(0)
    else:
        LOG.info(_LI('Started child %s'), pid)
        self.children.add(pid)
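# A self-contained sketch of the prefork pattern used by run_child(): the
# parent forks, the child installs its own signal handlers, and the parent
# records the child's pid. The handler body and exit path are illustrative.
import os
import signal
import sys

children = set()


def child_hup(*args):
    # Stop accepting new work; let in-flight requests finish.
    signal.signal(signal.SIGHUP, signal.SIG_IGN)


pid = os.fork()
if pid == 0:
    signal.signal(signal.SIGHUP, child_hup)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    # ... serve requests here, then exit cleanly ...
    sys.exit(0)
else:
    children.add(pid)
    os.waitpid(pid, 0)  # reap the child in this standalone sketch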
def _remove_children(self, pid):
    if pid in self.children:
        self.children.remove(pid)
        LOG.info(_LI('Removed dead child %s'), pid)
    elif pid in self.stale_children:
        self.stale_children.remove(pid)
        LOG.info(_LI('Removed stale child %s'), pid)
    else:
        LOG.warning(_LW('Unrecognised child %s'), pid)
def _single_run(self, application, sock):
    """Start a WSGI server in a new green thread."""
    LOG.info(_LI("Starting single process server"))
    eventlet.wsgi.server(sock, application,
                         custom_pool=self.pool,
                         log=self._wsgi_logger,
                         debug=False,
                         keepalive=CONF.api.http_keepalive)
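# A minimal, self-contained sketch of eventlet's WSGI server for reference;
# the address and the trivial app below are illustrative, not part of the
# original code.
import eventlet
import eventlet.wsgi


def hello_app(environ, start_response):
    """Trivial WSGI app used only to demonstrate eventlet.wsgi.server."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello\n']


if __name__ == '__main__':
    sock = eventlet.listen(('127.0.0.1', 8000))
    eventlet.wsgi.server(sock, hello_app)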
def delete_document_unknown_parent(self, doc_id, version=None):
    """Deletes a document that requires routing but where the routing is
    not known. Since this involves a query to find it, there is a
    potential race condition between storing and indexing the document.
    A search is run to find the routing parent, and the document is then
    deleted.
    """
    search_doc_id = doc_id
    if self.plugin.requires_role_separation:
        # Just look for the admin document, though it doesn't matter which.
        # This has to match whatever we strip later with strip_role_suffix
        search_doc_id += ADMIN_ID_SUFFIX

    # Necessary to request 'fields' for e-s 1.x, not for 2.x
    query = {'filter': {'term': {'_id': search_doc_id}},
             'fields': ['_parent', '_routing']}
    search_results = self.engine.search(index=self.alias_name,
                                        doc_type=self.document_type,
                                        body=query)
    total_hits = search_results['hits']['total']
    if not total_hits:
        ctx = {'doc_type': self.document_type, 'id': doc_id}
        LOG.warning(_LW("No results found for %(doc_type)s id %(id)s; "
                        "can't find routing to delete"), ctx)
        return

    # There are results. Check that there's only one unique result (may be
    # two in different indices from the same alias). ES 1.x and 2.x differ
    # slightly; metafields are returned by default and not in 'fields' in
    # 2.x whether you like it or not
    distinct_results = set((r['_id'], get_metafield(r, '_parent'))
                           for r in search_results['hits']['hits'])
    if len(distinct_results) != 1:
        ctx = {'count': len(distinct_results),
               'results': ", ".join(str(r) for r in distinct_results),
               'doc_type': self.document_type,
               'id': doc_id}
        LOG.error(_LE("%(count)d distinct results (%(results)s) found for "
                      "get_document_by_query for %(doc_type)s id %(id)s"),
                  ctx)

    first_hit = search_results['hits']['hits'][0]
    routing = get_metafield(first_hit, '_routing')
    parent = get_metafield(first_hit, '_parent')
    parent = strip_role_suffix(parent, ADMIN_ID_SUFFIX)
    LOG.debug("Deleting %s id %s with parent %s routing %s",
              self.document_type, doc_id, parent, routing)
    delete_info = {'_id': doc_id, '_parent': parent}
    if routing:
        delete_info['_routing'] = routing
    if version:
        delete_info['_version'] = version
    self.delete_document(delete_info)
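# A rough sketch of the same lookup-then-delete flow against a raw
# elasticsearch-py client. The index, document type, and id are
# illustrative, and the 'filter'/'fields' body assumes the same ES 1.x
# syntax used in the method above.
from elasticsearch import Elasticsearch

es = Elasticsearch()
body = {'filter': {'term': {'_id': 'abc123'}},
        'fields': ['_parent', '_routing']}
results = es.search(index='searchlight-search',
                    doc_type='OS::Neutron::Port', body=body)
if results['hits']['total']:
    hit = results['hits']['hits'][0]
    # ES 1.x returns requested metafields under 'fields'; 2.x returns
    # them at the top level of the hit.
    fields = hit.get('fields', {})
    parent = fields.get('_parent') or hit.get('_parent')
    es.delete(index='searchlight-search', doc_type='OS::Neutron::Port',
              id='abc123', parent=parent)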
def _verify_and_respawn_children(self, pid, status):
    if len(self.stale_children) == 0:
        LOG.debug('No stale children')
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        LOG.error(_LE('Not respawning child %d, cannot '
                      'recover from termination'), pid)
        if not self.children and not self.stale_children:
            LOG.info(_LI('All workers have terminated. Exiting'))
            self.running = False
    else:
        if len(self.children) < self.workers:
            self.run_child()
def start_wsgi(self):
    if self.workers == 0:
        # Useful for profiling, test, debug etc.
        self.pool = self.create_pool()
        self.pool.spawn_n(self._single_run, self.application, self.sock)
        return
    else:
        LOG.info(_LI("Starting %d workers"), self.workers)
        signal.signal(signal.SIGTERM, self.kill_children)
        signal.signal(signal.SIGINT, self.kill_children)
        signal.signal(signal.SIGHUP, self.hup)
        while len(self.children) < self.workers:
            self.run_child()
def _es_reindex_worker(self, es_reindex, resource_groups, index_names):
    """Helper to re-index using the ES reindex helper, allowing all ES
    re-indexes to occur simultaneously. We may need to clean up; see
    sig_handler() for more info.
    """
    for group in six.iterkeys(index_names):
        # Grab the correct tuple as a list, convert the list to a single
        # tuple, and extract the second member (the search alias).
        alias_search = [a for a in resource_groups if a[0] == group][0][1]
        dst_index = index_names[group]
        LOG.info(_LI("ES Reindex start from %(src)s to %(dst)s "
                     "for types %(types)s"),
                 {'src': alias_search, 'dst': dst_index,
                  'types': ', '.join(es_reindex)})
        try:
            es_utils.reindex(src_index=alias_search, dst_index=dst_index,
                             type_list=es_reindex)
            es_utils.refresh_index(dst_index)
            LOG.info(_LI("ES Reindex end from %(src)s to %(dst)s "
                         "for types %(types)s"),
                     {'src': alias_search, 'dst': dst_index,
                      'types': ', '.join(es_reindex)})
        except Exception as e:
            LOG.exception(_LE("Failed to setup index extension "
                              "%(ex)s: %(e)s"),
                          {'ex': dst_index, 'e': e})
            raise
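# es_utils.reindex is a project wrapper not shown here. Assuming the plain
# elasticsearch-py client, its scan/bulk-based reindex helper is a rough
# equivalent; the client and index names below are illustrative.
from elasticsearch import Elasticsearch, helpers

client = Elasticsearch()
helpers.reindex(client,
                source_index='searchlight-search',  # like alias_search above
                target_index='sl-new-index')        # like dst_index above
client.indices.refresh(index='sl-new-index')        # like es_utils.refresh_index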
def _plugin_api(self, plugin_obj, index_names):
    """Helper to re-index using the plugin API within a thread, allowing
    all plugins to re-index simultaneously. We may need to clean up; see
    sig_handler() for more info.
    """
    gname = plugin_obj.resource_group_name
    index_name = index_names[gname]
    dtype = plugin_obj.document_type

    LOG.info(_LI("API Reindex start %(type)s into %(index_name)s"),
             {'type': dtype, 'index_name': index_name})
    try:
        plugin_obj.index_initial_data(index_name=index_name)
        es_utils.refresh_index(index_name)
        LOG.info(_LI("API Reindex end %(type)s into %(index_name)s"),
                 {'type': dtype, 'index_name': index_name})
    except exceptions.EndpointNotFound:
        # Display a warning, do not propagate.
        doc = plugin_obj.get_document_type()
        LOG.warning(_LW("Service is not available for plugin: %(doc)s"),
                    {"doc": doc})
    except Exception as e:
        LOG.exception(_LE("Failed to setup index extension "
                          "%(ex)s: %(e)s"),
                      {'ex': index_name, 'e': e})
        raise
def _log_notification(self, handler, ctxt, doc_type, event_type, payload,
                      metadata):
    project = ctxt.get('project_id',
                       ctxt.get('tenant_id', ctxt.get('tenant', '-')))
    if not project:
        # Try to get it from the payload, but not very hard
        project = payload.get('tenant_id', payload.get('project_id'))
    log_context = {'event_type': event_type,
                   'doc_type': doc_type,
                   'timestamp': metadata['timestamp'],
                   'project': project}
    payload_fields = handler.get_log_fields(event_type, payload)
    additional = " ".join("%s:%s" % (k, v or '-')
                          for k, v in payload_fields)
    log_context['additional'] = additional or ''
    LOG.info(_LI("Starting %(doc_type)s %(event_type)s \"%(timestamp)s\" "
                 "project_id:%(project)s %(additional)s"), log_context)
    return log_context
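# The LOG.info call above defers formatting to the logging layer; the
# equivalent direct dict-based %-formatting looks like this (values are
# illustrative).
log_context = {'event_type': 'port.create.end',
               'doc_type': 'OS::Neutron::Port',
               'timestamp': '2016-03-17 14:32:01',
               'project': 'demo',
               'additional': ''}
print("Starting %(doc_type)s %(event_type)s \"%(timestamp)s\" "
      "project_id:%(project)s %(additional)s" % log_context)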
def wait_on_children(self):
    while self.running:
        try:
            pid, status = os.wait()
            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                self._remove_children(pid)
                self._verify_and_respawn_children(pid, status)
        except OSError as err:
            if err.errno not in (errno.EINTR, errno.ECHILD):
                raise
        except KeyboardInterrupt:
            LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
            break
        except exception.SIGHUPInterrupt:
            self.reload()
            continue
    eventlet.greenio.shutdown_safe(self.sock)
    self.sock.close()
    LOG.debug('Exited')
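# A self-contained sketch of the reaping logic above: fork a child, wait
# for it, and decode its status with the os.WIF* macros. The exit code is
# illustrative.
import os

pid = os.fork()
if pid == 0:
    os._exit(3)  # child: exit immediately with a nonzero status

reaped_pid, status = os.wait()
if os.WIFEXITED(status):
    print("child %d exited with code %d"
          % (reaped_pid, os.WEXITSTATUS(status)))
elif os.WIFSIGNALED(status):
    print("child %d killed by signal %d"
          % (reaped_pid, os.WTERMSIG(status)))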
def prepare_index(self, index_name):
    """Prepare a new index for usage with this listener. This must be
    called immediately after a new index is created, but before it gets
    associated with an alias. Prepping means we will add the settings and
    mapping for this listener's document type.
    """
    if self.parent_plugin_type():
        LOG.debug("Skipping index prep for %(doc_type)s; will be handled "
                  "by parent (%(parent_type)s)",
                  {"doc_type": self.document_type,
                   "parent_type": self.parent_plugin_type()})
        return

    self.check_mapping_sort_fields()
    for child_plugin in self.child_plugins:
        child_plugin.check_mapping_sort_fields()

    # Prepare the new index for this document type.
    self.setup_index_mapping(index_name=index_name)
def create_or_update(self, event_type, payload, timestamp):
    port_id = payload['port']['id']

    if payload['port'].get('device_owner', None) == 'network:dhcp':
        # TODO(sjmc7): Remove this once we can get proper notifications
        # about DHCP ports.
        # See https://bugs.launchpad.net/searchlight/+bug/1558790
        LOG.info(_LI("Skipping notification for DHCP port %s. If neutron "
                     "is sending notifications for DHCP ports, the "
                     "Searchlight plugin should be updated to process "
                     "them."), port_id)
        return

    LOG.debug("Updating port information for %s", port_id)

    # Version doesn't really make a huge amount of sense here but
    # is better than nothing
    port = serialize_port(payload['port'])
    version = self.get_version(port, timestamp)
    self.index_helper.save_document(port, version=version)
    return pipeline.IndexItem(self.index_helper.plugin, event_type,
                              payload, port)
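# Illustrative only: the minimal payload shape create_or_update() reads.
# A real neutron notification carries many more port attributes.
sample_payload = {
    'port': {
        'id': 'f7e4b9c2-0000-0000-0000-000000000000',  # hypothetical UUID
        'device_owner': 'network:dhcp',  # this value triggers the skip branch
    }
}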
def _log_finished(self, log_context):
    LOG.info(_LI("Finished %(doc_type)s %(event_type)s \"%(timestamp)s\" "
                 "project_id:%(project)s %(additional)s"), log_context)
def __init__(self, app):
    LOG.info(_LI("Initialized gzip middleware"))
    super(GzipMiddleware, self).__init__(app)