def _process_func(watcher, watcher_recycle_func):
    while True:
        try:
            watcher.process()
        except Exception as e:
            # Recycle watcher
            vlog.exception("Failure in watcher %s" % type(watcher).__name__)
            vlog.warn("Regenerating watcher because of \"%s\" and "
                      "reconnecting to stream using function %s"
                      % (str(e), watcher_recycle_func.__name__))
            watcher = watcher_recycle_func()

def _sync_k8s_services():
    if variables.OVN_MODE == "overlay":
        mode = ovn_k8s.modes.overlay.OvnNB()
    else:
        return

    try:
        services = kubernetes.get_all_services(variables.K8S_API_SERVER)
        if services:
            mode.sync_services(services)
    except Exception as e:
        vlog.exception("failed in _sync_k8s_services (%s)" % (str(e)))

def _sync_k8s_pods():
    if config.get_option('ovn_mode') == "overlay":
        mode = ovn_k8s.modes.overlay.OvnNB()
    else:
        return

    try:
        pods = kubernetes.get_all_pods(variables.K8S_API_SERVER)
        if pods:
            mode.sync_pods(pods)
    except Exception as e:
        vlog.exception("failed in _sync_k8s_pods (%s)" % (str(e)))

def _process_func(watcher, watcher_recycle_func):
    while True:
        try:
            watcher.process()
        except Exception as e:
            # Recycle watcher
            if not isinstance(e, exceptions.APIServerTimeout):
                vlog.exception("Failure in watcher %s"
                               % type(watcher).__name__)
            vlog.warn("Regenerating watcher because of \"%s\" and "
                      "reconnecting to stream using function %s"
                      % (str(e), watcher_recycle_func.__name__))
            watcher = watcher_recycle_func()

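# Illustrative sketch only (hypothetical names, not part of the original
# module): the contract _process_func relies on is an object whose process()
# call blocks on the event stream and raises on failure, plus a zero-argument
# factory that returns a freshly connected replacement.
class _ExampleWatcher(object):
    def __init__(self, events):
        self._events = iter(events)

    def process(self):
        # Consumes one event; any exception raised here (e.g. StopIteration
        # when the stream ends) causes _process_func to recycle the watcher.
        event = next(self._events)
        vlog.dbg("handled event %s" % (event,))


def _recreate_example_watcher():
    # Passed as watcher_recycle_func: "reconnects" and returns a new watcher.
    return _ExampleWatcher(["ADDED", "MODIFIED", "DELETED"])
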
def delete_logical_port(self, event):
    data = event.metadata
    pod_name = data['metadata']['name']
    namespace = data['metadata']['namespace']
    logical_port = "%s_%s" % (namespace, pod_name)
    if not pod_name:
        vlog.err("absent pod name in pod %s. "
                 "unable to delete logical port" % data)
        return

    try:
        ovn_nbctl("--if-exists", "lsp-del", logical_port)
    except Exception:
        vlog.exception("failure in delete_logical_port: lsp-del")
        return

    vlog.info("deleted logical port %s" % logical_port)

def _get_socket(self, host, port, backlog):
    bind_addr = (host, port)
    # TODO(dims): eventlet's green dns/socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    try:
        info = socket.getaddrinfo(bind_addr[0],
                                  bind_addr[1],
                                  socket.AF_UNSPEC,
                                  socket.SOCK_STREAM)[0]
        family = info[0]
        bind_addr = info[-1]
    except Exception:
        vlog.exception("Unable to listen on %(host)s:%(port)s"
                       % {'host': host, 'port': port})
        sys.exit(1)

    sock = None
    retry_until = time.time() + RETRY_UNTIL_WINDOW
    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr,
                                   backlog=backlog,
                                   family=family)
        except socket.error as err:
            if err.errno != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError("Could not bind to %(host)s:%(port)s "
                           "after trying for %(time)d seconds"
                           % {'host': host, 'port': port,
                              'time': RETRY_UNTIL_WINDOW})
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # sockets can hang around forever without keepalive
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    if hasattr(socket, 'TCP_KEEPIDLE'):
        sock.setsockopt(socket.IPPROTO_TCP,
                        socket.TCP_KEEPIDLE,
                        TCP_KEEPIDLE)
    return sock

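# Minimal usage sketch (assumed surroundings: an eventlet-based server class
# in which self.app is the WSGI application to serve; eventlet.wsgi has to
# be imported explicitly).  The socket returned above is handed straight to
# eventlet's WSGI server.
import eventlet.wsgi


def start(self, host, port, backlog=128):
    sock = self._get_socket(host, port, backlog)
    eventlet.wsgi.server(sock, self.app)
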
def delete_logical_port(self, event):
    data = event.metadata
    pod_name = data['metadata']['name']
    namespace = data['metadata']['namespace']
    logical_port = "%s_%s" % (namespace, pod_name)
    if not pod_name:
        vlog.err("absent pod name in pod %s. "
                 "unable to delete logical port" % data)
        return

    annotations = data['metadata']['annotations']
    ip_address = self._get_ip_address_from_annotations(annotations)
    if ip_address:
        self._delete_k8s_l4_port_name_cache(data, ip_address)

    try:
        ovn_nbctl("--if-exists", "lsp-del", logical_port)
    except Exception:
        vlog.exception("failure in delete_logical_port: lsp-del")
        return

    vlog.info("deleted logical port %s" % logical_port)

def start_threads():
    pool = greenpool.GreenPool()
    pool.spawn(_unixctl_run)
    pool.spawn(conn_processor.run_processor)

    try:
        pod_watcher_inst = _create_k8s_pod_watcher()
        service_watcher_inst = _create_k8s_service_watcher()
        endpoint_watcher_inst = _create_k8s_endpoint_watcher()
    except Exception as e:
        # TODO: We need a better re-try mechanism. This is only
        # known to be a problem when there is no k8s-apiserver running
        # right when the watcher is started. After the initial connection
        # we have a retry mechanism for any loss of connection.
        vlog.exception("failed to create watchers (%s)" % (str(e)))
        sys.exit(1)

    pool.spawn(_process_func, pod_watcher_inst,
               _create_k8s_pod_watcher)
    pool.spawn(_process_func, service_watcher_inst,
               _create_k8s_service_watcher)
    pool.spawn(_process_func, endpoint_watcher_inst,
               _create_k8s_endpoint_watcher)
    pool.waitall()

    ad_model = UserGroupDataModel()

    def ad_update_thread():
        while True:
            ad_model.update_from_ad()  # XXX: blocks eventlet
            time.sleep(3)

    wsgi_server.pool.spawn_n(ad_update_thread)

    ad_row_handler = CollectionHandler('/tables/ad-groups/rows', ad_model)
    api.register_handler(ad_row_handler, 0)

    # Add static tables to model
    tables_model.add_item({'sample': 'schema', 'id': 'ad-groups'},
                          'ad-groups')

    vlog.info("Starting congress server")
    wsgi_server.start(api, args.http_listen_port, args.http_listen_addr)
    wsgi_server.wait()
    # TODO: trigger watcher for policy outputs


if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        # Let system.exit() calls complete normally
        raise
    except:
        vlog.exception("traceback")
        sys.exit(ovs.daemon.RESTART_EXIT_CODE)

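# The XXX above flags blocking calls inside an eventlet green thread: unless
# the standard library is monkey-patched, a blocking update_from_ad() and
# time.sleep() both stall the hub, so the WSGI server cannot serve requests
# in the meantime.  At minimum the sleep can be made cooperative -- a sketch
# of the same loop, assuming eventlet is already imported by this module:
def ad_update_thread():
    while True:
        ad_model.update_from_ad()
        eventlet.sleep(3)   # yields to the eventlet hub between polls
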
    seqno = idl.change_seqno  # Sequence number when last processed the db

    while not exiting:
        unixctl_server.run()
        aaa_util_run()
        if exiting:
            break

        poller = ovs.poller.Poller()
        unixctl_server.wait(poller)
        idl.wait(poller)
        poller.block()

    # Daemon Exit
    unixctl_server.close()
    idl.close()
    return


if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        # Let system.exit() calls complete normally
        raise
    except:
        vlog.exception("traceback")
        sys.exit(ovs.daemon.RESTART_EXIT_CODE)

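# Context sketch for the main loop above (assumptions: the standard Open
# vSwitch Python bindings and placeholder schema/remote paths -- the real
# daemon would take these from its configuration).  An OVSDB IDL and a
# unixctl server are created before the loop is entered.
import ovs.db.idl
import ovs.unixctl.server
import ovs.util

schema_helper = ovs.db.idl.SchemaHelper("/path/to/schema.ovsschema")
schema_helper.register_all()
idl = ovs.db.idl.Idl("unix:/var/run/openvswitch/db.sock", schema_helper)

error, unixctl_server = ovs.unixctl.server.UnixctlServer.create(None)
if error:
    ovs.util.ovs_fatal(error, "could not create unixctl server", vlog)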