def main():
    NotifierNS()
    TendrlNS()
    NS.notifier.definitions.save()
    NS.notifier.config.save()
    NS.publisher_id = "notifier"

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    tendrl_notifier_manager = TendrlNotifierManager()
    tendrl_notifier_manager.start()
    complete = threading.Event()

    def terminate(signum, frame):
        log("debug", "notifier",
            {"message": "Signal handler: stopping"})
        complete.set()
        tendrl_notifier_manager.stop()

    def reload_config(signum, frame):
        NS.notifier.ns.setup_common_objects()

    signal.signal(signal.SIGINT, terminate)
    signal.signal(signal.SIGTERM, terminate)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
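# Every entry point in this file gates an optional internal profiler on the
# "with_internal_profiling" config flag via tendrl.commons.profiler. That
# module is not shown here; below is a minimal stdlib sketch of what such a
# start() hook could look like (the stats path and exit-dump behavior are
# assumptions, not the real tendrl implementation):

import atexit
import cProfile

_profile = cProfile.Profile()


def start():
    # Begin collecting profile data and register a dump at process exit.
    _profile.enable()
    atexit.register(_dump)


def _dump():
    _profile.disable()
    # Hypothetical output path; the real module may write elsewhere.
    _profile.dump_stats("/tmp/tendrl-internal-profile.stats")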
def main():
    AlertingNS()
    TendrlNS()
    NS.alerting.definitions.save()
    NS.alerting.config.save()
    NS.publisher_id = "alerting"

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    tendrl_alerting_manager = TendrlAlertingManager()
    tendrl_alerting_manager.start()
    complete = gevent.event.Event()

    # gevent.signal() invokes the handler with no arguments, so both
    # parameters must have defaults here (unlike signal.signal handlers).
    def terminate(sig=None, frame=None):
        Event(
            Message("debug", "alerting",
                    {"message": "Signal handler: stopping"}))
        tendrl_alerting_manager.stop()
        complete.set()

    gevent.signal(signal.SIGINT, terminate)
    gevent.signal(signal.SIGTERM, terminate)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    PerformanceMonitoringNS()
    TendrlNS()
    NS.publisher_id = "performance_monitoring"
    NS.time_series_db_manager = TimeSeriesDBManager()
    NS.performance_monitoring.definitions.save()
    NS.performance_monitoring.config.save()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    tendrl_perf_manager = TendrlPerformanceManager()

    def terminate(sig, frame):
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "Signal handler: stopping"}
            )
        )
        tendrl_perf_manager.stop()

    # Note: only SIGINT is trapped here; SIGTERM falls through to the
    # default handler, unlike the other entry points in this file.
    signal.signal(signal.SIGINT, terminate)
    tendrl_perf_manager.start()
def main():
    NodeMonitoringNS()
    TendrlNS()
    NS.type = "monitoring"
    complete = gevent.event.Event()
    NS.state_sync_thread = NodeMonitoringSyncStateThread()
    NS.node_monitoring.definitions.save()
    NS.node_monitoring.config.save()
    NS.publisher_id = "node_monitoring"

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    manager = NodeMonitoringManager()
    manager.start()

    def shutdown():
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    monitoring_integration.MonitoringIntegrationNS()
    TendrlNS()

    # Wait for Grafana to come up: up to 10 attempts, 4 seconds apart
    grafana_conn_count = 0
    while grafana_conn_count < 10:
        if not utils.port_open(NS.config.data["grafana_port"],
                               NS.config.data["grafana_host"]):
            grafana_conn_count += 1
            time.sleep(4)
        else:
            break
    if grafana_conn_count == 10:
        logger.log("error", NS.get("publisher_id", None),
                   {"message": "Cannot connect to Grafana"})
        return

    NS.type = "monitoring"
    NS.publisher_id = "monitoring_integration"

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    NS.monitoring.config.save()
    NS.monitoring.definitions.save()
    NS.sync_thread = sync.MonitoringIntegrationSdsSyncThread()
    monitoring_integration_manager = MonitoringIntegrationManager()
    monitoring_integration_manager.start()
    complete = threading.Event()

    # Tag this node as running the monitoring integration
    NS.node_context = NS.node_context.load()
    current_tags = list(NS.node_context.tags)
    current_tags += ["tendrl/integration/monitoring"]
    NS.node_context.tags = list(set(current_tags))
    NS.node_context.save()

    def shutdown(signum, frame):
        complete.set()
        NS.sync_thread.stop()

    def reload_config(signum, frame):
        NS.monitoring.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
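# The Grafana readiness loop above relies on a utils.port_open(port, host)
# helper. A minimal stdlib equivalent is sketched here (the name and exact
# semantics are an assumption; the real tendrl helper may differ):

import socket


def port_open(port, host):
    # Return True if a TCP connection to host:port can be established.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(2)
    try:
        return sock.connect_ex((host, int(port))) == 0
    finally:
        sock.close()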
def main():
    ceph_integration.CephIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "ceph_integration"

    from tendrl.ceph_integration import sds_sync
    NS.state_sync_thread = sds_sync.CephIntegrationSdsSyncStateThread()
    NS.node_context.save()

    # Check if the integration is part of any Tendrl imported/created
    # sds cluster
    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Integration %s is part of sds cluster" %
                                   NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Node %s is not part of any sds cluster" %
                                   NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started, "
                        "please Import or Create sds cluster in Tendrl "
                        "and include Node %s" % NS.node_context.node_id)

    if NS.tendrl_context.integration_id is None:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Node %s is not part of any sds cluster" %
                                   NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started, "
                        "please Import or Create sds cluster in Tendrl "
                        "and include Node %s" % NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.ceph.definitions.save()
    NS.ceph.config.save()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = CephIntegrationManager()
    m.start()
    complete = gevent.event.Event()

    def shutdown():
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()
    NS.message_handler_thread = GlusterNativeMessageHandler()

    # Block until tendrl-node-agent has detected the sds cluster
    while NS.tendrl_context.integration_id is None or \
            NS.tendrl_context.integration_id == "":
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Waiting for tendrl-node-agent %s to "
                           "detect sds cluster (integration_id not found)" %
                           NS.node_context.node_id
            }
        )
        NS.tendrl_context = NS.tendrl_context.load()

    logger.log(
        "debug",
        NS.publisher_id,
        {
            "message": "Integration %s is part of sds cluster" %
                       NS.tendrl_context.integration_id
        }
    )

    NS.gluster.definitions.save()
    NS.gluster.config.save()

    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "Signal handler: stopping"}
        )
        # Remove the node from the gluster server tag indexes
        try:
            gl_srvr_list = etcd_utils.read(
                "/indexes/tags/gluster/server"
            ).value
            gl_srvr_list = json.loads(gl_srvr_list)
            if NS.node_context.node_id in gl_srvr_list:
                gl_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/gluster/server",
                json.dumps(gl_srvr_list)
            )
            node_tags = json.loads(NS.node_context.tags)
            if 'provisioner/%s' % NS.tendrl_context.integration_id \
                    in node_tags:
                etcd_utils.delete(
                    "/indexes/tags/provisioner/%s" %
                    NS.tendrl_context.integration_id,
                    recursive=True
                )
            int_srvr_list = etcd_utils.read(
                "/indexes/tags/tendrl/integration/gluster"
            ).value
            int_srvr_list = json.loads(int_srvr_list)
            if NS.node_context.node_id in int_srvr_list:
                int_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/tendrl/integration/gluster",
                json.dumps(int_srvr_list)
            )
        except etcd.EtcdKeyNotFound:
            logger.log(
                "debug",
                NS.publisher_id,
                {
                    "message": "Couldn't remove node from "
                               "gluster servers list tag. "
                               "integration_id: %s, node_id: %s" % (
                                   NS.tendrl_context.integration_id,
                                   NS.node_context.node_id
                               )
                }
            )
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Signal handler: SIGHUP,"
                           " reload service config"
            }
        )
        NS.gluster.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()

    while NS.tendrl_context.integration_id is None or \
            NS.tendrl_context.integration_id == "":
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Waiting for tendrl-node-agent %s to "
                           "detect sds cluster (integration_id not found)" %
                           NS.node_context.node_id
            }
        )
        NS.tendrl_context = NS.tendrl_context.load()

    logger.log(
        "debug",
        NS.publisher_id,
        {
            "message": "Integration %s is part of sds cluster" %
                       NS.tendrl_context.integration_id
        }
    )

    # Create the GlusterNativeMessageHandler object only after the
    # integration_id has been found
    NS.message_handler_thread = GlusterNativeMessageHandler()

    NS.gluster.definitions.save()
    NS.gluster.config.save()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "Signal handler: stopping"}
        )
        # Remove the node from the gluster server tag indexes
        try:
            gl_srvr_list = etcd_utils.read(
                "/indexes/tags/gluster/server"
            ).value
            gl_srvr_list = json.loads(gl_srvr_list)
            if NS.node_context.node_id in gl_srvr_list:
                gl_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/gluster/server",
                json.dumps(gl_srvr_list)
            )
            node_tags = NS.node_context.tags
            if 'provisioner/%s' % NS.tendrl_context.integration_id \
                    in node_tags:
                etcd_utils.delete(
                    "/indexes/tags/provisioner/%s" %
                    NS.tendrl_context.integration_id,
                    recursive=True
                )
            int_srvr_list = etcd_utils.read(
                "/indexes/tags/tendrl/integration/gluster"
            ).value
            int_srvr_list = json.loads(int_srvr_list)
            if NS.node_context.node_id in int_srvr_list:
                int_srvr_list.remove(NS.node_context.node_id)
            etcd_utils.write(
                "/indexes/tags/tendrl/integration/gluster",
                json.dumps(int_srvr_list)
            )
        except etcd.EtcdKeyNotFound:
            logger.log(
                "debug",
                NS.publisher_id,
                {
                    "message": "Couldn't remove node from "
                               "gluster servers list tag. "
                               "integration_id: %s, node_id: %s" % (
                                   NS.tendrl_context.integration_id,
                                   NS.node_context.node_id
                               )
                }
            )
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Signal handler: SIGHUP,"
                           " reload service config"
            }
        )
        NS.gluster.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    # NS.node_agent contains the config object,
    # hence initialize it before any other NS
    node_agent.NodeAgentNS()
    # Init NS.tendrl
    TendrlNS()

    # Init NS.provisioning
    # TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
    # provisioning.ProvisioningNS()

    # Init NS.integrations.ceph
    # TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
    # to NS.integrations.ceph
    # ceph.CephIntegrationNS()

    # Init NS.integrations.gluster
    # TODO(team) add all short circuited gluster(import/create)
    # NS.tendrl.flows to NS.integrations.gluster
    GlusterIntegrationNS()

    # Compile all definitions
    NS.compiled_definitions = \
        NS.node_agent.objects.CompiledDefinitions()
    NS.compiled_definitions.merge_definitions([
        NS.tendrl.definitions,
        NS.node_agent.definitions,
        NS.integrations.gluster.definitions
    ])
    NS.node_agent.compiled_definitions = NS.compiled_definitions

    # Every process needs to set a NS.type
    # Allowed types are "node", "integration", "monitoring"
    NS.type = "node"

    NS.first_node_inventory_sync = True
    NS.state_sync_thread = node_sync.NodeAgentSyncThread()

    NS.compiled_definitions.save()
    NS.node_context.save()
    NS.tendrl_context.save()
    NS.node_agent.definitions.save()
    # NS.integrations.ceph.definitions.save()
    NS.node_agent.config.save()
    NS.publisher_id = "node_agent"

    NS.message_handler_thread = MessageHandler()

    NS.gluster_provisioner = GlusterProvisioningManager(
        NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"]
        ['gluster_provisioner'])

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    NS.gluster_sds_sync_running = False

    m = NodeAgentManager()
    m.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()
        m.stop()
        if NS.gluster_sds_sync_running:
            NS.gluster_integrations_sync_thread.stop()

    def reload_config(signum, frame):
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: SIGHUP"}))
        NS.config = NS.config.__class__()
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()
    NS.message_handler_thread = GlusterNativeMessageHandler()

    while NS.tendrl_context.integration_id is None or \
            NS.tendrl_context.integration_id == "":
        logger.log(
            "debug",
            NS.publisher_id,
            {
                "message": "Waiting for tendrl-node-agent %s to "
                           "detect sds cluster (integration_id not found)" %
                           NS.node_context.node_id
            })
        NS.tendrl_context = NS.tendrl_context.load()

    logger.log(
        "debug",
        NS.publisher_id,
        {
            "message": "Integration %s is part of sds cluster" %
                       NS.tendrl_context.integration_id
        })

    NS.gluster.definitions.save()
    NS.gluster.config.save()

    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        logger.log("debug", NS.publisher_id,
                   {"message": "Signal handler: stopping"})
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        logger.log(
            "debug",
            NS.publisher_id,
            {"message": "Signal handler: SIGHUP,"
                        " reload service config"})
        NS.gluster.ns.setup_common_objects()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
def main():
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()
    NS.message_handler_thread = GlusterNativeMessageHandler()
    NS.node_context.save()

    try:
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Integration %s is part of sds cluster" %
                                   NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Node %s is not part of any sds cluster" %
                                   NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started,"
                        " please Import or Create sds cluster"
                        " in Tendrl and include Node %s" %
                        NS.node_context.node_id)

    NS.tendrl_context.save()
    NS.gluster.definitions.save()
    NS.gluster.config.save()

    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()

    if NS.config.data.get("with_internal_profiling", False):
        from tendrl.commons import profiler
        profiler.start()

    m = GlusterIntegrationManager()
    m.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()
        m.stop()

    def reload_config(signum, frame):
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Signal handler: SIGHUP, "
                                   "reload service config"
                    }))
        NS.config = NS.config.__class__()
        NS.config.save()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGHUP, reload_config)

    while not complete.is_set():
        complete.wait(timeout=1)
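# All of the entry points above share one skeleton: initialize the component
# namespace plus TendrlNS(), persist definitions/config, start a *Manager,
# install signal handlers, then park the main thread on an Event. A minimal,
# dependency-free sketch of that pattern follows (the Manager class here is a
# stand-in for the tendrl-specific managers, not the real API):

import signal
import threading


class Manager(object):
    # Placeholder for TendrlNotifierManager, GlusterIntegrationManager, etc.
    def start(self):
        pass

    def stop(self):
        pass


def service_main():
    manager = Manager()
    manager.start()
    complete = threading.Event()

    def shutdown(signum, frame):
        # Mirror the shutdown handlers above: flag completion, stop workers.
        complete.set()
        manager.stop()

    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    # Wait with a 1s timeout, as the services above do: on Python 2 an
    # untimed Event.wait() blocks in C code and delays signal handling.
    while not complete.is_set():
        complete.wait(timeout=1)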