def sandesh_init(self):
    """Initialize the Sandesh infrastructure for this process.

    Creates the Sandesh generator, applies any configured send-rate
    limit, installs this module's introspect handlers, configures
    logging, and registers connection-state (NodeStatus UVE) reporting.
    """
    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value only when explicitly
    # configured; otherwise the library default remains in effect.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Presumably installs module-specific introspect request handlers
    # before the generator starts serving -- defined by the subclass.
    self.redefine_sandesh_handles()
    # Connects to the pre-randomized collector list
    # (self._args.random_collectors).
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self._instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(self._args.http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        self.discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # connection state init
    # NOTE(review): a staticmethod object (not a plain callable) is
    # passed as the process-state callback; ConnectionState.init
    # presumably unwraps or tolerates this -- confirm against its
    # implementation.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
def _sandesh_init(self, discovery):
    """Create and configure a Sandesh instance for svc_monitor.

    Applies the configured send-rate limit, routes ServiceInstanceList
    introspect requests to this object's handler, initializes the
    generator against the configured collectors and sets logging
    parameters.

    :param discovery: discovery client handed to the generator.
    :returns: the fully configured Sandesh instance.
    """
    instance = Sandesh()
    # Honour an explicitly configured send-rate limit before any
    # messages can be emitted.
    rate_limit = self._args.sandesh_send_rate_limit
    if rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(rate_limit)
    # Route ServiceInstanceList introspect requests to our handler.
    sandesh.ServiceInstanceList.handle_request = \
        self.sandesh_si_handle_request
    instance.init_generator(
        self._module_name,
        self._hostname,
        self._node_type_name,
        self._instance_id,
        self._args.collectors,
        'svc_monitor_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'svc_monitor.sandesh'],
        discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    instance.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    return instance
def sandesh_init(self):
    """Set up Sandesh for this module: rate limit, introspect
    handlers, generator init, logging, and connection-state
    registration."""
    args = self._args
    self._sandesh = Sandesh()
    # Apply an explicitly configured send-rate limit, if any.
    if args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            args.sandesh_send_rate_limit)
    self.redefine_sandesh_handles()
    self._sandesh.init_generator(
        self._module_name,
        self._hostname,
        self._node_type_name,
        self._instance_id,
        args.collectors,
        '%s_context' % self.context,
        int(args.http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        self.discovery,
        logger_class=args.logger_class,
        logger_config_file=args.logging_conf)
    self._sandesh.set_logging_params(
        enable_local_log=args.log_local,
        category=args.log_category,
        level=args.log_level,
        file=args.log_file,
        enable_syslog=args.use_syslog,
        syslog_facility=args.syslog_facility)
    # Register NodeStatus UVE / connection-state reporting with the
    # collector.
    ConnectionState.init(
        self._sandesh,
        self._hostname,
        self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE,
        NodeStatus,
        self.table)
def sandesh_init(self):
    """Initialize Sandesh for kube-manager.

    Sets the optional send-rate limit, registers custom introspect
    handlers, starts the Sandesh generator (including the
    kube_introspect package), configures logging, and registers
    connection-state reporting with the collector.
    """
    self._sandesh = Sandesh()
    # Reset sandesh send rate limit value.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Register custom sandesh request handlers.
    self._redefine_sandesh_handles()
    # Initialize Sandesh generator.
    # NOTE: module identity comes from the self._module dict
    # (keys: name/hostname/node_type_name/instance_id/discovery/table).
    self._sandesh.init_generator(
        self._module["name"],
        self._module["hostname"],
        self._module["node_type_name"],
        self._module["instance_id"],
        self._args.collectors,
        'kube_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'kube_manager.sandesh',
         'kube_introspect.sandesh'],
        self._module["discovery"],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector.
    ConnectionState.init(
        self._sandesh,
        self._module["hostname"],
        self._module["name"],
        self._module["instance_id"],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self._module["table"])
def sandesh_init(self):
    """Bring up Sandesh for mesos-manager: generator initialization,
    logging configuration and collector connection-state reporting."""
    module = self.module
    args = self._args
    self._sandesh = Sandesh()
    # Honour a configured send-rate limit before sending anything.
    if args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            args.sandesh_send_rate_limit)
    # Bring up the Sandesh generator for this module.
    self._sandesh.init_generator(
        module['name'], module['hostname'], module['node_type_name'],
        module['instance_id'], args.collectors,
        'mesos_manager_context', int(args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'], module['discovery'],
        logger_class=args.logger_class,
        logger_config_file=args.logging_conf)
    # Configure logging destinations and levels.
    self._sandesh.set_logging_params(
        enable_local_log=args.log_local,
        category=args.log_category,
        level=args.log_level,
        file=args.log_file,
        enable_syslog=args.use_syslog,
        syslog_facility=args.syslog_facility)
    # Register connection state with the collector.
    ConnectionState.init(
        self._sandesh, module['hostname'], module['name'],
        module['instance_id'],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, module['table'])
def test_systemlog_msg_rate_limit(self):
    """Verify system-log send-rate limiting, and that a negative
    rate-limit value is rejected (previous limit retained)."""
    systemlog_msg = SystemLogTest()
    self._expected_type = SandeshType.SYSTEM
    self._expected_hints = 0
    # Limit sends to 10 messages/sec.
    SandeshSystem.set_sandesh_send_rate_limit(10)
    # Presumably lets any prior token-bucket state settle so exactly
    # one second's budget (10 msgs) is available -- TODO confirm.
    time.sleep(1)
    for i in range(0, 15):
        systemlog_msg.send(sandesh=sandesh_global)
    # 15 sends against a budget of 10 => 5 rate-limited drops.
    self.assertEqual(
        5, sandesh_global.msg_stats().message_type_stats()
        ['SystemLogTest'].messages_sent_dropped_rate_limited)
    # try to set negative values to the rate limit
    SandeshSystem.set_sandesh_send_rate_limit(-10)
    # Negative values must be ignored; the previous limit (10) stays.
    self.assertEqual(SandeshSystem.get_sandesh_send_rate_limit(), 10)
def test_sandesh_queue_level_drop(self):
    """Verify messages whose level is at/above (i.e. less severe than)
    the current send level are dropped at queue level and counted in
    the aggregate stats."""
    # Increase rate limit so rate limiting does not interfere with
    # the queue-level drop accounting below.
    SandeshSystem.set_sandesh_send_rate_limit(100)
    levels = list(range(SandeshLevel.SYS_EMERG, SandeshLevel.SYS_DEBUG))
    queue_level_drop = 0
    # NOTE(review): mlevels is built but never used in this test --
    # looks like dead code; confirm before removing.
    mlevels = list(levels)
    mlevels.append(SandeshLevel.SYS_DEBUG)
    for send_level in levels:
        # Force the global sandesh send level for this iteration.
        sandesh_global.send_level = mock.MagicMock(
            return_value=send_level)
        for sandesh_level in levels:
            systemlog = SystemLogTest(level=sandesh_level)
            systemlog.send()
            # Numerically higher level == lower severity; those
            # messages should be dropped before queueing.
            if sandesh_level >= send_level:
                queue_level_drop += 1
    self.assertEqual(
        queue_level_drop,
        sandesh_global.msg_stats().aggregate_stats().
        messages_sent_dropped_queue_level)
def sandesh_sending_params_set_handle_request(self, sandesh_req):
    """Introspect handler: apply requested sending parameters, then
    reply with the effective settings.

    Each parameter is updated only when the request carries a
    non-None value for it; the response always reflects the current
    state.
    """
    # Apply whichever sending params the request actually carries.
    if sandesh_req.system_logs_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            sandesh_req.system_logs_rate_limit)
    if sandesh_req.disable_object_logs is not None:
        self._sandesh.disable_sending_object_logs(
            sandesh_req.disable_object_logs)
    if sandesh_req.disable_all_logs is not None:
        self._sandesh.disable_sending_all_messages(
            sandesh_req.disable_all_logs)
    # Echo back the now-effective settings.
    resp = SandeshSendingParams(
        system_logs_rate_limit=(
            SandeshSystem.get_sandesh_send_rate_limit()),
        disable_object_logs=(
            self._sandesh.is_sending_object_logs_disabled()),
        disable_all_logs=(
            self._sandesh.is_sending_all_messages_disabled()),
        dscp=self.sandesh_get_dscp())
    resp.response(sandesh_req.context(), sandesh=self._sandesh)
def sandesh_init(self):
    """Initialize Sandesh for mesos-manager.

    Same flow as the other managers' sandesh_init, but additionally
    passes the parsed sandesh transport configuration
    (self._args.sandesh_config) to the generator.
    """
    self._sandesh = Sandesh()
    # Reset sandesh send rate limit value.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self.module['name'], self.module['hostname'],
        self.module['node_type_name'], self.module['instance_id'],
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        self.module['discovery'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector.
    ConnectionState.init(
        self._sandesh, self.module['hostname'],
        self.module['name'], self.module['instance_id'],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.module['table'])
def main(args_str=' '.join(sys.argv[1:])):
    """Nodemgr entry point: parse node type, load the per-node-type
    config file, parse remaining options, then build and run the
    matching event manager forever under gevent."""
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(
            args_str.split())
    except:
        # NOTE(review): bare except; usage() presumably prints help and
        # exits the process -- confirm.
        usage()
    # Discovery defaults; renamed to discovery_* below.
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    # Built-in defaults, overridden by the config file, overridden in
    # turn by command-line options.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'sandesh_send_rate_limit':
                   SandeshSystem.get_sandesh_send_rate_limit(),
               }
    # Pick the config file (and extra defaults) for the node type.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
        # Analytics nodes default to the local collector.
        default['collectors'] = ['127.0.0.1:8086']
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # Overlay config-file values onto the defaults.
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    # Rename to the argparse option names (done unconditionally: the
    # keys always exist from the dict literal above).
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep existing default.
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    # Database-only options.
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskgb",
                            type=int,
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # NOTE(review): bare except; see note above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # Apply an explicitly configured send-rate limit.
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments
    # Must run under supervisord's event-listener protocol.
    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    # Instantiate the event manager matching the node type.
    prog = None
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(
            rule_file, discovery_server,
            discovery_port, collector_addr)
    elif (node_type == 'contrail-config'):
        prog = ConfigEventManager(
            rule_file, discovery_server,
            discovery_port, collector_addr)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(
            rule_file, discovery_server,
            discovery_port, collector_addr)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(
            rule_file, discovery_server,
            discovery_port, collector_addr)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        prog = DatabaseEventManager(
            rule_file, discovery_server,
            discovery_port, collector_addr,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval)
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # Seed initial state, then run the event loop forever.
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def main(args_str=" ".join(sys.argv[1:])):
    """Nodemgr entry point (variant with cassandra repair logdir):
    parse node type and options, then build and run the matching
    event manager forever under gevent."""
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default="contrail-analytics",
                             help="Type of node which nodemgr is managing")
    try:
        args, remaining_argv = node_parser.parse_known_args(
            args_str.split())
    except:
        # NOTE(review): bare except; usage() presumably prints help and
        # exits the process -- confirm.
        usage()
    # Discovery defaults; renamed to discovery_* below.
    disc_options = {"server": socket.gethostname(), "port": 5998}
    # Built-in defaults, overridden by config file, then CLI options.
    default = {
        "rules": "",
        "collectors": [],
        "hostip": "127.0.0.1",
        "minimum_diskgb": 256,
        "contrail_databases": "config analytics",
        "cassandra_repair_interval": 24,
        "cassandra_repair_logdir": "/var/log/contrail/",
        "sandesh_send_rate_limit":
            SandeshSystem.get_sandesh_send_rate_limit(),
    }
    # Pick the config file for the node type.
    node_type = args.nodetype
    if node_type == "contrail-analytics":
        config_file = "/etc/contrail/contrail-analytics-nodemgr.conf"
    elif node_type == "contrail-config":
        config_file = "/etc/contrail/contrail-config-nodemgr.conf"
    elif node_type == "contrail-control":
        config_file = "/etc/contrail/contrail-control-nodemgr.conf"
    elif node_type == "contrail-vrouter":
        config_file = "/etc/contrail/contrail-vrouter-nodemgr.conf"
    elif node_type == "contrail-database":
        config_file = "/etc/contrail/contrail-database-nodemgr.conf"
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # The config file is mandatory in this variant.
    if os.path.exists(config_file) == False:
        sys.stderr.write("config file " + config_file +
                         " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if "DEFAULT" in config.sections():
        default.update(dict(config.items("DEFAULT")))
    if "DISCOVERY" in config.sections():
        disc_options.update(dict(config.items("DISCOVERY")))
    # Rename to the argparse option names (unconditional: the keys
    # always exist from the dict literal above).
    disc_options["discovery_server"] = disc_options.pop("server")
    disc_options["discovery_port"] = disc_options.pop("port")
    if "COLLECTOR" in config.sections():
        try:
            collector = config.get("COLLECTOR", "server_list")
            default["collectors"] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep existing default.
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help="Rules file to use for processing events")
    parser.add_argument("--discovery_server",
                        help="IP address of Discovery Server")
    parser.add_argument("--discovery_port", type=int,
                        help="Port of Discovery Server")
    parser.add_argument("--collectors", nargs="+",
                        help="Collector addresses in format" +
                             "ip1:port1 ip2:port2")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    # Database-only options.
    if node_type == "contrail-database":
        parser.add_argument("--minimum_diskgb", type=int,
                            help="Minimum disk space in GB's")
        parser.add_argument(
            "--contrail_databases", nargs="+",
            help="Contrail databases on this node" +
                 "in format: config analytics"
        )
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument(
            "--cassandra_repair_interval", type=int,
            help="Time in hours to periodically run "
                 "nodetool repair for cassandra maintenance",
        )
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # NOTE(review): bare except; see note above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # Apply an explicitly configured send-rate limit.
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments
    # Must run under supervisord's event-listener protocol.
    if not "SUPERVISOR_SERVER_URL" in os.environ:
        sys.stderr.write("Node manager must be run as a supervisor event "
                         "listener\n")
        sys.stderr.flush()
        return
    # Instantiate the event manager matching the node type.
    prog = None
    if node_type == "contrail-analytics":
        prog = AnalyticsEventManager(rule_file, discovery_server,
                                     discovery_port, collector_addr)
    elif node_type == "contrail-config":
        # Config nodes also take cassandra repair settings here.
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            cassandra_repair_interval, cassandra_repair_logdir,
        )
    elif node_type == "contrail-control":
        prog = ControlEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr)
    elif node_type == "contrail-vrouter":
        prog = VrouterEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr)
    elif node_type == "contrail-database":
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval, cassandra_repair_logdir,
        )
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # Seed initial state, then run the event loop forever.
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def main(args_str=' '.join(sys.argv[1:])):
    """Nodemgr entry point (systemd variant): parse node type and
    options (including sandesh SSL), randomize the collector list,
    then build and run the matching event manager under gevent with a
    SIGHUP handler for collector reconfiguration."""
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(
            args_str.split())
    except:
        # NOTE(review): bare except; usage() presumably prints help and
        # exits the process -- confirm.
        usage()
    # Discovery defaults; renamed to discovery_* below.
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    # Built-in defaults, overridden by config file, then CLI options.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'sandesh_send_rate_limit':
                   SandeshSystem.get_sandesh_send_rate_limit(),
               }
    # Sandesh/introspect SSL defaults (overridden by [SANDESH] section).
    sandesh_opts = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False
    }
    # Pick the config file for the node type.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # The config file is mandatory in this variant.
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file +
                         " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # NOTE: this variant reads section [DEFAULTS] (plural).
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    # Rename to the argparse option names (unconditional: the keys
    # always exist from the dict literal above).
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep existing default.
            pass
    if 'SANDESH' in config.sections():
        sandesh_opts.update(dict(config.items('SANDESH')))
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    parser.add_argument("--sandesh_keyfile",
                        help="Sandesh ssl private key")
    parser.add_argument("--sandesh_certfile",
                        help="Sandesh ssl certificate")
    parser.add_argument("--sandesh_ca_cert",
                        help="Sandesh CA ssl certificate")
    parser.add_argument("--sandesh_ssl_enable", action="store_true",
                        help="Enable ssl for sandesh connection")
    parser.add_argument("--introspect_ssl_enable", action="store_true",
                        help="Enable ssl for introspect connection")
    # Database-only options.
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # NOTE(review): bare except; see note above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # randomize collector list
    # The md5 checksum of the configured list is kept so a SIGHUP
    # handler can detect collector-list changes later.
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors,
                                                len(_args.collectors))
        _args.collectors = _args.random_collectors
        collector_addr = _args.collectors
        sys.stderr.write("Random Collector address: " +
                         str(collector_addr) + "\n")
    # Apply an explicitly configured send-rate limit.
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # Bundle the sandesh/introspect SSL settings for the managers.
    sandesh_config = SandeshConfig(_args.sandesh_keyfile,
                                   _args.sandesh_certfile,
                                   _args.sandesh_ca_cert,
                                   _args.sandesh_ssl_enable,
                                   _args.introspect_ssl_enable)
    # done parsing arguments
    # Instantiate the event manager matching the node type; each branch
    # supplies its default rules file and systemd unit list.
    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = ['contrail-collector.service',
                      'contrail-analytics-api.service',
                      'contrail-snmp-collector.service',
                      'contrail-query-engine.service',
                      'contrail-alarm-gen.service',
                      'contrail-topology.service',
                      'contrail-analytics-nodemgr.service',
                      ]
        prog = AnalyticsEventManager(rule_file, unit_names,
                                     discovery_server, discovery_port,
                                     collector_addr, sandesh_config)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = ['contrail-api.service',
                      'contrail-schema.service',
                      'contrail-svc-monitor.service',
                      'contrail-device-manager.service',
                      'contrail-discovery.service',
                      'contrail-config-nodemgr.service',
                      'ifmap.service',
                      ]
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, unit_names,
                                  discovery_server, discovery_port,
                                  collector_addr, sandesh_config,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = ['contrail-control.service',
                      'contrail-dns.service',
                      'contrail-named.service',
                      'contrail-control-nodemgr.service',
                      ]
        prog = ControlEventManager(rule_file, unit_names,
                                   discovery_server, discovery_port,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = ['contrail-vrouter-agent.service',
                      'contrail-vrouter-nodemgr.service',
                      ]
        prog = VrouterEventManager(rule_file, unit_names,
                                   discovery_server, discovery_port,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = ['contrail-database.service',
                      'kafka.service',
                      'contrail-database-nodemgr.service',
                      ]
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, unit_names,
                                    discovery_server, discovery_port,
                                    collector_addr, sandesh_config,
                                    hostip, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir)
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # Seed initial state before entering the event loop.
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    """ @sighup Reconfig of collector list """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    # NOTE(review): run_periodically is CALLED here (its return value
    # is passed to spawn) rather than passed as the callable; if it
    # loops forever this blocks before spawn runs. Likely intended:
    # gevent.spawn(prog.run_periodically, prog.do_periodic_events, 60)
    # -- confirm against run_periodically's implementation.
    gevent.joinall([
        gevent.spawn(prog.runforever),
        gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))
    ])
def main(args_str=' '.join(sys.argv[1:])):
    """Nodemgr entry point (discovery-SSL variant): parse node type and
    options (including discovery SSL and cassandra SSL/CQL port), then
    build and run the matching event manager forever under gevent."""
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(
            args_str.split())
    except:
        # NOTE(review): bare except; usage() presumably prints help and
        # exits the process -- confirm.
        usage()
    # Discovery defaults (ssl off); renamed to discovery_* below.
    disc_options = {'server': socket.gethostname(),
                    'port': 5998,
                    'ssl': False}
    # Built-in defaults, overridden by config file, then CLI options.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'db_port': '9042',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'sandesh_send_rate_limit':
                   SandeshSystem.get_sandesh_send_rate_limit(),
               # String flag ('true'/'false'); compared lower-cased
               # later. SSL to cassandra is on by default.
               'cassandra_use_ssl': 'true',
               }
    # Pick the config file for the node type.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # The config file is mandatory in this variant.
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file +
                         " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # Accept both [DEFAULT] and [DEFAULTS] section spellings.
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
        # ConfigParser items are strings; re-read ssl as a boolean.
        if 'ssl' in config.options('DISCOVERY'):
            disc_options['ssl'] = config.getboolean('DISCOVERY', 'ssl')
    # Rename to the argparse option names (unconditional: server/port
    # always exist from the dict literal above; cert/key/cacert may be
    # absent, in which case .get() yields None).
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    disc_options['discovery_ssl'] = disc_options.get('ssl')
    disc_options['discovery_cert'] = disc_options.get('cert')
    disc_options['discovery_key'] = disc_options.get('key')
    disc_options['discovery_cacert'] = disc_options.get('cacert')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep existing default.
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--discovery_cert",
                        help="Discovery Server ssl certificate")
    parser.add_argument("--discovery_key",
                        help="Discovery Server ssl key")
    parser.add_argument("--discovery_cacert",
                        help="Discovery Server ssl CA certificate")
    parser.add_argument("--discovery_ssl", action="store_true",
                        help="Discovery service is configured with ssl")
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    # Cassandra-related options apply to database AND config nodes.
    if (node_type == 'contrail-database' or
            node_type == 'contrail-config'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--db_port",
                            help="Cassandra DB cql port")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
        parser.add_argument(
            "--cassandra_use_ssl",
            help="To connect SSL enabled cassandra. values: true|false")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # NOTE(review): bare except; see note above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # Apply an explicitly configured send-rate limit.
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments
    # Must run under supervisord's event-listener protocol.
    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    # Optional keyword args threaded through to every event manager.
    kwargs = {}
    if _args.discovery_ssl:
        kwargs.update({
            'cert': _args.discovery_cert,
            'key': _args.discovery_key,
            'cacert': _args.discovery_cacert
        })
    # cassandra_use_ssl arrives as a string via set_defaults even for
    # node types that don't expose the CLI option.
    if _args.cassandra_use_ssl.lower() == 'true':
        kwargs.update({'cassandra_use_ssl': True})
    # Instantiate the event manager matching the node type.
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(rule_file, discovery_server,
                                     discovery_port, collector_addr,
                                     **kwargs)
    elif (node_type == 'contrail-config'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, discovery_server,
                                  discovery_port, collector_addr,
                                  hostip, db_port, minimum_diskgb,
                                  contrail_databases,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir, **kwargs)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr,
                                   **kwargs)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr,
                                   **kwargs)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, discovery_server,
                                    discovery_port, collector_addr,
                                    hostip, db_port, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir, **kwargs)
    else:
        sys.stderr.write("Node type" + str(node_type) +
                         "is incorrect" + "\n")
        return
    # Seed initial state, then run the event loop forever.
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])