def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
        default['collectors'] = ['127.0.0.1:8086']
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--discovery_server", help='IP address of Discovery Server')
    parser.add_argument("--discovery_port", type=int, help='Port of Discovery Server')
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskgb", type=int, help="Minimum disk space in GB's")
        parser.add_argument("--hostip", help="IP address of host")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # done parsing arguments

    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-config'):
        prog = ConfigEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        prog = DatabaseEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            hostip, minimum_diskgb)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
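# --- Illustrative sketch (not part of the original sources): the main() above
# layers an INI config file over hard-coded defaults before handing them to
# argparse via set_defaults(), so precedence is built-in default < config file
# < command-line flag. The sample file body and helper name below are
# assumptions for illustration only. One subtlety: ConfigParser.sections()
# never lists the special [DEFAULT] section, so the 'DEFAULT' guard above only
# ever merges [COLLECTOR]; presumably this is why later revisions read a
# [DEFAULTS] section instead.
import ConfigParser
from StringIO import StringIO

_SAMPLE_CONF = """
[COLLECTOR]
server_list = 10.0.0.1:8086 10.0.0.2:8086
"""

def _layer_defaults(conf_text):
    # Same merge order as main(): start from hard-coded values, then overlay
    # whatever the config file provides.
    default = {'rules': '', 'collectors': [], 'hostip': '127.0.0.1', 'minimum_diskgb': 256}
    config = ConfigParser.SafeConfigParser()
    config.readfp(StringIO(conf_text))  # stands in for config.read([config_file])
    if 'COLLECTOR' in config.sections():
        try:
            default['collectors'] = config.get('COLLECTOR', 'server_list').split()
        except ConfigParser.NoOptionError:
            pass
    return default

# _layer_defaults(_SAMPLE_CONF)['collectors'] == ['10.0.0.1:8086', '10.0.0.2:8086']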
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
        default['collectors'] = ['127.0.0.1:8086']
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--discovery_server", help='IP address of Discovery Server')
    parser.add_argument("--discovery_port", type=int, help='Port of Discovery Server')
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskgb", type=int, help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs='+',
                            help='Contrail databases on this node' + 'in format: config analytics')
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(_args.sandesh_send_rate_limit)
    # done parsing arguments

    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-config'):
        prog = ConfigEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        prog = DatabaseEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def main(args_str=" ".join(sys.argv[1:])): # Parse Arguments node_parser = argparse.ArgumentParser(add_help=False) node_parser.add_argument("--nodetype", default="contrail-analytics", help="Type of node which nodemgr is managing") try: args, remaining_argv = node_parser.parse_known_args(args_str.split()) except: usage() disc_options = {"server": socket.gethostname(), "port": 5998} default = { "rules": "", "collectors": [], "hostip": "127.0.0.1", "minimum_diskgb": 256, "contrail_databases": "config analytics", "cassandra_repair_interval": 24, "cassandra_repair_logdir": "/var/log/contrail/", "sandesh_send_rate_limit": SandeshSystem.get_sandesh_send_rate_limit(), } node_type = args.nodetype if node_type == "contrail-analytics": config_file = "/etc/contrail/contrail-analytics-nodemgr.conf" elif node_type == "contrail-config": config_file = "/etc/contrail/contrail-config-nodemgr.conf" elif node_type == "contrail-control": config_file = "/etc/contrail/contrail-control-nodemgr.conf" elif node_type == "contrail-vrouter": config_file = "/etc/contrail/contrail-vrouter-nodemgr.conf" elif node_type == "contrail-database": config_file = "/etc/contrail/contrail-database-nodemgr.conf" else: sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n") return if os.path.exists(config_file) == False: sys.stderr.write("config file " + config_file + " is not present" + "\n") return config = ConfigParser.SafeConfigParser() config.read([config_file]) if "DEFAULT" in config.sections(): default.update(dict(config.items("DEFAULT"))) if "DISCOVERY" in config.sections(): disc_options.update(dict(config.items("DISCOVERY"))) disc_options["discovery_server"] = disc_options.pop("server") disc_options["discovery_port"] = disc_options.pop("port") if "COLLECTOR" in config.sections(): try: collector = config.get("COLLECTOR", "server_list") default["collectors"] = collector.split() except ConfigParser.NoOptionError as e: pass parser = argparse.ArgumentParser(parents=[node_parser], formatter_class=argparse.ArgumentDefaultsHelpFormatter) default.update(disc_options) parser.set_defaults(**default) parser.add_argument("--rules", help="Rules file to use for processing events") parser.add_argument("--discovery_server", help="IP address of Discovery Server") parser.add_argument("--discovery_port", type=int, help="Port of Discovery Server") parser.add_argument("--collectors", nargs="+", help="Collector addresses in format" + "ip1:port1 ip2:port2") parser.add_argument("--sandesh_send_rate_limit", type=int, help="Sandesh send rate limit in messages/sec") if node_type == "contrail-database": parser.add_argument("--minimum_diskgb", type=int, help="Minimum disk space in GB's") parser.add_argument( "--contrail_databases", nargs="+", help="Contrail databases on this node" + "in format: config analytics" ) parser.add_argument("--hostip", help="IP address of host") parser.add_argument( "--cassandra_repair_interval", type=int, help="Time in hours to periodically run " "nodetool repair for cassandra maintenance", ) parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs") try: _args = parser.parse_args(remaining_argv) except: usage() rule_file = _args.rules discovery_server = _args.discovery_server sys.stderr.write("Discovery server: " + discovery_server + "\n") discovery_port = _args.discovery_port sys.stderr.write("Discovery port: " + str(discovery_port) + "\n") collector_addr = _args.collectors sys.stderr.write("Collector address: " + str(collector_addr) + "\n") if _args.sandesh_send_rate_limit is 
not None: SandeshSystem.set_sandesh_send_rate_limit(_args.sandesh_send_rate_limit) # done parsing arguments if not "SUPERVISOR_SERVER_URL" in os.environ: sys.stderr.write("Node manager must be run as a supervisor event " "listener\n") sys.stderr.flush() return prog = None if node_type == "contrail-analytics": prog = AnalyticsEventManager(rule_file, discovery_server, discovery_port, collector_addr) elif node_type == "contrail-config": cassandra_repair_interval = _args.cassandra_repair_interval cassandra_repair_logdir = _args.cassandra_repair_logdir prog = ConfigEventManager( rule_file, discovery_server, discovery_port, collector_addr, cassandra_repair_interval, cassandra_repair_logdir, ) elif node_type == "contrail-control": prog = ControlEventManager(rule_file, discovery_server, discovery_port, collector_addr) elif node_type == "contrail-vrouter": prog = VrouterEventManager(rule_file, discovery_server, discovery_port, collector_addr) elif node_type == "contrail-database": hostip = _args.hostip minimum_diskgb = _args.minimum_diskgb contrail_databases = _args.contrail_databases cassandra_repair_interval = _args.cassandra_repair_interval cassandra_repair_logdir = _args.cassandra_repair_logdir prog = DatabaseEventManager( rule_file, discovery_server, discovery_port, collector_addr, hostip, minimum_diskgb, contrail_databases, cassandra_repair_interval, cassandra_repair_logdir, ) else: sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n") return prog.process() prog.send_nodemgr_process_status() prog.send_process_state_db(prog.group_names) gevent.joinall([gevent.spawn(prog.runforever)])
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'cassandra_repair_logdir': '/var/log/contrail/',
        'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
    }
    sandesh_opts = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    if 'SANDESH' in config.sections():
        sandesh_opts.update(dict(config.items('SANDESH')))
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--discovery_server", help='IP address of Discovery Server')
    parser.add_argument("--discovery_port", type=int, help='Port of Discovery Server')
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    parser.add_argument("--sandesh_keyfile", help="Sandesh ssl private key")
    parser.add_argument("--sandesh_certfile", help="Sandesh ssl certificate")
    parser.add_argument("--sandesh_ca_cert", help="Sandesh CA ssl certificate")
    parser.add_argument("--sandesh_ssl_enable", action="store_true",
                        help="Enable ssl for sandesh connection")
    parser.add_argument("--introspect_ssl_enable", action="store_true",
                        help="Enable ssl for introspect connection")
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskGB", type=int, dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs='+',
                            help='Contrail databases on this node' + 'in format: config analytics')
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # randomize collector list
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors, len(_args.collectors))
        _args.collectors = _args.random_collectors
        collector_addr = _args.collectors
        sys.stderr.write("Random Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(_args.sandesh_send_rate_limit)
    sandesh_config = SandeshConfig(_args.sandesh_keyfile,
                                   _args.sandesh_certfile,
                                   _args.sandesh_ca_cert,
                                   _args.sandesh_ssl_enable,
                                   _args.introspect_ssl_enable)
    # done parsing arguments

    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = [
            'contrail-collector.service',
            'contrail-analytics-api.service',
            'contrail-snmp-collector.service',
            'contrail-query-engine.service',
            'contrail-alarm-gen.service',
            'contrail-topology.service',
            'contrail-analytics-nodemgr.service',
        ]
        prog = AnalyticsEventManager(rule_file, unit_names, discovery_server,
                                     discovery_port, collector_addr, sandesh_config)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = [
            'contrail-api.service',
            'contrail-schema.service',
            'contrail-svc-monitor.service',
            'contrail-device-manager.service',
            'contrail-discovery.service',
            'contrail-config-nodemgr.service',
            'ifmap.service',
        ]
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, unit_names, discovery_server,
                                  discovery_port, collector_addr, sandesh_config,
                                  cassandra_repair_interval, cassandra_repair_logdir)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = [
            'contrail-control.service',
            'contrail-dns.service',
            'contrail-named.service',
            'contrail-control-nodemgr.service',
        ]
        prog = ControlEventManager(rule_file, unit_names, discovery_server,
                                   discovery_port, collector_addr, sandesh_config)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = [
            'contrail-vrouter-agent.service',
            'contrail-vrouter-nodemgr.service',
        ]
        prog = VrouterEventManager(rule_file, unit_names, discovery_server,
                                   discovery_port, collector_addr, sandesh_config)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = [
            'contrail-database.service',
            'kafka.service',
            'contrail-database-nodemgr.service',
        ]
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, unit_names, discovery_server,
                                    discovery_port, collector_addr, sandesh_config,
                                    hostip, minimum_diskgb, contrail_databases,
                                    cassandra_repair_interval, cassandra_repair_logdir)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    """ @sighup Reconfig of collector list """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    gevent.joinall([
        gevent.spawn(prog.runforever),
        gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))
    ])
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    disc_options = {'server': socket.gethostname(), 'port': 5998, 'ssl': False}
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'db_port': '9042',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'cassandra_repair_logdir': '/var/log/contrail/',
        'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
        'cassandra_use_ssl': 'true',
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
        if 'ssl' in config.options('DISCOVERY'):
            disc_options['ssl'] = config.getboolean('DISCOVERY', 'ssl')
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    disc_options['discovery_ssl'] = disc_options.get('ssl')
    disc_options['discovery_cert'] = disc_options.get('cert')
    disc_options['discovery_key'] = disc_options.get('key')
    disc_options['discovery_cacert'] = disc_options.get('cacert')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--discovery_server", help='IP address of Discovery Server')
    parser.add_argument("--discovery_port", type=int, help='Port of Discovery Server')
    parser.add_argument("--discovery_cert", help="Discovery Server ssl certificate")
    parser.add_argument("--discovery_key", help="Discovery Server ssl key")
    parser.add_argument("--discovery_cacert", help="Discovery Server ssl CA certificate")
    parser.add_argument("--discovery_ssl", action="store_true",
                        help="Discovery service is configured with ssl")
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    if (node_type == 'contrail-database' or node_type == 'contrail-config'):
        parser.add_argument("--minimum_diskGB", type=int, dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs='+',
                            help='Contrail databases on this node' + 'in format: config analytics')
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--db_port", help="Cassandra DB cql port")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs")
        parser.add_argument("--cassandra_use_ssl",
                            help="To connect SSL enabled cassandra. values: true|false")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(_args.sandesh_send_rate_limit)
    # done parsing arguments

    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    kwargs = {}
    if _args.discovery_ssl:
        kwargs.update({
            'cert': _args.discovery_cert,
            'key': _args.discovery_key,
            'cacert': _args.discovery_cacert
        })
    if _args.cassandra_use_ssl.lower() == 'true':
        kwargs.update({'cassandra_use_ssl': True})
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(rule_file, discovery_server,
                                     discovery_port, collector_addr, **kwargs)
    elif (node_type == 'contrail-config'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, discovery_server,
                                  discovery_port, collector_addr,
                                  hostip, db_port, minimum_diskgb,
                                  contrail_databases,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir, **kwargs)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr, **kwargs)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr, **kwargs)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, discovery_server,
                                    discovery_port, collector_addr,
                                    hostip, db_port, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir, **kwargs)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'cassandra_repair_logdir': '/var/log/contrail/',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY
    }
    default.update(SandeshConfig.get_default_options(['DEFAULTS']))
    sandesh_opts = SandeshConfig.get_default_options()
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    SandeshConfig.update_options(sandesh_opts, config)
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_level",
                        help="Severity level for local logging of sandesh messages")
    parser.add_argument("--log_category",
                        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file", help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true", help="Use syslog for logging")
    parser.add_argument("--syslog_facility", help="Syslog facility to receive log lines")
    SandeshConfig.add_parser_arguments(parser, add_dscp=True)
    if (node_type == 'contrail-database' or node_type == 'contrail-config'):
        parser.add_argument("--minimum_diskGB", type=int, dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs='+',
                            help='Contrail databases on this node' + 'in format: config analytics')
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    # randomize collector list
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors, len(_args.collectors))
        _args.collectors = _args.random_collectors
    # done parsing arguments

    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = [
            'contrail-collector.service',
            'contrail-analytics-api.service',
            'contrail-snmp-collector.service',
            'contrail-query-engine.service',
            'contrail-alarm-gen.service',
            'contrail-topology.service',
            'contrail-analytics-nodemgr.service',
        ]
        prog = AnalyticsEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = [
            'contrail-api.service',
            'contrail-schema.service',
            'contrail-svc-monitor.service',
            'contrail-device-manager.service',
            'contrail-config-nodemgr.service',
        ]
        prog = ConfigEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = [
            'contrail-control.service',
            'contrail-dns.service',
            'contrail-named.service',
            'contrail-control-nodemgr.service',
        ]
        prog = ControlEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = [
            'contrail-vrouter-agent.service',
            'contrail-vrouter-nodemgr.service',
        ]
        prog = VrouterEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = [
            'contrail-database.service',
            'kafka.service',
            'contrail-database-nodemgr.service',
        ]
        prog = DatabaseEventManager(_args, rule_file, unit_names)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    prog.random_collectors = _args.random_collectors
    """ @sighup Reconfig of collector list """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    gevent.joinall([
        gevent.spawn(prog.runforever),
        gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))
    ])
def main(args_str=' '.join(sys.argv[1:])):
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        usage()
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'db_port': '9042',
        'minimum_diskgb': 256,
        'corefile_path': '/var/crashes',
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'cassandra_repair_logdir': '/var/log/contrail/',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY
    }
    default.update(SandeshConfig.get_default_options(['DEFAULTS']))
    sandesh_opts = SandeshConfig.get_default_options()
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            pass
    SandeshConfig.update_options(sandesh_opts, config)
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules", help='Rules file to use for processing events')
    parser.add_argument("--collectors", nargs='+',
                        help='Collector addresses in format' + 'ip1:port1 ip2:port2')
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_level",
                        help="Severity level for local logging of sandesh messages")
    parser.add_argument("--log_category",
                        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file", help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true", help="Use syslog for logging")
    parser.add_argument("--syslog_facility", help="Syslog facility to receive log lines")
    parser.add_argument("--corefile_path", help="Location where coredump files are stored")
    SandeshConfig.add_parser_arguments(parser, add_dscp=True)
    if (node_type == 'contrail-database' or node_type == 'contrail-config'):
        parser.add_argument("--minimum_diskGB", type=int, dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs='+',
                            help='Contrail databases on this node' + 'in format: config analytics')
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--db_port", help="Cassandra DB cql port")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir", help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        usage()
    rule_file = _args.rules
    # randomize collector list
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors, len(_args.collectors))
        _args.collectors = _args.random_collectors
    # done parsing arguments

    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = [
            'contrail-collector.service',
            'contrail-analytics-api.service',
            'contrail-snmp-collector.service',
            'contrail-query-engine.service',
            'contrail-alarm-gen.service',
            'contrail-topology.service',
            'contrail-analytics-nodemgr.service',
        ]
        prog = AnalyticsEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = [
            'contrail-api.service',
            'contrail-schema.service',
            'contrail-svc-monitor.service',
            'contrail-device-manager.service',
            'contrail-config-nodemgr.service',
        ]
        prog = ConfigEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = [
            'contrail-control.service',
            'contrail-dns.service',
            'contrail-named.service',
            'contrail-control-nodemgr.service',
        ]
        prog = ControlEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = [
            'contrail-vrouter-agent.service',
            'contrail-vrouter-nodemgr.service',
        ]
        prog = VrouterEventManager(_args, rule_file, unit_names)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = [
            'contrail-database.service',
            'kafka.service',
            'contrail-database-nodemgr.service',
        ]
        prog = DatabaseEventManager(_args, rule_file, unit_names)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    prog.random_collectors = _args.random_collectors
    """ @sighup Reconfig of collector list """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    gevent.joinall([
        gevent.spawn(prog.runforever),
        gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))
    ])
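# --- Illustrative sketch (not part of the original sources): the last two
# revisions above stash an md5 checksum of the collector list on the event
# manager and register nodemgr_sighup_handler for SIGHUP, but that handler's
# body is not shown here. A minimal, assumed reload check along the same lines
# could look like the following; the function name and behaviour are
# hypothetical, not the shipped handler.
import hashlib
import ConfigParser

def _collectors_changed(config_file, old_chksum):
    """Re-read [COLLECTOR].server_list and report whether its md5 differs."""
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    try:
        collectors = config.get('COLLECTOR', 'server_list').split()
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        collectors = []
    new_chksum = hashlib.md5("".join(collectors)).hexdigest()
    return new_chksum != old_chksum, new_chksum, collectors

# A handler built on this would re-randomize and reconnect only when the
# checksum actually changed, mirroring the startup logic above.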