def sandesh_init(self):
    """Initialize the Sandesh infrastructure for mesos-manager."""
    self._sandesh = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Bring up the Sandesh generator for this module.
    self._sandesh.init_generator(
        self.module['name'], self.module['hostname'],
        self.module['node_type_name'], self.module['instance_id'],
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        self.module['discovery'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)

    # Configure local-log / syslog behaviour.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    # Register connection-state (process-state UVE) reporting.
    ConnectionState.init(
        self._sandesh, self.module['hostname'], self.module['name'],
        self.module['instance_id'],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.module['table'])
def sandesh_init(self):
    """Initialize the Sandesh infrastructure for kube-manager."""
    self._sandesh = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Install our own handlers for selected sandesh requests.
    self._redefine_sandesh_handles()

    # Bring up the Sandesh generator for this module.
    self._sandesh.init_generator(
        self._module["name"], self._module["hostname"],
        self._module["node_type_name"], self._module["instance_id"],
        self._args.collectors, 'kube_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'kube_manager.sandesh',
         'kube_introspect.sandesh'],
        self._module["discovery"],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)

    # Configure local-log / syslog behaviour.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    # Register connection-state (process-state UVE) reporting.
    ConnectionState.init(
        self._sandesh, self._module["hostname"], self._module["name"],
        self._module["instance_id"],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self._module["table"])
def sandesh_init(self):
    """Initialize Sandesh for this module (context/package are
    instance-provided, so the method is module-agnostic)."""
    self._sandesh = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Install our own handlers for selected sandesh requests.
    self.redefine_sandesh_handles()

    # Bring up the Sandesh generator; context and sandesh package
    # names are derived from instance attributes.
    self._sandesh.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, self._args.collectors,
        '%s_context' % self.context,
        int(self._args.http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        self.discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)

    # Configure local-log / syslog behaviour.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    # Register connection-state (process-state UVE) reporting.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
def sandesh_init(self):
    """Initialize Sandesh for this module, connecting to a randomized
    collector list (load-balancing across collectors)."""
    self._sandesh = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Install our own handlers for selected sandesh requests.
    self.redefine_sandesh_handles()

    # Bring up the Sandesh generator; note the shuffled collector
    # list (random_collectors) rather than the raw one.
    self._sandesh.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, self._args.random_collectors,
        '%s_context' % self.context,
        int(self._args.http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        self.discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)

    # Configure local-log / syslog behaviour.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    # Register connection-state (process-state UVE) reporting.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
def _sandesh_init(self, discovery):
    """Create, configure and return a Sandesh instance for svc-monitor.

    :param discovery: discovery client handed to the generator for
        collector discovery.
    :returns: the initialized Sandesh instance.
    """
    sandesh_instance = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Route ServiceInstanceList introspect requests to our handler.
    sandesh.ServiceInstanceList.handle_request = \
        self.sandesh_si_handle_request

    # Bring up the Sandesh generator for svc-monitor.
    sandesh_instance.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, self._args.collectors,
        'svc_monitor_context', int(self._args.http_server_port),
        ['cfgm_common', 'svc_monitor.sandesh'], discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)

    # Configure local-log / syslog behaviour.
    sandesh_instance.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    return sandesh_instance
def test_systemlog_msg_rate_limit(self):
    """Messages above the send-rate limit are dropped; negative rate
    limit values are rejected and leave the old limit in place."""
    systemlog_msg = SystemLogTest()
    self._expected_type = SandeshType.SYSTEM
    self._expected_hints = 0
    SandeshSystem.set_sandesh_send_rate_limit(10)
    # Let the rate-limit window start fresh.
    time.sleep(1)
    for _ in range(15):
        systemlog_msg.send(sandesh=sandesh_global)
    # 15 sends against a 10 msgs/sec limit => 5 rate-limited drops.
    self.assertEqual(
        5,
        sandesh_global.msg_stats().message_type_stats()
        ['SystemLogTest'].messages_sent_dropped_rate_limited)
    # A negative value must be ignored, keeping the previous limit.
    SandeshSystem.set_sandesh_send_rate_limit(-10)
    self.assertEqual(SandeshSystem.get_sandesh_send_rate_limit(), 10)
def test_sandesh_queue_level_drop(self):
    """Messages whose level is at or above (i.e. less severe than) the
    current send level are dropped at queue level and counted in the
    aggregate stats.

    Fix: the original built an extended level list (``mlevels`` =
    levels + SYS_DEBUG) but never used it; the dead local is removed.
    """
    # Raise the rate limit so rate limiting does not interfere with
    # the queue-level drop accounting checked here.
    SandeshSystem.set_sandesh_send_rate_limit(100)
    levels = list(range(SandeshLevel.SYS_EMERG, SandeshLevel.SYS_DEBUG))
    queue_level_drop = 0
    for send_level in levels:
        # Force the generator's effective send level.
        sandesh_global.send_level = mock.MagicMock(
            return_value=send_level)
        for sandesh_level in levels:
            systemlog = SystemLogTest(level=sandesh_level)
            systemlog.send()
            # Numerically higher level == lower severity -> dropped.
            if sandesh_level >= send_level:
                queue_level_drop += 1
            self.assertEqual(
                queue_level_drop,
                sandesh_global.msg_stats().aggregate_stats().
                messages_sent_dropped_queue_level)
def sandesh_sending_params_status_handle_request(self, sandesh_req):
    """Introspect handler: report the current sandesh sending params."""
    # Snapshot the current sending parameters into the response.
    status = SandeshSendingParams(
        system_logs_rate_limit=SandeshSystem.get_sandesh_send_rate_limit(),
        disable_object_logs=self._sandesh.is_sending_object_logs_disabled(),
        disable_all_logs=self._sandesh.is_sending_all_messages_disabled(),
        dscp=self.sandesh_get_dscp())
    status.response(sandesh_req.context(), sandesh=self._sandesh)
def sandesh_sending_params_set_handle_request(self, sandesh_req):
    """Introspect handler: update the sandesh sending params and echo
    back the resulting state.

    Only fields present (non-None) in the request are applied.
    """
    if sandesh_req.system_logs_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            sandesh_req.system_logs_rate_limit)
    if sandesh_req.disable_object_logs is not None:
        self._sandesh.disable_sending_object_logs(
            sandesh_req.disable_object_logs)
    if sandesh_req.disable_all_logs is not None:
        self._sandesh.disable_sending_all_messages(
            sandesh_req.disable_all_logs)
    # Echo the (possibly updated) sending parameters back.
    status = SandeshSendingParams(
        system_logs_rate_limit=SandeshSystem.get_sandesh_send_rate_limit(),
        disable_object_logs=self._sandesh.is_sending_object_logs_disabled(),
        disable_all_logs=self._sandesh.is_sending_all_messages_disabled(),
        dscp=self.sandesh_get_dscp())
    status.response(sandesh_req.context(), sandesh=self._sandesh)
def sandesh_init(self):
    """Initialize the Sandesh infrastructure for mesos-manager,
    passing the sandesh-specific config through to the generator."""
    self._sandesh = Sandesh()

    # Apply any configured send-rate limit before the generator starts.
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)

    # Bring up the Sandesh generator for this module.
    self._sandesh.init_generator(
        self.module['name'], self.module['hostname'],
        self.module['node_type_name'], self.module['instance_id'],
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        self.module['discovery'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)

    # Configure local-log / syslog behaviour.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)

    # Register connection-state (process-state UVE) reporting.
    ConnectionState.init(
        self._sandesh, self.module['hostname'], self.module['name'],
        self.module['instance_id'],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.module['table'])
def parse_args(args_str):
    '''
    Eg. python svc_monitor.py --ifmap_server_ip 192.168.1.17
                         --ifmap_server_port 8443
                         --ifmap_username test
                         --ifmap_password test
                         --rabbit_server localhost
                         --rabbit_port 5672
                         --rabbit_user guest
                         --rabbit_password guest
                         --cassandra_server_list 10.1.2.3:9160
                         --api_server_ip 10.1.2.3
                         --api_server_port 8082
                         --api_server_use_ssl False
                         --zk_server_ip 10.1.2.3
                         --zk_server_port 2181
                         --collectors 127.0.0.1:8086
                         --disc_server_ip 127.0.0.1
                         --disc_server_port 5998
                         --http_server_port 8090
                         --log_local
                         --log_level SYS_DEBUG
                         --log_category test
                         --log_file <stdout>
                         --trace_file /var/log/contrail/svc-monitor.err
                         --use_syslog
                         --syslog_facility LOG_USER
                         --cluster_id <testbed-name>
                         --check_service_interval 60
                         [--region_name <name>]
                         [--reset_config]
    '''
    # Stage 1: a help-less parser that only extracts -c/--conf_file, so
    # config-file values can become defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Hard-coded fallbacks, overridden by config file, then by CLI.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': '8443',
        'ifmap_username': '******',
        'ifmap_password': '******',
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'disc_server_ip': None,
        'disc_server_port': None,
        'http_server_port': '8088',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'check_service_interval': '60',
    }
    # SSL/cert options (only honoured when use_certs is set).
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # Keystone auth options.
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v2.0',
        'auth_insecure': True,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'default-domain'
    }
    # Service-instance scheduler options.
    schedops = {
        'si_netns_scheduler_driver':
            'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_ip': '127.0.0.1',
        'analytics_server_port': '8081',
        'availability_zone': None,
        'netns_availability_zone': None,
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }

    config = ConfigParser.SafeConfigParser()
    if args.conf_file:
        # Layer the config file on top of the hard-coded defaults.
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))

    # Stage 2: the real parser; CLI flags override everything.
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    parser.set_defaults(**defaults)
    parser.add_argument("--ifmap_server_ip",
                        help="IP address of ifmap server")
    parser.add_argument("--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument("--ifmap_username",
                        help="Username known to ifmap server")
    parser.add_argument("--ifmap_password",
                        help="Password known to ifmap server")
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip", help="IP address of API server")
    parser.add_argument("--api_server_port", help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--disc_server_ip",
                        help="IP address of the discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of the discovery server")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                             "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user", help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name",
                        help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    args = parser.parse_args(remaining_argv)
    args.config_sections = config

    # Normalize list-valued and sentinel ('none') string options.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    return args
def parse_args(args_str):
    '''
    Eg. python svc_monitor.py --ifmap_server_ip 192.168.1.17
                         --ifmap_server_port 8443
                         --ifmap_username test
                         --ifmap_password test
                         --rabbit_server localhost
                         --rabbit_port 5672
                         --rabbit_user guest
                         --rabbit_password guest
                         --cassandra_server_list 10.1.2.3:9160
                         --api_server_ip 10.1.2.3
                         --api_server_port 8082
                         --api_server_use_ssl False
                         --zk_server_ip 10.1.2.3
                         --zk_server_port 2181
                         --collectors 127.0.0.1:8086
                         --disc_server_ip 127.0.0.1
                         --disc_server_port 5998
                         --http_server_port 8090
                         --log_local
                         --log_level SYS_DEBUG
                         --log_category test
                         --log_file <stdout>
                         --trace_file /var/log/contrail/svc-monitor.err
                         --use_syslog
                         --syslog_facility LOG_USER
                         --cluster_id <testbed-name>
                         --check_service_interval 60
                         [--region_name <name>]
                         [--reset_config]
    '''
    # Stage 1: a help-less parser that only extracts -c/--conf_file, so
    # config-file values can become defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Hard-coded fallbacks, overridden by config file, then by CLI.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': '8443',
        'ifmap_username': '******',
        'ifmap_password': '******',
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'disc_server_ip': None,
        'disc_server_port': None,
        'http_server_port': '8088',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'check_service_interval': '60',
        'nova_endpoint_type': 'internalURL',
    }
    # SSL/cert options (only honoured when use_certs is set).
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # Keystone auth options.
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v2.0',
        'auth_insecure': True,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'admin'
    }
    # Service-instance scheduler options.
    schedops = {
        'si_netns_scheduler_driver':
            'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_ip': '127.0.0.1',
        'analytics_server_port': '8081',
        'availability_zone': None,
        'netns_availability_zone': None,
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }

    config = ConfigParser.SafeConfigParser()
    if args.conf_file:
        # Layer the config file on top of the hard-coded defaults.
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))

    # Stage 2: the real parser; CLI flags override everything.
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    parser.set_defaults(**defaults)
    parser.add_argument("--ifmap_server_ip",
                        help="IP address of ifmap server")
    parser.add_argument("--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument("--ifmap_username",
                        help="Username known to ifmap server")
    parser.add_argument("--ifmap_password",
                        help="Password known to ifmap server")
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip", help="IP address of API server")
    parser.add_argument("--api_server_port", help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--disc_server_ip",
                        help="IP address of the discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of the discovery server")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                             "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user", help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name",
                        help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    parser.add_argument("--check_service_interval",
                        help="Check service interval")
    args = parser.parse_args(remaining_argv)
    args.config_sections = config

    # Normalize list-valued and sentinel ('none') string options.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    return args
def parse_args():
    """Parse kube-manager options: defaults, then config file, then CLI."""
    # Stage 1: a help-less parser that only extracts -c/--config-file.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(sys.argv)

    # Hard-coded fallbacks, overridden by config file, then by CLI.
    defaults = {
        'http_server_port': HttpPortKubeManager,
        'worker_id': '0',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'kube_object_cache': 'True',
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_file': '/var/log/contrail/contrail-kube-manager.log',
        'api_service_link_local': 'True',
    }
    # VNC/API-server and rabbit/cassandra connectivity options.
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cassandra_server_list': '',
        'cluster_id': '',
        'vnc_endpoint_ip': 'localhost',
        'vnc_endpoint_port': ApiServerPort,
        'admin_user': '',
        'admin_password': '',
        'admin_tenant': '',
    }
    # Kubernetes API connectivity options.
    k8s_opts = {
        'kubernetes_api_server': 'localhost',
        'kubernetes_api_port': '8080',
        'kubernetes_api_secure_port': None,
        'kubernetes_api_secure_ip': None,
        'kubernetes_service_name': 'kubernetes',
        'service_subnets': '',
        'pod_subnets': '',
    }

    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        # Layer the config file on top of the hard-coded defaults.
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'KUBERNETES' in config.sections():
            k8s_opts.update(dict(config.items("KUBERNETES")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))

    # Stage 2: the real parser; CLI flags override everything.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(k8s_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args()

    # Normalize list-valued string options.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.pod_subnets) is str:
        args.pod_subnets = args.pod_subnets.split()
    if type(args.service_subnets) is str:
        args.service_subnets = args.service_subnets.split()
    return args
def parse_args(args_str):
    """Parse api-server options.

    Precedence: hard-coded defaults < config file sections < CLI flags.
    Returns (args namespace, leftover argv).
    """
    args_obj = None
    # Stage 1: a help-less parser that only extracts -c/--conf_file, so
    # config-file values can become defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': "8443",
        'ifmap_queue_size': 10000,
        'ifmap_max_message_size': 1024*1024,
        'cassandra_server_list': "127.0.0.1:9160",
        'ifmap_username': "******",
        'ifmap_password': "******",
        'collectors': None,
        'http_server_port': '8084',
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_level': 'WARN',
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': True,
        'multi_tenancy_with_rbac': False,
        'disc_server_ip': None,
        'disc_server_port': '5998',
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'cluster_id': '',
        'max_requests': 1024,
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'ifmap_health_check_interval': '60',  # in seconds
    }
    # ssl options
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # keystone options
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'insecure': True
    }
    # cassandra options
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None
    }

    config = None
    if args.conf_file:
        # Layer the config file on top of the hard-coded defaults; the
        # admin_token interpolation default avoids NoOptionError below.
        config = ConfigParser.SafeConfigParser({'admin_token': None})
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            # Booleans must be re-read with getboolean(); items() yields
            # plain strings.
            if 'multi_tenancy' in config.options('DEFAULTS'):
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'multi_tenancy_with_rbac' in config.options('DEFAULTS'):
                defaults['multi_tenancy_with_rbac'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy_with_rbac')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS',
                                              'default_encoding')
                gen.resource_xsd.ExternalEncoding = default_encoding
        if 'SECURITY' in config.sections() and\
                'use_certs' in config.options('SECURITY'):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'QUOTA' in config.sections():
            # Integer quota overrides; non-integer values are ignored.
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = int(v)
                except ValueError:
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))

    # Stage 2: the real parser; CLI flags override everything.
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    parser.set_defaults(**defaults)

    parser.add_argument(
        "--ifmap_server_ip", help="IP address of ifmap server")
    parser.add_argument(
        "--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument(
        "--ifmap_queue_size", type=int,
        help="Size of the queue that holds "
             "pending messages to be sent to ifmap server")
    parser.add_argument(
        "--ifmap_max_message_size", type=int,
        help="Maximum size of message "
             "sent to ifmap server")
    # TODO should be from certificate
    parser.add_argument(
        "--ifmap_username",
        help="Username known to ifmap server")
    parser.add_argument(
        "--ifmap_password",
        help="Password known to ifmap server")
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--disc_server_ip",
        help="IP address of discovery server")
    parser.add_argument(
        "--disc_server_port",
        help="Port of discovery server")
    parser.add_argument(
        "--redis_server_ip",
        help="IP address of redis server")
    parser.add_argument(
        "--redis_server_port",
        help="Port of redis server")
    parser.add_argument(
        "--auth", choices=['keystone'],
        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument(
        "--wipe_config", action="store_true",
        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument(
        "--collectors",
        help="List of VNC collectors in ip:port format",
        nargs="+")
    parser.add_argument(
        "--http_server_port",
        help="Port of local HTTP server")
    parser.add_argument(
        "--ifmap_server_loc",
        help="Location of IFMAP server")
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_level",
        help=("Log level for python logging: DEBUG, INFO, WARN, ERROR default: %s" % defaults['logging_level']))
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument(
        "--log_file",
        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument(
        "--multi_tenancy_with_rbac", action="store_true",
        help="Validate API and resource permissions (implies token validation)")
    parser.add_argument(
        "--worker_id",
        help="Worker Id")
    parser.add_argument(
        "--zk_server_ip",
        help="Ip address:port of zookeeper server")
    parser.add_argument(
        "--rabbit_server",
        help="Rabbitmq server address")
    parser.add_argument(
        "--rabbit_port",
        help="Rabbitmq server port")
    parser.add_argument(
        "--rabbit_user",
        help="Username for rabbit")
    parser.add_argument(
        "--rabbit_vhost",
        help="vhost for rabbit")
    parser.add_argument(
        "--rabbit_password",
        help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--cluster_id",
        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    parser.add_argument("--ifmap_health_check_interval",
                        help="Interval seconds to check for ifmap health, default 60")

    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.config_sections = config

    # Normalize list-valued string options.
    if type(args_obj.cassandra_server_list) is str:
        args_obj.cassandra_server_list =\
            args_obj.cassandra_server_list.split()
    if type(args_obj.collectors) is str:
        args_obj.collectors = args_obj.collectors.split()

    return args_obj, remaining_argv
def main(args_str=' '.join(sys.argv[1:])):
    """Entry point for the contrail nodemgr daemon.

    Parses CLI/config options for the node type being managed, connects to
    the discovery service and collectors, and hands control to the
    node-type-specific event manager, which runs forever under gevent.

    :param args_str: command-line arguments as a single string
                     (defaults to the process argv).
    :returns: None. Returns early (after writing to stderr) on an unknown
              node type or when not running under supervisord.
    """
    # Parse only --nodetype first; it selects the config file and the
    # event-manager class used below.
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                             default='contrail-analytics',
                             help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:  # noqa: E722 -- argparse errors exit via SystemExit; the bare
             # except deliberately intercepts it so usage() can run.
        usage()
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
    }
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
        default['collectors'] = ['127.0.0.1:8086']
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # BUGFIX: ConfigParser.sections() never lists the special [DEFAULT]
    # section, so the previous check ('DEFAULT' in config.sections())
    # could never be true and file-level defaults were silently dropped.
    # Read them through defaults() instead.
    if config.defaults():
        default.update(dict(config.defaults()))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    # Rename keys to match the --discovery_server/--discovery_port options.
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError:
            # server_list is optional; keep the node-type default.
            pass
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    if (node_type == 'contrail-database'):
        # Database nodes carry extra options for disk/cassandra maintenance.
        parser.add_argument("--minimum_diskgb",
                            type=int,
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
    try:
        _args = parser.parse_args(remaining_argv)
    except:  # noqa: E722 -- see note above: intercept SystemExit for usage().
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments

    # nodemgr is driven by supervisord process events; refuse to run outside
    # of a supervisor event-listener context.
    if 'SUPERVISOR_SERVER_URL' not in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-config'):
        prog = ConfigEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(
            rule_file, discovery_server, discovery_port, collector_addr)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        prog = DatabaseEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval)
    else:
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return
    # Publish initial state, then block forever processing supervisor events.
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def parse_args(args_str):
    """Parse VNC API server options (ifmap-client + rdbms variant).

    Reads any -c/--conf_file ini files first, layers SECURITY/KEYSTONE/
    CASSANDRA/RDBMS/QUOTA sections over built-in defaults, then lets CLI
    flags override everything via argparse ``set_defaults``.

    :param args_str: full command line as a single string.
    :returns: tuple (args_obj, remaining_argv) where ``args_obj`` carries
              every option as an attribute plus ``config_sections`` (the
              raw ConfigParser, or None when no conf file was given).
    """
    args_obj = None
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': "8443",
        'ifmap_queue_size': 10000,
        'ifmap_max_message_size': 1024 * 1024,
        'cassandra_server_list': "127.0.0.1:9160",
        'rdbms_server_list': "127.0.0.1:3306",
        'rdbms_connection_config': "",
        'ifmap_username': "******",
        'ifmap_password': "******",
        'collectors': None,
        'http_server_port': '8084',
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_level': 'WARN',
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': None,
        'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
        'disc_server_ip': None,
        'disc_server_port': '5998',
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'rabbit_health_check_interval': '120.0',  # in seconds
        'cluster_id': '',
        'max_requests': 1024,
        'region_name': 'RegionOne',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'ifmap_health_check_interval': '60',  # in seconds
        'stale_lock_seconds': '5',  # lock but no resource past this => stale
        'cloud_admin_role': cfgm_common.CLOUD_ADMIN_ROLE,
        'global_read_only_role': cfgm_common.GLOBAL_READ_ONLY_ROLE,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'object_cache_entries': '10000',  # max objects cached for read
        'object_cache_exclude_types': '',  # csv of types to *not* cache
        'db_engine': 'cassandra',
    }
    # ssl options
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # keystone options
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'insecure': True
    }
    # cassandra options
    cassandraopts = {'cassandra_user': None, 'cassandra_password': None}
    # rdbms options
    rdbmsopts = {
        'rdbms_user': None,
        'rdbms_password': None,
        'rdbms_connection': None
    }

    config = None
    if args.conf_file:
        config = ConfigParser.SafeConfigParser({'admin_token': None})
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            # Booleans must be re-read with getboolean -- items() yields
            # raw strings.
            if 'multi_tenancy' in config.options('DEFAULTS'):
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS', 'default_encoding')
                gen.resource_xsd.ExternalEncoding = default_encoding
        if 'SECURITY' in config.sections() and\
                'use_certs' in config.options('SECURITY'):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'QUOTA' in config.sections():
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = int(v)
                except ValueError:
                    # Non-integer quota values are ignored.
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        if 'RDBMS' in config.sections():
            rdbmsopts.update(dict(config.items('RDBMS')))

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(rdbmsopts)
    parser.set_defaults(**defaults)

    parser.add_argument("--ifmap_server_ip",
                        help="IP address of ifmap server")
    parser.add_argument("--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument("--ifmap_queue_size", type=int,
                        help="Size of the queue that holds "
                             "pending messages to be sent to ifmap server")
    parser.add_argument("--ifmap_max_message_size", type=int,
                        help="Maximum size of message "
                             "sent to ifmap server")
    # TODO should be from certificate
    parser.add_argument("--ifmap_username",
                        help="Username known to ifmap server")
    parser.add_argument("--ifmap_password",
                        help="Password known to ifmap server")
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--rdbms_server_list",
        # BUGFIX: help text previously said "cassandra servers"
        # (copy-paste error).
        help="List of rdbms servers in IP Address:Port format",
        nargs='+')
    parser.add_argument("--rdbms_connection", help="DB Connection string")
    parser.add_argument("--disc_server_ip",
                        help="IP address of discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of discovery server")
    parser.add_argument("--redis_server_ip",
                        help="IP address of redis server")
    parser.add_argument("--redis_server_port",
                        help="Port of redis server")
    parser.add_argument("--auth", choices=['keystone'],
                        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--wipe_config", action="store_true",
                        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--ifmap_server_loc",
                        help="Location of IFMAP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_level",
        help=("Log level for python logging: DEBUG, INFO, WARN, ERROR "
              "default: %s" % defaults['logging_level']))
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument("--aaa_mode",
                        choices=cfgm_common.AAA_MODE_VALID_VALUES,
                        help="AAA mode")
    parser.add_argument("--worker_id", help="Worker Id")
    parser.add_argument("--zk_server_ip",
                        help="Ip address:port of zookeeper server")
    parser.add_argument("--rabbit_server", help="Rabbitmq server address")
    parser.add_argument("--rabbit_port", help="Rabbitmq server port")
    parser.add_argument("--rabbit_user", help="Username for rabbit")
    parser.add_argument("--rabbit_vhost", help="vhost for rabbit")
    parser.add_argument("--rabbit_password", help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--rabbit_health_check_interval",
        help="Interval seconds between consumer heartbeats to rabbitmq")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    parser.add_argument(
        "--ifmap_health_check_interval",
        help="Interval seconds to check for ifmap health, default 60")
    parser.add_argument(
        "--stale_lock_seconds",
        help="Time after which lock without resource is stale, default 60")
    parser.add_argument("--cloud_admin_role",
                        help="Role name of cloud administrator")
    parser.add_argument(
        "--global_read_only_role",
        help="Role name of user with Read-Only access to all objects")
    parser.add_argument(
        "--object_cache_entries",
        help="Maximum number of objects cached for read, default 10000")
    parser.add_argument(
        "--object_cache_exclude_types",
        help="Comma separated values of object types to not cache")
    parser.add_argument("--db_engine",
                        help="Database engine to use, default cassandra")
    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.config_sections = config
    # Defaults sourced from ini files arrive as whitespace-separated
    # strings; normalize them to lists.
    if isinstance(args_obj.cassandra_server_list, str):
        args_obj.cassandra_server_list =\
            args_obj.cassandra_server_list.split()
    if isinstance(args_obj.rdbms_server_list, str):
        args_obj.rdbms_server_list =\
            args_obj.rdbms_server_list.split()
    if isinstance(args_obj.collectors, str):
        args_obj.collectors = args_obj.collectors.split()

    return args_obj, remaining_argv
def parse_args():
    """Parse mesos-manager options from config file(s) and sys.argv.

    Layers [VNC]/[MESOS]/[DEFAULTS] ini sections over built-in defaults,
    then parses the full command line with those values as argparse
    defaults.

    :returns: argparse.Namespace with every option as an attribute;
              list-valued options read from ini files are split into lists.
    """
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    # NOTE(review): sys.argv here includes argv[0] (the program name),
    # which ends up in remaining_argv and is never used -- the final
    # parse_args() call below re-reads sys.argv[1:] itself. Left as-is
    # to preserve behavior; confirm before changing.
    args, remaining_argv = conf_parser.parse_known_args(sys.argv)

    defaults = {
        'listen_ip_addr': mesos_consts._WEB_HOST,
        'listen_port': mesos_consts._WEB_PORT,
        'http_server_port': HttpPortMesosManager,
        'worker_id': '0',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
    }

    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_server_ip': mesos_consts._CASSANDRA_HOST,
        'cassandra_server_port': mesos_consts._CASSANDRA_PORT,
        'cassandra_max_retries': mesos_consts._CASSANDRA_MAX_RETRIES,
        'cassandra_timeout': mesos_consts._CASSANDRA_TIMEOUT,
        'cassandra_user': None,
        'cassandra_password': None,
        'cluster_id': '',
    }
    mesos_opts = {}

    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'MESOS' in config.sections():
            mesos_opts.update(dict(config.items("MESOS")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))

    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(mesos_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args()

    # BUGFIX: cassandra_server_list / pod_subnets / service_subnets only
    # exist as attributes when the ini file supplies them (nothing above
    # registers them as arguments or defaults), so direct attribute access
    # raised AttributeError on minimal configs. Use getattr and split only
    # when a string value is actually present.
    for list_opt in ('cassandra_server_list', 'pod_subnets',
                     'service_subnets'):
        value = getattr(args, list_opt, None)
        if isinstance(value, str):
            setattr(args, list_opt, value.split())
    return args
def parse_args(args_str):
    """Parse VNC API server options (self-managed IF-MAP server variant).

    Reads any -c/--conf_file ini files first, layers SECURITY/KEYSTONE/
    CASSANDRA/QUOTA/IFMAP_SERVER sections over built-in defaults, then
    lets CLI flags override everything via argparse ``set_defaults``.

    :param args_str: full command line as a single string.
    :returns: tuple (args_obj, remaining_argv) where ``args_obj`` carries
              every option as an attribute plus ``conf_file`` and
              ``config_sections`` (the raw ConfigParser, or None).
    """
    args_obj = None
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'cassandra_server_list': "127.0.0.1:9160",
        'collectors': None,
        'http_server_port': '8084',
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': None,
        'aaa_mode': None,
        'disc_server_ip': None,
        'disc_server_port': '5998',
        'disc_server_ssl': False,
        # BUGFIX: both cert paths below previously ended with a stray ','
        # *inside* the string ('...server.pem,' / '...ca-cert.pem,'),
        # producing invalid file paths. The trailing comma belongs to the
        # dict literal, not the path.
        'disc_server_cert': '/etc/contrail/ssl/server.pem',
        'disc_server_key': '/etc/contrail/ssl/private/server-privkey.pem',
        'disc_server_cacert': '/etc/contrail/ssl/ca-cert.pem',
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'rabbit_health_check_interval': '120.0',  # in seconds
        'cluster_id': '',
        'max_requests': 1024,
        'region_name': 'RegionOne',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'stale_lock_seconds': '5',  # lock but no resource past this => stale
        'cloud_admin_role': cfgm_common.CLOUD_ADMIN_ROLE,
        'global_read_only_role': cfgm_common.GLOBAL_READ_ONLY_ROLE,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'object_cache_entries': '10000',  # max objects cached for read
        'object_cache_exclude_types': '',  # csv of types to *not* cache
        # IF-MAP client options to connect and maintain irond sessions
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': "8443",
        'ifmap_username': "******",
        'ifmap_password': "******",
        'ifmap_queue_size': 10000,
        'ifmap_max_message_size': 1024 * 1024,
        'ifmap_health_check_interval': '60',  # in seconds
        'override_rpf_default_by': None,
        'max_request_size': 1024000,
        'tcp_keepalive_enable': True,
        'tcp_keepalive_idle_time': 7200,
        'tcp_keepalive_interval': 75,
        'tcp_keepalive_probes': 9,
    }
    # ssl options
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    # keystone options
    ksopts = {
        'signing_dir': '/var/lib/contrail/keystone-signing',
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'insecure': True,
        'cafile': '',
        'auth_type': 'password',
        'auth_url': '',
        'keystone_sync_on_demand': True,
    }
    # cassandra options
    cassandraopts = {'cassandra_user': None, 'cassandra_password': None}
    # ifmap server options
    ifmapopts = {
        # IF-MAP options to start self-managed and minimalist IF-MAP server
        # Listen IP and port
        'ifmap_listen_ip': None,
        'ifmap_listen_port': None,
        # Key and certificate file paths. If not set, automatically created
        'ifmap_key_path': '/var/lib/contrail/api-server/ifmap-cert/key',
        'ifmap_cert_path': '/var/lib/contrail/api-server/ifmap-cert/cert',
        # Credentials: [(user1, password), (user2, password)]
        'ifmap_credentials': [('control', 'secret')],
    }
    # RPF valid options
    rpf_valid_values = {
        'enable': 'enable',
        'enabled': 'enable',
        'on': 'enable',
        '1': 'enable',
        'disable': 'disable',
        'disabled': 'disable',
        'off': 'disable',
        '0': 'disable',
    }

    config = None
    if args.conf_file:
        config = ConfigParser.SafeConfigParser({'admin_token': None})
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            # Booleans must be re-read with getboolean -- items() yields
            # raw strings.
            if 'multi_tenancy' in config.options('DEFAULTS'):
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'disc_server_ssl' in config.options('DEFAULTS'):
                defaults['disc_server_ssl'] = config.getboolean(
                    'DEFAULTS', 'disc_server_ssl')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS', 'default_encoding')
                gen.resource_xsd.ExternalEncoding = default_encoding
            if 'override_rpf_default_by' in config.options('DEFAULTS'):
                defaults['override_rpf_default_by'] = config.get(
                    'DEFAULTS', 'override_rpf_default_by')
        if 'SECURITY' in config.sections() and\
                'use_certs' in config.options('SECURITY'):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
            if 'keystone_sync_on_demand' in config.options('KEYSTONE'):
                ksopts['keystone_sync_on_demand'] = config.getboolean(
                    'KEYSTONE', 'keystone_sync_on_demand')
        if 'QUOTA' in config.sections():
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = int(v)
                except ValueError:
                    # Non-integer quota values are ignored.
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        if 'IFMAP_SERVER' in config.sections():
            ifmapopts.update(dict(config.items('IFMAP_SERVER')))

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(ifmapopts)
    parser.set_defaults(**defaults)

    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument("--cassandra_use_ssl", action="store_true",
                        help="Enable TLS for cassandra connection")
    parser.add_argument("--cassandra_ca_certs",
                        help="Cassandra CA certs")
    parser.add_argument("--disc_server_ip",
                        help="IP address of discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of discovery server")
    parser.add_argument("--disc_server_cert",
                        help="Discovery Server ssl certificate")
    parser.add_argument("--disc_server_key",
                        help="Discovery Server ssl key")
    parser.add_argument("--disc_server_cacert",
                        help="Discovery Server ssl CA certificate")
    parser.add_argument("--disc_server_ssl", action="store_true",
                        help="Discovery service is configured with ssl")
    parser.add_argument("--redis_server_ip",
                        help="IP address of redis server")
    parser.add_argument("--redis_server_port",
                        help="Port of redis server")
    parser.add_argument("--auth", choices=['keystone'],
                        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--wipe_config", action="store_true",
                        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument("--aaa_mode",
                        choices=cfgm_common.AAA_MODE_VALID_VALUES,
                        help="AAA mode")
    parser.add_argument("--worker_id", help="Worker Id")
    parser.add_argument("--zk_server_ip",
                        help="Ip address:port of zookeeper server")
    parser.add_argument("--rabbit_server", help="Rabbitmq server address")
    parser.add_argument("--rabbit_port", help="Rabbitmq server port")
    parser.add_argument("--rabbit_user", help="Username for rabbit")
    parser.add_argument("--rabbit_vhost", help="vhost for rabbit")
    parser.add_argument("--rabbit_password", help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--rabbit_health_check_interval",
        help="Interval seconds between consumer heartbeats to rabbitmq")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    parser.add_argument(
        "--stale_lock_seconds",
        help="Time after which lock without resource is stale, default 60")
    parser.add_argument("--cloud_admin_role",
                        help="Role name of cloud administrator")
    parser.add_argument(
        "--global_read_only_role",
        help="Role name of user with Read-Only access to all objects")
    parser.add_argument(
        "--object_cache_entries",
        help="Maximum number of objects cached for read, default 10000")
    parser.add_argument(
        "--object_cache_exclude_types",
        help="Comma separated values of object types to not cache")
    parser.add_argument("--ifmap_server_ip",
                        help="IP address of ifmap server")
    parser.add_argument("--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument("--ifmap_username",
                        help="Username known to ifmap server")
    parser.add_argument("--ifmap_password",
                        help="Password known to ifmap server")
    parser.add_argument("--ifmap_queue_size", type=int,
                        help="Size of the queue that holds "
                             "pending messages to be sent to ifmap server")
    parser.add_argument("--ifmap_max_message_size", type=int,
                        help="Maximum size of message "
                             "sent to ifmap server")
    parser.add_argument(
        "--ifmap_health_check_interval",
        help="Interval seconds to check for ifmap health, default 60")
    parser.add_argument("--ifmap_listen_ip",
                        help="IP to bind IF-MAP server (If not set, the VNC "
                             "API server will use IF-MAP client to connect "
                             "to  an external IF-MAP server)")
    parser.add_argument("--ifmap_listen_port",
                        help="TCP port to bind IF-MAP server (If not set, "
                             "the VNC API server will use IF-MAP client to "
                             "connect  to an external IF-MAP server)")
    parser.add_argument("--ifmap_key_path",
                        help="Key file path to use for IF-MAP server")
    parser.add_argument(
        "--ifmap_cert_path",
        help="Certificate file path to use for IF-MAP server")
    parser.add_argument(
        '--ifmap_credentials',
        help="List of user and password: <username:password>",
        type=user_password,
        nargs='*')
    parser.add_argument(
        "--override_rpf_default_by", nargs="?",
        help="RPF default value to use when creating network")
    parser.add_argument(
        "--max_request_size", type=int,
        help="Maximum size of bottle requests served by api server")
    parser.add_argument("--tcp_keepalive_enable", action="store_true",
                        help="Used to enable keepalive for tcp connection")
    parser.add_argument(
        "--tcp_keepalive_idle_time", type=int,
        help="Used to set the keepalive timer in seconds")
    parser.add_argument(
        "--tcp_keepalive_interval", type=int,
        help="Used to specify the tcp keepalive interval time")
    parser.add_argument("--tcp_keepalive_probes", type=int,
                        help="Used to specify the tcp keepalive probes")
    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.conf_file = args.conf_file
    args_obj.config_sections = config
    # Defaults sourced from ini files arrive as whitespace-separated
    # strings; normalize them to lists.
    if isinstance(args_obj.cassandra_server_list, str):
        args_obj.cassandra_server_list =\
            args_obj.cassandra_server_list.split()
    if isinstance(args_obj.collectors, str):
        args_obj.collectors = args_obj.collectors.split()
    # Map the textual RPF override onto the server-wide default; unknown
    # values map to None via dict.get.
    if isinstance(args_obj.override_rpf_default_by, str):
        VirtualNetworkServer.rpf_default = rpf_valid_values.get(
            args_obj.override_rpf_default_by.lower())
    # Normalize to a real boolean; store_true defaults combined with ini
    # overrides can leave a string here.
    args_obj.cassandra_use_ssl = (str(
        args_obj.cassandra_use_ssl).lower() == 'true')

    return args_obj, remaining_argv
def main(args_str=' '.join(sys.argv[1:])):
    """Nodemgr entry point (supervisord-rules variant).

    Pre-parses --nodetype, merges per-node-type config-file sections over
    hard-coded defaults, parses the remaining command line, seeds global
    Sandesh settings, then builds the matching *EventManager and runs its
    event loop under gevent.

    NOTE(review): the default for args_str is evaluated once, at module
    import time, so it snapshots sys.argv from that moment.
    """
    # Stage 1: pre-parse only --nodetype; the rest of the option set
    # (and the config file to read) depends on it.
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        # usage() is defined elsewhere in this file -- presumably prints
        # help and exits.  The bare except also traps the SystemExit that
        # argparse raises on bad input.
        usage()
    # Discovery defaults; 'server'/'port' are renamed to
    # 'discovery_server'/'discovery_port' after the config file is merged.
    disc_options = {'server': socket.gethostname(), 'port': 5998}
    # Hard-coded defaults; overridden by [DEFAULTS] in the config file,
    # then by the command line.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'sandesh_send_rate_limit':
                   SandeshSystem.get_sandesh_send_rate_limit(),
               }
    # Sandesh TLS material; overridden by the [SANDESH] config section.
    sandesh_opts = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False
    }
    # Each supported node type reads its own config file.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" +
                         "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # Merge config-file sections over the hard-coded defaults.
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep the default.
            pass
    if 'SANDESH' in config.sections():
        sandesh_opts.update(dict(config.items('SANDESH')))
    # Stage 2: full parser.  Config-derived values become argparse
    # defaults, so the command line has the final word.
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    parser.add_argument("--sandesh_keyfile",
                        help="Sandesh ssl private key")
    parser.add_argument("--sandesh_certfile",
                        help="Sandesh ssl certificate")
    parser.add_argument("--sandesh_ca_cert",
                        help="Sandesh CA ssl certificate")
    parser.add_argument("--sandesh_ssl_enable", action="store_true",
                        help="Enable ssl for sandesh connection")
    parser.add_argument("--introspect_ssl_enable", action="store_true",
                        help="Enable ssl for introspect connection")
    # Extra knobs that only apply to a database node.
    if (node_type == 'contrail-database'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # Same bare-except-to-usage() pattern as above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    # randomize collector list
    # chksum fingerprints the configured (pre-shuffle) list; it is stored
    # on prog below -- presumably consulted by the SIGHUP reload handler
    # (defined elsewhere) to detect collector-list changes.
    _args.chksum = ""
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors,
                                                len(_args.collectors))
        _args.collectors = _args.random_collectors
    collector_addr = _args.collectors
    sys.stderr.write("Random Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        # Process-wide Sandesh setting.
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    sandesh_config = SandeshConfig(_args.sandesh_keyfile,
                                   _args.sandesh_certfile,
                                   _args.sandesh_ca_cert,
                                   _args.sandesh_ssl_enable,
                                   _args.introspect_ssl_enable)
    # done parsing arguments
    # Instantiate the event manager matching the node type; each branch
    # supplies a fallback rules file and the systemd units it supervises.
    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = ['contrail-collector.service',
                      'contrail-analytics-api.service',
                      'contrail-snmp-collector.service',
                      'contrail-query-engine.service',
                      'contrail-alarm-gen.service',
                      'contrail-topology.service',
                      'contrail-analytics-nodemgr.service',
                      ]
        prog = AnalyticsEventManager(rule_file, unit_names,
                                     discovery_server, discovery_port,
                                     collector_addr, sandesh_config)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = ['contrail-api.service',
                      'contrail-schema.service',
                      'contrail-svc-monitor.service',
                      'contrail-device-manager.service',
                      'contrail-discovery.service',
                      'contrail-config-nodemgr.service',
                      'ifmap.service',
                      ]
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, unit_names,
                                  discovery_server, discovery_port,
                                  collector_addr, sandesh_config,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = ['contrail-control.service',
                      'contrail-dns.service',
                      'contrail-named.service',
                      'contrail-control-nodemgr.service',
                      ]
        prog = ControlEventManager(rule_file, unit_names,
                                   discovery_server, discovery_port,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = ['contrail-vrouter-agent.service',
                      'contrail-vrouter-nodemgr.service',
                      ]
        prog = VrouterEventManager(rule_file, unit_names,
                                   discovery_server, discovery_port,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = ['contrail-database.service',
                      'kafka.service',
                      'contrail-database-nodemgr.service',
                      ]
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, unit_names,
                                    discovery_server, discovery_port,
                                    collector_addr, sandesh_config,
                                    hostip, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    """
    @sighup
    Reconfig of collector list
    """
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    gevent.joinall([
        gevent.spawn(prog.runforever),
        # NOTE(review): run_periodically(...) is *called* here and its
        # return value is what gets handed to spawn -- confirm this is
        # intended rather than
        # gevent.spawn(prog.run_periodically, prog.do_periodic_events, 60).
        gevent.spawn(prog.run_periodically(prog.do_periodic_events, 60))
    ])
def main(args_str=" ".join(sys.argv[1:])):
    """Nodemgr entry point (simple variant, no sandesh-ssl options).

    Pre-parses --nodetype, merges config-file sections over hard-coded
    defaults, parses the remaining command line, then builds the matching
    *EventManager and runs its event loop under gevent.

    NOTE: the default for args_str is evaluated once at import time, so
    it snapshots sys.argv from that moment.
    """
    # Stage 1: pre-parse only --nodetype; the full option set depends on it.
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default="contrail-analytics",
                             help="Type of node which nodemgr is managing")
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:  # usage() reports and exits; also traps argparse's SystemExit
        usage()
    # Discovery defaults; 'server'/'port' are renamed to discovery_* below.
    disc_options = {"server": socket.gethostname(), "port": 5998}
    # Hard-coded defaults, overridden by the config file, then by the CLI.
    default = {
        "rules": "",
        "collectors": [],
        "hostip": "127.0.0.1",
        "minimum_diskgb": 256,
        "contrail_databases": "config analytics",
        "cassandra_repair_interval": 24,
        "cassandra_repair_logdir": "/var/log/contrail/",
        "sandesh_send_rate_limit": SandeshSystem.get_sandesh_send_rate_limit(),
    }
    # Each supported node type reads its own config file.
    node_type = args.nodetype
    if node_type == "contrail-analytics":
        config_file = "/etc/contrail/contrail-analytics-nodemgr.conf"
    elif node_type == "contrail-config":
        config_file = "/etc/contrail/contrail-config-nodemgr.conf"
    elif node_type == "contrail-control":
        config_file = "/etc/contrail/contrail-control-nodemgr.conf"
    elif node_type == "contrail-vrouter":
        config_file = "/etc/contrail/contrail-vrouter-nodemgr.conf"
    elif node_type == "contrail-database":
        config_file = "/etc/contrail/contrail-database-nodemgr.conf"
    else:
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return
    if not os.path.exists(config_file):
        sys.stderr.write("config file " + config_file + " is not present" +
                         "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # Bug fix: ConfigParser never lists [DEFAULT] in sections(), so the
    # original check ('DEFAULT' in config.sections()) was dead code and
    # config-file defaults were silently ignored.  Read [DEFAULT] values
    # directly, and also honour the [DEFAULTS] spelling used by the
    # sibling nodemgr variants.
    default.update(config.defaults())
    if "DEFAULTS" in config.sections():
        default.update(dict(config.items("DEFAULTS")))
    if "DISCOVERY" in config.sections():
        disc_options.update(dict(config.items("DISCOVERY")))
    disc_options["discovery_server"] = disc_options.pop("server")
    disc_options["discovery_port"] = disc_options.pop("port")
    if "COLLECTOR" in config.sections():
        try:
            collector = config.get("COLLECTOR", "server_list")
            default["collectors"] = collector.split()
        except ConfigParser.NoOptionError:
            pass  # server_list is optional; keep the default
    # Stage 2: full parser -- config-derived values become argparse
    # defaults, so the command line has the final word.
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help="Rules file to use for processing events")
    parser.add_argument("--discovery_server",
                        help="IP address of Discovery Server")
    parser.add_argument("--discovery_port", type=int,
                        help="Port of Discovery Server")
    parser.add_argument("--collectors", nargs="+",
                        help="Collector addresses in format "
                             "ip1:port1 ip2:port2")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    # Extra knobs that only apply to a database node.
    if node_type == "contrail-database":
        parser.add_argument("--minimum_diskgb", type=int,
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases", nargs="+",
                            help="Contrail databases on this node "
                                 "in format: config analytics")
        parser.add_argument("--hostip", help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except:  # same bare-except-to-usage() pattern as above
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        # Process-wide Sandesh setting.
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments
    # Nodemgr is driven by supervisord events; refuse to run stand-alone.
    if "SUPERVISOR_SERVER_URL" not in os.environ:
        sys.stderr.write("Node manager must be run as a supervisor event "
                         "listener\n")
        sys.stderr.flush()
        return
    # Instantiate the event manager matching the node type.
    prog = None
    if node_type == "contrail-analytics":
        prog = AnalyticsEventManager(rule_file, discovery_server,
                                     discovery_port, collector_addr)
    elif node_type == "contrail-config":
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            cassandra_repair_interval, cassandra_repair_logdir,
        )
    elif node_type == "contrail-control":
        prog = ControlEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr)
    elif node_type == "contrail-vrouter":
        prog = VrouterEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr)
    elif node_type == "contrail-database":
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(
            rule_file, discovery_server, discovery_port, collector_addr,
            hostip, minimum_diskgb, contrail_databases,
            cassandra_repair_interval, cassandra_repair_logdir,
        )
    else:
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def parse_args():
    """Parse mesos-manager configuration from config file(s) and CLI.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([DEFAULTS], [VNC], [MESOS], [SANDESH]), command line.

    Returns:
        argparse.Namespace with a SandeshConfig attached as
        args.sandesh_config.
    """
    # Stage 1: only -c/--config-file, so we know what to read first.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(sys.argv)
    # Generic daemon defaults.
    defaults = {
        'mesos_api_server': mesos_consts._WEB_HOST,
        'mesos_api_port': mesos_consts._WEB_PORT,
        'http_server_port': HttpPortMesosManager,
        'worker_id': '0',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
    }
    # Contrail API / rabbit / cassandra connectivity defaults.
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_server_ip': mesos_consts._CASSANDRA_HOST,
        'cassandra_server_port': mesos_consts._CASSANDRA_PORT,
        'cassandra_max_retries': mesos_consts._CASSANDRA_MAX_RETRIES,
        'cassandra_timeout': mesos_consts._CASSANDRA_TIMEOUT,
        'cassandra_user': None,
        'cassandra_password': None,
        'cluster_id': '',
    }
    # Sandesh TLS material.
    sandesh_opts = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False
    }
    mesos_opts = {}
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'MESOS' in config.sections():
            mesos_opts.update(dict(config.items("MESOS")))
        if 'SANDESH' in config.sections():
            sandesh_opts.update(dict(config.items('SANDESH')))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Stage 2: real parser with the merged defaults; CLI wins.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(mesos_opts)
    defaults.update(sandesh_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args()
    # Bug fix: none of these list-valued options has a hard-coded default
    # here (the defaults above carry cassandra_server_ip, not
    # cassandra_server_list; pod/service subnets are kubernetes options),
    # so they exist on `args` only when the config file supplies them.
    # The original unguarded attribute access raised AttributeError
    # otherwise; guard with getattr and normalize space-separated strings
    # into lists.
    for opt in ('cassandra_server_list', 'pod_subnets', 'service_subnets'):
        value = getattr(args, opt, None)
        if isinstance(value, str):
            setattr(args, opt, value.split())
    args.sandesh_config = SandeshConfig(args.sandesh_keyfile,
                                        args.sandesh_certfile,
                                        args.sandesh_ca_cert,
                                        args.sandesh_ssl_enable,
                                        args.introspect_ssl_enable)
    return args
def parse_args():
    """Assemble the kube-manager configuration.

    Option precedence, lowest to highest: hard-coded defaults below,
    config-file sections ([VNC], [KUBERNETES], [SANDESH], [DEFAULTS]),
    then the command line.

    Returns:
        argparse.Namespace; space-separated list options are split into
        lists and a SandeshConfig is attached as args.sandesh_config.
    """
    # First pass: only -c/--config-file, so we know which files to read.
    file_parser = argparse.ArgumentParser(add_help=False)
    file_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    pre_args, _unused_argv = file_parser.parse_known_args(sys.argv)

    # Daemon-wide defaults.
    base_defaults = {
        'http_server_port': HttpPortKubeManager,
        'worker_id': '0',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'kube_object_cache': 'True',
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_file': '/var/log/contrail/contrail-kube-manager.log',
        'api_service_link_local': 'True',
        'orchestrator': 'kubernetes',
        'token': '',
    }
    # Contrail API / rabbit / cassandra connectivity defaults.
    vnc_defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cassandra_server_list': '',
        'cluster_id': '',
        'vnc_endpoint_ip': 'localhost',
        'vnc_endpoint_port': ApiServerPort,
        'admin_user': '',
        'admin_password': '',
        'admin_tenant': '',
        'public_network_name': '__public__',
        'public_fip_pool_name': '__fip_pool_public__',
    }
    # Kubernetes API endpoint defaults.
    kube_defaults = {
        'kubernetes_api_server': 'localhost',
        'kubernetes_api_port': '8080',
        'kubernetes_api_secure_port': None,
        'kubernetes_api_secure_ip': None,
        'kubernetes_service_name': 'kubernetes',
        'service_subnets': '',
        'pod_subnets': '',
    }
    # Sandesh TLS material.
    sandesh_defaults = {
        'sandesh_keyfile': '/etc/contrail/ssl/private/server-privkey.pem',
        'sandesh_certfile': '/etc/contrail/ssl/certs/server.pem',
        'sandesh_ca_cert': '/etc/contrail/ssl/certs/ca-cert.pem',
        'sandesh_ssl_enable': False,
        'introspect_ssl_enable': False
    }

    # Overlay config-file sections onto the matching default dicts.
    cfg = ConfigParser.SafeConfigParser()
    if pre_args.config_file:
        cfg.read(pre_args.config_file)
        present = cfg.sections()
        if 'VNC' in present:
            vnc_defaults.update(dict(cfg.items("VNC")))
        if 'KUBERNETES' in present:
            kube_defaults.update(dict(cfg.items("KUBERNETES")))
        if 'SANDESH' in present:
            sandesh_defaults.update(dict(cfg.items('SANDESH')))
        if 'DEFAULTS' in present:
            base_defaults.update(dict(cfg.items("DEFAULTS")))

    # Second pass: full parser; merged dicts become argparse defaults so
    # the command line has the final word.
    cli_parser = argparse.ArgumentParser(
        parents=[file_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    for section_defaults in (vnc_defaults, kube_defaults, sandesh_defaults):
        base_defaults.update(section_defaults)
    cli_parser.set_defaults(**base_defaults)
    args = cli_parser.parse_args()

    # Normalize space-separated string options into lists.
    for list_opt in ('cassandra_server_list', 'pod_subnets',
                     'service_subnets'):
        raw = getattr(args, list_opt)
        if type(raw) is str:
            setattr(args, list_opt, raw.split())

    args.sandesh_config = SandeshConfig(args.sandesh_keyfile,
                                        args.sandesh_certfile,
                                        args.sandesh_ca_cert,
                                        args.sandesh_ssl_enable,
                                        args.introspect_ssl_enable)
    return args
def parse_args():
    """Parse kube-manager configuration from config file(s) and CLI.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([VNC], [KUBERNETES], [DEFAULTS]), command line.

    Returns:
        argparse.Namespace; space-separated list options are split into
        lists when present.
    """
    # Stage 1: only -c/--config-file, so we know what to read first.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(sys.argv)
    # Generic daemon defaults.
    defaults = {
        'http_server_port': HttpPortKubeManager,
        'worker_id': '0',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'kube_object_cache': 'True',
    }
    # Contrail API / rabbit / cassandra connectivity defaults.
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cluster_id': '',
    }
    k8s_opts = {}
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'KUBERNETES' in config.sections():
            k8s_opts.update(dict(config.items("KUBERNETES")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Stage 2: real parser with the merged defaults; CLI wins.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(k8s_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args()
    # Bug fix: none of these options has a hard-coded default anywhere
    # above (k8s_opts starts empty and vnc_opts has no
    # cassandra_server_list), so they exist on `args` only when the
    # config file supplies them.  The original unguarded attribute access
    # raised AttributeError otherwise; guard with getattr and normalize
    # space-separated strings into lists.
    for opt in ('cassandra_server_list', 'pod_subnets', 'service_subnets'):
        value = getattr(args, opt, None)
        if isinstance(value, str):
            setattr(args, opt, value.split())
    return args
def main(args_str=' '.join(sys.argv[1:])):
    """Nodemgr entry point (discovery-SSL / cassandra-SSL variant).

    Pre-parses --nodetype, merges per-node-type config-file sections over
    hard-coded defaults, parses the remaining command line, then builds
    the matching *EventManager and runs its event loop under gevent.

    NOTE(review): the default for args_str is evaluated once, at module
    import time, so it snapshots sys.argv from that moment.
    """
    # Stage 1: pre-parse only --nodetype; the rest of the option set
    # (and the config file to read) depends on it.
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except:
        # usage() is defined elsewhere -- presumably prints help and
        # exits; the bare except also traps argparse's SystemExit.
        usage()
    # Discovery defaults; 'server'/'port' are renamed to discovery_*
    # below, and 'ssl'/'cert'/'key'/'cacert' may come from [DISCOVERY].
    disc_options = {'server': socket.gethostname(), 'port': 5998,
                    'ssl': False}
    # Hard-coded defaults; overridden by the config file, then the CLI.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'db_port': '9042',
               'minimum_diskgb': 256,
               'contrail_databases': 'config analytics',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'sandesh_send_rate_limit':
                   SandeshSystem.get_sandesh_send_rate_limit(),
               'cassandra_use_ssl': 'true',
               }
    # Each supported node type reads its own config file.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    if (os.path.exists(config_file) == False):
        sys.stderr.write("config file " + config_file + " is not present" +
                         "\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    # NOTE(review): ConfigParser does not report [DEFAULT] via
    # sections(), so this first branch can never fire -- confirm whether
    # config.defaults() was intended here.
    if 'DEFAULT' in config.sections():
        default.update(dict(config.items('DEFAULT')))
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'DISCOVERY' in config.sections():
        disc_options.update(dict(config.items('DISCOVERY')))
        if 'ssl' in config.options('DISCOVERY'):
            # Re-read as a real boolean (items() yields strings).
            disc_options['ssl'] = config.getboolean('DISCOVERY', 'ssl')
    # Rename to the argparse option names; cert/key/cacert default to
    # None unless the [DISCOVERY] section provided them.
    disc_options['discovery_server'] = disc_options.pop('server')
    disc_options['discovery_port'] = disc_options.pop('port')
    disc_options['discovery_ssl'] = disc_options.get('ssl')
    disc_options['discovery_cert'] = disc_options.get('cert')
    disc_options['discovery_key'] = disc_options.get('key')
    disc_options['discovery_cacert'] = disc_options.get('cacert')
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError as e:
            # server_list is optional; keep the default.
            pass
    # Stage 2: full parser.  Config-derived values become argparse
    # defaults, so the command line has the final word.
    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(disc_options)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--discovery_server",
                        help='IP address of Discovery Server')
    parser.add_argument("--discovery_port",
                        type=int,
                        help='Port of Discovery Server')
    parser.add_argument("--discovery_cert",
                        help="Discovery Server ssl certificate")
    parser.add_argument("--discovery_key",
                        help="Discovery Server ssl key")
    parser.add_argument("--discovery_cacert",
                        help="Discovery Server ssl CA certificate")
    parser.add_argument("--discovery_ssl", action="store_true",
                        help="Discovery service is configured with ssl")
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec")
    # Cassandra knobs apply to database nodes and (since config also
    # runs cassandra here) config nodes.
    if (node_type == 'contrail-database' or node_type == 'contrail-config'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--db_port",
                            help="Cassandra DB cql port")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
        parser.add_argument(
            "--cassandra_use_ssl",
            help="To connect SSL enabled cassandra. values: true|false")
    try:
        _args = parser.parse_args(remaining_argv)
    except:
        # Same bare-except-to-usage() pattern as above.
        usage()
    rule_file = _args.rules
    discovery_server = _args.discovery_server
    sys.stderr.write("Discovery server: " + discovery_server + "\n")
    discovery_port = _args.discovery_port
    sys.stderr.write("Discovery port: " + str(discovery_port) + "\n")
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")
    if _args.sandesh_send_rate_limit is not None:
        # Process-wide Sandesh setting.
        SandeshSystem.set_sandesh_send_rate_limit(
            _args.sandesh_send_rate_limit)
    # done parsing arguments
    # Nodemgr is driven by supervisord events; refuse to run stand-alone.
    if not 'SUPERVISOR_SERVER_URL' in os.environ:
        sys.stderr.write('Node manager must be run as a supervisor event '
                         'listener\n')
        sys.stderr.flush()
        return
    prog = None
    # Optional keyword arguments shared by every *EventManager: discovery
    # TLS material and the cassandra-over-SSL flag.
    kwargs = {}
    if _args.discovery_ssl:
        kwargs.update({
            'cert': _args.discovery_cert,
            'key': _args.discovery_key,
            'cacert': _args.discovery_cacert
        })
    if _args.cassandra_use_ssl.lower() == 'true':
        kwargs.update({'cassandra_use_ssl': True})
    # Instantiate the event manager matching the node type.
    if (node_type == 'contrail-analytics'):
        prog = AnalyticsEventManager(rule_file, discovery_server,
                                     discovery_port, collector_addr,
                                     **kwargs)
    elif (node_type == 'contrail-config'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, discovery_server,
                                  discovery_port, collector_addr,
                                  hostip, db_port, minimum_diskgb,
                                  contrail_databases,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir, **kwargs)
    elif (node_type == 'contrail-control'):
        prog = ControlEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr,
                                   **kwargs)
    elif (node_type == 'contrail-vrouter'):
        prog = VrouterEventManager(rule_file, discovery_server,
                                   discovery_port, collector_addr,
                                   **kwargs)
    elif (node_type == 'contrail-database'):
        hostip = _args.hostip
        db_port = _args.db_port
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, discovery_server,
                                    discovery_port, collector_addr,
                                    hostip, db_port, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir, **kwargs)
    else:
        sys.stderr.write("Node type" + str(node_type) + "is incorrect" + "\n")
        return
    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    gevent.joinall([gevent.spawn(prog.runforever)])
def parse_args(args_str):
    """Parse command line and config-file options for the VNC API server.

    Options are resolved in three layers, later layers overriding earlier
    ones: hard-coded defaults below, any INI file(s) given with
    -c/--conf_file, and finally the command line itself.

    :param args_str: full argument string (split on whitespace).
    :returns: tuple (args_obj, remaining_argv) where args_obj is the
        argparse Namespace (with .config_sections set to the parsed
        ConfigParser, or None) and remaining_argv holds unrecognized args.
    """
    args_obj = None
    # Source any specified config/ini file.
    # Turn off help, so we print all options in response to -h.
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'cassandra_server_list': "127.0.0.1:9160",
        'rdbms_server_list': "127.0.0.1:3306",
        'rdbms_connection_config': "",
        'collectors': None,
        'http_server_port': '8084',
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_level': 'WARN',
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': None,
        'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
        'disc_server_ip': None,
        'disc_server_port': '5998',
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'rabbit_health_check_interval': '120.0',  # in seconds
        'cluster_id': '',
        'max_requests': 1024,
        'region_name': 'RegionOne',
        'sandesh_send_rate_limit':
            SandeshSystem.get_sandesh_send_rate_limit(),
        'stale_lock_seconds': '5',  # lock but no resource past this => stale
        'cloud_admin_role': cfgm_common.CLOUD_ADMIN_ROLE,
        'global_read_only_role': cfgm_common.GLOBAL_READ_ONLY_ROLE,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'object_cache_entries': '10000',  # max number of objects cached for read
        'object_cache_exclude_types': '',  # csv of object types to *not* cache
        'db_engine': 'cassandra',
        # IF-MAP client options to connect and maintain irond sessions
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': "8443",
        'ifmap_username': "******",
        'ifmap_password': "******",
        'ifmap_queue_size': 10000,
        'ifmap_max_message_size': 1024 * 1024,
        'ifmap_health_check_interval': '60',  # in seconds
    }
    # ssl options
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    # keystone options
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'insecure': True,
        'cafile': ''
    }
    # cassandra options
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None
    }
    # ifmap server options
    ifmapopts = {
        # IF-MAP options to start self-managed and minimalist IF-MAP server
        # Listen IP and port
        'ifmap_listen_ip': None,
        'ifmap_listen_port': None,
        # Key and certificate files path. If not set, automatically create
        'ifmap_key_path': '/var/lib/contrail/api-server/ifmap-cert/key',
        'ifmap_cert_path': '/var/lib/contrail/api-server/ifmap-cert/cert',
        # Credentials: [(user1, password), (user2, password)]
        'ifmap_credentials': [('control', 'secret')],
    }
    # rdbms options
    rdbmsopts = {
        'rdbms_user': None,
        'rdbms_password': None,
        'rdbms_connection': None
    }

    config = None
    saved_conf_file = args.conf_file
    if args.conf_file:
        config = ConfigParser.SafeConfigParser({'admin_token': None})
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            if 'multi_tenancy' in config.options('DEFAULTS'):
                # Reparse as boolean - config.items() returns raw strings.
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS', 'default_encoding')
                gen.resource_xsd.ExternalEncoding = default_encoding
        if 'SECURITY' in config.sections() and\
                'use_certs' in config.options('SECURITY'):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'QUOTA' in config.sections():
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = int(v)
                except ValueError:
                    # Non-integer quota values are silently ignored.
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        if 'RDBMS' in config.sections():
            rdbmsopts.update(dict(config.items('RDBMS')))
        if 'IFMAP_SERVER' in config.sections():
            ifmapopts.update(dict(config.items('IFMAP_SERVER')))

    # Override with CLI options.
    # Don't suppress add_help here so it will handle -h.
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(rdbmsopts)
    defaults.update(ifmapopts)
    parser.set_defaults(**defaults)

    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--rdbms_server_list",
        help="List of rdbms servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--rdbms_connection",
        help="DB Connection string")
    parser.add_argument(
        "--disc_server_ip",
        help="IP address of discovery server")
    parser.add_argument(
        "--disc_server_port",
        help="Port of discovery server")
    parser.add_argument(
        "--redis_server_ip",
        help="IP address of redis server")
    parser.add_argument(
        "--redis_server_port",
        help="Port of redis server")
    parser.add_argument(
        "--auth", choices=['keystone'],
        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument(
        "--wipe_config", action="store_true",
        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument(
        "--collectors",
        help="List of VNC collectors in ip:port format",
        nargs="+")
    parser.add_argument(
        "--http_server_port",
        help="Port of local HTTP server")
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_level",
        help=("Log level for python logging: DEBUG, INFO, WARN, ERROR "
              "default: %s" % defaults['logging_level']))
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument(
        "--log_file",
        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument(
        "--aaa_mode", choices=cfgm_common.AAA_MODE_VALID_VALUES,
        help="AAA mode")
    parser.add_argument(
        "--worker_id",
        help="Worker Id")
    parser.add_argument(
        "--zk_server_ip",
        help="Ip address:port of zookeeper server")
    parser.add_argument(
        "--rabbit_server",
        help="Rabbitmq server address")
    parser.add_argument(
        "--rabbit_port",
        help="Rabbitmq server port")
    parser.add_argument(
        "--rabbit_user",
        help="Username for rabbit")
    parser.add_argument(
        "--rabbit_vhost",
        help="vhost for rabbit")
    parser.add_argument(
        "--rabbit_password",
        help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--rabbit_health_check_interval",
        help="Interval seconds between consumer heartbeats to rabbitmq")
    parser.add_argument(
        "--cluster_id",
        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument("--cassandra_user",
                        help="Cassandra user name")
    parser.add_argument("--cassandra_password",
                        help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    parser.add_argument("--stale_lock_seconds",
                        help="Time after which lock without resource is "
                             "stale, default 5")
    parser.add_argument(
        "--cloud_admin_role",
        help="Role name of cloud administrator")
    parser.add_argument(
        "--global_read_only_role",
        help="Role name of user with Read-Only access to all objects")
    parser.add_argument("--object_cache_entries",
                        help="Maximum number of objects cached for read, "
                             "default 10000")
    parser.add_argument("--object_cache_exclude_types",
                        help="Comma separated values of object types to "
                             "not cache")
    parser.add_argument("--db_engine",
                        help="Database engine to use, default cassandra")
    parser.add_argument(
        "--ifmap_server_ip", help="IP address of ifmap server")
    parser.add_argument(
        "--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument(
        "--ifmap_username",
        help="Username known to ifmap server")
    parser.add_argument(
        "--ifmap_password",
        help="Password known to ifmap server")
    parser.add_argument(
        "--ifmap_queue_size", type=int,
        help="Size of the queue that holds "
             "pending messages to be sent to ifmap server")
    parser.add_argument(
        "--ifmap_max_message_size", type=int,
        help="Maximum size of message "
             "sent to ifmap server")
    parser.add_argument("--ifmap_health_check_interval",
                        help="Interval seconds to check for ifmap health, "
                             "default 60")
    parser.add_argument("--ifmap_listen_ip", default=None,
                        help="IP to bind IF-MAP server (If not set, the VNC "
                             "API server will use IF-MAP client to connect to "
                             " an external IF-MAP server)")
    parser.add_argument("--ifmap_listen_port",
                        help="TCP port to bind IF-MAP server (If not set, the "
                             "VNC API server will use IF-MAP client to connect "
                             " to an external IF-MAP server)")
    parser.add_argument("--ifmap_key_path",
                        help="Key file path to use for IF-MAP server")
    parser.add_argument("--ifmap_cert_path",
                        help="Certificate file path to use for IF-MAP server")
    parser.add_argument('--ifmap_credentials',
                        help="List of user and password: <username:password>",
                        type=user_password, nargs='*')

    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.config_sections = config
    # Values coming from an INI file are plain strings; normalize the
    # list-valued options so downstream code always sees lists.
    if isinstance(args_obj.cassandra_server_list, str):
        args_obj.cassandra_server_list =\
            args_obj.cassandra_server_list.split()
    if isinstance(args_obj.rdbms_server_list, str):
        args_obj.rdbms_server_list =\
            args_obj.rdbms_server_list.split()
    if isinstance(args_obj.collectors, str):
        args_obj.collectors = args_obj.collectors.split()
    args_obj.conf_file = saved_conf_file

    return args_obj, remaining_argv