def __init__(self, config, type_info, sandesh_instance, unit_names,
             update_process_list=False):
    """Set up the node-manager event manager.

    Stores config/type metadata, initializes core-file and failure-status
    bookkeeping, wires supervisor-style event handlers, brings up the
    sandesh generator/logging, and builds the initial process-state DB.

    :param config: parsed configuration namespace (collectors, log_* options).
    :param type_info: node-type descriptor (module name, object table, ...).
    :param sandesh_instance: sandesh instance to initialize and log through.
    :param unit_names: process/unit names to monitor.
    :param update_process_list: passed through to the process-info manager.
    """
    self.config = config
    self.type_info = type_info
    # Core-file accounting limits and state.
    self.max_cores = 4
    self.max_old_cores = 3
    self.max_new_cores = 1
    self.all_core_file_list = []
    self.core_dir_modified_time = 0
    self.tick_count = 0
    # Bitmask of failing subsystems; prev starts different (1 vs 0) so the
    # first evaluation always reports current status.
    self.fail_status_bits = 0
    self.prev_fail_status_bits = 1
    self.instance_id = INSTANCE_ID_DEFAULT
    self.collector_addr = self.config.collectors
    self.sandesh_instance = sandesh_instance
    self.curr_build_info = None
    self.new_build_info = None
    self.last_cpu = None
    self.last_time = 0
    self.own_version = None
    self.hostname = socket.gethostname()
    # Map supervisor/process-manager event names to handler methods.
    event_handlers = {}
    event_handlers['PROCESS_STATE'] = self.event_process_state
    event_handlers['PROCESS_COMMUNICATION'] = \
        self.event_process_communication
    event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_processes
    # NOTE(review): the callback is wrapped in staticmethod() before being
    # passed; presumably ConnectionState.init expects it this way — confirm.
    ConnectionState.init(self.sandesh_instance, self.hostname,
                         self.type_info._module_name, self.instance_id,
                         staticmethod(ConnectionState.get_conn_state_cb),
                         NodeStatusUVE, NodeStatus,
                         self.type_info._object_table,
                         self.get_process_state_cb)
    self.sandesh_instance.init_generator(
        self.type_info._module_name, self.hostname,
        self.type_info._node_type_name, self.instance_id,
        self.collector_addr, self.type_info._module_name,
        ServiceHttpPortMap[self.type_info._module_name],
        ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
        config=SandeshConfig.from_parser_arguments(self.config))
    self.sandesh_instance.set_logging_params(
        enable_local_log=self.config.log_local,
        category=self.config.log_category,
        level=self.config.log_level,
        file=self.config.log_file,
        enable_syslog=self.config.use_syslog,
        syslog_facility=self.config.syslog_facility)
    self.logger = self.sandesh_instance.logger()
    # Only the docker/kubepod process manager is supported; anything else
    # is fatal for the node manager.
    if DockerProcessInfoManager and (utils.is_running_in_docker() or
                                     utils.is_running_in_kubepod()):
        self.process_info_manager = DockerProcessInfoManager(
            type_info._module_type, unit_names, event_handlers,
            update_process_list)
    else:
        self.msg_log('Node manager could not detect process manager',
                     SandeshLevel.SYS_ERR)
        exit(-1)
    # Seed the process-state DB and announce initial state per group.
    self.process_state_db = self.get_current_processes()
    for group in self.process_state_db:
        self.send_init_info(group)
def _read_sandesh_config(self):
    """Build the sandesh configuration and store it under self.config.

    Registers sandesh CLI arguments on the instance's argument parser,
    layers sandesh defaults with values from the already-parsed config
    file ([SANDESH]/[DEFAULTS] sections), re-parses known args, and
    saves the resulting SandeshConfig as self.config["sandesh_config"].
    """
    SandeshConfig.add_parser_arguments(self._arg_parser)
    sandesh_opts = SandeshConfig.get_default_options(
        ["SANDESH", "DEFAULTS"])
    # Overlay file-provided values onto the defaults first; the original
    # code also called set_defaults() before update_options(), but that
    # call was immediately overwritten and has been removed.
    SandeshConfig.update_options(sandesh_opts, self._parsed_config)
    self._arg_parser.set_defaults(**sandesh_opts)
    args, _ = self._arg_parser.parse_known_args()
    sandesh_config = SandeshConfig.from_parser_arguments(args)
    self.config["sandesh_config"] = sandesh_config
def parse_args(args_str=None):
    """Parse mesos-manager options from CLI and optional config files.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([VNC]/[MESOS]/sandesh/[DEFAULTS]), then command-line
    arguments.

    :param args_str: argument list; defaults to sys.argv[1:].
    :returns: argparse.Namespace with list-valued options split into
        lists and a SandeshConfig attached as args.sandesh_config.
    """
    if not args_str:
        args_str = sys.argv[1:]
    # First pass: only extract -c/--config-file so the files can seed
    # the defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    defaults = {
        'mesos_api_server': mesos_consts._WEB_HOST,
        'mesos_api_port': mesos_consts._WEB_PORT,
        'http_server_port': HttpPortMesosManager,
        'worker_id': '0',
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    vnc_opts = {
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant': 'admin',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_server_ip': mesos_consts._CASSANDRA_HOST,
        'cassandra_server_port': mesos_consts._CASSANDRA_PORT,
        'cassandra_max_retries': mesos_consts._CASSANDRA_MAX_RETRIES,
        'cassandra_timeout': mesos_consts._CASSANDRA_TIMEOUT,
        'cassandra_user': None,
        'cassandra_password': None,
        'cluster_id': '',
    }
    sandesh_opts = SandeshConfig.get_default_options()
    mesos_opts = {
        'mesos_api_server': 'localhost',
        'mesos_api_port': '8080',
        'mesos_api_secure_port': 8443,
        'mesos_api_secure_ip': None,
        'mesos_service_name': 'mesos',
        'service_subnets': '',
        'app_subnets': ''
    }
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'MESOS' in config.sections():
            mesos_opts.update(dict(config.items("MESOS")))
        SandeshConfig.update_options(sandesh_opts, config)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Second pass: the real parser inherits -c and gets all layered
    # defaults; CLI values override everything.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(mesos_opts)
    defaults.update(sandesh_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args(args_str)
    # 'cassandra_server_list' is not in any defaults dict (only
    # cassandra_server_ip/port are), so it only exists when a config
    # file supplies it; guard with getattr to avoid an AttributeError
    # when run without one.
    csl = getattr(args, 'cassandra_server_list', None)
    if isinstance(csl, str):
        args.cassandra_server_list = csl.split()
    # Whitespace-separated subnet strings become real lists.
    if isinstance(args.app_subnets, str):
        args.app_subnets = args.app_subnets.split()
    if isinstance(args.service_subnets, str):
        args.service_subnets = args.service_subnets.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    return args
def parse_args(args_str):
    """Parse svc-monitor options from an argument string and config files.

    Layering (lowest to highest precedence): hard-coded defaults, config
    file sections ([DEFAULTS]/[SECURITY]/[KEYSTONE]/[SCHEDULER]/
    [CASSANDRA]/sandesh), then command-line arguments.

    Eg. python svc_monitor.py --rabbit_server localhost
        --rabbit_port 5672 --rabbit_user guest --rabbit_password guest
        --cassandra_server_list 10.1.2.3:9160
        --api_server_ip 10.1.2.3 --api_server_port 8082
        --api_server_use_ssl False
        --zk_server_ip 10.1.2.3 --zk_server_port 2181
        --collectors 127.0.0.1:8086 --http_server_port 8090
        --log_local --log_level SYS_DEBUG --log_category test
        --log_file <stdout> --trace_file /var/log/contrail/svc-monitor.err
        --use_syslog --syslog_facility LOG_USER
        --cluster_id <testbed-name> --check_service_interval 60
        [--region_name <name>] [--reset_config]

    :param args_str: whitespace-separated argument string (note: split(),
        so values containing spaces are not supported).
    :returns: argparse.Namespace with list options split, 'none' strings
        normalized to None, and args.sandesh_config attached.
    """
    # Source any specified config/ini file
    # Turn off help, so we show all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'http_server_port': '8088',
        'http_server_ip': '0.0.0.0',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'check_service_interval': '60',
        'nova_endpoint_type': 'internalURL',
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    # TLS material for the API client; only applied when use_certs is set.
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v2.0',
        'auth_insecure': True,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'admin'
    }
    schedops = {
        'si_netns_scheduler_driver':
            'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_list': '127.0.0.1:8081',
        'availability_zone': None,
        'netns_availability_zone': None,
        'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }
    sandeshopts = SandeshConfig.get_default_options()
    # parse_known_args below will overwrite args; keep the original file
    # list so it can be restored on the result.
    saved_conf_file = args.conf_file
    config = ConfigParser.SafeConfigParser()
    if args.conf_file:
        config.read(args.conf_file)
        # NOTE(review): [DEFAULTS] is read unconditionally here; a config
        # file without that section would raise — presumably always present.
        defaults.update(dict(config.items("DEFAULTS")))
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)
    # Override with CLI options
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument("--cassandra_use_ssl", action="store_true",
                        help="Enable TLS for cassandra communication")
    parser.add_argument("--cassandra_ca_certs",
                        help="Cassandra CA certs")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip",
                        help="IP address of API server")
    parser.add_argument("--api_server_port",
                        help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--http_server_ip",
                        help="IP of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                             "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--aaa_mode", choices=AAA_MODE_VALID_VALUES,
                        help="AAA mode")
    parser.add_argument("--admin_user",
                        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name",
                        help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user",
                        help="Cassandra user name")
    parser.add_argument("--cassandra_password",
                        help="Cassandra password")
    parser.add_argument("--check_service_interval",
                        help="Check service interval")
    SandeshConfig.add_parser_arguments(parser)
    args = parser.parse_args(remaining_argv)
    args._conf_file = saved_conf_file
    args.config_sections = config
    # Normalize whitespace-separated server/collector strings into lists.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    # The literal string 'none' (any case) means "not set" for these.
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    # Config files yield strings; coerce to a real boolean.
    args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true')
    return args
def parse_args(args_str=None):
    """Parse mesos-CNI manager options from CLI and optional config files.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([VNC]/[MESOS]/sandesh/[DEFAULTS]), then command-line
    arguments. Mandatory arguments (pod-task and ip-fabric subnets) are
    validated before returning.

    :param args_str: argument list; defaults to sys.argv[1:].
    :returns: argparse.Namespace with subnet strings split into lists,
        string booleans coerced, and args.sandesh_config attached.
    :raises: whatever validate_mandatory_args raises when a mandatory
        option is missing.
    """
    if not args_str:
        args_str = sys.argv[1:]
    # First pass: only extract -c/--config-file so the files can seed
    # the defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    defaults = {
        'http_server_port': HttpPortMesosManager,
        'worker_id': '0',
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    vnc_opts = {
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant': 'admin',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cluster_id': '',
    }
    sandesh_opts = SandeshConfig.get_default_options()
    mesos_opts = {
        'mesos_cni_server': 'localhost',
        'mesos_cni_port': 6991,
        'mesos_cni_secure_port': 8443,
        'mesos_cni_secure_ip': None,
        # Mandatory subnet options keyed by their canonical arg names.
        MandatoryArgs.POD_TASK_SUBNET.value['arg_str']: None,
        MandatoryArgs.IP_FABRIC_SUBNET.value['arg_str']: None,
        'ip_fabric_forwarding': False,
        'ip_fabric_snat': False,
    }
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'MESOS' in config.sections():
            mesos_opts.update(dict(config.items("MESOS")))
        SandeshConfig.update_options(sandesh_opts, config)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Second pass: the real parser inherits -c and gets all layered
    # defaults; CLI values override everything.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(mesos_opts)
    defaults.update(sandesh_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args(args_str)
    # 'cassandra_server_list' appears in no defaults dict, so it only
    # exists when a config file supplies it; guard with getattr to avoid
    # an AttributeError when run without one.
    csl = getattr(args, 'cassandra_server_list', None)
    if isinstance(csl, str):
        args.cassandra_server_list = csl.split()
    # Whitespace-separated subnet strings become real lists.
    if isinstance(args.pod_task_subnets, str):
        args.pod_task_subnets = args.pod_task_subnets.split()
    if isinstance(args.ip_fabric_subnets, str):
        args.ip_fabric_subnets = args.ip_fabric_subnets.split()
    # Config files yield strings; coerce 'true' (case-insensitive) to True.
    if isinstance(args.ip_fabric_forwarding, str):
        args.ip_fabric_forwarding = \
            (args.ip_fabric_forwarding.lower() == 'true')
    if isinstance(args.ip_fabric_snat, str):
        args.ip_fabric_snat = (args.ip_fabric_snat.lower() == 'true')
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    # Validate input argumnents.
    validate_mandatory_args(args)
    return args
def parse_args(args_str=None):
    """Parse kube-manager options from CLI and optional config files.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([VNC]/[KUBERNETES]/sandesh/[AUTH]/[DEFAULTS]), then
    command-line arguments.

    :param args_str: argument list; defaults to sys.argv[1:].
    :returns: argparse.Namespace with list-valued options split into
        lists and a SandeshConfig attached as args.sandesh_config.
    """
    if not args_str:
        args_str = sys.argv[1:]
    # First pass: only extract -c/--config-file so the files can seed
    # the defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    defaults = {
        'http_server_port': HttpPortKubeManager,
        'worker_id': '0',
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'kube_object_cache': 'True',
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_file': '/var/log/contrail/contrail-kube-manager.log',
        'api_service_link_local': 'True',
        'orchestrator': 'kubernetes',
        'token': '',
        'nested_mode': '0',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cassandra_server_list': '',
        'cluster_id': '',
        'vnc_endpoint_ip': '[127.0.0.1]',
        'vnc_endpoint_port': ApiServerPort,
        'admin_user': '',
        'admin_password': '',
        'admin_tenant': '',
        'public_fip_pool': '{}',
        'zk_server_ip': '127.0.0.1:2181',
    }
    k8s_opts = {
        'kubernetes_api_server': 'localhost',
        'kubernetes_api_port': '8080',
        'kubernetes_api_secure_port': 8443,
        'kubernetes_service_name': 'kubernetes',
        'service_subnets': '',
        'pod_subnets': '',
        'kubernetes_cluster_owner': 'k8s',
        'kubernetes_cluster_domain': 'default-domain',
        'cluster_name': None,
        'cluster_project': "{}",
        'cluster_network': None,
    }
    sandesh_opts = SandeshConfig.get_default_options()
    auth_opts = {
        'auth_token_url': None,
        'auth_user': '******',
        'auth_password': '******',
        'auth_tenant': 'admin',
    }
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'KUBERNETES' in config.sections():
            k8s_opts.update(dict(config.items("KUBERNETES")))
        SandeshConfig.update_options(sandesh_opts, config)
        if 'AUTH' in config.sections():
            auth_opts.update(dict(config.items("AUTH")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Second pass: the real parser inherits -c and gets all layered
    # defaults; CLI values override everything.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(k8s_opts)
    defaults.update(sandesh_opts)
    defaults.update(auth_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args(args_str)
    # Normalize whitespace-separated strings into lists.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.pod_subnets) is str:
        args.pod_subnets = args.pod_subnets.split()
    if type(args.service_subnets) is str:
        args.service_subnets = args.service_subnets.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    return args
def parse_args(args_str=None):
    """Parse contrail-mesos-manager options from CLI and config files.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([VNC]/[MESOS]/sandesh/[AUTH]/[DEFAULTS]), then
    command-line arguments. Mandatory subnet arguments are validated
    before returning.

    :param args_str: argument list; defaults to sys.argv[1:].
    :returns: argparse.Namespace with list options split, string
        booleans coerced, and args.sandesh_config attached.
    :raises: whatever validate_mandatory_args raises when a mandatory
        option is missing.
    """
    if not args_str:
        args_str = sys.argv[1:]
    # First pass: only extract -c/--config-file so the files can seed
    # the defaults for the real parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)
    defaults = {
        'http_server_port': HttpPortMesosManager,
        'worker_id': '0',
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_file': '/var/log/contrail/contrail-mesos-manager.log',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cassandra_server_list': '',
        'cassandra_use_ssl': False,
        'cassandra_ca_certs': None,
        'cluster_id': '',
        'vnc_endpoint_ip': '[127.0.0.1]',
        'vnc_endpoint_port': ApiServerPort,
        'admin_user': '',
        'admin_password': '',
        'admin_tenant': '',
        'public_fip_pool': '{}',
        'zk_server_ip': '127.0.0.1:2181',
    }
    sandesh_opts = SandeshConfig.get_default_options()
    mesos_opts = {
        'mesos_cni_server': 'localhost',
        'mesos_cni_port': 6991,
        'mesos_cni_secure_port': 8443,
        'mesos_cni_secure_ip': None,
        # Mandatory subnet options keyed by their canonical arg names.
        MandatoryArgs.POD_TASK_SUBNET.value['arg_str']: None,
        MandatoryArgs.IP_FABRIC_SUBNET.value['arg_str']: None,
        'mesos_cluster_owner': 'mesos',
        'mesos_cluster_domain': 'default-domain',
        'mesos_cluster_name': 'mesos',
        'cluster_project': "{}",
        'cluster_network': "{}",
        'cluster_pod_task_network': None,
        'ip_fabric_forwarding': False,
        'ip_fabric_snat': False,
        'mesos_agent_retry_sync_hold_time': 2,
        'mesos_agent_retry_sync_count': 6,
    }
    auth_opts = {
        'auth_token_url': None,
        'auth_user': '******',
        'auth_password': '******',
        'auth_tenant': 'admin',
    }
    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'MESOS' in config.sections():
            mesos_opts.update(dict(config.items("MESOS")))
        SandeshConfig.update_options(sandesh_opts, config)
        if 'AUTH' in config.sections():
            auth_opts.update(dict(config.items("AUTH")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
    # Second pass: the real parser inherits -c and gets all layered
    # defaults; CLI values override everything.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(mesos_opts)
    defaults.update(sandesh_opts)
    defaults.update(auth_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args(args_str)
    # Normalize whitespace-separated strings into lists, and config-file
    # string booleans into real booleans.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.cassandra_use_ssl) is str:
        args.cassandra_use_ssl = args.cassandra_use_ssl.lower() == 'true'
    if type(args.pod_task_subnets) is str:
        args.pod_task_subnets = args.pod_task_subnets.split()
    if type(args.ip_fabric_subnets) is str:
        args.ip_fabric_subnets = args.ip_fabric_subnets.split()
    if type(args.ip_fabric_forwarding) is str:
        if args.ip_fabric_forwarding.lower() == 'true':
            args.ip_fabric_forwarding = True
        else:
            args.ip_fabric_forwarding = False
    if type(args.ip_fabric_snat) is str:
        if args.ip_fabric_snat.lower() == 'true':
            args.ip_fabric_snat = True
        else:
            args.ip_fabric_snat = False
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    # Validate input argumnents.
    validate_mandatory_args(args)
    return args
def parse_args(args_str):
    """Parse VNC API-server options from an argument string and config files.

    Precedence (lowest to highest): hard-coded defaults, config-file
    sections ([DEFAULTS]/[KEYSTONE]/[QUOTA]/[CASSANDRA]/sandesh), then
    command-line arguments. Side effects while reading the config file:
    may set gen.resource_xsd.ExternalEncoding and populate
    vnc_quota.QuotaHelper.default_quota.

    :param args_str: whitespace-separated argument string (split(), so
        values containing spaces are not supported).
    :returns: (args_obj, remaining_argv) — the parsed Namespace plus the
        CLI arguments that were not recognized by this parser.
    """
    args_obj = None
    # Source any specified config/ini file
    # Turn off help, so we print all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'cassandra_server_list': "127.0.0.1:9160",
        'collectors': None,
        'http_server_port': '8084',
        'http_server_ip': _WEB_HOST,
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': None,
        'aaa_mode': None,
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'rabbit_health_check_interval': '120.0',  # in seconds
        'cluster_id': '',
        'max_requests': 1024,
        'paginate_count': 256,
        'region_name': 'RegionOne',
        'stale_lock_seconds': '5',  # lock but no resource past this => stale
        'cloud_admin_role': cfgm_common.CLOUD_ADMIN_ROLE,
        'global_read_only_role': cfgm_common.GLOBAL_READ_ONLY_ROLE,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'object_cache_entries': '10000',  # max number of objects cached for read
        'object_cache_exclude_types': '',  # csv of object types to *not* cache
        'debug_object_cache_types': '',  # csv of object types to debug cache
        'db_engine': 'cassandra',
        'max_request_size': 1024000,
        'amqp_timeout': 660,
        'config_api_ssl_enable': False,
        'config_api_ssl_keyfile': '',
        'config_api_ssl_certfile': '',
        'config_api_ssl_ca_cert': '',
        'tcp_keepalive_enable': True,
        'tcp_keepalive_idle_time': 7200,
        'tcp_keepalive_interval': 75,
        'tcp_keepalive_probes': 9,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    # keystone options
    ksopts = {
        'signing_dir': '/var/lib/contrail/keystone-signing',
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'admin_user_domain_name': None,
        'identity_uri': None,
        'project_domain_name': None,
        'insecure': True,
        'cafile': '',
        'certfile': '',
        'keyfile': '',
        'auth_type': 'password',
        'auth_url': '',
        'default_domain_id': 'default',
    }
    # cassandra options
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None
    }
    # sandesh options
    sandeshopts = SandeshConfig.get_default_options()
    config = None
    # parse_known_args below overwrites args; keep the original file list
    # so it can be restored on the result at the end.
    saved_conf_file = args.conf_file
    if args.conf_file:
        # allow_no_value + admin_token default lets the QUOTA section use
        # bare keys.
        config = SafeConfigParser({'admin_token': None},
                                  allow_no_value=True)
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            if 'multi_tenancy' in config.options('DEFAULTS'):
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS',
                                              'default_encoding')
                # Side effect: set process-wide XML encoding.
                gen.resource_xsd.ExternalEncoding = default_encoding
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'QUOTA' in config.sections():
            # Side effect: load per-type quota limits; non-integer values
            # are silently skipped.
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = \
                            int(v)
                except ValueError:
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)
    # Override with CLI options
    # Don't surpress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--cassandra_use_ssl", action="store_true",
        help="Enable TLS for cassandra connection")
    parser.add_argument(
        "--cassandra_ca_certs",
        help="Cassandra CA certs")
    parser.add_argument(
        "--redis_server_ip",
        help="IP address of redis server")
    parser.add_argument(
        "--redis_server_port",
        help="Port of redis server")
    parser.add_argument(
        "--auth", choices=['keystone', 'noauth', 'no-auth'],
        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument(
        "--wipe_config", action="store_true",
        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument(
        "--collectors",
        help="List of VNC collectors in ip:port format",
        nargs="+")
    parser.add_argument(
        "--http_server_port",
        help="Port of Introspect HTTP server")
    parser.add_argument(
        "--http_server_ip",
        help="IP address of Introspect HTTP server, default %s"
             % (_WEB_HOST))
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument(
        "--log_file",
        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument(
        "--aaa_mode", choices=AAA_MODE_VALID_VALUES,
        help="AAA mode")
    parser.add_argument(
        "--worker_id",
        help="Worker Id")
    parser.add_argument(
        "--zk_server_ip",
        help="Ip address:port of zookeeper server")
    parser.add_argument(
        "--rabbit_server",
        help="Rabbitmq server address")
    parser.add_argument(
        "--rabbit_port",
        help="Rabbitmq server port")
    parser.add_argument(
        "--rabbit_user",
        help="Username for rabbit")
    parser.add_argument(
        "--rabbit_vhost",
        help="vhost for rabbit")
    parser.add_argument(
        "--rabbit_password",
        help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--rabbit_health_check_interval",
        help="Interval seconds between consumer heartbeats to rabbitmq")
    parser.add_argument(
        "--cluster_id",
        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument(
        "--paginate_count", type=int,
        help="Default number of items when pagination is requested")
    parser.add_argument("--cassandra_user",
                        help="Cassandra user name")
    parser.add_argument("--cassandra_password",
                        help="Cassandra password")
    parser.add_argument("--stale_lock_seconds",
                        help="Time after which lock without resource is "
                             "stale, default 60")
    parser.add_argument(
        "--cloud_admin_role",
        help="Role name of cloud administrator")
    parser.add_argument(
        "--global_read_only_role",
        help="Role name of user with Read-Only access to all objects")
    parser.add_argument("--object_cache_entries",
                        help="Maximum number of objects cached for read, "
                             "default 10000")
    parser.add_argument("--object_cache_exclude_types",
                        help="Comma separated values of object types to "
                             "not cache")
    parser.add_argument(
        "--debug_object_cache_types",
        help="Comma separated values of object types to debug trace "
             "between the cache and the DB")
    parser.add_argument("--db_engine",
                        help="Database engine to use, default cassandra")
    parser.add_argument("--max_request_size", type=int,
                        help="Maximum size of bottle requests served by "
                             "api server")
    parser.add_argument("--amqp_timeout",
                        help="Timeout for amqp request")
    SandeshConfig.add_parser_arguments(parser)
    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.conf_file = args.conf_file
    args_obj.config_sections = config
    # Normalize whitespace-separated server/collector strings into lists.
    if isinstance(args_obj.cassandra_server_list, string_types):
        args_obj.cassandra_server_list =\
            args_obj.cassandra_server_list.split()
    if isinstance(args_obj.collectors, string_types):
        args_obj.collectors = args_obj.collectors.split()
    args_obj.sandesh_config = SandeshConfig.from_parser_arguments(args_obj)
    # Config files yield strings; coerce to real booleans.
    args_obj.cassandra_use_ssl = \
        (str(args_obj.cassandra_use_ssl).lower() == 'true')
    args_obj.config_api_ssl_enable = \
        (str(args_obj.config_api_ssl_enable).lower() == 'true')
    # convert log_local to a boolean
    if not isinstance(args_obj.log_local, bool):
        args_obj.log_local = bool(literal_eval(args_obj.log_local))
    args_obj.conf_file = saved_conf_file
    return args_obj, remaining_argv
def parse_args(args_str):
    '''
    Parse svc-monitor options: config-file values first, then CLI overrides.

    Eg. python svc_monitor.py --rabbit_server localhost
        --rabbit_port 5672 --rabbit_user guest --rabbit_password guest
        --cassandra_server_list 10.1.2.3:9160
        --api_server_ip 10.1.2.3 --api_server_port 8082
        --api_server_use_ssl False
        --zk_server_ip 10.1.2.3 --zk_server_port 2181
        --collectors 127.0.0.1:8086 --http_server_port 8090
        --log_local --log_level SYS_DEBUG --log_category test
        --log_file <stdout>
        --trace_file /var/log/contrail/svc-monitor.err
        --use_syslog --syslog_facility LOG_USER
        --cluster_id <testbed-name> --check_service_interval 60
        [--region_name <name>] [--reset_config]
    '''
    # Source any specified config/ini file.
    # Turn off help, so we show all options in response to -h.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Hard-coded fallbacks; overridden by [DEFAULTS] in the conf file,
    # which in turn is overridden by explicit CLI flags.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': False,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'http_server_port': '8088',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'check_service_interval': '60',
        'nova_endpoint_type': 'internalURL',
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    # Per-section defaults, merged into `defaults` below.
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v2.0',
        'auth_insecure': True,
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'admin'
    }
    schedops = {
        'si_netns_scheduler_driver':
            'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_list': '127.0.0.1:8081',
        'availability_zone': None,
        'netns_availability_zone': None,
        'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE,
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }
    sandeshopts = SandeshConfig.get_default_options()

    saved_conf_file = args.conf_file
    config = ConfigParser.SafeConfigParser()
    if args.conf_file:
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        # SECURITY options are honoured only when use_certs is enabled.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)

    # Override with CLI options.
    # Don't suppress add_help here so it will handle -h.
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip", help="IP address of API server")
    parser.add_argument("--api_server_port", help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                             "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--aaa_mode",
                        choices=cfgm_common.AAA_MODE_VALID_VALUES,
                        help="AAA mode")
    parser.add_argument("--admin_user", help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name",
                        help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--check_service_interval",
                        help="Check service interval")
    SandeshConfig.add_parser_arguments(parser)
    args = parser.parse_args(remaining_argv)
    # Fix: the second parse leaves args.conf_file as None (the -c flags were
    # consumed by conf_parser), so restore it like the sibling parsers do.
    # args._conf_file is kept as well for backward compatibility.
    args.conf_file = saved_conf_file
    args._conf_file = saved_conf_file
    args.config_sections = config
    # Values coming from the ini file are plain strings; normalize to lists.
    if isinstance(args.cassandra_server_list, str):
        args.cassandra_server_list = args.cassandra_server_list.split()
    if isinstance(args.collectors, str):
        args.collectors = args.collectors.split()
    # Treat the literal string "none" (any case) as an unset value.
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    return args
def parse_args(args_str):
    '''
    Parse device-manager options: ini-file values, then CLI overrides.

    Eg. python dm_server.py --rabbit_server localhost --rabbit_port 5672
        --cassandra_server_list 10.1.2.3:9160
        --api_server_ip 10.1.2.3 --api_server_use_ssl False
        --analytics_server_ip 10.1.2.3
        --zk_server_ip 10.1.2.3 --zk_server_port 2181
        --collectors 127.0.0.1:8086 --http_server_port 8090
        [--reset_config]
    '''
    # Source any specified config/ini file.
    # Turn off help, so we see all options in response to -h.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Start from hard-coded defaults, then layer sandesh defaults on top.
    defaults = default_options()
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    defaults.update(SandeshConfig.get_default_options())
    saved_conf_file = args.conf_file
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        # SECURITY options are merged only when use_certs is enabled.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                defaults.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            defaults.update(dict(config.items("KEYSTONE")))
        if 'CASSANDRA' in config.sections():
            defaults.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(defaults, config)

    # Override with CLI options.
    # Don't suppress add_help here so it will handle -h.
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.set_defaults(**defaults)
    add_parser_arguments(parser)
    args = parser.parse_args(remaining_argv)
    # Ini-file values are plain strings; normalize to lists.
    if isinstance(args.cassandra_server_list, str):
        args.cassandra_server_list = args.cassandra_server_list.split()
    if isinstance(args.collectors, str):
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    # Booleans may arrive as bools (defaults) or strings (ini); coerce.
    args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true')
    args.rabbit_use_ssl = (str(args.rabbit_use_ssl).lower() == 'true')
    # Restore -c value(s) consumed by the pre-parser.
    args.conf_file = saved_conf_file
    return args
def parse_args(args_str):
    """Parse schema-transformer options.

    Precedence (lowest to highest): hard-coded defaults, conf-file
    sections (DEFAULTS / SECURITY / KEYSTONE / CASSANDRA / sandesh),
    explicit CLI flags. Returns the argparse namespace with
    ``sandesh_config`` attached and list/bool values normalized.
    """
    # Turn off help, so we show all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())
    # Hard-coded fallbacks, overridable by the conf file and CLI.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'api_server_use_ssl': None,
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'http_server_port': '8087',
        'http_server_ip': '0.0.0.0',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/schema.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'bgpaas_port_start': 50000,
        'bgpaas_port_end': 50512,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'zk_timeout': 120,
        'logical_routers_enabled': True,
        'yield_in_evaluate': False,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    # Per-section default dicts; merged into `defaults` further below.
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {
        'admin_user': '******',
        'admin_password': '******',
        'admin_tenant_name': 'default-domain'
    }
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }
    sandeshopts = SandeshConfig.get_default_options()
    saved_conf_file = args.conf_file
    if args.conf_file:
        config = ConfigParser.SafeConfigParser()
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        # SECURITY section is honoured only when use_certs is set.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)
    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # print script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)

    def _bool(s):
        """Convert string to bool (in argparse context)."""
        if s.lower() not in ['true', 'false']:
            raise ValueError('Need bool; got %r' % s)
        return {'true': True, 'false': False}[s.lower()]

    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip", help="IP address of API server")
    parser.add_argument("--api_server_port", help="Port of API server")
    parser.add_argument("--api_server_use_ssl",
                        help="Use SSL to connect with API server")
    parser.add_argument("--zk_server_ip",
                        help="IP address:port of zookeeper server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--http_server_ip", help="IP of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file",
                        help="Filename for the error "
                             "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user", help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user", help="Cassandra user name")
    parser.add_argument("--cassandra_password", help="Cassandra password")
    parser.add_argument("--rabbit_server", help="Rabbitmq server address")
    parser.add_argument("--rabbit_port", help="Rabbitmq server port")
    parser.add_argument("--rabbit_user", help="Username for rabbit")
    parser.add_argument("--rabbit_vhost", help="vhost for rabbit")
    parser.add_argument("--rabbit_password", help="password for rabbit")
    parser.add_argument("--rabbit_ha_mode", action='store_true',
                        help="True if the rabbitmq cluster is "
                             "mirroring all queue")
    parser.add_argument("--bgpaas_port_start", type=int,
                        help="Start port for bgp-as-a-service proxy")
    parser.add_argument("--bgpaas_port_end", type=int,
                        help="End port for bgp-as-a-service proxy")
    parser.add_argument("--zk_timeout", type=int,
                        help="Timeout for ZookeeperClient")
    parser.add_argument("--yield_in_evaluate", type=_bool,
                        help="Yield for other greenlets during evaluate")
    parser.add_argument("--logical_routers_enabled", type=_bool,
                        help="Enabled logical routers")
    parser.add_argument("--cassandra_use_ssl", action="store_true",
                        help="Enable TLS for cassandra communication")
    parser.add_argument("--cassandra_ca_certs", help="Cassandra CA certs")
    SandeshConfig.add_parser_arguments(parser)
    args = parser.parse_args(remaining_argv)
    # Restore -c value(s) consumed by the pre-parser.
    args.conf_file = saved_conf_file
    # Ini-file values are plain strings; normalize to lists.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    # NOTE(review): --cassandra_use_ssl is store_true, so this coercion
    # matters only when the value came from the ini file as a string.
    args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true')
    return args
def main(args_str=' '.join(sys.argv[1:])):
    """Entry point for contrail-nodemgr.

    Parses --nodetype plus the node-type-specific conf file/CLI options,
    builds the matching *EventManager and runs its event loops forever.

    NOTE: the default for args_str is evaluated once at import time
    (standard Python default-argument behavior); callers that want
    current argv must pass it explicitly.
    """
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype", default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except SystemExit:
        # argparse signals bad arguments via SystemExit; show usage.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        usage()
    default = {
        'rules': '',
        'collectors': [],
        'hostip': '127.0.0.1',
        'minimum_diskgb': 256,
        'contrail_databases': 'config analytics',
        'cassandra_repair_interval': 24,
        'cassandra_repair_logdir': '/var/log/contrail/',
    }
    default.update(SandeshConfig.get_default_options(['DEFAULTS']))
    sandesh_opts = SandeshConfig.get_default_options()

    # Map node type to its conf file.
    node_type = args.nodetype
    if (node_type == 'contrail-analytics'):
        config_file = '/etc/contrail/contrail-analytics-nodemgr.conf'
    elif (node_type == 'contrail-config'):
        config_file = '/etc/contrail/contrail-config-nodemgr.conf'
    elif (node_type == 'contrail-control'):
        config_file = '/etc/contrail/contrail-control-nodemgr.conf'
    elif (node_type == 'contrail-vrouter'):
        config_file = '/etc/contrail/contrail-vrouter-nodemgr.conf'
    elif (node_type == 'contrail-database'):
        config_file = '/etc/contrail/contrail-database-nodemgr.conf'
    else:
        # Fix: original message lacked spaces ("Node typeXis incorrect").
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return
    if not os.path.exists(config_file):
        sys.stderr.write("config file " + config_file +
                         " is not present" + "\n")
        return

    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError:
            # server_list is optional; keep the default.
            pass
    SandeshConfig.update_options(sandesh_opts, config)

    parser = argparse.ArgumentParser(
        parents=[node_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    SandeshConfig.add_parser_arguments(parser, add_dscp=True)
    if (node_type == 'contrail-database'):
        # Database nodes take extra cassandra-maintenance options.
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--contrail_databases",
                            nargs='+',
                            help='Contrail databases on this node' +
                                 'in format: config analytics')
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except SystemExit:
        usage()
    rule_file = _args.rules
    collector_addr = _args.collectors
    sys.stderr.write("Collector address: " + str(collector_addr) + "\n")

    # randomize collector list
    _args.chksum = ""
    if _args.collectors:
        # Fix: md5 needs bytes on python3 (matches the .encode() usage
        # elsewhere in this file).
        _args.chksum = \
            hashlib.md5(("".join(_args.collectors)).encode()).hexdigest()
        _args.random_collectors = random.sample(_args.collectors,
                                                len(_args.collectors))
        _args.collectors = _args.random_collectors
        collector_addr = _args.collectors
    sys.stderr.write("Random Collector address: " +
                     str(collector_addr) + "\n")
    sandesh_config = SandeshConfig.from_parser_arguments(_args)
    # done parsing arguments

    # Build the event manager for this node type.
    prog = None
    if (node_type == 'contrail-analytics'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_analytics_files/" + \
                "contrail-analytics.rules"
        unit_names = [
            'contrail-collector.service',
            'contrail-analytics-api.service',
            'contrail-snmp-collector.service',
            'contrail-query-engine.service',
            'contrail-alarm-gen.service',
            'contrail-topology.service',
            'contrail-analytics-nodemgr.service',
        ]
        prog = AnalyticsEventManager(rule_file, unit_names,
                                     collector_addr, sandesh_config)
    elif (node_type == 'contrail-config'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_config_files/" + \
                "contrail-config.rules"
        unit_names = [
            'contrail-api.service',
            'contrail-schema.service',
            'contrail-svc-monitor.service',
            'contrail-device-manager.service',
            'contrail-config-nodemgr.service',
            'ifmap.service',
        ]
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = ConfigEventManager(rule_file, unit_names,
                                  collector_addr, sandesh_config,
                                  cassandra_repair_interval,
                                  cassandra_repair_logdir)
    elif (node_type == 'contrail-control'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_control_files/" + \
                "contrail-control.rules"
        unit_names = [
            'contrail-control.service',
            'contrail-dns.service',
            'contrail-named.service',
            'contrail-control-nodemgr.service',
        ]
        prog = ControlEventManager(rule_file, unit_names,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-vrouter'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_vrouter_files/" + \
                "contrail-vrouter.rules"
        unit_names = [
            'contrail-vrouter-agent.service',
            'contrail-vrouter-nodemgr.service',
        ]
        prog = VrouterEventManager(rule_file, unit_names,
                                   collector_addr, sandesh_config)
    elif (node_type == 'contrail-database'):
        if not rule_file:
            rule_file = "/etc/contrail/supervisord_database_files/" + \
                "contrail-database.rules"
        unit_names = [
            'contrail-database.service',
            'kafka.service',
            'contrail-database-nodemgr.service',
        ]
        hostip = _args.hostip
        minimum_diskgb = _args.minimum_diskgb
        contrail_databases = _args.contrail_databases
        cassandra_repair_interval = _args.cassandra_repair_interval
        cassandra_repair_logdir = _args.cassandra_repair_logdir
        prog = DatabaseEventManager(rule_file, unit_names,
                                    collector_addr, sandesh_config,
                                    hostip, minimum_diskgb,
                                    contrail_databases,
                                    cassandra_repair_interval,
                                    cassandra_repair_logdir)
    else:
        sys.stderr.write("Node type " + str(node_type) +
                         " is incorrect" + "\n")
        return

    prog.process()
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    # @sighup: reconfigure the collector list.
    gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    gevent.joinall([
        gevent.spawn(prog.runforever),
        # Fix: pass the callable and its arguments to spawn instead of
        # calling run_periodically() inline, which blocked the main
        # greenlet during argument evaluation and spawned its (never
        # reached) return value.
        gevent.spawn(prog.run_periodically, prog.do_periodic_events, 60)
    ])
def parse_args(args_str):
    '''Build the device-manager argument namespace.

    Eg. python dm_server.py --rabbit_server localhost --rabbit_port 5672
        --cassandra_server_list 10.1.2.3:9160
        --api_server_ip 10.1.2.3 --api_server_use_ssl False
        --analytics_server_ip 10.1.2.3
        --zk_server_ip 10.1.2.3 --zk_server_port 2181
        --collectors 127.0.0.1:8086 --http_server_port 8090
        [--reset_config]
    '''
    # Pre-parser: only understands -c/--conf_file, help disabled so the
    # real parser below can answer -h with the full option set.
    pre_parser = argparse.ArgumentParser(add_help=False)
    pre_parser.add_argument("-c", "--conf_file", action='append',
                            help="Specify config file", metavar="FILE")
    pre_args, remaining_argv = pre_parser.parse_known_args(args_str.split())

    # Layer the option defaults: built-ins, then sandesh defaults.
    option_defaults = default_options()
    option_defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    option_defaults.update(SandeshConfig.get_default_options())

    conf_file_arg = pre_args.conf_file
    if conf_file_arg:
        ini = ConfigParser.SafeConfigParser()
        ini.read(conf_file_arg)
        option_defaults.update(dict(ini.items("DEFAULTS")))
        # SECURITY values apply only when use_certs is turned on.
        if ('SECURITY' in ini.sections()
                and 'use_certs' in ini.options('SECURITY')
                and ini.getboolean('SECURITY', 'use_certs')):
            option_defaults.update(dict(ini.items("SECURITY")))
        for section in ('KEYSTONE', 'CASSANDRA'):
            if section in ini.sections():
                option_defaults.update(dict(ini.items(section)))
        SandeshConfig.update_options(option_defaults, ini)

    # Full parser: inherits -c, prints the module doc on --help.
    cli_parser = argparse.ArgumentParser(
        parents=[pre_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    cli_parser.set_defaults(**option_defaults)
    add_parser_arguments(cli_parser)
    parsed = cli_parser.parse_args(remaining_argv)

    # Ini-sourced values arrive as whitespace-separated strings.
    for attr in ('cassandra_server_list', 'collectors'):
        value = getattr(parsed, attr)
        if type(value) is str:
            setattr(parsed, attr, value.split())

    parsed.sandesh_config = SandeshConfig.from_parser_arguments(parsed)

    # Coerce SSL switches: bool defaults or 'true'/'false' ini strings.
    for attr in ('cassandra_use_ssl', 'rabbit_use_ssl'):
        setattr(parsed, attr, str(getattr(parsed, attr)).lower() == 'true')

    # The pre-parser consumed -c, so restore it on the result.
    parsed.conf_file = conf_file_arg
    return parsed
def __init__(self, config, type_info, rule_file, sandesh_instance,
             update_process_list=False):
    """Initialize the node-manager event manager.

    Wires sandesh generator/logging from `config`, registers process
    event handlers and picks a process-info backend: systemd (pydbus)
    when the host is systemd based, otherwise supervisor (requires
    running as a supervisor event listener). Exits the process if
    neither backend is usable.

    :param config: parsed argparse namespace (collectors, log options).
    :param type_info: per-node-type descriptor (module name, unit names,
        sandesh packages, ...).
    :param rule_file: path of the event-processing rules file.
    :param sandesh_instance: sandesh instance to initialize and log via.
    :param update_process_list: forwarded to the process-info manager.
    """
    self.config = config
    self.type_info = type_info
    # Supervisor protocol speaks over stdin/stdout; keep handles.
    self.stdin = sys.stdin
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    self.rule_file = rule_file
    self.rules_data = {'Rules': []}
    # Core-file accounting limits (total / old / new cores kept).
    self.max_cores = 4
    self.max_old_cores = 3
    self.max_new_cores = 1
    self.all_core_file_list = []
    self.core_dir_modified_time = 0
    self.tick_count = 0
    # Bitmask of failing subsystems; prev initialized to a different
    # value so the first status evaluation always publishes.
    self.fail_status_bits = 0
    self.prev_fail_status_bits = 1
    self.instance_id = INSTANCE_ID_DEFAULT
    self.collector_addr = self.config.collectors
    self.sandesh_instance = sandesh_instance
    self.curr_build_info = None
    self.new_build_info = None
    self.last_cpu = None
    self.last_time = 0
    self.installed_package_version = None
    # Route supervisor sandesh requests to this instance's handler.
    SupervisorEventsReq.handle_request = \
        self.sandesh_supervisor_handle_request
    event_handlers = {}
    event_handlers['PROCESS_STATE'] = self.event_process_state
    event_handlers['PROCESS_COMMUNICATION'] = \
        self.event_process_communication
    event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_process
    ConnectionState.init(
        self.sandesh_instance, socket.gethostname(),
        self.type_info._module_name, self.instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.type_info._object_table)
    self.sandesh_instance.init_generator(
        self.type_info._module_name, socket.gethostname(),
        self.type_info._node_type_name, self.instance_id,
        self.collector_addr, self.type_info._module_name,
        ServiceHttpPortMap[self.type_info._module_name],
        ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
        config=SandeshConfig.from_parser_arguments(self.config))
    self.sandesh_instance.set_logging_params(
        enable_local_log=self.config.log_local,
        category=self.config.log_category,
        level=self.config.log_level,
        file=self.config.log_file,
        enable_syslog=self.config.use_syslog,
        syslog_facility=self.config.syslog_facility)
    self.logger = self.sandesh_instance.logger()
    if is_systemd_based():
        if not pydbus_present:
            self.msg_log('Node manager cannot run without pydbus',
                         SandeshLevel.SYS_ERR)
            exit(-1)
        # In docker, systemd notifications via sd_notify do not
        # work, hence we will poll the process status
        self.process_info_manager = SystemdProcessInfoManager(
            self.type_info._unit_names, event_handlers,
            update_process_list, is_running_in_docker())
    else:
        if not 'SUPERVISOR_SERVER_URL' in os.environ:
            self.msg_log(
                'Node manager must be run as a supervisor event listener',
                SandeshLevel.SYS_ERR)
            exit(-1)
        self.process_info_manager = SupervisorProcessInfoManager(
            self.stdin, self.stdout,
            self.type_info._supervisor_serverurl,
            event_handlers, update_process_list)
    # Seed process state and announce initial status per process group.
    self.add_current_process()
    for group in self.process_state_db:
        self.send_init_info(group)
    self.third_party_process_dict = self.type_info._third_party_processes
def __init__(self, config, type_info, rule_file, sandesh_instance,
             update_process_list=False):
    """Initialize the node-manager event manager (supervisor/systemd).

    Sets up sandesh generation and logging from `config`, registers
    the process event handlers and selects the process-info backend:
    systemd via pydbus when the host is systemd based, otherwise the
    supervisor event-listener protocol. Exits if the required backend
    support is missing.

    :param config: parsed argparse namespace (collectors, log options).
    :param type_info: per-node-type descriptor (module name, unit names,
        sandesh packages, ...).
    :param rule_file: path of the event-processing rules file.
    :param sandesh_instance: sandesh instance to initialize and log via.
    :param update_process_list: forwarded to the process-info manager.
    """
    self.config = config
    self.type_info = type_info
    # Supervisor listener protocol runs over stdin/stdout.
    self.stdin = sys.stdin
    self.stdout = sys.stdout
    self.stderr = sys.stderr
    self.rule_file = rule_file
    self.rules_data = {'Rules':[]}
    # Core-file accounting limits.
    self.max_cores = 4
    self.max_old_cores = 3
    self.max_new_cores = 1
    self.all_core_file_list = []
    self.core_dir_modified_time = 0
    self.tick_count = 0
    # prev differs from current so the first evaluation publishes.
    self.fail_status_bits = 0
    self.prev_fail_status_bits = 1
    self.instance_id = INSTANCE_ID_DEFAULT
    self.collector_addr = self.config.collectors
    self.sandesh_instance = sandesh_instance
    self.curr_build_info = None
    self.new_build_info = None
    self.last_cpu = None
    self.last_time = 0
    self.installed_package_version = None
    # Route supervisor sandesh requests to this instance's handler.
    SupervisorEventsReq.handle_request = \
        self.sandesh_supervisor_handle_request
    event_handlers = {}
    event_handlers['PROCESS_STATE'] = self.event_process_state
    event_handlers['PROCESS_COMMUNICATION'] = \
        self.event_process_communication
    event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_process
    ConnectionState.init(self.sandesh_instance, socket.gethostname(),
        self.type_info._module_name, self.instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.type_info._object_table)
    self.sandesh_instance.init_generator(
        self.type_info._module_name, socket.gethostname(),
        self.type_info._node_type_name, self.instance_id,
        self.collector_addr, self.type_info._module_name,
        ServiceHttpPortMap[self.type_info._module_name],
        ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
        config=SandeshConfig.from_parser_arguments(self.config))
    self.sandesh_instance.set_logging_params(
        enable_local_log=self.config.log_local,
        category=self.config.log_category,
        level=self.config.log_level,
        file=self.config.log_file,
        enable_syslog=self.config.use_syslog,
        syslog_facility=self.config.syslog_facility)
    self.logger = self.sandesh_instance.logger()
    if is_systemd_based():
        if not pydbus_present:
            self.msg_log('Node manager cannot run without pydbus',
                         SandeshLevel.SYS_ERR)
            exit(-1)
        # In docker, systemd notifications via sd_notify do not
        # work, hence we will poll the process status
        self.process_info_manager = SystemdProcessInfoManager(
            self.type_info._unit_names, event_handlers,
            update_process_list, is_running_in_docker())
    else:
        if not 'SUPERVISOR_SERVER_URL' in os.environ:
            self.msg_log('Node manager must be run as a supervisor '
                         'event listener',
                         SandeshLevel.SYS_ERR)
            exit(-1)
        self.process_info_manager = SupervisorProcessInfoManager(
            self.stdin, self.stdout,
            self.type_info._supervisor_serverurl,
            event_handlers, update_process_list)
    # Seed process state and publish initial info per process group.
    self.add_current_process()
    for group in self.process_state_db:
        self.send_init_info(group)
    self.third_party_process_dict = self.type_info._third_party_processes
def __init__(self, config, type_info, unit_names,
             update_process_list=False):
    """Initialize the container-era node-manager event manager.

    Resolves the node hostname, checksums and shuffles the collector
    list, initializes sandesh/ConnectionState, registers event handlers
    and a SIGHUP handler (collector reconfig), then requires a
    docker/kubepod process backend — exits otherwise.

    :param config: parsed argparse namespace (hostip, hostname,
        collectors, corefile_path, log options).
    :param type_info: per-node-type descriptor (module name/type,
        sandesh packages, object table).
    :param unit_names: service unit names to monitor.
    :param update_process_list: forwarded to the process-info manager.
    """
    self.config = config
    self.type_info = type_info
    # Core-file accounting limits.
    self.max_cores = 4
    self.max_old_cores = 3
    self.max_new_cores = 1
    self.all_core_file_list = []
    self.tick_count = 0
    # prev differs from current so the first evaluation publishes.
    self.fail_status_bits = 0
    self.prev_fail_status_bits = 1
    self.instance_id = INSTANCE_ID_DEFAULT
    self.sandesh_instance = sandesh_global
    self.curr_build_info = None
    self.new_build_info = None
    self.hostip = self.config.hostip
    # Explicit hostname wins; otherwise reverse-resolve from host IP.
    self.hostname = socket.getfqdn(self.hostip) \
        if self.config.hostname is None else self.config.hostname
    self.collector_chksum = 0
    self.random_collectors = list()
    if config.collectors:
        # Sort before hashing so the checksum is order-independent,
        # then randomize the connect order for load spreading.
        config.collectors.sort()
        self.collector_chksum = \
            hashlib.md5(("".join(config.collectors)).encode()).hexdigest()
        self.random_collectors = random.sample(config.collectors,
                                               len(config.collectors))
    ConnectionState.init(
        self.sandesh_instance, self.hostname,
        self.type_info._module_name, self.instance_id,
        staticmethod(ConnectionState.get_conn_state_cb),
        NodeStatusUVE, NodeStatus, self.type_info._object_table,
        self._get_process_state_cb)
    self.sandesh_instance.init_generator(
        self.type_info._module_name, self.hostname,
        self.type_info._node_type_name, self.instance_id,
        self.random_collectors, self.type_info._module_name,
        ServiceHttpPortMap[self.type_info._module_name],
        ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
        config=SandeshConfig.from_parser_arguments(self.config))
    self.sandesh_instance.set_logging_params(
        enable_local_log=self.config.log_local,
        category=self.config.log_category,
        level=self.config.log_level,
        file=self.config.log_file,
        enable_syslog=self.config.use_syslog,
        syslog_facility=self.config.syslog_facility)
    self.logger = self.sandesh_instance.logger()
    event_handlers = {}
    event_handlers['PROCESS_STATE'] = self._event_process_state
    event_handlers['PROCESS_COMMUNICATION'] = \
        self._event_process_communication
    event_handlers['PROCESS_LIST_UPDATE'] = self._update_current_processes
    # SIGHUP triggers collector-list reconfiguration.
    gevent.signal(signal.SIGHUP, self.nodemgr_sighup_handler)
    self.system_data = LinuxSysData(self.msg_log,
                                    self.config.corefile_path)
    # Only the docker/kubepod backend is supported here; bail out when
    # neither environment is detected.
    if DockerProcessInfoManager and (utils.is_running_in_docker()
                                     or utils.is_running_in_kubepod()):
        self.process_info_manager = DockerProcessInfoManager(
            type_info._module_type, unit_names, event_handlers,
            update_process_list)
    else:
        self.msg_log('Node manager could not detect process manager',
                     SandeshLevel.SYS_ERR)
        exit(-1)
    # Seed process state and publish initial info per process group.
    self.process_state_db = self._get_current_processes()
    for group in self.process_state_db:
        self._send_init_info(group)
def parse_args(args_str):
    """Parse api-server options from an optional config file plus CLI.

    A first pass extracts ``-c/--conf_file``; values from the named
    file(s) become argparse defaults, which the remaining command-line
    arguments may then override.

    :param args_str: whitespace-separated command line (a single string)
    :return: (args_obj, remaining_argv) from ``parse_known_args``
    """
    args_obj = None
    # First pass: pull out only the config-file option.  Help is
    # disabled so -h is handled by the full parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument(
        "-c", "--conf_file", action='append',
        help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    defaults = {
        'reset_config': False,
        'wipe_config': False,
        'listen_ip_addr': _WEB_HOST,
        'listen_port': _WEB_PORT,
        'admin_port': _ADMIN_PORT,
        'cassandra_server_list': "127.0.0.1:9160",
        'collectors': None,
        'http_server_port': '8084',
        'http_server_ip': _WEB_HOST,
        'log_local': True,
        'log_level': SandeshLevel.SYS_NOTICE,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/vnc_openstack.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'logging_conf': '',
        'logger_class': None,
        'multi_tenancy': None,
        'aaa_mode': None,
        'zk_server_ip': '127.0.0.1:2181',
        'worker_id': '0',
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_max_pending_updates': '4096',
        'rabbit_health_check_interval': '120.0',  # in seconds
        'cluster_id': '',
        'max_requests': 1024,
        'paginate_count': 256,
        'region_name': 'RegionOne',
        'stale_lock_seconds': '5',  # lock but no resource past this => stale
        'cloud_admin_role': cfgm_common.CLOUD_ADMIN_ROLE,
        'global_read_only_role': cfgm_common.GLOBAL_READ_ONLY_ROLE,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'object_cache_entries': '10000',  # max number of objects cached for read
        'object_cache_exclude_types': '',  # csv of object types to *not* cache
        'debug_object_cache_types': '',  # csv of object types to debug cache
        'db_engine': 'cassandra',
        'max_request_size': 1024000,
        'fabric_ansible_dir': '/opt/contrail/fabric_ansible_playbooks',
        'enable_fabric_ansible': True,
        'amqp_timeout': 660,
        'config_api_ssl_enable': False,
        'config_api_ssl_keyfile': '',
        'config_api_ssl_certfile': '',
        'config_api_ssl_ca_cert': '',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    # keystone options
    ksopts = {
        'signing_dir': '/var/lib/contrail/keystone-signing',
        'auth_host': '127.0.0.1',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'admin_user': '',
        'admin_password': '',
        'admin_tenant_name': '',
        'admin_user_domain_name': None,
        'identity_uri': None,
        'project_domain_name': None,
        'insecure': True,
        'cafile': '',
        'certfile': '',
        'keyfile': '',
        'auth_type': 'password',
        'auth_url': '',
    }
    # cassandra options
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }
    # sandesh options
    sandeshopts = SandeshConfig.get_default_options()

    config = None
    saved_conf_file = args.conf_file
    if args.conf_file:
        config = ConfigParser.SafeConfigParser({'admin_token': None})
        config.read(args.conf_file)
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
            if 'multi_tenancy' in config.options('DEFAULTS'):
                # Boolean-typed: read again with getboolean.
                defaults['multi_tenancy'] = config.getboolean(
                    'DEFAULTS', 'multi_tenancy')
            if 'default_encoding' in config.options('DEFAULTS'):
                default_encoding = config.get('DEFAULTS', 'default_encoding')
                gen.resource_xsd.ExternalEncoding = default_encoding
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'QUOTA' in config.sections():
            for (k, v) in config.items("QUOTA"):
                try:
                    if str(k) != 'admin_token':
                        vnc_quota.QuotaHelper.default_quota[str(k)] = int(v)
                except ValueError:
                    # Non-integer quota values are silently skipped.
                    pass
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))
        SandeshConfig.update_options(sandeshopts, config)

    # Second pass: full parser, with help enabled, inheriting the
    # conf-file option and using the merged defaults.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(ksopts)
    defaults.update(cassandraopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--cassandra_use_ssl", action="store_true",
        help="Enable TLS for cassandra connection")
    parser.add_argument(
        "--cassandra_ca_certs",
        help="Cassandra CA certs")
    parser.add_argument(
        "--redis_server_ip",
        help="IP address of redis server")
    parser.add_argument(
        "--redis_server_port",
        help="Port of redis server")
    parser.add_argument(
        "--auth", choices=['keystone', 'no-auth'],
        help="Type of authentication for user-requests")
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument(
        "--wipe_config", action="store_true",
        help="Warning! Destroy previous configuration")
    parser.add_argument(
        "--listen_ip_addr",
        help="IP address to provide service on, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--listen_port",
        help="Port to provide service on, default %s" % (_WEB_PORT))
    parser.add_argument(
        "--admin_port",
        help="Port with local auth for admin access, default %s"
             % (_ADMIN_PORT))
    parser.add_argument(
        "--collectors",
        help="List of VNC collectors in ip:port format",
        nargs="+")
    parser.add_argument(
        "--http_server_port",
        help="Port of Introspect HTTP server")
    parser.add_argument(
        "--http_server_ip",
        help="IP address of Introspect HTTP server, default %s" % (_WEB_HOST))
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument(
        "--log_file",
        help="Filename for the logs to be written to")
    parser.add_argument(
        "--trace_file",
        help="Filename for the errors backtraces to be written to")
    parser.add_argument(
        "--use_syslog", action="store_true",
        help="Use syslog for logging")
    parser.add_argument(
        "--syslog_facility",
        help="Syslog facility to receive log lines")
    parser.add_argument(
        "--multi_tenancy", action="store_true",
        help="Validate resource permissions (implies token validation)")
    parser.add_argument(
        "--aaa_mode", choices=AAA_MODE_VALID_VALUES,
        help="AAA mode")
    parser.add_argument(
        "--worker_id",
        help="Worker Id")
    parser.add_argument(
        "--zk_server_ip",
        help="Ip address:port of zookeeper server")
    parser.add_argument(
        "--rabbit_server",
        help="Rabbitmq server address")
    parser.add_argument(
        "--rabbit_port",
        help="Rabbitmq server port")
    parser.add_argument(
        "--rabbit_user",
        help="Username for rabbit")
    parser.add_argument(
        "--rabbit_vhost",
        help="vhost for rabbit")
    parser.add_argument(
        "--rabbit_password",
        help="password for rabbit")
    parser.add_argument(
        "--rabbit_ha_mode",
        help="True if the rabbitmq cluster is mirroring all queue")
    parser.add_argument(
        "--rabbit_max_pending_updates",
        help="Max updates before stateful changes disallowed")
    parser.add_argument(
        "--rabbit_health_check_interval",
        help="Interval seconds between consumer heartbeats to rabbitmq")
    parser.add_argument(
        "--cluster_id",
        help="Used for database keyspace separation")
    parser.add_argument(
        "--max_requests", type=int,
        help="Maximum number of concurrent requests served by api server")
    parser.add_argument(
        "--paginate_count", type=int,
        help="Default number of items when pagination is requested")
    parser.add_argument(
        "--cassandra_user",
        help="Cassandra user name")
    parser.add_argument(
        "--cassandra_password",
        help="Cassandra password")
    parser.add_argument(
        "--stale_lock_seconds",
        help="Time after which lock without resource is stale, default 60")
    parser.add_argument(
        "--cloud_admin_role",
        help="Role name of cloud administrator")
    parser.add_argument(
        "--global_read_only_role",
        help="Role name of user with Read-Only access to all objects")
    parser.add_argument(
        "--object_cache_entries",
        help="Maximum number of objects cached for read, default 10000")
    parser.add_argument(
        "--object_cache_exclude_types",
        help="Comma separated values of object types to not cache")
    parser.add_argument(
        "--debug_object_cache_types",
        help="Comma separated values of object types to debug trace between "
             "the cache and the DB")
    parser.add_argument(
        "--db_engine",
        help="Database engine to use, default cassandra")
    parser.add_argument(
        "--max_request_size", type=int,
        help="Maximum size of bottle requests served by api server")
    parser.add_argument(
        "--fabric_ansible_dir",
        help="Fabric ansible directory path")
    parser.add_argument(
        "--enable_fabric_ansible",
        help="Enables/disables execute-job api and the initial"
             "data loading for the job manager.")
    parser.add_argument(
        "--amqp_timeout",
        help="Timeout for amqp request")
    SandeshConfig.add_parser_arguments(parser)

    args_obj, remaining_argv = parser.parse_known_args(remaining_argv)
    args_obj.conf_file = args.conf_file
    args_obj.config_sections = config
    # Config-file values come through as plain strings; normalize lists.
    if type(args_obj.cassandra_server_list) is str:
        args_obj.cassandra_server_list = \
            args_obj.cassandra_server_list.split()
    if type(args_obj.collectors) is str:
        args_obj.collectors = args_obj.collectors.split()
    args_obj.sandesh_config = SandeshConfig.from_parser_arguments(args_obj)
    args_obj.cassandra_use_ssl = \
        (str(args_obj.cassandra_use_ssl).lower() == 'true')
    args_obj.conf_file = saved_conf_file
    return args_obj, remaining_argv
def parse_logger_args(self, config_args):
    """Build sandesh-logger arguments for the job manager.

    :param config_args: JSON string of bootstrap options (conf file
        path, collectors, host_ip, zk_server_ip, cluster_id)
    :return: argparse Namespace; also stored on ``self.args``
    """
    config_args = json.loads(config_args)
    parser = argparse.ArgumentParser()

    defaults = {
        'collectors': None,
        'http_server_port': '-1',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'max_job_task': self.TASK_POOL_SIZE,
        'playbook_timeout': self.PLAYBOOK_TIMEOUT_VALUE,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {}
    sandeshopts = SandeshConfig.get_default_options()

    if config_args.get("fabric_ansible_conf_file"):
        config = ConfigParser.SafeConfigParser()
        config.read(config_args['fabric_ansible_conf_file'])
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
        # TLS options apply only when explicitly switched on.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        SandeshConfig.update_options(sandeshopts, config)

    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_level",
                        help="Severity level for local logging"
                             " of sandesh messages")
    parser.add_argument("--log_category",
                        help="Category filter for local logging "
                             "of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user",
                        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument("--logging_conf",
                        help=("Optional logging configuration "
                              "file, default: None"))
    parser.add_argument("--logger_class",
                        help=("Optional external logger class,"
                              " default: None"))
    parser.add_argument("--max_job_task",
                        help=("Maximum job tasks that can execute in "
                              "parallel in a parent job, default: %s"
                              % self.TASK_POOL_SIZE))
    parser.add_argument("--playbook_timeout",
                        help=("Playbook execution timeout value,"
                              " default: 60 min"))
    SandeshConfig.add_parser_arguments(parser)

    # Parse with no CLI args: defaults (already merged) decide everything.
    args = parser.parse_args(list())
    args.conf_file = config_args.get('fabric_ansible_conf_file')
    args.collectors = config_args.get('collectors')
    args.host_ip = config_args.get('host_ip')
    args.zk_server_ip = config_args.get('zk_server_ip')
    args.cluster_id = config_args.get('cluster_id')
    if isinstance(args.collectors, str):
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    self.args = args
    return args
def parse_args(args_str=None):
    """Parse kube-manager options from optional config file(s) plus CLI.

    :param args_str: list of CLI tokens; defaults to ``sys.argv[1:]``
    :return: argparse Namespace with normalized list options and an
        attached ``sandesh_config``; mandatory args are validated.
    """
    if not args_str:
        args_str = sys.argv[1:]
    # First pass: extract only the config-file option(s).
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("-c", "--config-file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str)

    defaults = {
        'http_server_port': HttpPortKubeManager,
        'worker_id': '0',
        'collectors': None,
        'logger_class': None,
        'logging_conf': '',
        'log_local': False,
        'log_category': '',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'kube_object_cache': 'True',
        'disc_server_ip': 'localhost',
        'disc_server_port': DiscoveryServerPort,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_file': '/var/log/contrail/contrail-kube-manager.log',
        'api_service_link_local': 'True',
        'orchestrator': 'kubernetes',
        'token': '',
        'nested_mode': '0',
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    vnc_opts = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': '******',
        'rabbit_password': '******',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'rabbit_use_ssl': False,
        'kombu_ssl_version': '',
        'kombu_ssl_keyfile': '',
        'kombu_ssl_certfile': '',
        'kombu_ssl_ca_certs': '',
        'cassandra_user': None,
        'cassandra_password': None,
        'cassandra_server_list': '',
        'cluster_id': '',
        'vnc_endpoint_ip': '[127.0.0.1]',
        'vnc_endpoint_port': ApiServerPort,
        'admin_user': '',
        'admin_password': '',
        'admin_tenant': '',
        'public_fip_pool': '{}',
        'zk_server_ip': '127.0.0.1:2181',
    }
    k8s_opts = {
        'kubernetes_api_server': 'localhost',
        'kubernetes_api_port': '8080',
        'kubernetes_api_secure_port': 8443,
        'kubernetes_service_name': 'kubernetes',
        # Subnets are mandatory; validated at the end.
        MandatoryArgs.SERVICE_SUBNET.value['arg_str']: None,
        MandatoryArgs.POD_SUBNET.value['arg_str']: None,
        'kubernetes_cluster_owner': 'k8s',
        'kubernetes_cluster_domain': 'default-domain',
        'cluster_name': None,
        'cluster_project': "{}",
        'cluster_network': None,
        'cluster_pod_network': None,
        'cluster_service_network': None,
        'ip_fabric_forwarding': False,
    }
    sandesh_opts = SandeshConfig.get_default_options()
    auth_opts = {
        'auth_token_url': None,
        'auth_user': '******',
        'auth_password': '******',
        'auth_tenant': 'admin',
    }

    config = ConfigParser.SafeConfigParser()
    if args.config_file:
        config.read(args.config_file)
        if 'VNC' in config.sections():
            vnc_opts.update(dict(config.items("VNC")))
        if 'KUBERNETES' in config.sections():
            k8s_opts.update(dict(config.items("KUBERNETES")))
        SandeshConfig.update_options(sandesh_opts, config)
        if 'AUTH' in config.sections():
            auth_opts.update(dict(config.items("AUTH")))
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))

    # Second pass: full parser with the merged defaults.
    parser = argparse.ArgumentParser(
        parents=[conf_parser],
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(vnc_opts)
    defaults.update(k8s_opts)
    defaults.update(sandesh_opts)
    defaults.update(auth_opts)
    parser.set_defaults(**defaults)
    args = parser.parse_args(args_str)

    # Config-file values come through as strings; normalize list options.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.pod_subnets) is str:
        args.pod_subnets = args.pod_subnets.split()
    if type(args.service_subnets) is str:
        args.service_subnets = args.service_subnets.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)

    # Validate input arguments.
    validate_mandatory_args(args)
    return args
def parse_logger_args(self, config_args):
    """Build sandesh-logger arguments for the job manager.

    :param config_args: JSON string of bootstrap options (conf file
        path, collectors)
    :return: argparse Namespace; also stored on ``self.args``
    """
    config_args = json.loads(config_args)
    parser = argparse.ArgumentParser()

    defaults = {
        'collectors': None,
        'http_server_port': '-1',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'max_job_task': self.TASK_POOL_SIZE,
        'playbook_timeout': self.PLAYBOOK_TIMEOUT_VALUE,
    }
    defaults.update(SandeshConfig.get_default_options(['DEFAULTS']))
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
    }
    ksopts = {}
    sandeshopts = SandeshConfig.get_default_options()

    if config_args.get("fabric_ansible_conf_file"):
        config = ConfigParser.SafeConfigParser()
        config.read(config_args['fabric_ansible_conf_file'])
        # FIX: guard the DEFAULTS lookup.  The unguarded
        # config.items("DEFAULTS") raised NoSectionError for conf files
        # without a [DEFAULTS] section; the sibling parse_logger_args in
        # this file already guards it this way.
        if 'DEFAULTS' in config.sections():
            defaults.update(dict(config.items("DEFAULTS")))
        # TLS options apply only when explicitly switched on.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        SandeshConfig.update_options(sandeshopts, config)

    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(sandeshopts)
    parser.set_defaults(**defaults)
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_level",
                        help="Severity level for local logging"
                             " of sandesh messages")
    parser.add_argument("--log_category",
                        help="Category filter for local logging "
                             "of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user",
                        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument("--logging_conf",
                        help=("Optional logging configuration "
                              "file, default: None"))
    parser.add_argument("--logger_class",
                        help=("Optional external logger class,"
                              " default: None"))
    parser.add_argument("--max_job_task",
                        help=("Maximum job tasks that can execute in "
                              "parallel in a parent job, default: %s"
                              % self.TASK_POOL_SIZE))
    parser.add_argument("--playbook_timeout",
                        help=("Playbook execution timeout value,"
                              " default: 60 min"))
    SandeshConfig.add_parser_arguments(parser)

    # Parse with no CLI args: defaults (already merged) decide everything.
    args = parser.parse_args(list())
    args.conf_file = config_args.get('fabric_ansible_conf_file')
    args.collectors = config_args.get('collectors')
    if isinstance(args.collectors, str):
        args.collectors = args.collectors.split()
    args.sandesh_config = SandeshConfig.from_parser_arguments(args)
    self.args = args
    return args
def __init__(self, config, type_info, unit_names, update_process_list=False):
    """Initialize the node-manager event loop state (Windows/Linux).

    Sets up sandesh generator/logging, connection-state reporting and a
    platform-specific process-info manager, then seeds the process-state
    DB and sends the initial info UVEs.

    Exits the process with -1 when no supported process manager is
    detected on Linux.
    """
    self.config = config
    self.type_info = type_info
    # Core-file bookkeeping limits.
    self.max_cores = 4
    self.max_old_cores = 3
    self.max_new_cores = 1
    self.all_core_file_list = []
    self.tick_count = 0
    self.fail_status_bits = 0
    # Differs from fail_status_bits so the first comparison reports a change.
    self.prev_fail_status_bits = 1
    self.instance_id = INSTANCE_ID_DEFAULT
    self.sandesh_instance = sandesh_global
    self.curr_build_info = None
    self.new_build_info = None
    self.hostip = self.config.hostip
    self.hostname = socket.getfqdn(self.hostip)
    self.collector_chksum = 0
    self.random_collectors = list()
    if config.collectors:
        config.collectors.sort()
        # BUG FIX: hashlib.md5() requires a bytes-like object; passing
        # the joined str raises TypeError on Python 3.  Encode first,
        # matching the sibling nodemgr __init__ in this file.
        self.collector_chksum = hashlib.md5(
            ("".join(config.collectors)).encode()).hexdigest()
        # Randomize the connection order to spread load across collectors.
        self.random_collectors = random.sample(
            config.collectors, len(config.collectors))
    ConnectionState.init(
        self.sandesh_instance, self.hostname,
        self.type_info._module_name, self.instance_id,
        staticmethod(ConnectionState.get_conn_state_cb),
        NodeStatusUVE, NodeStatus, self.type_info._object_table,
        self._get_process_state_cb)
    self.sandesh_instance.init_generator(
        self.type_info._module_name, self.hostname,
        self.type_info._node_type_name, self.instance_id,
        self.random_collectors, self.type_info._module_name,
        ServiceHttpPortMap[self.type_info._module_name],
        ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
        config=SandeshConfig.from_parser_arguments(self.config))
    self.sandesh_instance.set_logging_params(
        enable_local_log=self.config.log_local,
        category=self.config.log_category,
        level=self.config.log_level,
        file=self.config.log_file,
        enable_syslog=self.config.use_syslog,
        syslog_facility=self.config.syslog_facility)
    self.logger = self.sandesh_instance.logger()

    # Callbacks invoked by the process-info manager on supervisor events.
    event_handlers = {}
    event_handlers['PROCESS_STATE'] = self._event_process_state
    event_handlers['PROCESS_COMMUNICATION'] = \
        self._event_process_communication
    event_handlers['PROCESS_LIST_UPDATE'] = self._update_current_processes

    if platform.system() == 'Windows':
        self.system_data = WindowsSysData()
        self.process_info_manager = WindowsProcessInfoManager(event_handlers)
    else:
        # SIGHUP triggers a config re-read (collector list changes).
        gevent.signal(signal.SIGHUP, self.nodemgr_sighup_handler)
        self.system_data = LinuxSysData(self.msg_log,
                                        self.config.corefile_path)
        if DockerProcessInfoManager and (utils.is_running_in_docker()
                                         or utils.is_running_in_kubepod()):
            self.process_info_manager = DockerProcessInfoManager(
                type_info._module_type, unit_names, event_handlers,
                update_process_list)
        else:
            self.msg_log('Node manager could not detect process manager',
                         SandeshLevel.SYS_ERR)
            exit(-1)

    self.process_state_db = self._get_current_processes()
    for group in self.process_state_db:
        self._send_init_info(group)