def sandesh_init(self, http_server_port=None):
    """Initialize the sandesh generator, logging and connection state.

    :param http_server_port: optional introspect port; falls back to
        the configured ``http_server_port`` argument when not given.
    """
    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value (process-wide setting).
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Let subclasses install custom introspect request handlers before
    # the generator is initialized.
    self.redefine_sandesh_handles()
    if not http_server_port:
        http_server_port = self._args.http_server_port
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self._instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # connection state init: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
def sandesh_init(self, http_server_port=None):
    """Initialize the sandesh generator and logging for this service.

    :param http_server_port: optional introspect port override; the
        configured ``http_server_port`` argument is used when not given.
    """
    self._sandesh = Sandesh()
    # Allow subclasses to install custom introspect handlers first.
    self.redefine_sandesh_handles()
    if not http_server_port:
        http_server_port = self._args.http_server_port
    # Record the per-instance sandesh id used for the generator below.
    self._instance_id = self.sandesh_instance_id
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self.sandesh_instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(http_server_port),
        ['cfgm_common'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    VncGreenlet.register_sandesh_handler()
    # Trace buffer for message-bus notification traces.
    self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                      size=1000)
def sandesh_init(self):
    """Initialize Sandesh for the mesos manager: generator, logging
    params and collector connection state."""
    self._sandesh = Sandesh()
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self.module['name'], self.module['hostname'],
        self.module['node_type_name'], self.module['instance_id'],
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector: publish NodeStatus UVEs for this process.
    ConnectionState.init(self._sandesh, self.module['hostname'],
                         self.module['name'],
                         self.module['instance_id'],
                         staticmethod(ConnectionState.get_conn_state_cb),
                         NodeStatusUVE, NodeStatus, self.module['table'])
def setUp(self):
    """Bring up a local Sandesh generator for the connection-info tests."""
    # Show complete diffs when an assertion fails.
    self.maxDiff = None
    self._sandesh = Sandesh()
    introspect_port = test_utils.get_free_port()
    self._sandesh.init_generator(
        'conn_info_test', socket.gethostname(), 'Test', 'Test',
        None, 'conn_info_test_ctxt', introspect_port)
def __init__(self, vnc_lib, args=None): self._args = args # api server and cassandra init self._vnc_lib = vnc_lib self._cassandra_init() # dictionary for nova self._nova = {} #initialize discovery client self._disc = None if self._args.disc_server_ip and self._args.disc_server_port: self._disc = client.DiscoveryClient(self._args.disc_server_ip, self._args.disc_server_port, client_type='Service Monitor') #sandesh init self._sandesh = Sandesh() sandesh.ServiceInstanceList.handle_request =\ self.sandesh_si_handle_request module = Module.SVC_MONITOR module_name = ModuleNames[module] node_type = Module2NodeType[module] node_type_name = NodeTypeNames[node_type] instance_id = INSTANCE_ID_DEFAULT self._sandesh.init_generator(module_name, socket.gethostname(), node_type_name, instance_id, self._args.collectors, 'svc_monitor_context', int(self._args.http_server_port), ['cfgm_common', 'sandesh'], self._disc) self._sandesh.set_logging_params(enable_local_log=self._args.log_local, category=self._args.log_category, level=self._args.log_level, file=self._args.log_file) #create default analyzer template self._create_default_template('analyzer-template', 'analyzer', 'analyzer') self._create_default_template('nat-template', 'nat-service', 'firewall', 'in-network-nat') #create cpu_info object to send periodic updates sysinfo_req = False cpu_info = vnc_cpu_info.CpuInfo(module_name, instance_id, sysinfo_req, self._sandesh, 60) self._cpu_info = cpu_info # logging self._err_file = '/var/log/contrail/svc-monitor.err' self._tmp_file = '/var/log/contrail/svc-monitor.tmp' self._svc_err_logger = logging.getLogger('SvcErrLogger') self._svc_err_logger.setLevel(logging.ERROR) handler = logging.handlers.RotatingFileHandler(self._err_file, maxBytes=64 * 1024, backupCount=2) self._svc_err_logger.addHandler(handler)
def setUp(self):
    """Start a local Sandesh generator with trace printing enabled."""
    self._sandesh = Sandesh()
    introspect_port = get_free_port()
    self._sandesh.init_generator(
        'sandesh_trace_test', socket.gethostname(), 'Test', 'Test',
        None, 'trace_test_ctxt', introspect_port)
    self._sandesh.set_logging_params(
        level=SandeshLevel.SYS_DEBUG,
        enable_local_log=True,
        enable_trace_print=True)
    # Accumulates trace messages read back during the tests.
    self._trace_read_list = []
def setUp(self):
    """Create a collector-less Sandesh instance for the session tests."""
    self.sandesh_instance = Sandesh()
    # http_port of -1: presumably disables the introspect HTTP
    # server — confirm against the pysandesh version in use.
    self.sandesh_instance.init_generator(
        'SandeshSessionTest', 'localhost', 'UT', 0, None,
        'context', -1, connect_to_collector=False)
def setUp(self):
    """Set up a Sandesh generator whose client is mocked out."""
    # Show complete diffs when an assertion fails.
    self.maxDiff = None
    self.sandesh = Sandesh()
    self.sandesh.init_generator(
        'sandesh_uve_alarm_test', socket.gethostname(), 'Test', '0',
        None, '', get_free_port(),
        connect_to_collector=False)
    # mock the sandesh client object so no collector connection is made
    self.sandesh._client = mock.MagicMock(spec=SandeshClient)
def sandesh_init(self, http_server_port=None):
    """Initialize the sandesh generator and logging (idempotent).

    Returns immediately if a sandesh instance was already provided or
    initialized, so an externally supplied instance is never replaced.

    :param http_server_port: optional introspect port override.
    """
    if self._sandesh is not None:
        return
    self._sandesh = Sandesh()
    # Allow subclasses to install custom introspect handlers first.
    self.redefine_sandesh_handles()
    if not http_server_port:
        http_server_port = self._args.http_server_port
    # Record the per-instance sandesh id used for the generator below.
    self._instance_id = self.sandesh_instance_id
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self.sandesh_instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(http_server_port),
        ['cfgm_common'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    VncGreenlet.register_sandesh_handler()
    # Trace buffer for message-bus notification traces.
    self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                      size=1000)
def sandesh_init(self):
    """Initialize sandesh: rate limit, generator (with discovery),
    logging params and connection state."""
    self._sandesh = Sandesh()
    # Reset the sandesh send rate limit value (process-wide setting).
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Let subclasses install custom introspect handlers first.
    self.redefine_sandesh_handles()
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self._instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(self._args.http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        self.discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # connection state init: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
def sandesh_init(self, http_server_port=None):
    """Initialize sandesh: generator, logging params, connection state
    and the VncGreenlet introspect handler.

    :param http_server_port: optional introspect port override.
    """
    self._sandesh = Sandesh()
    # Allow subclasses to install custom introspect handlers first.
    self.redefine_sandesh_handles()
    if not http_server_port:
        http_server_port = self._args.http_server_port
    self._sandesh.init_generator(
        self._module_name, self._hostname,
        self._node_type_name, self._instance_id,
        self._args.random_collectors,
        '%s_context' % self.context,
        int(http_server_port),
        ['cfgm_common', '%s.sandesh' % self.module_pkg],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # connection state init: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self._hostname, self._module_name,
        self._instance_id,
        staticmethod(ConnectionState.get_conn_state_cb),
        NodeStatusUVE, NodeStatus, self.table)
    VncGreenlet.register_sandesh_handler()
def sandesh_init(self):
    """Initialize Sandesh for the mesos manager: generator, logging
    params and collector connection state."""
    self._sandesh = Sandesh()
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self.module['name'], self.module['hostname'],
        self.module['node_type_name'], self.module['instance_id'],
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector: publish NodeStatus UVEs for this process.
    ConnectionState.init(self._sandesh, self.module['hostname'],
                         self.module['name'],
                         self.module['instance_id'],
                         staticmethod(ConnectionState.get_process_state_cb),
                         NodeStatusUVE, NodeStatus, self.module['table'])
def sandesh_init(self):
    """Initialize Sandesh for the kube manager: custom handlers,
    generator, logging params and collector connection state."""
    self._sandesh = Sandesh()
    # Register custom sandesh request handlers.
    self._redefine_sandesh_handles()
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self._module["name"], self._module["hostname"],
        self._module["node_type_name"], self._module["instance_id"],
        self._args.random_collectors, 'kube_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'kube_manager'],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self._module["hostname"],
        self._module["name"], self._module["instance_id"],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self._module["table"])
def setUp(self):
    """Bring up a local Sandesh generator for the connection-info tests."""
    # Show complete diffs when an assertion fails.
    self.maxDiff = None
    self._sandesh = Sandesh()
    free_port = test_utils.get_free_port()
    self._sandesh.init_generator("conn_info_test", socket.gethostname(),
                                 "Test", "Test", None,
                                 "conn_info_test_ctxt", free_port)
def __init__(self, vnc_lib, args=None): self._args = args # api server and cassandra init self._vnc_lib = vnc_lib self._cassandra_init() # dictionary for nova self._nova = {} # initialize discovery client self._disc = None if self._args.disc_server_ip and self._args.disc_server_port: self._disc = client.DiscoveryClient( self._args.disc_server_ip, self._args.disc_server_port, client_type="Service Monitor" ) # sandesh init self._sandesh = Sandesh() sandesh.ServiceInstanceList.handle_request = self.sandesh_si_handle_request module = Module.SVC_MONITOR module_name = ModuleNames[module] node_type = Module2NodeType[module] node_type_name = NodeTypeNames[node_type] instance_id = INSTANCE_ID_DEFAULT self._sandesh.init_generator( module_name, socket.gethostname(), node_type_name, instance_id, self._args.collectors, "svc_monitor_context", int(self._args.http_server_port), ["cfgm_common", "sandesh"], self._disc, ) self._sandesh.set_logging_params( enable_local_log=self._args.log_local, category=self._args.log_category, level=self._args.log_level, file=self._args.log_file, ) # create default analyzer template self._create_default_template("analyzer-template", "analyzer", "analyzer") self._create_default_template("nat-template", "nat-service", "firewall", "in-network-nat") # create cpu_info object to send periodic updates sysinfo_req = False cpu_info = vnc_cpu_info.CpuInfo(module_name, instance_id, sysinfo_req, self._sandesh, 60) self._cpu_info = cpu_info # logging self._err_file = "/var/log/contrail/svc-monitor.err" self._tmp_file = "/var/log/contrail/svc-monitor.tmp" self._svc_err_logger = logging.getLogger("SvcErrLogger") self._svc_err_logger.setLevel(logging.ERROR) handler = logging.handlers.RotatingFileHandler(self._err_file, maxBytes=64 * 1024, backupCount=2) self._svc_err_logger.addHandler(handler)
class SandeshSessionTest(unittest.TestCase):
    """Tests for SandeshSession send-queue watermark configuration."""

    def setUp(self):
        # Collector-less sandesh instance; introspect port -1.
        self.sandesh_instance = Sandesh()
        self.sandesh_instance.init_generator(
            'SandeshSessionTest', 'localhost', 'UT', 0, None,
            'context', -1, connect_to_collector=False)
    # end setUp

    def tearDown(self):
        pass
    # end tearDown

    def verify_watermarks(self, session, expected_wms, actual_wms):
        """Check that the session's watermarks match the expected
        (size, send_level, is_high) tuples, and that each callback sets
        the session send level accordingly."""
        expected_wms.sort()
        actual_wms.sort()
        print('== verify watermarks ==')
        print(expected_wms)
        self.assertEqual(len(expected_wms), len(actual_wms))
        for i in range(len(expected_wms)):
            self.assertEqual(expected_wms[i][0], actual_wms[i].size)
            # Invoke the watermark callback and verify that the
            # send_level is set correctly
            actual_wms[i].callback(expected_wms[i][0])
            self.assertEqual(expected_wms[i][1], session.send_level())
    # end verify_watermarks

    def test_send_queue_watermarks(self):
        session = SandeshSession(self.sandesh_instance, None, None, None)
        wms = SandeshSendQueue._SENDQ_WATERMARKS
        # verify high watermarks are set properly in sandesh send queue
        high_wms = [wm for wm in wms if wm[2] is True]
        sendq_hwms = session.send_queue().high_watermarks()
        self.verify_watermarks(session, high_wms, sendq_hwms)
        # verify low watermarks are set properly in sandesh send queue
        low_wms = [wm for wm in wms if wm[2] is False]
        sendq_lwms = session.send_queue().low_watermarks()
        self.verify_watermarks(session, low_wms, sendq_lwms)
def _sandesh_init(self, discovery):
    """Build, configure and return a Sandesh generator instance.

    :param discovery: discovery client handle passed to init_generator
    :returns: the configured Sandesh instance
    """
    instance = Sandesh()
    # Route ServiceInstanceList introspect requests to our handler.
    sandesh.ServiceInstanceList.handle_request = \
        self.sandesh_si_handle_request
    instance.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, self._args.collectors,
        'svc_monitor_context', int(self._args.http_server_port),
        ['cfgm_common', 'svc_monitor.sandesh'], discovery)
    instance.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    return instance
def __init__(self, vnc_lib, args=None): self._args = args # api server and cassandra init self._vnc_lib = vnc_lib self._cassandra_init() # dictionary for nova self._nova = {} #initialize discovery client self._disc = None if self._args.disc_server_ip and self._args.disc_server_port: self._disc = client.DiscoveryClient(self._args.disc_server_ip, self._args.disc_server_port, client_type='Service Monitor') #sandesh init collectors = None if self._args.collector and self._args.collector_port: collectors = [(self._args.collector, int(self._args.collector_port))] self._sandesh = Sandesh() sandesh.ServiceInstanceList.handle_request =\ self.sandesh_si_handle_request self._sandesh.init_generator( ModuleNames[Module.SVC_MONITOR], socket.gethostname(), collectors, 'svc_monitor_context', int(self._args.http_server_port), ['cfgm_common', 'sandesh'], self._disc) self._sandesh.set_logging_params(enable_local_log=self._args.log_local, category=self._args.log_category, level=self._args.log_level, file=self._args.log_file) #create default analyzer template self._create_default_template('analyzer-template', 'analyzer', 'analyzer') self._create_default_template('nat-template', 'nat-service', 'firewall', 'in-network-nat') #create cpu_info object to send periodic updates sysinfo_req = False cpu_info = vnc_cpu_info.CpuInfo( Module.SVC_MONITOR, sysinfo_req, self._sandesh, 60) self._cpu_info = cpu_info # logging self._err_file = '/var/log/contrail/svc-monitor.err' self._tmp_file = '/var/log/contrail/svc-monitor.tmp' self._svc_err_logger = logging.getLogger('SvcErrLogger') self._svc_err_logger.setLevel(logging.ERROR) handler = logging.handlers.RotatingFileHandler( self._err_file, maxBytes=64*1024, backupCount=2) self._svc_err_logger.addHandler(handler)
def sandesh_init(self):
    """Initialize Sandesh for the kube manager: rate limit, custom
    handlers, generator (with discovery), logging params and collector
    connection state."""
    self._sandesh = Sandesh()
    # Reset sandesh send rate limit value (process-wide setting).
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Register custom sandesh request handlers.
    self._redefine_sandesh_handles()
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self._module["name"], self._module["hostname"],
        self._module["node_type_name"], self._module["instance_id"],
        self._args.collectors, 'kube_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'kube_manager'],
        self._module["discovery"],
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf,
        config=self._args.sandesh_config)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self._module["hostname"],
        self._module["name"], self._module["instance_id"],
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self._module["table"])
def _sandesh_init(self, discovery):
    """Create, configure and return a Sandesh generator instance.

    :param discovery: discovery client handle passed to init_generator
    :returns: the configured Sandesh instance
    """
    instance = Sandesh()
    # Route ServiceInstanceList introspect requests to our handler.
    sandesh.ServiceInstanceList.handle_request = \
        self.sandesh_si_handle_request
    instance.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, self._args.collectors,
        'svc_monitor_context', int(self._args.http_server_port),
        ['cfgm_common', 'svc_monitor.sandesh'], discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    instance.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    return instance
def sandesh_init(self):
    """Initialize Sandesh for the mesos manager (older variant with
    rate limit and discovery): generator, logging params and collector
    connection state."""
    self._sandesh = Sandesh()
    # Reset sandesh send rate limit value (process-wide setting).
    if self._args.sandesh_send_rate_limit is not None:
        SandeshSystem.set_sandesh_send_rate_limit(
            self._args.sandesh_send_rate_limit)
    # Initialize Sandesh generator.
    self._sandesh.init_generator(
        self.module.name, self.module.hostname,
        self.module.node_type_name, self.module.instance_id,
        self._args.collectors, 'mesos_manager_context',
        int(self._args.http_server_port),
        ['cfgm_common', 'mesos_manager.sandesh'],
        self.module.discovery,
        logger_class=self._args.logger_class,
        logger_config_file=self._args.logging_conf)
    # Set Sandesh logging params.
    self._sandesh.set_logging_params(
        enable_local_log=self._args.log_local,
        category=self._args.log_category,
        level=self._args.log_level,
        file=self._args.log_file,
        enable_syslog=self._args.use_syslog,
        syslog_facility=self._args.syslog_facility)
    # Connect to collector: publish NodeStatus UVEs for this process.
    ConnectionState.init(
        self._sandesh, self.module.hostname, self.module.name,
        self.module.instance_id,
        staticmethod(ConnectionState.get_process_state_cb),
        NodeStatusUVE, NodeStatus, self.module.table)
class JobLogger(ConfigServiceLogger):
    """Config-service logger specialized for the fabric-ansible job
    manager; supports reusing an externally supplied sandesh instance."""

    def __init__(self, args=None, http_server_port=None,
                 sandesh_instance_id=None, sandesh_instance=None):
        """ Initialize Job Logger.
            :param args: Config params passed to job manager
            :param http_server_port: Required for Sandesh logger
                initialization
            :param sandesh_instance_id: Uniquely identifies the logger
                instance
            :param sandesh_instance: Optional sandesh instance; when given,
                sandesh_init() below becomes a no-op and this instance is
                used as-is
        """
        self.sandesh_instance_id = sandesh_instance_id
        self._sandesh = sandesh_instance
        module = Module.FABRIC_ANSIBLE
        module_pkg = "job_manager"
        self.context = "job_manager"
        # Superclass __init__ invokes sandesh_init(http_server_port).
        super(JobLogger, self).__init__(module, module_pkg, args,
                                        http_server_port)

    def sandesh_init(self, http_server_port=None):
        """Initialize sandesh unless an instance was already supplied."""
        if self._sandesh is not None:
            return
        self._sandesh = Sandesh()
        # Allow subclasses to install custom introspect handlers first.
        self.redefine_sandesh_handles()
        if not http_server_port:
            http_server_port = self._args.http_server_port
        # Record the per-instance sandesh id used for the generator below.
        self._instance_id = self.sandesh_instance_id
        self._sandesh.init_generator(
            self._module_name, self._hostname,
            self._node_type_name, self.sandesh_instance_id,
            self._args.random_collectors,
            '%s_context' % self.context,
            int(http_server_port),
            ['cfgm_common'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)
        VncGreenlet.register_sandesh_handler()
        # Trace buffer for message-bus notification traces.
        self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                          size=1000)
def run_introspect(cfg, database, lock):
    """Start the vcenter-manager sandesh introspect service.

    :param cfg: parsed configuration; the 'sandesh' section is augmented
        in place with module identity and port information
    :param database: backing store exposed through the introspect handlers
    :param lock: lock guarding concurrent access to the database
    """
    sandesh_config = cfg['sandesh']
    # Collectors come in as a whitespace-separated string; shuffle so
    # generators spread their connections across collectors.
    sandesh_config['collectors'] = sandesh_config['collectors'].split()
    random.shuffle(sandesh_config['collectors'])
    sandesh_config.update({
        'id': Module.VCENTER_MANAGER,
        'hostname': socket.gethostname(),
        'table': 'ObjectContrailvCenterManagerNode',
        'instance_id': INSTANCE_ID_DEFAULT,
        'introspect_port': ServiceHttpPortMap['contrail-vcenter-manager'],
    })
    sandesh_config['name'] = ModuleNames[sandesh_config['id']]
    sandesh_config['node_type'] = Module2NodeType[sandesh_config['id']]
    sandesh_config['node_type_name'] = \
        NodeTypeNames[sandesh_config['node_type']]
    sandesh = Sandesh()
    # Bind the introspect request handlers to the database before the
    # generator starts serving.
    sandesh_handler = SandeshHandler(database, lock)
    sandesh_handler.bind_handlers()
    config = SandeshConfig(http_server_ip=sandesh_config['http_server_ip'])
    sandesh.init_generator(
        module='cvm',
        source=sandesh_config['hostname'],
        node_type=sandesh_config['node_type_name'],
        instance_id=sandesh_config['instance_id'],
        collectors=sandesh_config['collectors'],
        client_context='cvm_context',
        http_port=sandesh_config['introspect_port'],
        sandesh_req_uve_pkg_list=['cfgm_common', 'cvm'],
        config=config
    )
    sandesh.sandesh_logger().set_logger_params(
        logger=sandesh.logger(),
        enable_local_log=True,
        level=translate_logging_level(sandesh_config['logging_level']),
        file=sandesh_config['log_file'],
        enable_syslog=False,
        syslog_facility=None
    )
    # Publish NodeStatus UVEs for this process.
    ConnectionState.init(
        sandesh=sandesh,
        hostname=sandesh_config['hostname'],
        module_id=sandesh_config['name'],
        instance_id=sandesh_config['instance_id'],
        conn_status_cb=staticmethod(ConnectionState.get_conn_state_cb),
        uve_type_cls=NodeStatusUVE,
        uve_data_type_cls=NodeStatus,
        table=sandesh_config['table']
    )
class JobLogger(ConfigServiceLogger):
    """Config-service logger specialized for the fabric-ansible job
    manager, keyed by a caller-provided sandesh instance id."""

    def __init__(self, args=None, http_server_port=None,
                 sandesh_instance_id=None):
        """Initialize the job logger.

        :param args: config params passed to the job manager
        :param http_server_port: introspect port for sandesh init
        :param sandesh_instance_id: uniquely identifies this logger
            instance
        """
        self.sandesh_instance_id = sandesh_instance_id
        module = Module.FABRIC_ANSIBLE
        module_pkg = "job_manager"
        self.context = "job_manager"
        # Superclass __init__ invokes sandesh_init(http_server_port).
        super(JobLogger, self).__init__(module, module_pkg, args,
                                        http_server_port)

    def sandesh_init(self, http_server_port=None):
        """Initialize the sandesh generator and logging for the job
        manager; uses sandesh_instance_id as the generator instance id."""
        self._sandesh = Sandesh()
        # Allow subclasses to install custom introspect handlers first.
        self.redefine_sandesh_handles()
        if not http_server_port:
            http_server_port = self._args.http_server_port
        # Record the per-instance sandesh id used for the generator below.
        self._instance_id = self.sandesh_instance_id
        self._sandesh.init_generator(
            self._module_name, self._hostname,
            self._node_type_name, self.sandesh_instance_id,
            self._args.random_collectors,
            '%s_context' % self.context,
            int(http_server_port),
            ['cfgm_common', '%s.sandesh' % self.module_pkg],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)
        VncGreenlet.register_sandesh_handler()
        # Trace buffer for message-bus notification traces.
        self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                          size=1000)
class ConnInfoTest(unittest.TestCase):
    """Tests for ConnectionState update/delete and the aggregate
    process-state callback."""

    def setUp(self):
        # Show complete diffs when an assertion fails.
        self.maxDiff = None
        self._sandesh = Sandesh()
        http_port = test_utils.get_free_port()
        self._sandesh.init_generator('conn_info_test',
            socket.gethostname(), 'Test', 'Test', None,
            'conn_info_test_ctxt', http_port)
    #end setUp

    def _check_process_status_cb(self, vcinfos):
        """Status callback: assert the reported connection infos match
        the expected set, then report FUNCTIONAL."""
        self._expected_vcinfos.sort()
        vcinfos.sort()
        self.assertEqual(self._expected_vcinfos, vcinfos)
        return (ProcessState.FUNCTIONAL, '')
    #end _check_process_status_cb

    def _update_conn_info(self, name, status, description, vcinfos):
        """Append a ConnectionInfo built from the arguments to vcinfos."""
        cinfo = ConnectionInfo()
        cinfo.name = name
        cinfo.status = ConnectionStatusNames[status]
        cinfo.description = description
        cinfo.type = ConnectionTypeNames[ConnectionType.TEST]
        cinfo.server_addrs = ['127.0.0.1:0']
        vcinfos.append(cinfo)
    #end _update_conn_info

    def _update_conn_state(self, name, status, description, vcinfos):
        """Push an update through ConnectionState and remember the
        expected resulting connection infos."""
        self._expected_vcinfos = vcinfos
        ConnectionState.update(ConnectionType.TEST, name, status,
            ['127.0.0.1:0'], description)
    #end _update_conn_state

    def _delete_conn_info(self, name, vcinfos):
        """Return vcinfos with the named connection info removed."""
        return [cinfo for cinfo in vcinfos if cinfo.name != name]
    #end _delete_conn_info

    def _delete_conn_state(self, name, vcinfos):
        """Delete the named connection via ConnectionState and remember
        the expected remaining connection infos."""
        self._expected_vcinfos = vcinfos
        ConnectionState.delete(ConnectionType.TEST, name)
    #end _delete_conn_state

    def test_basic(self):
        ConnectionState.init(sandesh = self._sandesh,
            hostname = "TestHost", module_id = "TestModule",
            instance_id = "0",
            status_cb = self._check_process_status_cb,
            uve_type_cls = NodeStatusTestUVE,
            uve_data_type_cls = NodeStatusTest)
        vcinfos = []
        self._update_conn_info("Test1", ConnectionStatus.UP, "Test1 UP",
                               vcinfos)
        self._update_conn_state("Test1", ConnectionStatus.UP, "Test1 UP",
                                vcinfos)
        self._update_conn_info("Test2", ConnectionStatus.UP, "Test2 UP",
                               vcinfos)
        self._update_conn_state("Test2", ConnectionStatus.UP, "Test2 UP",
                                vcinfos)
        vcinfos = self._delete_conn_info("Test2", vcinfos)
        self._delete_conn_state("Test2", vcinfos)
    #end test_basic

    def test_callback(self):
        """One DOWN connection makes the process NON_FUNCTIONAL; the
        message lists every DOWN connection."""
        vcinfos = []
        self._update_conn_info("Test1", ConnectionStatus.UP, "Test1 UP",
                               vcinfos);
        (pstate, message) = ConnectionState.get_process_state_cb(vcinfos)
        self.assertEqual(ProcessState.FUNCTIONAL, pstate)
        self.assertEqual('', message)
        self._update_conn_info("Test2", ConnectionStatus.DOWN,
                               "Test2 DOWN", vcinfos);
        (pstate, message) = ConnectionState.get_process_state_cb(vcinfos)
        self.assertEqual(ProcessState.NON_FUNCTIONAL, pstate)
        self.assertEqual("Test:Test2[Test2 DOWN] connection down",
                         message);
        self._update_conn_info("Test3", ConnectionStatus.DOWN,
                               "Test3 DOWN", vcinfos);
        (pstate, message) = ConnectionState.get_process_state_cb(vcinfos);
        self.assertEqual(ProcessState.NON_FUNCTIONAL, pstate);
        self.assertEqual(
            "Test:Test2[Test2 DOWN], Test:Test3[Test3 DOWN] connection down",
            message);
class ConfigServiceLogger(object):
    """Base sandesh logger for contrail config services.

    Resolves module identity from the sandesh Module id, initializes the
    sandesh generator/introspect server, and exposes severity-level log
    helpers. Subclasses override redefine_sandesh_handles() to install
    custom introspect request handlers.
    """

    # Mapping from python logging levels to sandesh levels. The original
    # dict repeated logging.CRITICAL and logging.WARNING with different
    # values; in a dict literal the last duplicate wins, so only the
    # effective entries are kept here (behavior unchanged). Python logging
    # has no EMERG/ALERT/NOTICE levels, so SYS_EMERG and SYS_ALERT are
    # unreachable through this map.
    _LOGGER_LEVEL_TO_SANDESH_LEVEL = {
        logging.CRITICAL: SandeshLevel.SYS_CRIT,
        logging.ERROR: SandeshLevel.SYS_ERR,
        logging.WARNING: SandeshLevel.SYS_NOTICE,
        logging.INFO: SandeshLevel.SYS_INFO,
        logging.DEBUG: SandeshLevel.SYS_DEBUG,
    }

    def __init__(self, module, module_pkg, args=None, http_server_port=None):
        """Resolve module identity and initialize sandesh.

        :param module: sandesh Module id of the service
        :param module_pkg: python package used for sandesh request/UVE
            discovery and as the default introspect context
        :param args: parsed service arguments (collectors, log params, ...)
        :param http_server_port: optional introspect port override
        """
        self.module_pkg = module_pkg
        # A subclass may have set its own context before calling us.
        if not hasattr(self, 'context'):
            self.context = module_pkg
        self._args = args
        if 'host_ip' in args:
            host_ip = args.host_ip
        else:
            # Fall back to resolving the local FQDN when no host_ip given.
            host_ip = socket.gethostbyname(socket.getfqdn())
        node_type = Module2NodeType[module]
        self._module_name = ModuleNames[module]
        self._node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectConfigNode"
        self._instance_id = INSTANCE_ID_DEFAULT
        self._hostname = socket.getfqdn(host_ip)
        # sandesh init
        self.sandesh_init(http_server_port)

    def _get_sandesh_logger_level(self, sandesh_level):
        """Translate a python logging level to a SandeshLevel."""
        return self._LOGGER_LEVEL_TO_SANDESH_LEVEL[sandesh_level]

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        """Log via a sandesh log-message class when given, otherwise via
        the plain sandesh logger.

        :param log_msg: message text
        :param level: sandesh severity level
        :param fun: optional sandesh log class; instantiated and sent
            through this logger's sandesh instance
        """
        if fun:
            # Fixed: keyword was misspelled 'og_msg', which would raise
            # TypeError for log classes expecting 'log_msg' (the sibling
            # MesosManagerLogger.log uses log_msg=log_msg).
            log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)
        else:
            self._sandesh.logger().log(
                SandeshLogger.get_py_logger_level(level), log_msg)

    def emergency(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_EMERG, fun=log_fun)

    def alert(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_ALERT, fun=log_fun)

    def critical(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_CRIT, fun=log_fun)

    def error(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_ERR, fun=log_fun)

    def cgitb_error(self):
        """Log the current exception with a full cgitb text traceback."""
        string_buf = cStringIO.StringIO()
        cgitb_hook(file=string_buf, format="text")
        self.error(string_buf.getvalue())

    def warning(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_WARN, fun=log_fun)

    def notice(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_NOTICE, fun=log_fun)

    def info(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_INFO, fun=log_fun)

    def debug(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_DEBUG, fun=log_fun)

    def _utc_timestamp_usec(self):
        """Return the current UTC time as microseconds since the epoch."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        now = datetime.datetime.utcnow()
        delta = now - epoch
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10 ** 6)

    def redefine_sandesh_handles(self):
        """ Redefine sandesh handle requests for various object types. """
        pass

    def sandesh_init(self, http_server_port=None):
        """Initialize the sandesh generator, logging params, connection
        state and the VncGreenlet introspect handler.

        :param http_server_port: optional introspect port override.
        """
        self._sandesh = Sandesh()
        # Allow subclasses to install custom introspect handlers first.
        self.redefine_sandesh_handles()
        if not http_server_port:
            http_server_port = self._args.http_server_port
        self._sandesh.init_generator(
            self._module_name, self._hostname,
            self._node_type_name, self._instance_id,
            self._args.random_collectors,
            '%s_context' % self.context,
            int(http_server_port),
            ['cfgm_common', '%s.sandesh' % self.module_pkg],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)
        # connection state init: publish NodeStatus UVEs for this process.
        ConnectionState.init(
            self._sandesh, self._hostname, self._module_name,
            self._instance_id,
            staticmethod(ConnectionState.get_conn_state_cb),
            NodeStatusUVE, NodeStatus, self.table)
        VncGreenlet.register_sandesh_handler()

    def introspect_init(self):
        """Start the sandesh introspect HTTP server."""
        self._sandesh.run_introspect_server(int(self._args.http_server_port))

    def sandesh_reconfig_collectors(self, args):
        """Point the running generator at a new (shuffled) collector list."""
        self._sandesh.reconfig_collectors(args.random_collectors)
class MesosManagerLogger(object): def __init__(self, args=None): self._args = args # Initialize module parameters. self.module = {} self.module["id"] = Module.MESOS_MANAGER self.module["name"] = ModuleNames[self.module["id"]] self.module["node_type"] = Module2NodeType[self.module["id"]] self.module["node_type_name"] = NodeTypeNames[self.module["node_type"]] self.module["hostname"] = socket.gethostname() self.module["table"] = "ObjectConfigNode" if self._args.worker_id: self.module["instance_id"] = self._args.worker_id else: self.module["instance_id"] = INSTANCE_ID_DEFAULT # Init Sandesh. self.sandesh_init() def syslog(self, log_msg, level): # Log to syslog. self._sandesh.logger().log( SandeshLogger.get_py_logger_level(level), log_msg) def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None): # If a sandesh function is provided, use the function. # If not, revert to syslog. if fun: log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh) log.send(sandesh=self._sandesh) else: self.syslog(log_msg, level) # EMERGENCY. def emergency(self, log_msg, log_fun=None): log_level = SandeshLevel.SYS_EMERG logging_fun = log_fun if log_fun else sandesh.MesosManagerEmergencyLog # Log to syslog. self.syslog(log_msg, log_level) # Log using the desired logging function. self.log(log_msg, level=log_level, fun=logging_fun) # ALERT. def alert(self, log_msg, log_fun=None): log_level = SandeshLevel.SYS_ALERT logging_fun = log_fun if log_fun else sandesh.MesosManagerAlertLog # Log to syslog. self.syslog(log_msg, log_level) # Log using the desired logging function. self.log(log_msg, level=log_level, fun=logging_fun) # CRITICAL. def critical(self, log_msg, log_fun=None): log_level = SandeshLevel.SYS_CRIT logging_fun = log_fun if log_fun else sandesh.MesosManagerCriticalLog # Log to syslog. self.syslog(log_msg, log_level) # Log using the desired logging function. self.log(log_msg, level=log_level, fun=logging_fun) # CRITICAL. 
    def error(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_ERR
        logging_fun = log_fun if log_fun else sandesh.MesosManagerErrorLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)

    # WARNING.
    def warning(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_WARN
        logging_fun = log_fun if log_fun else sandesh.MesosManagerWarningLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)

    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_NOTICE
        logging_fun = log_fun if log_fun else sandesh.MesosManagerNoticeLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)

    # INFO.
    # NOTE(review): unlike the higher-severity helpers, info() and debug()
    # do not duplicate the message to syslog -- presumably deliberate to
    # limit syslog noise; confirm before "fixing".
    def info(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_INFO
        logging_fun = log_fun if log_fun else sandesh.MesosManagerInfoLog
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)

    # DEBUG.
    def debug(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_DEBUG
        logging_fun = log_fun if log_fun else sandesh.MesosManagerDebugLog
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)

    def sandesh_init(self):
        """ Init Sandesh """
        self._sandesh = Sandesh()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self.module['name'], self.module['hostname'],
            self.module['node_type_name'], self.module['instance_id'],
            self._args.collectors, 'mesos_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'mesos_manager.sandesh'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(
            self._sandesh, self.module['hostname'], self.module['name'],
            self.module['instance_id'],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.module['table'])
class SandeshTraceTest(unittest.TestCase):
    """Unit tests for the sandesh trace-buffer API: create/delete,
    enable/disable (per-buffer and global), bounded reads, circular
    overwrite, and per-read-context cursor semantics."""

    def setUp(self):
        # Fresh sandesh generator per test; each test creates its own
        # uniquely named trace buffer so state does not leak between tests.
        self._sandesh = Sandesh()
        http_port = get_free_port()
        self._sandesh.init_generator('sandesh_trace_test',
                                     socket.gethostname(), 'Test', 'Test',
                                     None, 'trace_test_ctxt', http_port)
        self._sandesh.set_logging_params(level=SandeshLevel.SYS_DEBUG,
                                         enable_local_log=True,
                                         enable_trace_print=True)
        # Accumulates messages delivered via sandesh_trace_read_handler.
        self._trace_read_list = []
    # end setUp

    def sandesh_trace_read_handler(self, trace_msg, more):
        # Read callback passed to trace_buffer_read(); collects messages.
        self._trace_read_list.append(trace_msg)
    # end sandesh_trace_read_handler

    def test_create_delete_trace_buffer(self):
        trace_buf_name = 'test_create_delete_trace_buffer'
        trace_buf_size = 5
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
        self.assertTrue(
            trace_buf_name in self._sandesh.trace_buffer_list_get())
        self.assertEqual(trace_buf_size,
                         self._sandesh.trace_buffer_size_get(trace_buf_name))
        # Read from empty trace buffer
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='test', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = []
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._sandesh.trace_buffer_delete(trace_buf_name)
        self.assertFalse(
            trace_buf_name in self._sandesh.trace_buffer_list_get())
        # Read deleted trace buffer
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='test', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = []
        self.assertEqual(exp_trace_list, self._trace_read_list)
    # end test_create_delete_trace_buffer

    def test_enable_disable_trace_buffer(self):
        trace_buf_name = 'test_enable_disable_trace_buffer'
        trace_buf_size = 5
        # Create trace buffer in disabled state
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size,
                                          False)
        tmsg1 = TraceTest(magicNo=1234, sandesh=self._sandesh)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        # Trace buffer should be empty
        exp_trace_list = []
        self.assertEqual(exp_trace_list, self._trace_read_list)
        # Enable trace buffer
        self._sandesh.trace_buffer_enable(trace_buf_name)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg2 = TraceTest(magicNo=3456, sandesh=self._sandesh)
        tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1, tmsg2]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Disable trace buffer
        self._sandesh.trace_buffer_disable(trace_buf_name)
        tmsg3 = TraceTest(magicNo=7890, sandesh=self._sandesh)
        tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read3', count=0,
            read_cb=self.sandesh_trace_read_handler)
        # tmsg3 must have been dropped while the buffer was disabled.
        exp_trace_list = [tmsg1, tmsg2]
        self.assertEqual(exp_trace_list, self._trace_read_list)
    # end test_enable_disable_trace_buffer

    def test_enable_disable_trace(self):
        trace_buf_name = 'test_enable_disable_trace'
        trace_buf_size = 3
        # Disable trace (globally, across all buffers)
        self._sandesh.trace_disable()
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
        tmsg1 = TraceTest(magicNo=1234, sandesh=self._sandesh)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        # Trace buffer should be empty
        exp_trace_list = []
        self.assertEqual(exp_trace_list, self._trace_read_list)
        # Enable trace
        self._sandesh.trace_enable()
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1]
        self.assertEqual(exp_trace_list, self._trace_read_list)
    # end test_enable_disable_trace

    def test_read_count_trace_buffer(self):
        trace_buf_name = 'test_read_count_trace_buffer'
        trace_buf_size = 10
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
        tmsg1 = TraceTest(magicNo=1, sandesh=self._sandesh)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg2 = TraceTest(magicNo=2, sandesh=self._sandesh)
        tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg3 = TraceTest(magicNo=3, sandesh=self._sandesh)
        tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        # Total messages in trace buffer = 3, count = 1
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=1,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # count = 0, should read the last two messages
        # (same read context, so the read resumes after tmsg1)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg2, tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Total messages in trace buffer = 3, count = 5 (< trace_buf_size)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=5,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1, tmsg2, tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Total messages in trace buffer = 3, count = 20 (> trace_buf_size)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read3', count=20,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1, tmsg2, tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
    # end test_read_count_trace_buffer

    def test_overwrite_trace_buffer(self):
        trace_buf_name = 'test_overwrite_trace_buffer'
        trace_buf_size = 3
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
        tmsg1 = TraceTest(magicNo=123, sandesh=self._sandesh)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg2 = TraceTest(magicNo=345, sandesh=self._sandesh)
        tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg3 = TraceTest(magicNo=567, sandesh=self._sandesh)
        tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1, tmsg2, tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Overwrite trace buffer: a 4th write into a 3-slot circular
        # buffer evicts the oldest message (tmsg1).
        tmsg4 = TraceTest(magicNo=789, sandesh=self._sandesh)
        tmsg4.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg2, tmsg3, tmsg4]
        self.assertEqual(exp_trace_list, self._trace_read_list)
    # end test_overwrite_trace_buffer

    def test_trace_buffer_read_context(self):
        trace_buf_name = 'test_trace_buffer_read_context'
        trace_buf_size = 3
        self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
        tmsg1 = TraceTest(magicNo=123, sandesh=self._sandesh)
        tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg2 = TraceTest(magicNo=345, sandesh=self._sandesh)
        tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg1, tmsg2]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # After reading the entire content of trace buffer,
        # dont delete the read context. Subsequent call to
        # trace_buffer_read() should not read any trace message
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = []
        self.assertEqual(exp_trace_list, self._trace_read_list)
        # We have not deleted the read context. Add more trace messages
        # and make sure we don't read the already read trace messages.
        tmsg3 = TraceTest(magicNo=56, sandesh=self._sandesh)
        tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg4 = TraceTest(magicNo=67, sandesh=self._sandesh)
        tmsg4.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=1,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Now read the last message
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg4]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Read the trace buffer with different read_context
        # (fresh cursor starts at the oldest retained message, tmsg2).
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=2,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg2, tmsg3]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Delete read context; the next read with the same name starts over.
        self._sandesh.trace_buffer_read_done(name=trace_buf_name,
                                             context='read2')
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=1,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg2]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        # Interleave reading and writing of trace buffer - invalidate read
        # context
        tmsg5 = TraceTest(magicNo=78, sandesh=self._sandesh)
        tmsg5.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        tmsg6 = TraceTest(magicNo=89, sandesh=self._sandesh)
        tmsg6.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read2', count=2,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg4, tmsg5]
        self.assertEqual(exp_trace_list, self._trace_read_list)
        self._trace_read_list = []
        tmsg7 = TraceTest(magicNo=98, sandesh=self._sandesh)
        tmsg7.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
        self._sandesh.trace_buffer_read(
            name=trace_buf_name, read_context='read1', count=0,
            read_cb=self.sandesh_trace_read_handler)
        exp_trace_list = [tmsg5, tmsg6, tmsg7]
        self.assertEqual(exp_trace_list, self._trace_read_list)
class SvcMonitor(object):
    """
    data + methods used/referred to by ssrc and arc greenlets
    """

    _KEYSPACE = 'svc_monitor_keyspace'
    _SVC_VM_CF = 'svc_vm_table'
    _SVC_SI_CF = 'svc_si_table'
    _SVC_CLEANUP_CF = 'svc_cleanup_table'

    def __init__(self, vnc_lib, args=None):
        self._args = args

        # api server and cassandra init
        self._vnc_lib = vnc_lib
        self._cassandra_init()

        # dictionary for nova clients, keyed by project name
        self._nova = {}

        #initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(self._args.disc_server_ip,
                                                self._args.disc_server_port,
                                                client_type='Service Monitor')

        #sandesh init
        self._sandesh = Sandesh()
        sandesh.ServiceInstanceList.handle_request =\
            self.sandesh_si_handle_request
        module = Module.SVC_MONITOR
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        self._sandesh.init_generator(module_name, socket.gethostname(),
                                     node_type_name, instance_id,
                                     self._args.collectors,
                                     'svc_monitor_context',
                                     int(self._args.http_server_port),
                                     ['cfgm_common', 'sandesh'], self._disc)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file)

        #create default analyzer template
        self._create_default_template('analyzer-template', 'analyzer',
                                      'analyzer')
        self._create_default_template('nat-template', 'nat-service',
                                      'firewall', 'in-network-nat')

        #create cpu_info object to send periodic updates
        sysinfo_req = False
        cpu_info = vnc_cpu_info.CpuInfo(module_name, instance_id, sysinfo_req,
                                        self._sandesh, 60)
        self._cpu_info = cpu_info

        # logging
        self._err_file = '/var/log/contrail/svc-monitor.err'
        self._tmp_file = '/var/log/contrail/svc-monitor.tmp'
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        handler = logging.handlers.RotatingFileHandler(self._err_file,
                                                       maxBytes=64 * 1024,
                                                       backupCount=2)
        self._svc_err_logger.addHandler(handler)
    # end __init__

    # create service template
    def _create_default_template(self, st_name, image_name, svc_type,
                                 svc_mode=None):
        """Create a system-default service template (idempotent)."""
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self._svc_syslog("Creating %s %s image %s" %
                         (domain_name, st_name, image_name))
        try:
            st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name)
            st_uuid = st_obj.uuid
            self._svc_syslog("%s exists uuid %s" % (st_name, str(st_uuid)))
            return
        except NoIdError:
            domain = self._vnc_lib.domain_read(fq_name=domain_fq_name)
            st_obj = ServiceTemplate(name=st_name, domain_obj=domain)
            st_uuid = self._vnc_lib.service_template_create(st_obj)

        svc_properties = ServiceTemplateType()
        svc_properties.set_image_name(image_name)
        svc_properties.set_service_type(svc_type)
        svc_properties.set_flavor("m1.medium")
        svc_properties.set_ordered_interfaces(True)

        # set interface list
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        else:
            if_list = [['management', False],
                       ['left', False], ['right', False]]
            # NOTE(review): service mode is only set on the non-analyzer
            # path in the source -- confirm this is intended.
            svc_properties.set_service_mode(svc_mode)

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        try:
            st_obj.set_service_template_properties(svc_properties)
            self._vnc_lib.service_template_update(st_obj)
        except Exception as e:
            # FIX: was the Python-2-only statement "print e"
            print(e)

        self._svc_syslog("%s created with uuid %s" %
                         (st_name, str(st_uuid)))
    # end _create_default_template

    def cleanup(self):
        # TODO cleanup sandesh context
        pass
    # end cleanup

    def _sandesh_populate_vn_info(self, si_info, sandesh_si):
        """Fill per-interface VN/IIP display strings into *sandesh_si*
        from the cassandra si_info row; marks the row as 'done'."""
        for if_str in [_LEFT_STR, _RIGHT_STR, _MGMT_STR]:
            if_set = set()
            if_str_vn = if_str + '-vn'
            if if_str_vn not in si_info:
                continue

            vn_fq_str = str(si_info[if_str_vn])
            vn_uuid = str(si_info[vn_fq_str])
            vn_str = ("VN [%s : %s]" % (vn_fq_str, vn_uuid))
            if_set.add(vn_str)

            iip_uuid_str = if_str + '-iip-uuid'
            if iip_uuid_str in si_info:
                vn_iip_uuid = str(si_info[iip_uuid_str])
                iip_addr_str = if_str + '-iip-addr'
                vn_iip_addr = str(si_info[iip_addr_str])
                iip_str = ("IIP [%s : %s]" % (vn_iip_addr, vn_iip_uuid))
                if_set.add(iip_str)

            if if_str == _LEFT_STR:
                sandesh_si.left_vn = list(if_set)
            if if_str == _RIGHT_STR:
                sandesh_si.right_vn = list(if_set)
            if if_str == _MGMT_STR:
                sandesh_si.management_vn = list(if_set)

        si_info['done'] = True
    # end _sandesh_populate_vn_info

    def sandesh_si_handle_request(self, req):
        """Sandesh introspect handler: list service instances with their
        VMs and network/IP details."""
        si_resp = sandesh.ServiceInstanceListResp(si_names=[])
        if req.si_name is None:
            vm_list = list(self._svc_vm_cf.get_range())
            si_list = list(self._svc_si_cf.get_range())

            #walk all vms
            for vm_uuid, si in vm_list:
                if 'done' in si:
                    continue
                #collect all ecmp instances
                sandesh_si = sandesh.ServiceInstance(name=si['si_fq_str'])
                vm_set = set()
                for key, val in vm_list:
                    if val['si_fq_str'] != si['si_fq_str']:
                        continue
                    vm_str = ("%s: %s" % (val['instance_name'], key))
                    vm_set.add(vm_str)
                    val['done'] = True
                sandesh_si.vm_list = list(vm_set)

                #find the vn and iip information
                for si_fq_str, si_info in si_list:
                    if si_fq_str != si['si_fq_str']:
                        continue
                    self._sandesh_populate_vn_info(si_info, sandesh_si)
                    si_info['done'] = True
                si_resp.si_names.append(sandesh_si)

            #walk all instances where vms are pending launch
            for si_fq_str, si_info in si_list:
                if 'done' in si_info.keys():
                    continue
                sandesh_si = sandesh.ServiceInstance(name=si_fq_str)
                sandesh_si.vm_list = set()
                sandesh_si.instance_name = ''
                self._sandesh_populate_vn_info(si_info, sandesh_si)
                si_resp.si_names.append(sandesh_si)

        si_resp.response(req.context())
    # end sandesh_si_handle_request

    def _utc_timestamp_usec(self):
        """Return current UTC time as microseconds since the Unix epoch."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        now = datetime.datetime.utcnow()
        delta = now - epoch
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10**6)
    # end _utc_timestamp_usec

    def _uve_svc_instance(self, si_fq_name_str, status=None, vm_uuid=None,
                          st_name=None):
        """Send a service-instance config UVE trace message."""
        svc_uve = UveSvcInstanceConfig(name=si_fq_name_str,
                                       deleted=False, st_name=None,
                                       vm_list=[], create_ts=None)
        if st_name:
            svc_uve.st_name = st_name
        if vm_uuid:
            svc_uve.vm_list.append(vm_uuid)
        if status:
            svc_uve.status = status
            if status == 'CREATE':
                svc_uve.create_ts = self._utc_timestamp_usec()
            elif status == 'DELETE':
                svc_uve.deleted = True

        svc_log = UveSvcInstanceConfigTrace(data=svc_uve,
                                            sandesh=self._sandesh)
        svc_log.send(sandesh=self._sandesh)
    # end _uve_svc_instance

    def _svc_syslog(self, log_msg):
        # Log both to the local sandesh logger and as a sandesh message.
        self._sandesh._logger.debug("%s", log_msg)
        vn_log = sandesh.SvcMonitorLog(log_msg=log_msg,
                                       sandesh=self._sandesh)
        vn_log.send(sandesh=self._sandesh)
    # end _svc_syslog

    def _get_proj_name_from_si_fq_str(self, si_fq_str):
        # fq name is "<domain>:<project>:<si-name>"; return the project.
        return si_fq_str.split(':')[1]
    # end _get_proj_name_from_si_fq_str

    def _get_vn_id(self, proj_obj, vn_fq_name_str,
                   shared_vn_name=None, shared_vn_subnet=None):
        """Resolve a VN uuid: look up *vn_fq_name_str* if given, otherwise
        find-or-create the shared service VN in the project."""
        vn_id = None

        if vn_fq_name_str:
            vn_fq_name = vn_fq_name_str.split(':')
            # search for provided VN
            try:
                vn_id = self._vnc_lib.fq_name_to_id('virtual-network',
                                                    vn_fq_name)
            except NoIdError:
                self._svc_syslog("Error: vn_fq_name %s not found" %
                                 (vn_fq_name_str))
        else:
            # search or create shared VN
            domain_name, proj_name = proj_obj.get_fq_name()
            vn_fq_name = [domain_name, proj_name, shared_vn_name]
            try:
                vn_id = self._vnc_lib.fq_name_to_id('virtual-network',
                                                    vn_fq_name)
            except NoIdError:
                vn_id = self._create_svc_vn(shared_vn_name,
                                            shared_vn_subnet, proj_obj)

        return vn_id
    # end _get_vn_id

    def _set_svc_vm_if_properties(self, vmi_obj, vn_obj):
        """Set interface type, static routes and security-group policy on
        a service VM interface once the VM<->SI linkage is known."""
        # confirm service vm by checking reference to service instance
        vm_id = get_vm_id_from_interface(vmi_obj)
        if vm_id is None:
            return
        vm_obj = self._vnc_lib.virtual_machine_read(id=vm_id)
        si_list = vm_obj.get_service_instance_refs()
        if not si_list:
            return

        # if interface property already set
        if vmi_obj.get_virtual_machine_interface_properties() is not None:
            return

        # get service instance
        fq_name = si_list[0]['to']
        si_obj = self._vnc_lib.service_instance_read(fq_name=fq_name)
        si_props = si_obj.get_service_instance_properties()
        si_if_list = si_props.get_interface_list()
        si_if = None

        # get service template
        st_list = si_obj.get_service_template_refs()
        if st_list is not None:
            fq_name = st_list[0]['to']
            st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
            st_props = st_obj.get_service_template_properties()
            st_if_list = st_props.get_interface_type()

        # set interface type; the loop leaks st_if/si_if/idx for the
        # static-route handling below (matched interface wins via break)
        vn_fq_name_str = vn_obj.get_fq_name_str()
        for idx in range(0, len(st_if_list)):
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type
            si_if = None
            if si_if_list and st_props.get_ordered_interfaces():
                si_if = si_if_list[idx]
                si_vn_str = si_if.get_virtual_network()
            else:
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                si_vn_str = func()

            if (((itf_type == _MGMT_STR) and
                    (vn_obj.name == _SVC_VN_MGMT)) or
                    ((itf_type == _LEFT_STR) and
                        (vn_obj.name == _SVC_VN_LEFT)) or
                    ((itf_type == _RIGHT_STR) and
                        (vn_obj.name == _SVC_VN_RIGHT)) or
                    (si_vn_str == vn_fq_name_str)):
                if_properties = VirtualMachineInterfacePropertiesType(
                    itf_type)
                vmi_obj.set_virtual_machine_interface_properties(
                    if_properties)
                break

        # set static routes
        if st_if.get_static_route_enable():
            static_routes = si_if.get_static_routes()
            if not static_routes:
                static_routes = {'route': []}
            try:
                domain_name, proj_name = si_obj.get_parent_fq_name()
                rt_name = si_obj.uuid + ' ' + str(idx)
                rt_fq_name = [domain_name, proj_name, rt_name]
                rt_obj = self._vnc_lib.interface_route_table_read(
                    fq_name=rt_fq_name)
                rt_obj.set_interface_route_table_routes(static_routes)
            except NoIdError:
                proj_obj = self._vnc_lib.project_read(
                    fq_name=si_obj.get_parent_fq_name())
                rt_obj = InterfaceRouteTable(
                    name=rt_name,
                    parent_obj=proj_obj,
                    interface_route_table_routes=static_routes)
                self._vnc_lib.interface_route_table_create(rt_obj)
            vmi_obj.set_interface_route_table(rt_obj)

        # remove security groups and update vmi
        if st_props.service_mode not in ['in-network', 'in-network-nat'] or \
                st_props.service_type == 'analyzer':
            vmi_obj.set_security_group_list([])
        self._vnc_lib.virtual_machine_interface_update(vmi_obj)
    # end _set_svc_vm_if_properties

    def _create_svc_instance_vm(self, st_obj, si_obj):
        """Launch (or re-use) the nova VMs backing a service instance."""
        #check if all config received before launch
        if not self._check_store_si_info(st_obj, si_obj):
            return

        row_entry = {}
        st_props = st_obj.get_service_template_properties()
        if st_props is None:
            return
        st_if_list = st_props.get_interface_type()
        flavor = st_props.get_flavor()
        image_name = st_props.get_image_name()
        if image_name is None:
            self._svc_syslog("Error: Image name not present in %s" %
                             (st_obj.name))
            return

        si_props = si_obj.get_service_instance_properties()
        max_instances = si_props.get_scale_out().get_max_instances()
        si_if_list = si_props.get_interface_list()
        if si_if_list and (len(si_if_list) != len(st_if_list)):
            self._svc_syslog("Error: IF mismatch template %s instance %s" %
                             (len(st_if_list), len(si_if_list)))
            return

        # check and create service virtual networks
        nics = []
        proj_fq_name = si_obj.get_parent_fq_name()
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        for idx in range(0, len(st_if_list)):
            nic = {}
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type

            # set vn id
            if si_if_list and st_props.get_ordered_interfaces():
                si_if = si_if_list[idx]
                vn_fq_name_str = si_if.get_virtual_network()
            else:
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                vn_fq_name_str = func()

            if itf_type in _SVC_VNS:
                vn_id = self._get_vn_id(proj_obj, vn_fq_name_str,
                                        _SVC_VNS[itf_type][0],
                                        _SVC_VNS[itf_type][1])
            else:
                vn_id = self._get_vn_id(proj_obj, vn_fq_name_str)
            if vn_id is None:
                continue
            nic['net-id'] = vn_id
            nic['v4-fixed-ip'] = None

            # set shared ip
            if st_if.shared_ip:
                try:
                    iip_dict = self._svc_si_cf.get(si_obj.get_fq_name_str())
                    iip_uuid_str = itf_type + '-iip-uuid'
                    if iip_uuid_str in iip_dict.keys():
                        nic['v4-fixed-ip'] = iip_dict[itf_type + '-iip-addr']
                except pycassa.NotFoundException:
                    self._svc_syslog(
                        "Error: Shared IP not found in db for %s" %
                        (itf_type))

            # add to nic list
            nics.append(nic)

        # create and launch vm
        vm_refs = si_obj.get_virtual_machine_back_refs()
        n_client = self._novaclient_get(proj_obj.name)
        for inst_count in range(0, max_instances):
            instance_name = si_obj.name + '_' + str(inst_count + 1)
            exists = False
            for vm_ref in vm_refs or []:
                vm = n_client.servers.find(id=vm_ref['uuid'])
                if vm.name == instance_name:
                    exists = True
                    break

            if exists:
                vm_uuid = vm_ref['uuid']
            else:
                vm = self._create_svc_vm(instance_name, image_name, nics,
                                         proj_obj.name, flavor)
                if vm is None:
                    continue
                vm_uuid = vm.id

            # store vm, instance in cassandra; use for linking when VM is up
            row_entry['si_fq_str'] = si_obj.get_fq_name_str()
            row_entry['instance_name'] = instance_name
            self._svc_vm_cf.insert(vm_uuid, row_entry)

            # uve trace
            self._uve_svc_instance(si_obj.get_fq_name_str(),
                                   status='CREATE', vm_uuid=vm.id,
                                   st_name=st_obj.get_fq_name_str())
    # end _create_svc_instance_vm

    def _delete_svc_instance_vm(self, vm_uuid, proj_name, si_fq_str=None):
        """Delete a service VM via nova and queue it for cleanup."""
        found = True
        try:
            self._svc_syslog("Deleting VM %s %s" % (proj_name, vm_uuid))
            n_client = self._novaclient_get(proj_name)
            vm = n_client.servers.find(id=vm_uuid)
            vm.delete()
            self._uve_svc_instance(si_fq_str, status='DELETE',
                                   vm_uuid=vm_uuid)
        except nc_exc.NotFound:
            # remove from cleanup list
            self._cleanup_cf.remove(vm_uuid)
            found = False

        # remove from launch table and queue into cleanup list
        if found:
            self._svc_vm_cf.remove(vm_uuid)
            self._cleanup_cf.insert(vm_uuid, {'proj_name': proj_name,
                                              'type': 'vm'})
    # end _delete_svc_instance_vm

    def _restart_svc_vm(self, vm_uuid, si_fq_str):
        """Delete and relaunch a service VM for the given SI."""
        proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
        self._delete_svc_instance_vm(vm_uuid, proj_name,
                                     si_fq_str=si_fq_str)

        si_obj = self._vnc_lib.service_instance_read(fq_name_str=si_fq_str)
        st_list = si_obj.get_service_template_refs()
        if st_list is not None:
            fq_name = st_list[0]['to']
            st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
            self._create_svc_instance_vm(st_obj, si_obj)
    # end _restart_svc_vm

    def _check_store_si_info(self, st_obj, si_obj):
        """Validate/record SI network and shared-IP config in cassandra.
        Returns truthy only when all referenced VNs exist."""
        config_complete = True
        st_props = st_obj.get_service_template_properties()
        st_if_list = st_props.get_interface_type()
        si_props = si_obj.get_service_instance_properties()
        si_if_list = si_props.get_interface_list()
        if si_if_list and (len(si_if_list) != len(st_if_list)):
            self._svc_syslog("Error: IF mismatch template %s instance %s" %
                             (len(st_if_list), len(si_if_list)))
            # returns None (falsy) here -- callers only truth-test
            return

        #read existing si_entry
        try:
            si_entry = self._svc_si_cf.get(si_obj.get_fq_name_str())
        except Exception:
            si_entry = {}

        #walk the interface list
        for idx in range(0, len(st_if_list)):
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type

            si_if = None
            if si_if_list and st_props.get_ordered_interfaces():
                si_if = si_if_list[idx]
                si_vn_str = si_if.get_virtual_network()
            else:
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                # NOTE(review): source joins the getter result here while
                # the sibling methods use func() directly -- confirm the
                # getter really returns a fq-name list on this path.
                si_vn_str = ':'.join(func())
            if not si_vn_str:
                continue

            si_entry[itf_type + '-vn'] = si_vn_str
            try:
                vn_obj = self._vnc_lib.virtual_network_read(
                    fq_name_str=si_vn_str)
                if vn_obj.uuid != si_entry.get(si_vn_str, None):
                    si_entry[si_vn_str] = vn_obj.uuid

                if not st_if.shared_ip:
                    continue
                iip_uuid_str = itf_type + '-iip-uuid'
                iip_uuid = si_entry.get(iip_uuid_str, None)
                iip = self._allocate_shared_iip(st_obj, si_obj, vn_obj,
                                                iip_uuid)
                si_entry[itf_type + '-iip-uuid'] = iip['uuid']
                si_entry[itf_type + '-iip-addr'] = iip['addr']
            except NoIdError:
                self._svc_syslog("Warn: VN %s add is pending" % si_vn_str)
                si_entry[si_vn_str] = 'pending'
                config_complete = False

        if config_complete:
            self._svc_syslog("SI %s info is complete" %
                             si_obj.get_fq_name_str())
        else:
            self._svc_syslog("Warn: SI %s info is not complete" %
                             si_obj.get_fq_name_str())

        #insert entry
        self._svc_si_cf.insert(si_obj.get_fq_name_str(), si_entry)
        return config_complete
    # end _check_store_si_info

    def _allocate_shared_iip(self, st_obj, si_obj, vn_obj, iip_uuid):
        """Return {'uuid', 'addr'} for the shared instance IP, reusing
        *iip_uuid* when it still exists, else allocating a new one."""
        iip_entry = {}
        iip_obj = None
        if iip_uuid:
            try:
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
            except NoIdError:
                pass

        # allocate ip
        if not iip_obj:
            addr = self._vnc_lib.virtual_network_ip_alloc(vn_obj)
            iip_name = '%s %s' % (vn_obj.uuid, addr[0])
            iip_obj = InstanceIp(name=iip_name,
                                 instance_ip_address=addr[0])
            iip_obj.add_virtual_network(vn_obj)
            self._vnc_lib.instance_ip_create(iip_obj)

        iip_entry = {'uuid': iip_obj.uuid,
                     'addr': iip_obj.get_instance_ip_address()}
        return iip_entry
    # end _allocate_shared_iip

    def _delete_shared_vn(self, vn_uuid, proj_name):
        try:
            self._svc_syslog("Deleting VN %s %s" % (proj_name, vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
        except RefsExistError:
            self._svc_err_logger.error(
                "Delete failed refs exist VN %s %s" % (proj_name, vn_uuid))
        except NoIdError:
            # remove from cleanup list
            self._cleanup_cf.remove(vn_uuid)
    # end _delete_shared_vn

    def _delete_shared_iip(self, iip_uuid, proj_name):
        try:
            iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
            vmi_refs = iip_obj.get_virtual_machine_interface_refs()
            if vmi_refs is None:
                self._svc_syslog("Deleting IIP %s %s" %
                                 (proj_name, iip_uuid))
                self._vnc_lib.instance_ip_delete(id=iip_uuid)
        except RefsExistError:
            self._svc_err_logger.error(
                "Delete failed refs exist IIP %s %s" %
                (proj_name, iip_uuid))
        except NoIdError:
            # remove from cleanup list
            self._cleanup_cf.remove(iip_uuid)
    # end _delete_shared_iip

    def _delmsg_project_service_instance(self, idents):
        proj_fq_str = idents['project']
        proj_obj = self._vnc_lib.project_read(fq_name_str=proj_fq_str)
        if proj_obj.get_service_instances() is not None:
            return

        # no SIs left hence delete shared VNs
        for vn_name in [_SVC_VN_MGMT, _SVC_VN_LEFT, _SVC_VN_RIGHT]:
            domain_name, proj_name = proj_obj.get_fq_name()
            vn_fq_name = [domain_name, proj_name, vn_name]
            try:
                vn_uuid = self._vnc_lib.fq_name_to_id('virtual-network',
                                                      vn_fq_name)
                self._cleanup_cf.insert(vn_uuid,
                                        {'proj_name': proj_obj.name,
                                         'type': 'vn'})
            except Exception:
                # best-effort: VN may never have been created
                pass
    # end _delmsg_project_service_instance

    def _delmsg_service_instance_service_template(self, idents):
        si_fq_str = idents['service-instance']
        vm_list = list(self._svc_vm_cf.get_range())
        for vm_uuid, si in vm_list:
            if si_fq_str != si['si_fq_str']:
                continue
            proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
            self._delete_svc_instance_vm(vm_uuid, proj_name,
                                         si_fq_str=si_fq_str)

            #insert shared instance IP uuids into cleanup list if present
            try:
                si_info = self._svc_si_cf.get(si_fq_str)
                for itf_str in [_MGMT_STR, _LEFT_STR, _RIGHT_STR]:
                    iip_uuid_str = itf_str + '-iip-uuid'
                    if iip_uuid_str not in si_info:
                        continue
                    self._cleanup_cf.insert(si_info[iip_uuid_str],
                                            {'proj_name': proj_name,
                                             'type': 'iip'})
            except pycassa.NotFoundException:
                pass

            #delete si info
            try:
                self._svc_si_cf.remove(si_fq_str)
            except pycassa.NotFoundException:
                pass
    # end _delmsg_service_instance_service_template

    def _delmsg_virtual_machine_service_instance(self, idents):
        vm_uuid = idents['virtual-machine']
        si_fq_str = idents['service-instance']
        proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
        self._delete_svc_instance_vm(vm_uuid, proj_name,
                                     si_fq_str=si_fq_str)
    # end _delmsg_virtual_machine_service_instance

    def _delmsg_virtual_machine_interface_route_table(self, idents):
        rt_fq_str = idents['interface-route-table']
        rt_obj = self._vnc_lib.interface_route_table_read(
            fq_name_str=rt_fq_str)
        vmi_list = rt_obj.get_virtual_machine_interface_back_refs()
        if vmi_list is None:
            self._vnc_lib.interface_route_table_delete(id=rt_obj.uuid)
    # end _delmsg_virtual_machine_interface_route_table

    def _addmsg_virtual_machine_interface_virtual_network(self, idents):
        vmi_fq_str = idents['virtual-machine-interface']
        vn_fq_str = idents['virtual-network']

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                fq_name_str=vmi_fq_str)
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name_str=vn_fq_str)
        except NoIdError:
            return

        # check if this is a service vm
        vm_id = get_vm_id_from_interface(vmi_obj)
        if vm_id is None:
            return
        vm_obj = self._vnc_lib.virtual_machine_read(id=vm_id)
        si_list = vm_obj.get_service_instance_refs()
        if si_list:
            fq_name = si_list[0]['to']
            si_obj = self._vnc_lib.service_instance_read(fq_name=fq_name)
        else:
            # fall back to the launch table written at create time
            try:
                svc_vm_cf_row = self._svc_vm_cf.get(vm_obj.uuid)
                si_fq_str = svc_vm_cf_row['si_fq_str']
                vm_obj.name = svc_vm_cf_row['instance_name']
                si_obj = self._vnc_lib.service_instance_read(
                    fq_name_str=si_fq_str)
            except pycassa.NotFoundException:
                return
            except NoIdError:
                # SI was deleted meanwhile; reap the orphan VM
                proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
                self._delete_svc_instance_vm(vm_obj.uuid, proj_name,
                                             si_fq_str=si_fq_str)
                return

        # create service instance to service vm link
        vm_obj.add_service_instance(si_obj)
        self._vnc_lib.virtual_machine_update(vm_obj)

        # set service instance property
        self._set_svc_vm_if_properties(vmi_obj, vn_obj)
    # end _addmsg_virtual_machine_interface_virtual_network

    def _addmsg_service_instance_service_template(self, idents):
        st_fq_str = idents['service-template']
        si_fq_str = idents['service-instance']

        try:
            st_obj = self._vnc_lib.service_template_read(
                fq_name_str=st_fq_str)
            si_obj = self._vnc_lib.service_instance_read(
                fq_name_str=si_fq_str)
        except NoIdError:
            return

        #launch VMs
        self._create_svc_instance_vm(st_obj, si_obj)
    # end _addmsg_service_instance_service_template

    def _addmsg_service_instance_properties(self, idents):
        si_fq_str = idents['service-instance']
        try:
            si_obj = self._vnc_lib.service_instance_read(
                fq_name_str=si_fq_str)
        except NoIdError:
            return

        #update static routes
        self._update_static_routes(si_obj)
    # end _addmsg_service_instance_properties

    def _addmsg_project_virtual_network(self, idents):
        vn_fq_str = idents['virtual-network']

        try:
            si_list = list(self._svc_si_cf.get_range())
        except pycassa.NotFoundException:
            return

        # launch any SI whose pending VN just appeared
        for si_fq_str, si_info in si_list:
            if vn_fq_str not in si_info.keys():
                continue
            try:
                si_obj = self._vnc_lib.service_instance_read(
                    fq_name_str=si_fq_str)
                st_refs = si_obj.get_service_template_refs()
                fq_name = st_refs[0]['to']
                st_obj = self._vnc_lib.service_template_read(
                    fq_name=fq_name)
                #launch VMs
                self._create_svc_instance_vm(st_obj, si_obj)
            except Exception:
                continue
    # end _addmsg_project_virtual_network

    def process_poll_result(self, poll_result_str):
        """Dispatch ifmap poll results to _addmsg_* / _delmsg_* handlers
        named after the metadata tag."""
        result_list = parse_poll_result(poll_result_str)

        # process ifmap message
        for (result_type, idents, metas) in result_list:
            for meta in metas:
                meta_name = re.sub('{.*}', '', meta.tag)
                # FIX: funcname was left unbound (or stale from the
                # previous meta) when result_type matched neither branch,
                # risking NameError / dispatch to the wrong handler.
                funcname = None
                if result_type == 'deleteResult':
                    funcname = "_delmsg_" + meta_name.replace('-', '_')
                elif result_type in ['searchResult', 'updateResult']:
                    funcname = "_addmsg_" + meta_name.replace('-', '_')
                # end if result_type
                if funcname is None:
                    continue
                try:
                    func = getattr(self, funcname)
                except AttributeError:
                    pass
                else:
                    self._svc_syslog("%s with %s/%s" %
                                     (funcname, meta_name, idents))
                    func(idents)
            # end for meta
        # end for result_type
    # end process_poll_result

    def _novaclient_get(self, proj_name):
        """Return a cached per-project novaclient, creating it on first
        use."""
        # FIX: local renamed from 'client' to avoid shadowing the
        # imported discovery 'client' module.
        n_client = self._nova.get(proj_name)
        if n_client is not None:
            return n_client

        self._nova[proj_name] = nc.Client(
            '2', username=self._args.admin_user, project_id=proj_name,
            api_key=self._args.admin_password,
            region_name=self._args.region_name, service_type='compute',
            auth_url='http://' + self._args.auth_host + ':5000/v2.0')
        return self._nova[proj_name]
    # end _novaclient_get

    def _update_static_routes(self, si_obj):
        """Push the SI's per-interface static routes into the matching
        interface route tables (no-op for routes whose table is absent)."""
        # get service instance interface list
        si_props = si_obj.get_service_instance_properties()
        si_if_list = si_props.get_interface_list()
        if not si_if_list:
            return

        for idx in range(0, len(si_if_list)):
            si_if = si_if_list[idx]
            static_routes = si_if.get_static_routes()
            if not static_routes:
                static_routes = {'route': []}

            # update static routes
            try:
                domain_name, proj_name = si_obj.get_parent_fq_name()
                rt_name = si_obj.uuid + ' ' + str(idx)
                rt_fq_name = [domain_name, proj_name, rt_name]
                rt_obj = self._vnc_lib.interface_route_table_read(
                    fq_name=rt_fq_name)
                rt_obj.set_interface_route_table_routes(static_routes)
                self._vnc_lib.interface_route_table_update(rt_obj)
            except NoIdError:
                pass
    # end _update_static_routes

    # NOTE(review): the source chunk ends mid-definition after this point
    # (a dangling "def"); the remainder lives outside this view.
_create_svc_vm(self, vm_name, image_name, nics, proj_name, flavor_name): n_client = self._novaclient_get(proj_name) if flavor_name: flavor = n_client.flavors.find(name=flavor_name) else: flavor = n_client.flavors.find(ram=4096) image = '' try: image = n_client.images.find(name=image_name) except nc_exc.NotFound: self._svc_syslog("Error: Image %s not found in project %s" % (image_name, proj_name)) return except nc_exc.NoUniqueMatch: self._svc_syslog("Error: Multiple images %s found in project %s" % (image_name, proj_name)) return # launch vm self._svc_syslog('Launching VM : ' + vm_name) nova_vm = n_client.servers.create(name=vm_name, image=image, flavor=flavor, nics=nics) nova_vm.get() self._svc_syslog('Created VM : ' + str(nova_vm)) return nova_vm # end create_svc_vm def _create_svc_vn(self, vn_name, vn_subnet, proj_obj): self._svc_syslog("Creating network %s subnet %s" % (vn_name, vn_subnet)) vn_obj = VirtualNetwork(name=vn_name, parent_obj=proj_obj) domain_name, project_name = proj_obj.get_fq_name() ipam_fq_name = [domain_name, 'default-project', 'default-network-ipam'] ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name) cidr = vn_subnet.split('/') pfx = cidr[0] pfx_len = int(cidr[1]) subnet_info = IpamSubnetType(subnet=SubnetType(pfx, pfx_len)) subnet_data = VnSubnetsType([subnet_info]) vn_obj.add_network_ipam(ipam_obj, subnet_data) self._vnc_lib.virtual_network_create(vn_obj) return vn_obj.uuid # end _create_svc_vn def _cassandra_init(self): server_idx = 0 num_dbnodes = len(self._args.cassandra_server_list) connected = False while not connected: try: cass_server = self._args.cassandra_server_list[server_idx] sys_mgr = SystemManager(cass_server) connected = True except Exception as e: server_idx = (server_idx + 1) % num_dbnodes time.sleep(3) if self._args.reset_config: try: sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! 
" + str(e) try: sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY, {'replication_factor': str(num_dbnodes)}) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! " + str(e) column_families = [ self._SVC_VM_CF, self._SVC_CLEANUP_CF, self._SVC_SI_CF ] for cf in column_families: try: sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf) except pycassa.cassandra.ttypes.InvalidRequestException as e: print "Warning! " + str(e) conn_pool = pycassa.ConnectionPool(SvcMonitor._KEYSPACE, self._args.cassandra_server_list) rd_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM wr_consistency = pycassa.cassandra.ttypes.ConsistencyLevel.QUORUM self._svc_vm_cf = pycassa.ColumnFamily( conn_pool, self._SVC_VM_CF, read_consistency_level=rd_consistency, write_consistency_level=wr_consistency) self._svc_si_cf = pycassa.ColumnFamily( conn_pool, self._SVC_SI_CF, read_consistency_level=rd_consistency, write_consistency_level=wr_consistency) self._cleanup_cf = pycassa.ColumnFamily( conn_pool, self._SVC_CLEANUP_CF, read_consistency_level=rd_consistency, write_consistency_level=wr_consistency)
class SvcMonitor(object):
    """
    data + methods used/referred to by ssrc and arc greenlets

    Watches ifmap updates, launches/deletes service VMs through nova and
    records launch/cleanup state in cassandra.
    """

    _KEYSPACE = 'svc_monitor_keyspace'
    _SVC_VM_CF = 'svc_vm_table'
    _SVC_SI_CF = 'svc_si_table'
    _SVC_CLEANUP_CF = 'svc_cleanup_table'

    def __init__(self, vnc_lib, args=None):
        self._args = args

        # api server and cassandra init
        self._vnc_lib = vnc_lib
        self._cassandra_init()

        # dictionary for nova (per-project client cache)
        self._nova = {}

        #initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(self._args.disc_server_ip,
                                                self._args.disc_server_port,
                                                client_type='Service Monitor')
        #sandesh init
        self._sandesh = Sandesh()
        sandesh.ServiceInstanceList.handle_request =\
            self.sandesh_si_handle_request
        self._sandesh.init_generator(ModuleNames[Module.SVC_MONITOR],
                                     socket.gethostname(),
                                     self._args.collectors,
                                     'svc_monitor_context',
                                     int(self._args.http_server_port),
                                     ['cfgm_common', 'sandesh'], self._disc)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file)

        #create default analyzer template
        self._create_default_template('analyzer-template', 'analyzer',
                                      'analyzer')
        self._create_default_template('nat-template', 'nat-service',
                                      'firewall', 'in-network-nat')

        #create cpu_info object to send periodic updates
        sysinfo_req = False
        cpu_info = vnc_cpu_info.CpuInfo(Module.SVC_MONITOR, sysinfo_req,
                                        self._sandesh, 60)
        self._cpu_info = cpu_info

        # logging
        self._err_file = '/var/log/contrail/svc-monitor.err'
        self._tmp_file = '/var/log/contrail/svc-monitor.tmp'
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        handler = logging.handlers.RotatingFileHandler(self._err_file,
                                                       maxBytes=64 * 1024,
                                                       backupCount=2)
        self._svc_err_logger.addHandler(handler)
    # end __init__

    # create service template
    def _create_default_template(self, st_name, image_name, svc_type,
                                 svc_mode=None):
        """Create/update a default service template under default-domain."""
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self._svc_syslog("Creating %s %s image %s" %
                         (domain_name, st_name, image_name))

        try:
            st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name)
            st_uuid = st_obj.uuid
        except NoIdError:
            domain = self._vnc_lib.domain_read(fq_name=domain_fq_name)
            st_obj = ServiceTemplate(name=st_name, domain_obj=domain)
            st_uuid = self._vnc_lib.service_template_create(st_obj)

        svc_properties = ServiceTemplateType()
        svc_properties.set_image_name(image_name)
        svc_properties.set_service_type(svc_type)

        # set interface list (analyzer needs only a left interface)
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        else:
            if_list = [['left', False], ['right', False],
                       ['management', False]]
        svc_properties.set_service_mode(svc_mode)

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        try:
            st_obj.set_service_template_properties(svc_properties)
            self._vnc_lib.service_template_update(st_obj)
        except Exception as e:
            print(e)

        self._svc_syslog("%s created with uuid %s" % (st_name, str(st_uuid)))
    #_create_default_analyzer_template

    def cleanup(self):
        # TODO cleanup sandesh context
        pass
    # end cleanup

    def _sandesh_populate_vn_info(self, si_info, sandesh_si):
        """Fill left/right/management VN and shared-IP info into the
        introspect response object; marks si_info as processed."""
        for if_str in [_LEFT_STR, _RIGHT_STR, _MGMT_STR]:
            if_set = set()
            if_str_vn = if_str + '-vn'
            if if_str_vn not in si_info.keys():
                continue

            vn_fq_str = str(si_info[if_str_vn])
            vn_uuid = str(si_info[vn_fq_str])
            vn_str = ("VN [%s : %s]" % (vn_fq_str, vn_uuid))
            if_set.add(vn_str)

            iip_uuid_str = if_str + '-iip-uuid'
            if iip_uuid_str in si_info.keys():
                vn_iip_uuid = str(si_info[iip_uuid_str])
                iip_addr_str = if_str + '-iip-addr'
                vn_iip_addr = str(si_info[iip_addr_str])
                iip_str = ("IIP [%s : %s]" % (vn_iip_addr, vn_iip_uuid))
                if_set.add(iip_str)

            if if_str == _LEFT_STR:
                sandesh_si.left_vn = list(if_set)
            if if_str == _RIGHT_STR:
                sandesh_si.right_vn = list(if_set)
            if if_str == _MGMT_STR:
                sandesh_si.management_vn = list(if_set)

        si_info['done'] = True
    # end _sandesh_populate_vn_info

    def sandesh_si_handle_request(self, req):
        """Introspect handler listing service instances and their VMs."""
        si_resp = sandesh.ServiceInstanceListResp(si_names=[])
        if req.si_name is None:
            vm_list = list(self._svc_vm_cf.get_range())
            si_list = list(self._svc_si_cf.get_range())

            #walk all vms
            for vm_uuid, si in vm_list:
                if 'done' in si:
                    continue
                #collect all ecmp instances
                sandesh_si = sandesh.ServiceInstance(name=si['si_fq_str'])
                vm_set = set()
                for key, val in vm_list:
                    if val['si_fq_str'] != si['si_fq_str']:
                        continue
                    vm_str = ("%s: %s" % (val['instance_name'], key))
                    vm_set.add(vm_str)
                    val['done'] = True
                sandesh_si.vm_list = list(vm_set)

                #find the vn and iip information
                for si_fq_str, si_info in si_list:
                    if si_fq_str != si['si_fq_str']:
                        continue
                    self._sandesh_populate_vn_info(si_info, sandesh_si)
                    si_info['done'] = True
                si_resp.si_names.append(sandesh_si)

            #walk all instances where vms are pending launch
            for si_fq_str, si_info in si_list:
                if 'done' in si_info.keys():
                    continue
                sandesh_si = sandesh.ServiceInstance(name=si_fq_str)
                sandesh_si.vm_list = set()
                sandesh_si.instance_name = ''
                self._sandesh_populate_vn_info(si_info, sandesh_si)
                si_resp.si_names.append(sandesh_si)

        si_resp.response(req.context())
    # end sandesh_si_handle_request

    def _utc_timestamp_usec(self):
        """Return current UTC time as microseconds since the epoch."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        now = datetime.datetime.utcnow()
        delta = now - epoch
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10 ** 6)
    # end utc_timestamp_usec

    def _uve_svc_instance(self, si_fq_name_str, status=None,
                          vm_uuid=None, st_name=None):
        """Emit a service-instance config UVE for CREATE/DELETE events."""
        svc_uve = UveSvcInstanceConfig(name=si_fq_name_str,
                                       deleted=False, st_name=None,
                                       vm_list=[], create_ts=None)
        if st_name:
            svc_uve.st_name = st_name
        if vm_uuid:
            svc_uve.vm_list.append(vm_uuid)
        if status:
            svc_uve.status = status
            if status == 'CREATE':
                svc_uve.create_ts = self._utc_timestamp_usec()
            elif status == 'DELETE':
                svc_uve.deleted = True

        svc_log = UveSvcInstanceConfigTrace(data=svc_uve,
                                            sandesh=self._sandesh)
        svc_log.send(sandesh=self._sandesh)
    # end _uve_svc_instance

    def _svc_syslog(self, log_msg):
        """Log a message both to the local logger and as a sandesh trace."""
        self._sandesh._logger.debug("%s", log_msg)
        vn_log = sandesh.SvcMonitorLog(log_msg=log_msg,
                                       sandesh=self._sandesh)
        vn_log.send(sandesh=self._sandesh)
    # end _svc_syslog

    def _get_proj_name_from_si_fq_str(self, si_fq_str):
        """Extract the project name from 'domain:project:si' fq-name str."""
        return si_fq_str.split(':')[1]
    # end _get_proj_name_from_si_fq_str

    def _get_vn_id(self, proj_obj, vn_fq_name,
                   shared_vn_name=None, shared_vn_subnet=None):
        """Resolve a VN uuid; create the shared service VN when no explicit
        VN fq_name was given. Returns None on lookup failure."""
        vn_id = None

        if vn_fq_name:
            # search for provided VN
            try:
                vn_id = self._vnc_lib.fq_name_to_id(
                    'virtual-network', vn_fq_name)
            except NoIdError:
                # BUGFIX: original referenced undefined 'vn_name' here,
                # raising NameError instead of logging the lookup failure
                self._svc_syslog("Error: vn_name %s not found" %
                                 (vn_fq_name))
        else:
            # search or create shared VN
            domain_name, proj_name = proj_obj.get_fq_name()
            vn_fq_name = [domain_name, proj_name, shared_vn_name]
            try:
                vn_id = self._vnc_lib.fq_name_to_id(
                    'virtual-network', vn_fq_name)
            except NoIdError:
                vn_id = self._create_svc_vn(shared_vn_name,
                                            shared_vn_subnet, proj_obj)

        return vn_id
    # end _get_vn_id

    def _set_svc_vm_if_properties(self, vmi_obj, vn_obj):
        """Set interface type (and static routes) on a service VM's VMI."""
        # confirm service vm by checking reference to service instance
        vm_obj = self._vnc_lib.virtual_machine_read(
            fq_name_str=vmi_obj.parent_name)
        si_list = vm_obj.get_service_instance_refs()
        if not si_list:
            return

        # if interface property already set
        if vmi_obj.get_virtual_machine_interface_properties() is not None:
            return

        # get service instance
        fq_name = si_list[0]['to']
        si_obj = self._vnc_lib.service_instance_read(fq_name=fq_name)
        si_props = si_obj.get_service_instance_properties()
        si_if_list = si_props.get_interface_list()
        si_if = None

        # get service template
        st_list = si_obj.get_service_template_refs()
        if st_list is not None:
            fq_name = st_list[0]['to']
            st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
            st_props = st_obj.get_service_template_properties()
            st_if_list = st_props.get_interface_type()

        # set interface type by matching this VN against the template's
        # interface list (by well-known shared VN name or by SI's VN)
        vn_fq_name_str = vn_obj.get_fq_name_str()
        for idx in range(0, len(st_if_list)):
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type
            si_if = None
            if si_if_list:
                si_if = si_if_list[idx]
                si_vn_str = si_if.get_virtual_network()
            else:
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                si_vn_str = func()

            if (((itf_type == _MGMT_STR) and
                    (vn_obj.name == _SVC_VN_MGMT)) or
                    ((itf_type == _LEFT_STR) and
                     (vn_obj.name == _SVC_VN_LEFT)) or
                    ((itf_type == _RIGHT_STR) and
                     (vn_obj.name == _SVC_VN_RIGHT)) or
                    ((si_vn_str == vn_fq_name_str))):
                if_properties = VirtualMachineInterfacePropertiesType(
                    itf_type)
                vmi_obj.set_virtual_machine_interface_properties(
                    if_properties)
                break

        # set static routes (st_if/idx refer to the matched interface)
        if st_if.get_static_route_enable():
            static_routes = si_if.get_static_routes()
            try:
                domain_name, proj_name = si_obj.get_parent_fq_name()
                rt_name = si_obj.uuid + ' ' + str(idx)
                rt_fq_name = [domain_name, proj_name, rt_name]
                rt_obj = self._vnc_lib.interface_route_table_read(
                    fq_name=rt_fq_name)
                rt_obj.set_interface_route_table_routes(static_routes)
            except NoIdError:
                proj_obj = self._vnc_lib.project_read(
                    fq_name=si_obj.get_parent_fq_name())
                rt_obj = InterfaceRouteTable(
                    name=rt_name, parent_obj=proj_obj,
                    interface_route_table_routes=static_routes)
                self._vnc_lib.interface_route_table_create(rt_obj)
            vmi_obj.set_interface_route_table(rt_obj)

        # remove security groups and update vmi
        vmi_obj.set_security_group_list([])
        self._vnc_lib.virtual_machine_interface_update(vmi_obj)
    # end _set_svc_vm_if_properties

    def _create_svc_instance_vm(self, st_obj, si_obj):
        """Launch the service VM(s) for an SI from its template."""
        #check if all config received before launch
        if not self._check_store_si_info(st_obj, si_obj):
            return

        row_entry = {}
        st_props = st_obj.get_service_template_properties()
        if st_props is None:
            return
        st_if_list = st_props.get_interface_type()
        flavor = st_props.get_flavor()
        image_name = st_props.get_image_name()
        if image_name is None:
            self._svc_syslog("Error: Image name not present in %s" %
                             (st_obj.name))
            return

        si_props = si_obj.get_service_instance_properties()
        max_instances = si_props.get_scale_out().get_max_instances()
        si_if_list = si_props.get_interface_list()
        if si_if_list and (len(si_if_list) != len(st_if_list)):
            self._svc_syslog("Error: IF mismatch template %s instance %s" %
                             (len(st_if_list), len(si_if_list)))
            return

        # check and create service virtual networks
        nics = []
        proj_fq_name = si_obj.get_parent_fq_name()
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        for idx in range(0, len(st_if_list)):
            nic = {}
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type

            # set vn id
            if si_if_list:
                si_if = si_if_list[idx]
                vn_fq_name_str = si_if.get_virtual_network()
            else:
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                vn_fq_name_str = func()
            vn_fq_name = None
            if vn_fq_name_str:
                domain, proj, vn_name = vn_fq_name_str.split(':')
                vn_fq_name = [domain, proj, vn_name]
            vn_id = self._get_vn_id(proj_obj, vn_fq_name,
                                    _SVC_VNS[itf_type][0],
                                    _SVC_VNS[itf_type][1])
            if vn_id is None:
                continue
            nic['net-id'] = vn_id

            # set shared ip
            if st_if.shared_ip:
                try:
                    iip_dict = self._svc_si_cf.get(si_obj.get_fq_name_str())
                    iip_uuid_str = itf_type + '-iip-uuid'
                    if iip_uuid_str in iip_dict.keys():
                        nic['v4-fixed-ip'] = iip_dict[itf_type + '-iip-addr']
                except pycassa.NotFoundException:
                    self._svc_syslog(
                        "Error: Shared IP not found in db for %s" %
                        (itf_type))

            # add to nic list
            nics.append(nic)

        # create and launch vm
        vm_refs = si_obj.get_virtual_machine_back_refs()
        n_client = self._novaclient_get(proj_obj.name)
        for inst_count in range(0, max_instances):
            instance_name = si_obj.name + '_' + str(inst_count + 1)
            exists = False
            for vm_ref in vm_refs or []:
                vm = n_client.servers.find(id=vm_ref['uuid'])
                if vm.name == instance_name:
                    exists = True
                    break

            if exists:
                vm_uuid = vm_ref['uuid']
            else:
                vm = self._create_svc_vm(instance_name, image_name, nics,
                                         proj_obj.name, flavor)
                if vm is None:
                    continue
                vm_uuid = vm.id

            # store vm, instance in cassandra; use for linking when VM is up
            row_entry['si_fq_str'] = si_obj.get_fq_name_str()
            row_entry['instance_name'] = instance_name
            self._svc_vm_cf.insert(vm_uuid, row_entry)

            # uve trace
            self._uve_svc_instance(si_obj.get_fq_name_str(),
                                   status='CREATE', vm_uuid=vm.id,
                                   st_name=st_obj.get_fq_name_str())
    # end _create_svc_instance_vm

    def _delete_svc_instance_vm(self, vm_uuid, proj_name, si_fq_str=None):
        """Delete a service VM in nova and move it to the cleanup table."""
        found = True
        try:
            self._svc_syslog("Deleting VM %s %s" % (proj_name, vm_uuid))
            n_client = self._novaclient_get(proj_name)
            vm = n_client.servers.find(id=vm_uuid)
            vm.delete()
            self._uve_svc_instance(si_fq_str, status='DELETE',
                                   vm_uuid=vm_uuid)
        except nc_exc.NotFound:
            # remove from cleanup list
            self._cleanup_cf.remove(vm_uuid)
            found = False

        # remove from launch table and queue into cleanup list
        if found:
            self._svc_vm_cf.remove(vm_uuid)
            self._cleanup_cf.insert(vm_uuid, {'proj_name': proj_name,
                                              'type': 'vm'})
    # end _delete_svc_instance_vm

    def _restart_svc_vm(self, vm_uuid, si_fq_str):
        """Delete and relaunch a service VM for the given SI."""
        proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
        self._delete_svc_instance_vm(vm_uuid, proj_name,
                                     si_fq_str=si_fq_str)

        si_obj = self._vnc_lib.service_instance_read(fq_name_str=si_fq_str)
        st_list = si_obj.get_service_template_refs()
        if st_list is not None:
            fq_name = st_list[0]['to']
            st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
            self._create_svc_instance_vm(st_obj, si_obj)
    # end _restart_svc_vm

    def _check_store_si_info(self, st_obj, si_obj):
        """Record the SI's VN/shared-IP info in cassandra; return True when
        all referenced VNs exist (i.e. config is complete for launch)."""
        config_complete = True
        st_props = st_obj.get_service_template_properties()
        si_props = si_obj.get_service_instance_properties()
        if_list = st_props.get_interface_type()

        #read existing si_entry
        try:
            si_entry = self._svc_si_cf.get(si_obj.get_fq_name_str())
        except Exception:
            si_entry = {}

        #walk the interface list
        for vm_if in if_list:
            itf_type = vm_if.service_interface_type
            funcname = "get_" + itf_type + "_virtual_network"
            func = getattr(si_props, funcname)
            vn_fq_name_str = func()
            if not vn_fq_name_str:
                continue
            si_entry[itf_type + '-vn'] = vn_fq_name_str

            try:
                vn_obj = self._vnc_lib.virtual_network_read(
                    fq_name_str=vn_fq_name_str)
                if vn_obj.uuid != si_entry.get(vn_fq_name_str, None):
                    si_entry[vn_fq_name_str] = vn_obj.uuid

                if not vm_if.shared_ip:
                    continue
                iip_uuid_str = itf_type + '-iip-uuid'
                iip_uuid = si_entry.get(iip_uuid_str, None)
                iip = self._allocate_shared_iip(st_obj, si_obj, vn_obj,
                                                iip_uuid)
                si_entry[itf_type + '-iip-uuid'] = iip['uuid']
                si_entry[itf_type + '-iip-addr'] = iip['addr']
            except NoIdError:
                self._svc_syslog("Warn: VN %s add is pending" %
                                 vn_fq_name_str)
                si_entry[vn_fq_name_str] = 'pending'
                config_complete = False

        if config_complete:
            self._svc_syslog("SI %s info is complete" %
                             si_obj.get_fq_name_str())
        else:
            self._svc_syslog("Warn: SI %s info is not complete" %
                             si_obj.get_fq_name_str())

        #insert entry
        self._svc_si_cf.insert(si_obj.get_fq_name_str(), si_entry)
        return config_complete
    #end _check_store_si_info

    def _allocate_shared_iip(self, st_obj, si_obj, vn_obj, iip_uuid):
        """Reuse or allocate a shared instance-ip in the given VN; returns
        {'uuid': ..., 'addr': ...}."""
        iip_entry = {}
        iip_obj = None
        if iip_uuid:
            try:
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
            except NoIdError:
                # stale uuid; allocate a fresh one below
                pass

        # allocate ip
        if not iip_obj:
            addr = self._vnc_lib.virtual_network_ip_alloc(vn_obj)
            iip_name = '%s %s' % (vn_obj.uuid, addr[0])
            iip_obj = InstanceIp(name=iip_name,
                                 instance_ip_address=addr[0])
            iip_obj.add_virtual_network(vn_obj)
            self._vnc_lib.instance_ip_create(iip_obj)

        iip_entry = {'uuid': iip_obj.uuid,
                     'addr': iip_obj.get_instance_ip_address()}
        return iip_entry
    #end _allocate_shared_iip

    def _delete_shared_vn(self, vn_uuid, proj_name):
        """Delete a shared service VN; drop from cleanup table if gone."""
        try:
            self._svc_syslog("Deleting VN %s %s" % (proj_name, vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
        except RefsExistError:
            self._svc_err_logger.error("Delete failed refs exist VN %s %s" %
                                       (proj_name, vn_uuid))
        except NoIdError:
            # remove from cleanup list
            self._cleanup_cf.remove(vn_uuid)
    # end _delete_shared_vn

    def _delete_shared_iip(self, iip_uuid, proj_name):
        """Delete a shared instance-ip once no VMI references remain."""
        try:
            iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
            vmi_refs = iip_obj.get_virtual_machine_interface_refs()
            if vmi_refs is None:
                self._svc_syslog("Deleting IIP %s %s" %
                                 (proj_name, iip_uuid))
                self._vnc_lib.instance_ip_delete(id=iip_uuid)
        except RefsExistError:
            self._svc_err_logger.error("Delete failed refs exist IIP %s %s" %
                                       (proj_name, iip_uuid))
        except NoIdError:
            # remove from cleanup list
            self._cleanup_cf.remove(iip_uuid)
    # end _delete_shared_iip

    def _delmsg_project_service_instance(self, idents):
        """ifmap delete: when a project has no SIs left, queue its shared
        service VNs for deletion via the cleanup table."""
        proj_fq_str = idents['project']
        proj_obj = self._vnc_lib.project_read(fq_name_str=proj_fq_str)
        if proj_obj.get_service_instances() is not None:
            return

        # no SIs left hence delete shared VNs
        for vn_name in [_SVC_VN_MGMT, _SVC_VN_LEFT, _SVC_VN_RIGHT]:
            domain_name, proj_name = proj_obj.get_fq_name()
            vn_fq_name = [domain_name, proj_name, vn_name]
            try:
                vn_uuid = self._vnc_lib.fq_name_to_id(
                    'virtual-network', vn_fq_name)
                self._cleanup_cf.insert(
                    vn_uuid, {'proj_name': proj_obj.name, 'type': 'vn'})
            except Exception:
                # shared VN may never have existed; best effort
                pass
    # end _delmsg_project_service_instance

    def _delmsg_service_instance_service_template(self, idents):
        """ifmap delete: SI detached from its template -- tear down its
        VMs and queue its shared IPs for cleanup."""
        si_fq_str = idents['service-instance']

        vm_list = list(self._svc_vm_cf.get_range())
        for vm_uuid, si in vm_list:
            if si_fq_str != si['si_fq_str']:
                continue
            proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
            self._delete_svc_instance_vm(
                vm_uuid, proj_name, si_fq_str=si_fq_str)

        #insert shared instance IP uuids into cleanup list if present
        try:
            si_info = self._svc_si_cf.get(si_fq_str)
            for itf_str in [_MGMT_STR, _LEFT_STR, _RIGHT_STR]:
                iip_uuid_str = itf_str + '-iip-uuid'
                if iip_uuid_str not in si_info:
                    continue
                self._cleanup_cf.insert(
                    si_info[iip_uuid_str],
                    {'proj_name': proj_name, 'type': 'iip'})
        except pycassa.NotFoundException:
            pass

        #delete si info
        try:
            self._svc_si_cf.remove(si_fq_str)
        except pycassa.NotFoundException:
            pass
    #end _delmsg_service_instance_service_template

    def _delmsg_virtual_machine_service_instance(self, idents):
        """ifmap delete: VM/SI link removed -- delete the service VM."""
        vm_uuid = idents['virtual-machine']
        si_fq_str = idents['service-instance']
        proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
        self._delete_svc_instance_vm(vm_uuid, proj_name,
                                     si_fq_str=si_fq_str)
    # end _delmsg_virtual_machine_service_instance

    def _delmsg_virtual_machine_interface_route_table(self, idents):
        """ifmap delete: drop the route table once no VMI references it."""
        rt_fq_str = idents['interface-route-table']

        rt_obj = self._vnc_lib.interface_route_table_read(
            fq_name_str=rt_fq_str)
        vmi_list = rt_obj.get_virtual_machine_interface_back_refs()
        if vmi_list is None:
            self._vnc_lib.interface_route_table_delete(id=rt_obj.uuid)
    # end _delmsg_virtual_machine_interface_route_table

    def _addmsg_virtual_machine_interface_virtual_network(self, idents):
        """ifmap add: VMI attached to a VN. Link a service VM to its SI
        and set interface properties on the VMI."""
        vmi_fq_str = idents['virtual-machine-interface']
        vn_fq_str = idents['virtual-network']

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                fq_name_str=vmi_fq_str)
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name_str=vn_fq_str)
        except NoIdError:
            return

        # check if this is a service vm
        vm_obj = self._vnc_lib.virtual_machine_read(
            fq_name_str=vmi_obj.parent_name)
        si_list = vm_obj.get_service_instance_refs()
        if si_list:
            fq_name = si_list[0]['to']
            si_obj = self._vnc_lib.service_instance_read(fq_name=fq_name)
        else:
            # VM not yet linked to its SI; recover SI from launch table
            try:
                svc_vm_cf_row = self._svc_vm_cf.get(vm_obj.uuid)
                si_fq_str = svc_vm_cf_row['si_fq_str']
                vm_obj.name = svc_vm_cf_row['instance_name']
                si_obj = self._vnc_lib.service_instance_read(
                    fq_name_str=si_fq_str)
            except pycassa.NotFoundException:
                return
            except NoIdError:
                # SI vanished; the VM is orphaned -- delete it
                proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)
                self._delete_svc_instance_vm(
                    vm_obj.uuid, proj_name, si_fq_str=si_fq_str)
                return

        # create service instance to service vm link
        vm_obj.add_service_instance(si_obj)
        self._vnc_lib.virtual_machine_update(vm_obj)

        # set service instance property
        self._set_svc_vm_if_properties(vmi_obj, vn_obj)
    # end _addmsg_virtual_machine_interface_virtual_network

    def _addmsg_service_instance_service_template(self, idents):
        """ifmap add: SI linked to its template -- launch service VMs."""
        st_fq_str = idents['service-template']
        si_fq_str = idents['service-instance']

        try:
            st_obj = self._vnc_lib.service_template_read(
                fq_name_str=st_fq_str)
            si_obj = self._vnc_lib.service_instance_read(
                fq_name_str=si_fq_str)
        except NoIdError:
            return

        #launch VMs
        self._create_svc_instance_vm(st_obj, si_obj)
    # end _addmsg_service_instance_service_template

    def _addmsg_project_virtual_network(self, idents):
        """ifmap add: VN created -- launch SIs that were waiting on it."""
        vn_fq_str = idents['virtual-network']

        try:
            si_list = list(self._svc_si_cf.get_range())
        except pycassa.NotFoundException:
            return

        for si_fq_str, si_info in si_list:
            if vn_fq_str not in si_info.keys():
                continue
            try:
                si_obj = self._vnc_lib.service_instance_read(
                    fq_name_str=si_fq_str)
                st_refs = si_obj.get_service_template_refs()
                fq_name = st_refs[0]['to']
                st_obj = self._vnc_lib.service_template_read(
                    fq_name=fq_name)
                #launch VMs
                self._create_svc_instance_vm(st_obj, si_obj)
            except Exception:
                # best effort per SI; keep scanning the rest
                continue
    #end _addmsg_project_virtual_network

    def process_poll_result(self, poll_result_str):
        """Parse an ifmap poll result and dispatch each metadata item to
        the matching _addmsg_*/_delmsg_* handler.

        BUGFIX vs original: the dispatch line was logged twice for
        add-messages, and funcname was left unbound for unrecognized
        result types (getattr(self, None) -> TypeError). Now logs once
        (matching the sibling implementation) and skips unknown types.
        """
        result_list = parse_poll_result(poll_result_str)

        # process ifmap message
        for (result_type, idents, metas) in result_list:
            for meta in metas:
                meta_name = re.sub('{.*}', '', meta.tag)
                funcname = None
                if result_type == 'deleteResult':
                    funcname = "_delmsg_" + meta_name.replace('-', '_')
                elif result_type in ['searchResult', 'updateResult']:
                    funcname = "_addmsg_" + meta_name.replace('-', '_')
                # end if result_type
                if funcname is None:
                    # unrecognized result type; nothing to dispatch
                    continue
                try:
                    func = getattr(self, funcname)
                except AttributeError:
                    pass
                else:
                    self._svc_syslog("%s with %s/%s" %
                                     (funcname, meta_name, idents))
                    func(idents)
            # end for meta
        # end for result_type
    # end process_poll_result

    def _novaclient_get(self, proj_name):
        """Return a cached per-project novaclient, creating on first use."""
        client = self._nova.get(proj_name)
        if client is not None:
            return client

        self._nova[proj_name] = nc.Client(
            '2', username=self._args.admin_user, project_id=proj_name,
            api_key=self._args.admin_password,
            auth_url='http://' + self._args.auth_host + ':5000/v2.0')
        return self._nova[proj_name]
    # end _novaclient_get

    def _create_svc_vm(self, vm_name, image_name, nics, proj_name,
                       flavor_name):
        """Boot one service VM via nova; returns the server or None."""
        n_client = self._novaclient_get(proj_name)
        if flavor_name:
            flavor = n_client.flavors.find(name=flavor_name)
        else:
            # no flavor in the template; pick by RAM size
            flavor = n_client.flavors.find(ram=4096)

        image = ''
        try:
            image = n_client.images.find(name=image_name)
        except nc_exc.NotFound:
            self._svc_syslog(
                "Error: Image %s not found in project %s" %
                (image_name, proj_name))
            return
        except nc_exc.NoUniqueMatch:
            self._svc_syslog(
                "Error: Multiple images %s found in project %s" %
                (image_name, proj_name))
            return

        # launch vm
        self._svc_syslog('Launching VM : ' + vm_name)
        nova_vm = n_client.servers.create(name=vm_name, image=image,
                                          flavor=flavor, nics=nics)
        nova_vm.get()
        self._svc_syslog('Created VM : ' + str(nova_vm))
        return nova_vm
    # end create_svc_vm

    def _create_svc_vn(self, vn_name, vn_subnet, proj_obj):
        """Create a shared service VN with one subnet; returns its uuid."""
        self._svc_syslog(
            "Creating network %s subnet %s" % (vn_name, vn_subnet))

        vn_obj = VirtualNetwork(name=vn_name, parent_obj=proj_obj)
        domain_name, project_name = proj_obj.get_fq_name()
        ipam_fq_name = [domain_name, 'default-project',
                        'default-network-ipam']
        ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
        cidr = vn_subnet.split('/')
        pfx = cidr[0]
        pfx_len = int(cidr[1])
        subnet_info = IpamSubnetType(subnet=SubnetType(pfx, pfx_len))
        subnet_data = VnSubnetsType([subnet_info])
        vn_obj.add_network_ipam(ipam_obj, subnet_data)
        self._vnc_lib.virtual_network_create(vn_obj)

        return vn_obj.uuid
    # end _create_svc_vn

    def _cassandra_init(self):
        """Create the svc-monitor keyspace/column families and bind the
        CF handles used by the rest of the class."""
        sys_mgr = SystemManager(self._args.cassandra_server_list[0])

        if self._args.reset_config:
            try:
                sys_mgr.drop_keyspace(SvcMonitor._KEYSPACE)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print("Warning! " + str(e))

        try:
            sys_mgr.create_keyspace(SvcMonitor._KEYSPACE, SIMPLE_STRATEGY,
                                    {'replication_factor': '1'})
        except pycassa.cassandra.ttypes.InvalidRequestException as e:
            print("Warning! " + str(e))

        column_families = [self._SVC_VM_CF,
                           self._SVC_CLEANUP_CF,
                           self._SVC_SI_CF]
        for cf in column_families:
            try:
                sys_mgr.create_column_family(SvcMonitor._KEYSPACE, cf)
            except pycassa.cassandra.ttypes.InvalidRequestException as e:
                print("Warning! " + str(e))

        conn_pool = pycassa.ConnectionPool(
            SvcMonitor._KEYSPACE, self._args.cassandra_server_list)
        self._svc_vm_cf = pycassa.ColumnFamily(conn_pool, self._SVC_VM_CF)
        self._svc_si_cf = pycassa.ColumnFamily(conn_pool, self._SVC_SI_CF)
        self._cleanup_cf = pycassa.ColumnFamily(conn_pool,
                                                self._SVC_CLEANUP_CF)
    # end _cassandra_init
class MesosManagerLogger(object):
    """Sandesh-backed logging facade for the mesos-manager daemon."""

    def __init__(self, args=None):
        self._args = args

        # Collect this process's sandesh identity in one dict.
        mod = {}
        mod["id"] = Module.MESOS_MANAGER
        mod["name"] = ModuleNames[mod["id"]]
        mod["node_type"] = Module2NodeType[mod["id"]]
        mod["node_type_name"] = NodeTypeNames[mod["node_type"]]
        host_ip = (self._args.host_ip if 'host_ip' in self._args
                   else socket.gethostbyname(socket.getfqdn()))
        mod["hostname"] = socket.getfqdn(host_ip)
        mod["table"] = "ObjectConfigNode"
        mod["instance_id"] = (self._args.worker_id if self._args.worker_id
                              else INSTANCE_ID_DEFAULT)
        self.module = mod

        # Init Sandesh.
        self.sandesh_init()

    def syslog(self, log_msg, level):
        # Forward to the plain python logger behind sandesh.
        self._sandesh.logger().log(
            SandeshLogger.get_py_logger_level(level), log_msg)

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        """Send via the given sandesh log function, or fall back to syslog."""
        if not fun:
            self.syslog(log_msg, level)
            return
        entry = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
        entry.send(sandesh=self._sandesh)

    def _emit(self, log_msg, level, default_fun, user_fun, mirror_to_syslog):
        # Single code path shared by all the per-severity wrappers below.
        chosen = user_fun if user_fun else default_fun
        if mirror_to_syslog:
            self.syslog(log_msg, level)
        self.log(log_msg, level=level, fun=chosen)

    # EMERGENCY.
    def emergency(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_EMERG,
                   sandesh.MesosManagerEmergencyLog, log_fun, True)

    # ALERT.
    def alert(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_ALERT,
                   sandesh.MesosManagerAlertLog, log_fun, True)

    # CRITICAL.
    def critical(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_CRIT,
                   sandesh.MesosManagerCriticalLog, log_fun, True)

    # ERROR.
    def error(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_ERR,
                   sandesh.MesosManagerErrorLog, log_fun, True)

    # WARNING.
    def warning(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_WARN,
                   sandesh.MesosManagerWarningLog, log_fun, True)

    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_NOTICE,
                   sandesh.MesosManagerNoticeLog, log_fun, True)

    # INFO (not mirrored to syslog).
    def info(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_INFO,
                   sandesh.MesosManagerInfoLog, log_fun, False)

    # DEBUG (not mirrored to syslog).
    def debug(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_DEBUG,
                   sandesh.MesosManagerDebugLog, log_fun, False)

    def redefine_sandesh_handles(self):
        """Register custom introspect handlers for the cached DB objects."""
        handler_map = (
            (introspect.VirtualMachineDatabaseList, VirtualMachineMM),
            (introspect.VirtualRouterDatabaseList, VirtualRouterMM),
            (introspect.VirtualMachineInterfaceDatabaseList,
             VirtualMachineInterfaceMM),
            (introspect.VirtualNetworkDatabaseList, VirtualNetworkMM),
            (introspect.InstanceIpDatabaseList, InstanceIpMM),
            (introspect.ProjectDatabaseList, ProjectMM),
            (introspect.DomainDatabaseList, DomainMM),
            (introspect.NetworkIpamDatabaseList, NetworkIpamMM),
        )
        for request_cls, db_cls in handler_map:
            request_cls.handle_request = db_cls.sandesh_handle_db_list_request

    def sandesh_init(self):
        """Init Sandesh"""
        self._sandesh = Sandesh()

        # Register custom sandesh request handlers.
        self.redefine_sandesh_handles()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self.module['name'], self.module['hostname'],
            self.module['node_type_name'], self.module['instance_id'],
            self._args.collectors, 'mesos_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'mesos_manager.sandesh',
             'mesos_introspect.sandesh'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(
            self._sandesh, self.module['hostname'], self.module['name'],
            self.module['instance_id'],
            staticmethod(ConnectionState.get_conn_state_cb),
            NodeStatusUVE, NodeStatus, self.module['table'])

    def introspect_init(self):
        # Start the HTTP introspect server on the configured port.
        self._sandesh.run_introspect_server(int(self._args.http_server_port))
class SandeshUVEAlarmTest(unittest.TestCase):
    """Unit tests for sandesh UVE/alarm send and sync behaviour.

    The sandesh client is replaced with a MagicMock in setUp(), so the
    tests inspect ``send_uve_sandesh.call_args_list`` instead of doing
    real collector I/O.
    """

    def setUp(self):
        self.maxDiff = None
        self.sandesh = Sandesh()
        self.sandesh.init_generator('sandesh_uve_alarm_test',
                                    socket.gethostname(), 'Test', '0', None,
                                    '', get_free_port(),
                                    connect_to_collector=False)
        # mock the sandesh client object
        self.sandesh._client = mock.MagicMock(spec=SandeshClient)
    # end setUp

    def tearDown(self):
        pass
    # end tearDown

    def verify_uve_alarm_sandesh(self, sandesh, seqnum, sandesh_type, data):
        """Check one captured sandesh against the expected header and data."""
        self.assertEqual(socket.gethostname(), sandesh._source)
        self.assertEqual('Test', sandesh._node_type)
        self.assertEqual('sandesh_uve_alarm_test', sandesh._module)
        self.assertEqual('0', sandesh._instance_id)
        self.assertEqual(SANDESH_KEY_HINT,
                         (SANDESH_KEY_HINT & sandesh._hints))
        self.assertEqual(sandesh_type, sandesh._type)
        self.assertEqual(seqnum, sandesh._seqnum)
        self.assertEqual(data, sandesh.data)
    # end verify_uve_alarm_sandesh

    def test_sandesh_uve(self):
        """Exercise add/update/delete/re-add and sync for static and
        dynamic UVEs."""
        uve_data = [
            # add uve
            SandeshUVEData(name='uve1'),
            # update uve
            SandeshUVEData(name='uve1', xyz=345),
            # add another uve
            SandeshUVEData(name='uve2', xyz=12),
            # delete uve
            SandeshUVEData(name='uve2', deleted=True),
            # add deleted uve
            SandeshUVEData(name='uve2')
        ]
        # send UVEs
        for i in range(len(uve_data)):
            uve_test = SandeshUVETest(data=uve_data[i], sandesh=self.sandesh)
            uve_test.send(sandesh=self.sandesh)
        expected_data = [{'seqnum': i + 1, 'data': uve_data[i]}
                         for i in range(len(uve_data))]
        # send UVE with different key
        uve_test_data = SandeshUVEData(name='uve1')
        uve_test = SandeshUVETest(data=uve_test_data, table='CollectorInfo',
                                  sandesh=self.sandesh)
        uve_test.send(sandesh=self.sandesh)
        expected_data.extend([{'seqnum': 6, 'data': uve_test_data}])
        # send dynamic UVEs
        dynamic_uve_data = [
            # add uve
            {'type': (ConfigUVE, Config), 'table': 'CollectorInfo',
             'name': 'node1',
             'elements': {'log_level': 'SYS_INFO'},
             'expected_elements': {'log_level': 'SYS_INFO'},
             'seqnum': 1},
            # update uve
            {'type': (ConfigUVE, Config), 'table': 'CollectorInfo',
             'name': 'node1',
             'elements': {'log_local': 'True'},
             'expected_elements': {'log_local': 'True'},
             'seqnum': 2},
            # add another uve
            {'type': (ConfigUVE, Config), 'table': 'ControlInfo',
             'name': 'node1',
             'elements': {'log_category': 'Redis'},
             'expected_elements': {'log_category': 'Redis'},
             'seqnum': 3},
            # delete uve
            {'type': (ConfigUVE, Config), 'table': 'ControlInfo',
             'name': 'node1', 'deleted': True, 'seqnum': 4},
            # add deleted uve
            {'type': (ConfigUVE, Config), 'table': 'ControlInfo',
             'name': 'node1',
             'elements': {'log_level': 'SYS_DEBUG',
                          'log_file': '/var/log/control.log'},
             'expected_elements': {'log_level': 'SYS_DEBUG',
                                   'log_file': '/var/log/control.log'},
             'seqnum': 5},
            # add another uve - different type
            {'type': (ConfigTestUVE, ConfigTest), 'table': 'CollectorInfo',
             'name': 'node2',
             'elements': {'param1': 'val1', 'param2': 'val2'},
             'expected_elements': {'param1': 'val1', 'param2': 'val2'},
             'seqnum': 1},
            # delete uve, set elements to []
            {'type': (ConfigTestUVE, ConfigTest), 'table': 'CollectorInfo',
             'name': 'node2', 'deleted': True, 'elements': {},
             'expected_elements': {}, 'seqnum': 2},
        ]
        for uve in dynamic_uve_data:
            uve_type, uve_data_type = uve['type']
            elts = uve.get('elements')
            uve_data = uve_data_type(name=uve['name'], elements=elts,
                                     deleted=uve.get('deleted'))
            dynamic_uve = uve_type(data=uve_data, table=uve['table'],
                                   sandesh=self.sandesh)
            dynamic_uve.send(sandesh=self.sandesh)
            elts_exp = uve.get('expected_elements')
            if elts_exp is not None:
                uve_data = uve_data_type(name=uve['name'], elements=elts_exp,
                                         deleted=uve.get('deleted'))
                uve_data._table = uve['table']
                # NOTE: a leftover debug print of uve_data.__dict__ was
                # removed here; it only polluted the test output.
                expected_data.extend([{'seqnum': uve['seqnum'],
                                       'data': uve_data}])
        # verify the result
        args_list = self.sandesh._client.send_uve_sandesh.call_args_list
        args_len = len(args_list)
        self.assertEqual(len(expected_data), len(args_list),
                         'args_list: %s' % str(args_list))
        for i in range(len(expected_data)):
            self.verify_uve_alarm_sandesh(
                args_list[i][0][0],
                seqnum=expected_data[i]['seqnum'],
                sandesh_type=SandeshType.UVE,
                data=expected_data[i]['data'])

        # sync UVEs
        expected_data = []
        self.sandesh._uve_type_maps.sync_all_uve_types({}, self.sandesh)
        sync_uve_data = [
            {'data': SandeshUVEData(name='uve1'), 'table': 'CollectorInfo',
             'seqnum': 6},
            {'data': SandeshUVEData(name='uve2'), 'table': 'OpserverInfo',
             'seqnum': 5},
            {'data': SandeshUVEData(name='uve1', xyz=345),
             'table': 'OpserverInfo', 'seqnum': 2}
        ]
        for uve_data in sync_uve_data:
            uve_data['data']._table = uve_data['table']
            expected_data.extend([{'seqnum': uve_data['seqnum'],
                                   'data': uve_data['data']}])
        sync_dynamic_uve_data = [
            {'type': (ConfigTestUVE, ConfigTest), 'table': 'CollectorInfo',
             'name': 'node2', 'deleted': True, 'elements': {}, 'seqnum': 2},
            {'type': (ConfigUVE, Config), 'table': 'ControlInfo',
             'name': 'node1',
             'elements': {'log_level': 'SYS_DEBUG',
                          'log_file': '/var/log/control.log'},
             'seqnum': 5},
            {'type': (ConfigUVE, Config), 'table': 'CollectorInfo',
             'name': 'node1', 'elements': {'log_local': 'True'}, 'seqnum': 2}
        ]
        for uve in sync_dynamic_uve_data:
            uve_type, uve_data_type = uve['type']
            elts = uve.get('elements')
            uve_data = uve_data_type(name=uve['name'], elements=elts,
                                     deleted=uve.get('deleted'))
            uve_data._table = uve['table']
            expected_data.extend([{'seqnum': uve['seqnum'],
                                   'data': uve_data}])
        # verify the result
        args_list = self.sandesh._client.send_uve_sandesh.\
            call_args_list[args_len:]
        args_sandesh_list = [args[0][0] for args in args_list]
        args_dlist = [{'source': sandesh._source,
                       'node_type': sandesh._node_type,
                       'module': sandesh._module,
                       'instance_id': sandesh._instance_id,
                       'hints': (SANDESH_KEY_HINT & sandesh._hints),
                       'seqnum': sandesh._seqnum,
                       'type': sandesh._type,
                       'data': sandesh.data}
                      for sandesh in args_sandesh_list]
        expected_source = socket.gethostname()
        expected_node_type = 'Test'
        expected_module = 'sandesh_uve_alarm_test'
        expected_instance_id = '0'
        expected_hints = SANDESH_KEY_HINT
        expected_sandesh_type = SandeshType.UVE
        expected_dlist = [{'source': expected_source,
                           'node_type': expected_node_type,
                           'module': expected_module,
                           'instance_id': expected_instance_id,
                           'hints': expected_hints,
                           'seqnum': einfo['seqnum'],
                           'type': expected_sandesh_type,
                           'data': einfo['data']}
                          for einfo in expected_data]
        self.assertEqual(len(expected_data), len(args_list),
                         'args_list: %s' % str(args_list))
        self.assertEqual(len(expected_dlist), len(args_dlist),
                         'args_dlist: %s' % str(args_dlist))
        # Sync order is not deterministic, so compare as unordered sets.
        for expected_dict in expected_dlist:
            self.assertTrue(expected_dict in args_dlist)
        for args_dict in args_dlist:
            self.assertTrue(args_dict in expected_dlist)
    # end test_sandesh_uve

    def _create_uve_alarm_info(self):
        """Build a minimal, unacknowledged UVEAlarmInfo for the alarm tests."""
        uve_alarm_info = UVEAlarmInfo()
        uve_alarm_info.type = 'ProcessStatus'
        condition = AlarmCondition(
            operation='==',
            operand1='NodeStatus.process_info.process_state',
            operand2=AlarmOperand2(json_value=json.dumps('null')))
        match1 = AlarmMatch(json_operand1_value=json.dumps('null'))
        condition_match = AlarmConditionMatch(condition, [match1])
        and_list = AlarmAndList(and_list=[condition_match])
        uve_alarm_info.alarm_rules = [AlarmRules(or_list=[and_list])]
        uve_alarm_info.ack = False
        uve_alarm_info.timestamp = UTCTimestampUsec()
        uve_alarm_info.severity = 1
        return uve_alarm_info
    # end _create_uve_alarm_info

    def _update_uve_alarm_info(self):
        """Same alarm as _create_uve_alarm_info() but acknowledged."""
        uve_alarm_info = self._create_uve_alarm_info()
        uve_alarm_info.ack = True
        return uve_alarm_info
    # end _update_uve_alarm_info

    def test_sandesh_alarm(self):
        """Exercise add/update/delete/re-add and sync for alarms."""
        alarm_data = [
            # add alarm
            (UVEAlarms(name='alarm1', alarms=[self._create_uve_alarm_info()]),
             'ObjectCollectorInfo'),
            # update alarm
            (UVEAlarms(name='alarm1', alarms=[self._update_uve_alarm_info()]),
             'ObjectCollectorInfo'),
            # add another alarm
            (UVEAlarms(name='alarm2', alarms=[self._create_uve_alarm_info()]),
             'ObjectVRouterInfo'),
            # delete alarm
            (UVEAlarms(name='alarm2', deleted=True), 'ObjectVRouterInfo'),
            # add deleted alarm
            (UVEAlarms(name='alarm2', alarms=[self._create_uve_alarm_info()]),
             'ObjectVRouterInfo'),
            # add alarm with deleted flag set
            (UVEAlarms(name='alarm3', alarms=[self._create_uve_alarm_info()],
                       deleted=True), 'ObjectCollectorInfo'),
            # add alarm with same key and different table
            (UVEAlarms(name='alarm3', alarms=[self._create_uve_alarm_info()]),
             'ObjectVRouterInfo')
        ]
        # send the alarms
        for i in range(len(alarm_data)):
            alarm_test = AlarmTrace(data=alarm_data[i][0],
                                    table=alarm_data[i][1],
                                    sandesh=self.sandesh)
            alarm_test.send(sandesh=self.sandesh)
        expected_data1 = [{'seqnum': i + 1, 'data': alarm_data[i][0]}
                          for i in range(len(alarm_data))]
        # Sync alarms
        self.sandesh._uve_type_maps.sync_all_uve_types({}, self.sandesh)
        expected_data2 = [
            {'seqnum': 2, 'data': alarm_data[1][0]},
            {'seqnum': 5, 'data': alarm_data[4][0]},
            {'seqnum': 6, 'data': alarm_data[5][0]},
            {'seqnum': 7, 'data': alarm_data[6][0]},
        ]
        expected_data = expected_data1 + expected_data2
        # get the result
        args_list = self.sandesh._client.send_uve_sandesh.call_args_list
        self.assertEqual(len(expected_data), len(args_list),
                         'args_list: %s' % str(args_list))
        # Verify alarm traces for raised/cleared alarms
        for i in range(len(expected_data1)):
            self.verify_uve_alarm_sandesh(
                args_list[i][0][0],
                seqnum=expected_data1[i]['seqnum'],
                sandesh_type=SandeshType.ALARM,
                data=expected_data1[i]['data'])
        # Verify alarm traces after alarms sync.
        # It is observed that they come in different order for py2 and py3
        for i in range(len(expected_data1), len(expected_data)):
            for j in range(len(expected_data1), len(expected_data)):
                if expected_data[i]['seqnum'] == args_list[j][0][0]._seqnum:
                    self.verify_uve_alarm_sandesh(
                        args_list[j][0][0],
                        seqnum=expected_data[i]['seqnum'],
                        sandesh_type=SandeshType.ALARM,
                        data=expected_data[i]['data'])
    # end test_sandesh_alarm
class MesosManagerLogger(object):
    """Sandesh-backed logger for mesos-manager (discovery-client variant)."""

    def __init__(self, args=None):
        self._args = args

        # Initialize module parameters.
        self.module = {}
        self.module["id"] = Module.MESOS_MANAGER
        self.module["name"] = ModuleNames[self.module["id"]]
        self.module["node_type"] = Module2NodeType[self.module["id"]]
        self.module["node_type_name"] = NodeTypeNames[self.module["node_type"]]
        self.module["hostname"] = socket.gethostname()
        self.module["table"] = "ObjectConfigNode"
        if self._args.worker_id:
            self.module["instance_id"] = self._args.worker_id
        else:
            self.module["instance_id"] = INSTANCE_ID_DEFAULT

        # Initialize discovery client.
        # Bug fix: always populate the key so sandesh_init() does not raise
        # KeyError when discovery is not configured.
        self.module["discovery"] = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self.module["discovery"] = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                self.module["name"])

        # Init Sandesh.
        self.sandesh_init()

    def syslog(self, log_msg, level):
        # Log to syslog.
        self._sandesh.logger().log(SandeshLogger.get_py_logger_level(level),
                                   log_msg)

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        # If a sandesh function is provided, use the function.
        # If not, revert to syslog.
        if fun:
            log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)
        else:
            self.syslog(log_msg, level)

    def _severity_log(self, log_msg, level, default_fun, log_fun,
                      mirror_to_syslog=True):
        # Common body of the per-severity wrappers below.
        logging_fun = log_fun if log_fun else default_fun
        if mirror_to_syslog:
            self.syslog(log_msg, level)
        self.log(log_msg, level=level, fun=logging_fun)

    # EMERGENCY.
    def emergency(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_EMERG,
                           sandesh.MesosManagerEmergencyLog, log_fun)

    # ALERT.
    def alert(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_ALERT,
                           sandesh.MesosManagerAlertLog, log_fun)

    # CRITICAL.
    def critical(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_CRIT,
                           sandesh.MesosManagerCriticalLog, log_fun)

    # ERROR.  (comment fixed: it was previously mislabeled CRITICAL)
    def error(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_ERR,
                           sandesh.MesosManagerErrorLog, log_fun)

    # WARNING.
    def warning(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_WARN,
                           sandesh.MesosManagerWarningLog, log_fun)

    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_NOTICE,
                           sandesh.MesosManagerNoticeLog, log_fun)

    # INFO (not mirrored to syslog, as before).
    def info(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_INFO,
                           sandesh.MesosManagerInfoLog, log_fun,
                           mirror_to_syslog=False)

    # DEBUG (not mirrored to syslog, as before).
    def debug(self, log_msg, log_fun=None):
        self._severity_log(log_msg, SandeshLevel.SYS_DEBUG,
                           sandesh.MesosManagerDebugLog, log_fun,
                           mirror_to_syslog=False)

    def sandesh_init(self):
        """Init Sandesh"""
        self._sandesh = Sandesh()

        # Reset sandesh send rate limit value.
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                self._args.sandesh_send_rate_limit)

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self.module['name'], self.module['hostname'],
            self.module['node_type_name'], self.module['instance_id'],
            self._args.collectors, 'mesos_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'mesos_manager.sandesh'],
            self.module['discovery'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(
            self._sandesh, self.module['hostname'], self.module['name'],
            self.module['instance_id'],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.module['table'])
class KubeManagerLogger(object):
    """Sandesh-backed logging facade for the kube-manager daemon."""

    def __init__(self, args=None):
        self._args = args

        # Describe this process to the sandesh infrastructure.
        mod = {}
        mod["id"] = Module.KUBE_MANAGER
        mod["name"] = ModuleNames[mod["id"]]
        mod["node_type"] = Module2NodeType[mod["id"]]
        mod["node_type_name"] = NodeTypeNames[mod["node_type"]]
        mod["hostname"] = socket.gethostname()
        mod["table"] = "ObjectKubernetesManagerNode"
        mod["instance_id"] = (self._args.worker_id if self._args.worker_id
                              else INSTANCE_ID_DEFAULT)
        self._module = mod

        # Init Sandesh.
        self.sandesh_init()

    def syslog(self, log_msg, level):
        """Log to syslog."""
        self._sandesh.logger().log(
            SandeshLogger.get_py_logger_level(level), log_msg)

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        """Send via a sandesh log function when given, else via syslog."""
        if not fun:
            self.syslog(log_msg, level)
            return
        entry = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
        entry.send(sandesh=self._sandesh)

    def _emit(self, log_msg, level, default_fun, user_fun, mirror):
        # Shared implementation behind the per-severity wrappers below.
        if mirror:
            self.syslog(log_msg, level)
        self.log(log_msg, level=level,
                 fun=user_fun if user_fun else default_fun)

    # EMERGENCY.
    def emergency(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_EMERG,
                   sandesh.KubeManagerEmergencyLog, log_fun, True)

    # ALERT.
    def alert(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_ALERT,
                   sandesh.KubeManagerAlertLog, log_fun, True)

    # CRITICAL.
    def critical(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_CRIT,
                   sandesh.KubeManagerCriticalLog, log_fun, True)

    # ERROR.
    def error(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_ERR,
                   sandesh.KubeManagerErrorLog, log_fun, True)

    # WARNING.
    def warning(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_WARN,
                   sandesh.KubeManagerWarningLog, log_fun, True)

    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_NOTICE,
                   sandesh.KubeManagerNoticeLog, log_fun, True)

    # INFO (not mirrored to syslog).
    def info(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_INFO,
                   sandesh.KubeManagerInfoLog, log_fun, False)

    # DEBUG (not mirrored to syslog).
    def debug(self, log_msg, log_fun=None):
        self._emit(log_msg, SandeshLevel.SYS_DEBUG,
                   sandesh.KubeManagerDebugLog, log_fun, False)

    def _redefine_sandesh_handles(self):
        """Register custom introspect handlers for the cached DB objects."""
        handler_map = (
            (introspect.PodDatabaseList, PodKM),
            (introspect.NamespaceDatabaseList, NamespaceKM),
            (introspect.ServiceDatabaseList, ServiceKM),
            (introspect.NetworkPolicyDatabaseList, NetworkPolicyKM),
            (introspect.IngressDatabaseList, IngressKM),
        )
        for request_cls, db_cls in handler_map:
            request_cls.handle_request = db_cls.sandesh_handle_db_list_request

    def sandesh_init(self):
        """Init Sandesh"""
        self._sandesh = Sandesh()

        # Register custom sandesh request handlers.
        self._redefine_sandesh_handles()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self._module["name"], self._module["hostname"],
            self._module["node_type_name"], self._module["instance_id"],
            self._args.random_collectors, 'kube_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'kube_manager'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(
            self._sandesh, self._module["hostname"], self._module["name"],
            self._module["instance_id"],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self._module["table"])

    def sandesh_uninit(self):
        # Tear down the sandesh machinery (used on graceful shutdown).
        self._sandesh.uninit()
class ConfigServiceLogger(object):
    """Generic sandesh logger base class for contrail config services."""

    # Map python `logging` levels to sandesh levels.
    # NOTE: the original dict literal listed logging.CRITICAL three times and
    # logging.WARNING twice with different values; in a dict literal the last
    # duplicate wins, so the effective mapping (preserved here) is below.
    _LOGGER_LEVEL_TO_SANDESH_LEVEL = {
        logging.CRITICAL: SandeshLevel.SYS_CRIT,
        logging.ERROR: SandeshLevel.SYS_ERR,
        logging.WARNING: SandeshLevel.SYS_NOTICE,
        logging.INFO: SandeshLevel.SYS_INFO,
        logging.DEBUG: SandeshLevel.SYS_DEBUG
    }

    def __init__(self, module, module_pkg, args=None, http_server_port=None):
        """Resolve module identity from the sandesh tables and init sandesh.

        :param module: sandesh Module id of the hosting service
        :param module_pkg: python package name, also used as sandesh context
        :param args: parsed service arguments (collectors, log params, ...)
        :param http_server_port: optional introspect port override
        """
        self.module_pkg = module_pkg
        # Subclasses may preset `context`; default it to the package name.
        if not hasattr(self, 'context'):
            self.context = module_pkg
        self._args = args

        node_type = Module2NodeType[module]
        self._module_name = ModuleNames[module]
        self._node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectConfigNode"
        self._instance_id = INSTANCE_ID_DEFAULT
        self._hostname = socket.gethostname()

        # sandesh init
        self.sandesh_init(http_server_port)

    def _get_sandesh_logger_level(self, sandesh_level):
        # Despite the parameter name, callers pass a python `logging` level
        # and receive the corresponding sandesh level.
        return self._LOGGER_LEVEL_TO_SANDESH_LEVEL[sandesh_level]

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        """Log through `fun` when provided, else through sandesh's logger.

        Bug fix: the generated sandesh log functions take the message as the
        ``log_msg`` keyword; it was previously passed as ``og_msg``, which
        raised TypeError whenever a log function was supplied.
        """
        if fun:
            log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)
        else:
            self._sandesh.logger().log(
                SandeshLogger.get_py_logger_level(level), log_msg)

    def emergency(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_EMERG, fun=log_fun)

    def alert(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_ALERT, fun=log_fun)

    def critical(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_CRIT, fun=log_fun)

    def error(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_ERR, fun=log_fun)

    def cgitb_error(self):
        # Format the current exception with cgitb and log it as an error.
        string_buf = cStringIO.StringIO()
        cgitb_hook(file=string_buf, format="text")
        self.error(string_buf.getvalue())

    def warning(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_WARN, fun=log_fun)

    def notice(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_NOTICE, fun=log_fun)

    def info(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_INFO, fun=log_fun)

    def debug(self, log_msg, log_fun=None):
        self.log(log_msg, level=SandeshLevel.SYS_DEBUG, fun=log_fun)

    def _utc_timestamp_usec(self):
        """Return microseconds elapsed since the unix epoch (UTC)."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        now = datetime.datetime.utcnow()
        delta = now - epoch
        return (delta.microseconds +
                (delta.seconds + delta.days * 24 * 3600) * 10**6)

    def redefine_sandesh_handles(self):
        """ Redefine sandesh handle requests for various object types. """
        # Hook for subclasses; intentionally a no-op here.
        pass

    def sandesh_init(self, http_server_port=None):
        """ Init sandesh """
        self._sandesh = Sandesh()
        self.redefine_sandesh_handles()
        if not http_server_port:
            http_server_port = self._args.http_server_port
        self._sandesh.init_generator(
            self._module_name, self._hostname, self._node_type_name,
            self._instance_id, self._args.random_collectors,
            '%s_context' % self.context, int(http_server_port),
            ['cfgm_common', '%s.sandesh' % self.module_pkg],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)
        # connection state init
        ConnectionState.init(
            self._sandesh, self._hostname, self._module_name,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.table)

    def introspect_init(self):
        # Start the HTTP introspect server on the configured port.
        self._sandesh.run_introspect_server(int(self._args.http_server_port))

    def sandesh_reconfig_collectors(self, args):
        # Point the sandesh connection at a (re-randomized) collector list.
        self._sandesh.reconfig_collectors(args.random_collectors)
class KubeManagerLogger(object):
    """Sandesh-backed logger for the kube-manager daemon.

    Wraps a Sandesh generator and exposes one method per severity
    (emergency .. debug).  Each severity method optionally mirrors the
    message to syslog and sends it through a sandesh log type; callers may
    override the sandesh log type per call via ``log_fun``.
    """

    def __init__(self, args=None):
        self._args = args

        # Initialize module parameters.
        self._module = {}
        self._module["id"] = Module.KUBE_MANAGER
        self._module["name"] = ModuleNames[self._module["id"]]
        self._module["node_type"] = Module2NodeType[self._module["id"]]
        self._module["node_type_name"] = \
            NodeTypeNames[self._module["node_type"]]
        self._module["hostname"] = socket.gethostname()
        self._module["table"] = "ObjectKubernetesManagerNode"
        # A non-empty worker id distinguishes multiple kube-manager workers;
        # otherwise fall back to the library default instance id.
        if self._args.worker_id:
            self._module["instance_id"] = self._args.worker_id
        else:
            self._module["instance_id"] = INSTANCE_ID_DEFAULT

        # Init Sandesh.
        self.sandesh_init()

    def syslog(self, log_msg, level):
        """ Log to syslog. """
        self._sandesh.logger().log(
            SandeshLogger.get_py_logger_level(level), log_msg)

    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        """
        If a sandesh function is provided, use the function.
        If not, revert to syslog.
        """
        if fun:
            log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)
        else:
            self.syslog(log_msg, level)

    def _log_at_level(self, log_msg, level, default_fun, log_fun=None,
                      to_syslog=True):
        """Shared implementation behind the per-severity methods.

        :param log_msg: message text.
        :param level: SandeshLevel constant for this severity.
        :param default_fun: sandesh log type used when the caller does not
            supply ``log_fun``.
        :param log_fun: optional caller-supplied sandesh log type.
        :param to_syslog: when True, also mirror the message to syslog
            (info/debug historically skip syslog).
        """
        logging_fun = log_fun if log_fun else default_fun
        if to_syslog:
            # Log to syslog.
            self.syslog(log_msg, level)
        # Log using the desired logging function.
        self.log(log_msg, level=level, fun=logging_fun)

    # EMERGENCY.
    def emergency(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_EMERG,
                           sandesh.KubeManagerEmergencyLog, log_fun)

    # ALERT.
    def alert(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_ALERT,
                           sandesh.KubeManagerAlertLog, log_fun)

    # CRITICAL.
    def critical(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_CRIT,
                           sandesh.KubeManagerCriticalLog, log_fun)

    # ERROR.
    def error(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_ERR,
                           sandesh.KubeManagerErrorLog, log_fun)

    # WARNING.
    def warning(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_WARN,
                           sandesh.KubeManagerWarningLog, log_fun)

    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_NOTICE,
                           sandesh.KubeManagerNoticeLog, log_fun)

    # INFO.  Not mirrored to syslog (matches historical behavior).
    def info(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_INFO,
                           sandesh.KubeManagerInfoLog, log_fun,
                           to_syslog=False)

    # DEBUG.  Not mirrored to syslog (matches historical behavior).
    def debug(self, log_msg, log_fun=None):
        self._log_at_level(log_msg, SandeshLevel.SYS_DEBUG,
                           sandesh.KubeManagerDebugLog, log_fun,
                           to_syslog=False)

    def _redefine_sandesh_handles(self):
        """ Register custom introspect handlers. """
        # Register Pod DB introspect handler.
        introspect.PodDatabaseList.handle_request = \
            PodKM.sandesh_handle_db_list_request
        # Register Namespace DB introspect handler.
        introspect.NamespaceDatabaseList.handle_request = \
            NamespaceKM.sandesh_handle_db_list_request
        # Register Service DB introspect handler.
        introspect.ServiceDatabaseList.handle_request = \
            ServiceKM.sandesh_handle_db_list_request
        # Register NetworkPolicy DB introspect handler.
        introspect.NetworkPolicyDatabaseList.handle_request = \
            NetworkPolicyKM.sandesh_handle_db_list_request
        # Register Ingress DB introspect handler.
        introspect.IngressDatabaseList.handle_request = \
            IngressKM.sandesh_handle_db_list_request

    def sandesh_init(self):
        """ Init Sandesh """
        self._sandesh = Sandesh()

        # Register custom sandesh request handlers.
        self._redefine_sandesh_handles()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self._module["name"], self._module["hostname"],
            self._module["node_type_name"], self._module["instance_id"],
            self._args.random_collectors,
            'kube_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'kube_manager'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        # NOTE(review): the callback is wrapped in staticmethod() even though
        # it is passed as a plain argument — presumably ConnectionState.init
        # unwraps it; confirm against the sandesh library before changing.
        ConnectionState.init(
            self._sandesh, self._module["hostname"], self._module["name"],
            self._module["instance_id"],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self._module["table"])
class ConnInfoTest(unittest.TestCase):
    """Unit tests for ConnectionState UVE updates and the aggregate
    process-state callback."""

    def setUp(self):
        # Show full diffs on assertion failures (ConnectionInfo lists).
        self.maxDiff = None
        self._sandesh = Sandesh()
        http_port = get_free_port()
        self._sandesh.init_generator(
            'conn_info_test', socket.gethostname(), 'Test', 'Test', None,
            'conn_info_test_ctxt', http_port)
    # end setUp

    def _check_conn_status_cb(self, vcinfos):
        # Invoked by ConnectionState on every update/delete: verify the
        # connection set matches what the test expects (order-insensitive).
        self.assertEqual(
            sorted(self._expected_vcinfos, key=lambda item: item.name),
            sorted(vcinfos, key=lambda item: item.name))
        return (ProcessState.FUNCTIONAL, '')
    # end _check_conn_status_cb

    def _update_conn_info(self, name, status, description, vcinfos):
        # Build the expected ConnectionInfo entry and append it to *vcinfos*.
        cinfo = ConnectionInfo()
        cinfo.name = name
        cinfo.status = ConnectionStatusNames[status]
        cinfo.description = description
        cinfo.type = ConnectionTypeNames[ConnectionType.TEST]
        cinfo.server_addrs = ['127.0.0.1:0']
        vcinfos.append(cinfo)
    # end _update_conn_info

    def _update_conn_state(self, name, status, description, vcinfos):
        # Record the expectation, then drive the real ConnectionState update
        # (which fires _check_conn_status_cb).
        self._expected_vcinfos = vcinfos
        ConnectionState.update(ConnectionType.TEST, name, status,
                               ['127.0.0.1:0'], description)
    # end _update_conn_state

    def _delete_conn_info(self, name, vcinfos):
        # Return a copy of *vcinfos* without the named entry.
        return [cinfo for cinfo in vcinfos if cinfo.name != name]
    # end _delete_conn_info

    def _delete_conn_state(self, name, vcinfos):
        # Record the expectation, then drive the real ConnectionState delete.
        self._expected_vcinfos = vcinfos
        ConnectionState.delete(ConnectionType.TEST, name)
    # end _delete_conn_state

    def test_basic(self):
        # End-to-end: init ConnectionState, add two connections, delete one;
        # _check_conn_status_cb validates the UVE content on each step.
        ConnectionState.init(sandesh=self._sandesh, hostname="TestHost",
                             module_id="TestModule", instance_id="0",
                             conn_status_cb=self._check_conn_status_cb,
                             uve_type_cls=NodeStatusUVE,
                             uve_data_type_cls=NodeStatus)
        vcinfos = []
        self._update_conn_info("Test1", ConnectionStatus.UP, "Test1 UP",
                               vcinfos)
        self._update_conn_state("Test1", ConnectionStatus.UP, "Test1 UP",
                                vcinfos)
        self._update_conn_info("Test2", ConnectionStatus.UP, "Test2 UP",
                               vcinfos)
        self._update_conn_state("Test2", ConnectionStatus.UP, "Test2 UP",
                                vcinfos)
        vcinfos = self._delete_conn_info("Test2", vcinfos)
        self._delete_conn_state("Test2", vcinfos)
    # end test_basic

    def test_callback(self):
        # get_conn_state_cb: FUNCTIONAL while all connections are UP, then
        # NON_FUNCTIONAL with a message naming every DOWN connection.
        vcinfos = []
        self._update_conn_info("Test1", ConnectionStatus.UP, "Test1 UP",
                               vcinfos)
        (pstate, message) = ConnectionState.get_conn_state_cb(vcinfos)
        self.assertEqual(ProcessState.FUNCTIONAL, pstate)
        self.assertEqual('', message)
        self._update_conn_info("Test2", ConnectionStatus.DOWN, "Test2 DOWN",
                               vcinfos)
        (pstate, message) = ConnectionState.get_conn_state_cb(vcinfos)
        self.assertEqual(ProcessState.NON_FUNCTIONAL, pstate)
        self.assertEqual("Test:Test2[Test2 DOWN] connection down", message)
        self._update_conn_info("Test3", ConnectionStatus.DOWN, "Test3 DOWN",
                               vcinfos)
        (pstate, message) = ConnectionState.get_conn_state_cb(vcinfos)
        self.assertEqual(ProcessState.NON_FUNCTIONAL, pstate)
        self.assertEqual(
            "Test:Test2[Test2 DOWN], Test:Test3[Test3 DOWN] connection down",
            message)