def __init__(self, conf):
     self._conf = conf
     module = Module.CONTRAIL_TOPOLOGY
     self._moduleid = ModuleNames[module]
     node_type = Module2NodeType[module]
     self._node_type_name = NodeTypeNames[node_type]
     self._hostname = socket.gethostname()
     self._instance_id = '0'
     if self._conf.sandesh_send_rate_limit() is not None:
         SandeshSystem.set_sandesh_send_rate_limit(
             self._conf.sandesh_send_rate_limit())
     sandesh_global.init_generator(self._moduleid, self._hostname,
                                   self._node_type_name, self._instance_id,
                                   self._conf.collectors(), 
                                   self._node_type_name,
                                   self._conf.http_port(),
                                   ['contrail_topology.sandesh'],
                                   self._conf._disc)
     sandesh_global.set_logging_params(
         enable_local_log=self._conf.log_local(),
         category=self._conf.log_category(),
         level=self._conf.log_level(),
         file=self._conf.log_file(),
         enable_syslog=self._conf.use_syslog(),
         syslog_facility=self._conf.syslog_facility())
     ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
         self._instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus)
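Every example in this collection leans on the same handful of sandesh symbols (Module, ModuleNames, sandesh_global, ConnectionState, NodeStatusUVE/NodeStatus). For orientation, a typical import block for the pattern above might look like the sketch below; the module paths are an assumption based on common contrail/pysandesh layouts and can differ between components and releases.

import socket

# Assumed import paths -- verify against the component you are working on.
from pysandesh.sandesh_base import Sandesh, SandeshSystem, sandesh_global
from pysandesh.connection_info import ConnectionState
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import (ModuleNames, Module2NodeType,
                                          NodeTypeNames, INSTANCE_ID_DEFAULT)
# NodeStatusUVE / NodeStatus usually come from the component's generated UVE
# package (e.g. a nodeinfo.ttypes module), which varies per daemon.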
Example #2
    def __init__(self, conf, instance='0'):
        self._conf = conf
        module = Module.CONTRAIL_SNMP_COLLECTOR
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._hostname = socket.gethostname()
        self._instance_id = instance
        sandesh_global.init_generator(self._moduleid, self._hostname,
                                      self._node_type_name, self._instance_id,
                                      self._conf.collectors(),
                                      self._node_type_name,
                                      self._conf.http_port(),
                                      ['contrail_snmp_collector.gen_py',
                                      'opserver.sandesh'],
                                      self._conf._disc)
        sandesh_global.set_logging_params(
            enable_local_log=self._conf.log_local(),
            category=self._conf.log_category(),
            level=self._conf.log_level(),
            file=self._conf.log_file(),
            enable_syslog=self._conf.use_syslog(),
            syslog_facility=self._conf.syslog_facility())
        ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        self.if_stat = {}
        self._logger = sandesh_global.logger()
Example #3
 def __init__(self, conf):
     self._conf = conf
     module = Module.CONTRAIL_TOPOLOGY
     self._moduleid = ModuleNames[module]
     node_type = Module2NodeType[module]
     self._node_type_name = NodeTypeNames[node_type]
     self._hostname = socket.gethostname()
     self.table = "ObjectCollectorInfo"
     self._instance_id = '0'
     sandesh_global.init_generator(self._moduleid, self._hostname,
                                   self._node_type_name, self._instance_id,
                                   self._conf.random_collectors,
                                   self._node_type_name,
                                   self._conf.http_port(),
                                   ['contrail_topology.sandesh'],
                                   config=self._conf.sandesh_config())
     sandesh_global.set_logging_params(
         enable_local_log=self._conf.log_local(),
         category=self._conf.log_category(),
         level=self._conf.log_level(),
         file=self._conf.log_file(),
         enable_syslog=self._conf.use_syslog(),
         syslog_facility=self._conf.syslog_facility())
     ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
         self._instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus, self.table)
     self._logger = sandesh_global.logger()
Example #4
    def __init__(self, rule_file, discovery_server,
                 discovery_port, collector_addr):
        self.node_type = "contrail-config"
        self.module = Module.CONFIG_NODE_MGR
        self.module_id = ModuleNames[self.module]
        self.supervisor_serverurl = "unix:///var/run/supervisord_config.sock"
        self.add_current_process()
        node_type = Module2NodeType[self.module]
        node_type_name = NodeTypeNames[node_type]
        self.sandesh_global = sandesh_global
        EventManager.__init__(
            self, rule_file, discovery_server,
            discovery_port, collector_addr, sandesh_global)
        _disc = self.get_discovery_client()
        sandesh_global.init_generator(
            self.module_id, socket.gethostname(),
            node_type_name, self.instance_id, self.collector_addr,
            self.module_id, 8100, ['cfgm_common.uve'], _disc)
        sandesh_global.set_logging_params(enable_local_log=True)
        ConnectionState.init(sandesh_global, socket.gethostname(),
            self.module_id, self.instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)
        self.send_system_cpu_info()
        self.third_party_process_list = [ ]
Example #5
    def sandesh_init(self):
        """ Init sandesh """
        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                self._args.sandesh_send_rate_limit)
        self.redefine_sandesh_handles()
        self._sandesh.init_generator(
            self._module_name, self._hostname, self._node_type_name,
            self._instance_id, self._args.random_collectors,
            '%s_context' % self.context, int(self._args.http_server_port),
            ['cfgm_common', '%s.sandesh' % self.module_pkg], self.discovery,
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf)

        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # connection state init
        ConnectionState.init(
                self._sandesh, self._hostname, self._module_name,
                self._instance_id,
                staticmethod(ConnectionState.get_process_state_cb),
                NodeStatusUVE, NodeStatus, self.table)
Example #6
    def __init__(self, db, discovery, args=None):
        self._args = args
        self._db = db

        module = Module.SVC_MONITOR
        node_type = Module2NodeType[module]
        self._module_name = ModuleNames[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._instance_id = INSTANCE_ID_DEFAULT
        self._hostname = socket.gethostname()

        #sandesh init
        self._sandesh = self._sandesh_init(discovery)

        # connection state init
        ConnectionState.init(self._sandesh, self._hostname, self._module_name,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        #create cpu_info object to send periodic updates
        sysinfo_req = False
        cpu_info = vnc_cpu_info.CpuInfo(self._module_name, 
            self._instance_id, sysinfo_req, self._sandesh, 60)
        self._cpu_info = cpu_info
Example #7
    def __init__(self, rule_file, discovery_server,
                 discovery_port, collector_addr):
        self.module = Module.COMPUTE_NODE_MGR
        self.module_id = ModuleNames[self.module]

        node_type = Module2NodeType[self.module]
        node_type_name = NodeTypeNames[node_type]
        self.sandesh_global = sandesh_global
        EventManager.__init__(self, rule_file, discovery_server,
                              discovery_port, collector_addr, sandesh_global)
        self.node_type = "contrail-vrouter"
        self.table = "ObjectVRouter"
        _disc = self.get_discovery_client()
        sandesh_global.init_generator(
            self.module_id, socket.gethostname(),
            node_type_name, self.instance_id, self.collector_addr,
            self.module_id, 8102, ['vrouter.loadbalancer',
                'nodemgr.common.sandesh'], _disc)
        sandesh_global.set_logging_params(enable_local_log=True)
        self.supervisor_serverurl = "unix:///var/run/supervisord_vrouter.sock"
        self.add_current_process()
        ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
            self.instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.table)

        self.lb_stats = LoadbalancerStatsUVE()
        self.send_system_cpu_info()
        self.third_party_process_dict = {}
Example #8
 def __init__(self, rule_file, discovery_server,
              discovery_port, collector_addr):
     EventManager.__init__(
         self, rule_file, discovery_server,
         discovery_port, collector_addr, sandesh_global)
     self.node_type = 'contrail-analytics'
     self.table = "ObjectCollectorInfo"
     self.module = Module.ANALYTICS_NODE_MGR
     self.module_id = ModuleNames[self.module]
     self.supervisor_serverurl = "unix:///var/run/supervisord_analytics.sock"
     self.add_current_process()
     node_type = Module2NodeType[self.module]
     node_type_name = NodeTypeNames[node_type]
     _disc = self.get_discovery_client()
     sandesh_global.init_generator(
         self.module_id, socket.gethostname(),
         node_type_name, self.instance_id, self.collector_addr,
         self.module_id, 8104, ['analytics', 'nodemgr.common.sandesh'], _disc)
     sandesh_global.set_logging_params(enable_local_log=True)
     ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
         self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus, self.table)
     self.send_system_cpu_info()
     self.third_party_process_list = [ ]
Example #9
    def sandesh_init(self, http_server_port=None):
        """ Init sandesh """
        self._sandesh = Sandesh()
        self.redefine_sandesh_handles()
        if not http_server_port:
            http_server_port = self._args.http_server_port
        self._sandesh.init_generator(
            self._module_name, self._hostname, self._node_type_name,
            self._instance_id, self._args.random_collectors,
            '%s_context' % self.context, int(http_server_port),
            ['cfgm_common', '%s.sandesh' % self.module_pkg],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level,
            file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # connection state init
        ConnectionState.init(
                self._sandesh, self._hostname, self._module_name,
                self._instance_id,
                staticmethod(ConnectionState.get_conn_state_cb),
                NodeStatusUVE, NodeStatus, self.table)
        VncGreenlet.register_sandesh_handler()
Example #10
    def sandesh_init(self):
        """ Init Sandesh """
        self._sandesh = Sandesh()

        # Register custom sandesh request handlers.
        self._redefine_sandesh_handles()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self._module["name"], self._module["hostname"],
            self._module["node_type_name"], self._module["instance_id"],
            self._args.random_collectors, 'kube_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'kube_manager'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category, level=self._args.log_level,
            file=self._args.log_file, enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(
            self._sandesh, self._module["hostname"], self._module["name"],
            self._module["instance_id"],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self._module["table"])
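Unlike the other snippets, the kube_manager variant above reads its identity fields from a self._module dictionary. A minimal sketch of how such a dictionary could be assembled is shown below; the helper name build_module_info is purely illustrative (not part of the original code), and the import paths follow the same assumption noted earlier.

import socket

from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import (ModuleNames, Module2NodeType,
                                          NodeTypeNames, INSTANCE_ID_DEFAULT)

def build_module_info(module, table):
    # Hypothetical helper: collects the fields that the sandesh_init() above
    # looks up on self._module (name, hostname, node_type_name, instance_id, table).
    node_type = Module2NodeType[module]
    return {
        "name": ModuleNames[module],
        "hostname": socket.gethostname(),
        "node_type_name": NodeTypeNames[node_type],
        "instance_id": INSTANCE_ID_DEFAULT,
        "table": table,
    }

# e.g. build_module_info(Module.CONTRAIL_TOPOLOGY, "ObjectCollectorInfo")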
Example #11
 def __init__(self, rule_file, discovery_server,
              discovery_port, collector_addr):
     self.node_type = "contrail-control"
     self.uve_node_type = UVENodeTypeNames[NodeType.CONTROL]
     self.table = "ObjectBgpRouter"
     self.module = Module.CONTROL_NODE_MGR
     self.module_id = ModuleNames[self.module]
     self.supervisor_serverurl = "unix:///var/run/supervisord_control.sock"
     self.add_current_process()
     node_type = Module2NodeType[self.module]
     node_type_name = NodeTypeNames[node_type]
     self.sandesh_global = sandesh_global
     EventManager.__init__(
         self, rule_file, discovery_server,
         discovery_port, collector_addr, sandesh_global)
     _disc = self.get_discovery_client()
     sandesh_global.init_generator(
         self.module_id, socket.gethostname(),
         node_type_name, self.instance_id, self.collector_addr,
         self.module_id, 8101, ['nodemgr.common.sandesh'], _disc)
     sandesh_global.set_logging_params(enable_local_log=True)
     ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
         self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus, self.table)
     self.send_init_info()
     self.third_party_process_dict = {}
Example #12
    def sandesh_init(self):
        """ Init Sandesh """
        self._sandesh = Sandesh()

        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self.module['name'], self.module['hostname'],
            self.module['node_type_name'], self.module['instance_id'],
            self._args.collectors, 'mesos_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'mesos_manager.sandesh'],
            logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)

        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level, file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)

        # Connect to collector.
        ConnectionState.init(self._sandesh, self.module['hostname'],
            self.module['name'], self.module['instance_id'],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.module['table'])
Example #13
 def test_basic(self):
     ConnectionState.init(sandesh = self._sandesh, hostname = "TestHost",
         module_id = "TestModule", instance_id = "0",
         status_cb = self._check_process_status_cb,
         uve_type_cls = NodeStatusTestUVE,
         uve_data_type_cls = NodeStatusTest)
     vcinfos = []
     self._update_conn_info("Test1", ConnectionStatus.UP, "Test1 UP",
         vcinfos)
     self._update_conn_state("Test1", ConnectionStatus.UP, "Test1 UP",
         vcinfos)
     self._update_conn_info("Test2", ConnectionStatus.UP, "Test2 UP",
         vcinfos)
     self._update_conn_state("Test2", ConnectionStatus.UP, "Test2 UP",
         vcinfos)
     vcinfos = self._delete_conn_info("Test2", vcinfos)
     self._delete_conn_state("Test2", vcinfos)
Example #14
 def __init__(self, rule_file, discovery_server,
              discovery_port, collector_addr,
              hostip, minimum_diskgb, contrail_databases,
              cassandra_repair_interval,
              cassandra_repair_logdir):
     self.node_type = "contrail-database"
     self.uve_node_type = UVENodeTypeNames[NodeType.DATABASE]
     self.table = "ObjectDatabaseInfo"
     self.module = Module.DATABASE_NODE_MGR
     self.module_id = ModuleNames[self.module]
     self.hostip = hostip
     self.minimum_diskgb = minimum_diskgb
     self.contrail_databases = contrail_databases
     self.cassandra_repair_interval = cassandra_repair_interval
     self.cassandra_repair_logdir = cassandra_repair_logdir
     self.cassandra_mgr = CassandraManager(cassandra_repair_logdir)
     self.supervisor_serverurl = "unix:///var/run/supervisord_database.sock"
     self.add_current_process()
     node_type = Module2NodeType[self.module]
     node_type_name = NodeTypeNames[node_type]
     self.sandesh_global = sandesh_global
     EventManager.__init__(
         self, rule_file, discovery_server,
         discovery_port, collector_addr, sandesh_global)
     self.sandesh_global = sandesh_global
     if self.rule_file == '':
         self.rule_file = "/etc/contrail/" + \
             "supervisord_database_files/contrail-database.rules"
     json_file = open(self.rule_file)
     self.rules_data = json.load(json_file)
     _disc = self.get_discovery_client()
     sandesh_global.init_generator(
         self.module_id, socket.gethostname(), node_type_name,
         self.instance_id, self.collector_addr, self.module_id, 8103,
         ['database.sandesh', 'nodemgr.common.sandesh'], _disc)
     sandesh_global.set_logging_params(enable_local_log=True)
     ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
         self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus, self.table)
     self.send_init_info()
     self.third_party_process_dict = {}
     self.third_party_process_dict["cassandra"] = "Dcassandra-pidfile=.*cassandra\.pid"
     self.third_party_process_dict["zookeeper"] = "org.apache.zookeeper.server.quorum.QuorumPeerMain"
Example #15
    def __init__(self, conf, instance="0"):
        self._conf = conf
        module = Module.CONTRAIL_SNMP_COLLECTOR
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectCollectorInfo"
        self._hostname = socket.gethostname()
        self._instance_id = instance
        if self._conf.sandesh_send_rate_limit() is not None:
            SandeshSystem.set_sandesh_send_rate_limit(self._conf.sandesh_send_rate_limit())
        sandesh_global.init_generator(
            self._moduleid,
            self._hostname,
            self._node_type_name,
            self._instance_id,
            self._conf.collectors(),
            self._node_type_name,
            self._conf.http_port(),
            ["contrail_snmp_collector.sandesh"],
            self._conf._disc,
        )
        sandesh_global.set_logging_params(
            enable_local_log=self._conf.log_local(),
            category=self._conf.log_category(),
            level=self._conf.log_level(),
            file=self._conf.log_file(),
            enable_syslog=self._conf.use_syslog(),
            syslog_facility=self._conf.syslog_facility(),
        )
        ConnectionState.init(
            sandesh_global,
            self._hostname,
            self._moduleid,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE,
            NodeStatus,
            self.table,
        )

        self.if_stat = {}
        self._logger = sandesh_global.logger()
Example #16
 def __init__(self, rule_file, discovery_server,
              discovery_port, collector_addr,
              hostip, minimum_diskgb, contrail_databases,
              cassandra_repair_interval,
              cassandra_repair_logdir):
     self.node_type = "contrail-database"
     self.module = Module.DATABASE_NODE_MGR
     self.module_id = ModuleNames[self.module]
     self.hostip = hostip
     self.minimum_diskgb = minimum_diskgb
     self.contrail_databases = contrail_databases
     self.cassandra_repair_interval = cassandra_repair_interval
     self.cassandra_repair_logdir = cassandra_repair_logdir
     self.supervisor_serverurl = "unix:///var/run/supervisord_database.sock"
     self.add_current_process()
     node_type = Module2NodeType[self.module]
     node_type_name = NodeTypeNames[node_type]
     self.sandesh_global = sandesh_global
     EventManager.__init__(
         self, rule_file, discovery_server,
         discovery_port, collector_addr, sandesh_global, send_build_info = True)
     self.sandesh_global = sandesh_global
     if self.rule_file == '':
         self.rule_file = "/etc/contrail/" + \
             "supervisord_database_files/contrail-database.rules"
     json_file = open(self.rule_file)
     self.rules_data = json.load(json_file)
     _disc = self.get_discovery_client()
     sandesh_global.init_generator(
         self.module_id, socket.gethostname(), node_type_name,
         self.instance_id, self.collector_addr, self.module_id, 8103,
         ['database.sandesh'], _disc)
     sandesh_global.set_logging_params(enable_local_log=True)
     ConnectionState.init(sandesh_global, socket.gethostname(), self.module_id,
         self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus)
     self.send_system_cpu_info()
     self.third_party_process_list = [ "cassandra", "zookeeper" ]
Example #17
    def __init__(self, config, type_info, unit_names, update_process_list=False):
        self.config = config
        self.type_info = type_info
        self.max_cores = 4
        self.max_old_cores = 3
        self.max_new_cores = 1
        self.all_core_file_list = []
        self.tick_count = 0
        self.fail_status_bits = 0
        self.prev_fail_status_bits = 1
        self.instance_id = INSTANCE_ID_DEFAULT
        self.sandesh_instance = sandesh_global
        self.curr_build_info = None
        self.new_build_info = None
        self.hostip = self.config.hostip
        self.hostname = socket.getfqdn(self.hostip)

        self.collector_chksum = 0
        self.random_collectors = list()
        if config.collectors:
            config.collectors.sort()
            self.collector_chksum = hashlib.md5("".join(config.collectors)).hexdigest()
            self.random_collectors = random.sample(config.collectors, len(config.collectors))

        ConnectionState.init(self.sandesh_instance, self.hostname,
            self.type_info._module_name, self.instance_id,
            staticmethod(ConnectionState.get_conn_state_cb),
            NodeStatusUVE, NodeStatus, self.type_info._object_table,
            self._get_process_state_cb)
        self.sandesh_instance.init_generator(
            self.type_info._module_name, self.hostname,
            self.type_info._node_type_name, self.instance_id,
            self.random_collectors, self.type_info._module_name,
            ServiceHttpPortMap[self.type_info._module_name],
            ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
            config=SandeshConfig.from_parser_arguments(self.config))
        self.sandesh_instance.set_logging_params(
            enable_local_log=self.config.log_local,
            category=self.config.log_category,
            level=self.config.log_level,
            file=self.config.log_file,
            enable_syslog=self.config.use_syslog,
            syslog_facility=self.config.syslog_facility)
        self.logger = self.sandesh_instance.logger()

        event_handlers = {}
        event_handlers['PROCESS_STATE'] = self._event_process_state
        event_handlers['PROCESS_COMMUNICATION'] = self._event_process_communication
        event_handlers['PROCESS_LIST_UPDATE'] = self._update_current_processes
        if platform.system() == 'Windows':
            self.system_data = WindowsSysData()
            self.process_info_manager = WindowsProcessInfoManager(event_handlers)
        else:
            gevent.signal(signal.SIGHUP, self.nodemgr_sighup_handler)
            self.system_data = LinuxSysData(self.msg_log, self.config.corefile_path)
            if DockerProcessInfoManager and (utils.is_running_in_docker()
                                             or utils.is_running_in_kubepod()):
                self.process_info_manager = DockerProcessInfoManager(
                    type_info._module_type, unit_names, event_handlers,
                    update_process_list)
            else:
                self.msg_log('Node manager could not detect process manager',
                            SandeshLevel.SYS_ERR)
                exit(-1)

        self.process_state_db = self._get_current_processes()
        for group in self.process_state_db:
            self._send_init_info(group)
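The collector handling at the top of this example (sort the configured collectors, hash the joined list so configuration changes can be detected, then randomise the connection order) is independent of sandesh and can be shown on its own. A minimal standalone sketch, written for Python 3 where hashlib.md5 needs bytes, follows; the function name prepare_collectors is illustrative.

import hashlib
import random

def prepare_collectors(collectors):
    # Returns (checksum, randomised order) for a list of 'ip:port' strings.
    # The checksum only changes when the configured set changes, which is what
    # the node manager above compares against; the random order spreads
    # generators across the available collectors.
    if not collectors:
        return 0, []
    ordered = sorted(collectors)
    checksum = hashlib.md5("".join(ordered).encode("utf-8")).hexdigest()
    return checksum, random.sample(ordered, len(ordered))

# prepare_collectors(["10.0.0.1:8086", "10.0.0.2:8086"])
# -> ('<md5 hex digest>', ['10.0.0.2:8086', '10.0.0.1:8086'])  # order varies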
Example #18
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                self._args.sandesh_send_rate_limit)
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectConfigNode"
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(module_name, hostname, node_type_name,
                                     instance_id, self._args.collectors,
                                     'to_bgp_context',
                                     int(args.http_server_port),
                                     ['cfgm_common', 'device_manager.sandesh'],
                                     self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus, self.table)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    args.api_server_ip,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            rabbit_servers,
            rabbit_port,
            rabbit_user,
            rabbit_password,
            rabbit_vhost,
            rabbit_ha_mode,
            q_name,
            self._vnc_subscribe_callback,
            self.config_log,
            rabbit_use_ssl=self._args.rabbit_use_ssl,
            kombu_ssl_version=self._args.kombu_ssl_version,
            kombu_ssl_keyfile=self._args.kombu_ssl_keyfile,
            kombu_ssl_certfile=self._args.kombu_ssl_certfile,
            kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs)

        self._cassandra = DMCassandraDB.getInstance(self, _zookeeper_client)

        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            vn = VirtualNetworkDM.locate(obj['uuid'], obj)
            if vn is not None and vn.routing_instances is not None:
                for ri_id in vn.routing_instances:
                    ri_obj = RoutingInstanceDM.locate(ri_id)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._cassandra.handle_pr_deletes(pr_uuid_set)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._cassandra.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()

        self._db_resync_done.set()
        gevent.joinall(self._vnc_kombu.greenlets())
Example #19
    def __init__(self, conf, test_logger=None):
        self._conf = conf
        module = Module.ALARM_GENERATOR
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._hostname = socket.gethostname()
        self._instance_id = self._conf.worker_id()
        is_collector = True
        if test_logger is not None:
            is_collector = False
        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._conf.sandesh_send_rate_limit() is not None:
            SandeshSystem.set_sandesh_send_rate_limit(self._conf.sandesh_send_rate_limit())
        self._sandesh.init_generator(
            self._moduleid,
            self._hostname,
            self._node_type_name,
            self._instance_id,
            self._conf.collectors(),
            self._node_type_name,
            self._conf.http_port(),
            ["opserver.sandesh", "sandesh"],
            host_ip=self._conf.host_ip(),
            connect_to_collector=is_collector,
        )
        if test_logger is not None:
            self._logger = test_logger
        else:
            self._sandesh.set_logging_params(
                enable_local_log=self._conf.log_local(),
                category=self._conf.log_category(),
                level=self._conf.log_level(),
                file=self._conf.log_file(),
                enable_syslog=self._conf.use_syslog(),
                syslog_facility=self._conf.syslog_facility(),
            )
            self._logger = self._sandesh._logger
        # Trace buffer list
        self.trace_buf = [{"name": "DiscoveryMsg", "size": 1000}]
        # Create trace buffers
        for buf in self.trace_buf:
            self._sandesh.trace_buffer_create(name=buf["name"], size=buf["size"])

        tables = ["ObjectCollectorInfo", "ObjectDatabaseInfo", "ObjectVRouter", "ObjectBgpRouter", "ObjectConfigNode"]
        self.mgrs = {}
        self.tab_alarms = {}
        self.ptab_info = {}
        self.tab_perf = {}
        self.tab_perf_prev = {}
        for table in tables:
            self.mgrs[table] = hook.HookManager(
                namespace="contrail.analytics.alarms",
                name=table,
                invoke_on_load=True,
                invoke_args=(),
                on_load_failure_callback=self.fail_cb,
            )

            for extn in self.mgrs[table][table]:
                self._logger.info(
                    "Loaded extensions for %s: %s,%s doc %s"
                    % (table, extn.name, extn.entry_point_target, extn.obj.__doc__)
                )

            self.tab_alarms[table] = {}
            self.tab_perf[table] = AGTabStats()

        ConnectionState.init(
            self._sandesh,
            self._hostname,
            self._moduleid,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE,
            NodeStatus,
        )

        self._us = UVEServer(None, self._logger, self._conf.redis_password())

        self._workers = {}
        self._uvestats = {}
        self._uveq = {}
        self._uveqf = {}

        self.disc = None
        self._libpart_name = self._hostname + ":" + self._instance_id
        self._libpart = None
        self._partset = set()
        if self._conf.discovery()["server"]:
            data = {"ip-address": self._hostname, "port": self._instance_id}
            self.disc = client.DiscoveryClient(
                self._conf.discovery()["server"], self._conf.discovery()["port"], ModuleNames[Module.ALARM_GENERATOR]
            )
            self._logger.info("Disc Publish to %s : %s" % (str(self._conf.discovery()), str(data)))
            self.disc.publish(ALARM_GENERATOR_SERVICE_NAME, data)
        else:
            # If there is no discovery service, use fixed redis_uve list
            redis_uve_list = []
            try:
                for redis_uve in self._conf.redis_uve_list():
                    redis_ip_port = redis_uve.split(":")
                    redis_elem = (redis_ip_port[0], int(redis_ip_port[1]), 0)
                    redis_uve_list.append(redis_elem)
            except Exception as e:
                self._logger.error("Failed to parse redis_uve_list: %s" % e)
            else:
                self._us.update_redis_uve_list(redis_uve_list)

            # If there is no discovery service, use fixed alarmgen list
            self._libpart = self.start_libpart(self._conf.alarmgen_list())

        PartitionOwnershipReq.handle_request = self.handle_PartitionOwnershipReq
        PartitionStatusReq.handle_request = self.handle_PartitionStatusReq
        UVETableAlarmReq.handle_request = self.handle_UVETableAlarmReq
        UVETableInfoReq.handle_request = self.handle_UVETableInfoReq
        UVETablePerfReq.handle_request = self.handle_UVETablePerfReq
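When no discovery server is configured, the example above falls back to a fixed redis_uve_list of 'ip:port' strings and turns each entry into an (ip, port, 0) tuple. A small self-contained sketch of that step is below; unlike the original, which wraps the whole loop in one try block, this version skips individual malformed entries, and the function name is illustrative only.

def parse_redis_uve_list(entries, logger=None):
    # Convert ['ip:port', ...] into [(ip, port, 0), ...] as expected by the
    # update_redis_uve_list() call in the snippet above.
    parsed = []
    for entry in entries:
        try:
            ip, port = entry.split(":")
            parsed.append((ip, int(port), 0))
        except ValueError as e:
            if logger:
                logger.error("Failed to parse redis_uve entry %r: %s" % (entry, e))
    return parsed

# parse_redis_uve_list(["127.0.0.1:6379"]) -> [('127.0.0.1', 6379, 0)]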
Example #20
    def __init__(self, args=None):
        self._args = args
        self._fabric_rt_inst_obj = None

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.SCHEMA_TRANSFORMER])

        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                args.sandesh_send_rate_limit)
        sandesh.VnList.handle_request = self.sandesh_vn_handle_request
        sandesh.RoutintInstanceList.handle_request = \
            self.sandesh_ri_handle_request
        sandesh.ServiceChainList.handle_request = \
            self.sandesh_sc_handle_request
        sandesh.StObjectReq.handle_request = \
            self.sandesh_st_object_handle_request
        module = Module.SCHEMA_TRANSFORMER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectConfigNode"
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name,
            hostname,
            node_type_name,
            instance_id,
            self._args.collectors,
            'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'schema_transformer.sandesh'],
            self._disc,
            logger_class=args.logger_class,
            logger_config_file=args.logging_conf)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus, self.table)

        self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                          size=1000)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'schema_transformer.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            rabbit_servers,
            rabbit_port,
            rabbit_user,
            rabbit_password,
            rabbit_vhost,
            rabbit_ha_mode,
            q_name,
            self._vnc_subscribe_callback,
            self.config_log,
            rabbit_use_ssl=self._args.rabbit_use_ssl,
            kombu_ssl_version=self._args.kombu_ssl_version,
            kombu_ssl_keyfile=self._args.kombu_ssl_keyfile,
            kombu_ssl_certfile=self._args.kombu_ssl_certfile,
            kombu_ssl_ca_certs=self._args.kombu_ssl_ca_certs)
        try:
            self._cassandra = SchemaTransformerDB(self, _zookeeper_client)
            DBBaseST.init(self, self._sandesh.logger(), self._cassandra)
            DBBaseST._sandesh = self._sandesh
            DBBaseST._vnc_lib = _vnc_lib
            ServiceChain.init()
            self.reinit()
            # create cpu_info object to send periodic updates
            sysinfo_req = False
            cpu_info = vnc_cpu_info.CpuInfo(module_name, instance_id,
                                            sysinfo_req, self._sandesh, 60)
            self._cpu_info = cpu_info
            self._db_resync_done.set()
        except Exception as e:
            # If any of the above tasks like CassandraDB read fails, cleanup
            # the RMQ constructs created earlier and then give up.
            self._vnc_kombu.shutdown()
            raise e
Example #21
    def __init__(self, conf):
        self._conf = conf
        module = Module.ALARM_GENERATOR
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._hostname = socket.gethostname()
        self._instance_id = self._conf.worker_id()
        sandesh_global.init_generator(self._moduleid, self._hostname,
                                      self._node_type_name, self._instance_id,
                                      self._conf.collectors(), 
                                      self._node_type_name,
                                      self._conf.http_port(),
                                      ['opserver.sandesh', 'sandesh'],
                                      host_ip=self._conf.host_ip())
        sandesh_global.set_logging_params(
            enable_local_log=self._conf.log_local(),
            category=self._conf.log_category(),
            level=self._conf.log_level(),
            file=self._conf.log_file(),
            enable_syslog=self._conf.use_syslog(),
            syslog_facility=self._conf.syslog_facility())
        self._logger = sandesh_global._logger

        # Trace buffer list
        self.trace_buf = [
            {'name':'DiscoveryMsg', 'size':1000}
        ]
        # Create trace buffers 
        for buf in self.trace_buf:
            sandesh_global.trace_buffer_create(name=buf['name'], size=buf['size'])

        tables = [ "ObjectCollectorInfo",
                   "ObjectDatabaseInfo",
                   "ObjectVRouter",
                   "ObjectBgpRouter",
                   "ObjectConfigNode" ] 
        self.mgrs = {}
        self.tab_alarms = {}
        for table in tables:
            self.mgrs[table] = hook.HookManager(
                namespace='contrail.analytics.alarms',
                name=table,
                invoke_on_load=True,
                invoke_args=(),
                on_load_failure_callback=Controller.fail_cb
            )
            
            for extn in self.mgrs[table][table]:
                self._logger.info('Loaded extensions for %s: %s,%s doc %s' % \
                    (table, extn.name, extn.entry_point_target, extn.obj.__doc__))

            self.tab_alarms[table] = {}

        ConnectionState.init(sandesh_global, self._hostname, self._moduleid,
            self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        self._us = UVEServer(None, self._logger, self._conf.redis_password())

        self._workers = {}

        self.disc = None
        self._libpart_name = self._hostname + ":" + self._instance_id
        self._libpart = None
        self._partset = set()
        if self._conf.discovery()['server']:
            import discoveryclient.client as client 
            data = {
                'ip-address': self._hostname ,
                'port': self._instance_id
            }
            self.disc = client.DiscoveryClient(
                self._conf.discovery()['server'],
                self._conf.discovery()['port'],
                ModuleNames[Module.ALARM_GENERATOR])
            self._logger.info("Disc Publish to %s : %s"
                          % (str(self._conf.discovery()), str(data)))
            self.disc.publish(ALARM_GENERATOR_SERVICE_NAME, data)
        else:
            # If there is no discovery service, use fixed redis_uve list
            redis_uve_list = []
            try:
                for redis_uve in self._conf.redis_uve_list():
                    redis_ip_port = redis_uve.split(':')
                    redis_ip_port = (redis_ip_port[0], int(redis_ip_port[1]))
                    redis_uve_list.append(redis_ip_port)
            except Exception as e:
                self._logger.error('Failed to parse redis_uve_list: %s' % e)
            else:
                self._us.update_redis_uve_list(redis_uve_list)

            # If there is no discovery service, use fixed alarmgen list
            self._libpart = self.start_libpart(self._conf.alarmgen_list())

        PartitionOwnershipReq.handle_request = self.handle_PartitionOwnershipReq
        PartitionStatusReq.handle_request = self.handle_PartitionStatusReq
        UVETableAlarmReq.handle_request = self.handle_UVETableAlarmReq 
Example #22
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        DBBaseDM.init(self._sandesh.logger(), self._cassandra)
        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            vn_set = set()
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                vn_set |= pr.virtual_networks
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)
                    if vmi:
                        vn_set |= vmi.virtual_networks

            for vn_id in vn_set:
                VirtualNetworkDM.locate(vn_id)

            for pr in PhysicalRouterDM.values():
                pr.push_config()

        self._db_resync_done.set()
        while 1:
            self._vnc_kombu._subscribe_greenlet.join()
            # In case _subscribe_greenlet dies for some reason, it will be
            # respawned. sleep for 1 second to wait for it to be respawned
            time.sleep(1)
Example #23
    def __init__(self,
                 config,
                 type_info,
                 unit_names,
                 update_process_list=False):
        self.config = config
        self.type_info = type_info
        self.max_cores = 4
        self.max_old_cores = 3
        self.max_new_cores = 1
        self.all_core_file_list = []
        self.tick_count = 0
        self.fail_status_bits = 0
        self.prev_fail_status_bits = 1
        self.instance_id = INSTANCE_ID_DEFAULT
        self.sandesh_instance = sandesh_global
        self.curr_build_info = None
        self.new_build_info = None
        self.hostip = self.config.hostip
        self.hostname = socket.getfqdn(self.hostip)

        self.collector_chksum = 0
        self.random_collectors = list()
        if config.collectors:
            config.collectors.sort()
            self.collector_chksum = hashlib.md5("".join(
                config.collectors)).hexdigest()
            self.random_collectors = random.sample(config.collectors,
                                                   len(config.collectors))

        ConnectionState.init(self.sandesh_instance, self.hostname,
                             self.type_info._module_name, self.instance_id,
                             staticmethod(ConnectionState.get_conn_state_cb),
                             NodeStatusUVE, NodeStatus,
                             self.type_info._object_table,
                             self._get_process_state_cb)
        self.sandesh_instance.init_generator(
            self.type_info._module_name,
            self.hostname,
            self.type_info._node_type_name,
            self.instance_id,
            self.random_collectors,
            self.type_info._module_name,
            ServiceHttpPortMap[self.type_info._module_name],
            ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
            config=SandeshConfig.from_parser_arguments(self.config))
        self.sandesh_instance.set_logging_params(
            enable_local_log=self.config.log_local,
            category=self.config.log_category,
            level=self.config.log_level,
            file=self.config.log_file,
            enable_syslog=self.config.use_syslog,
            syslog_facility=self.config.syslog_facility)
        self.logger = self.sandesh_instance.logger()

        event_handlers = {}
        event_handlers['PROCESS_STATE'] = self._event_process_state
        event_handlers[
            'PROCESS_COMMUNICATION'] = self._event_process_communication
        event_handlers['PROCESS_LIST_UPDATE'] = self._update_current_processes
        if platform.system() == 'Windows':
            self.system_data = WindowsSysData()
            self.process_info_manager = WindowsProcessInfoManager(
                event_handlers)
        else:
            gevent.signal(signal.SIGHUP, self.nodemgr_sighup_handler)
            self.system_data = LinuxSysData(self.msg_log,
                                            self.config.corefile_path)
            if DockerProcessInfoManager and (utils.is_running_in_docker()
                                             or utils.is_running_in_kubepod()):
                self.process_info_manager = DockerProcessInfoManager(
                    type_info._module_type, unit_names, event_handlers,
                    update_process_list)
            else:
                self.msg_log('Node manager could not detect process manager',
                             SandeshLevel.SYS_ERR)
                exit(-1)

        self.process_state_db = self._get_current_processes()
        for group in self.process_state_db:
            self._send_init_info(group)
Example #24
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log)

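        # Connect to config Cassandra and seed the in-memory DBBaseDM caches
        # by walking each object type below.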
        cass_server_list = self._args.cassandra_server_list
        self._cassandra = VncCassandraClient(cass_server_list,
                                             self._args.cluster_id,
                                             None,
                                             self.config_log)

        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        ok, global_system_config_list = self._cassandra._cassandra_global_system_config_list()
        if not ok:
            self.config_log('global system config list returned error: %s' %
                            global_system_config_list)
        else:
            for fq_name, uuid in global_system_config_list:
                GlobalSystemConfigDM.locate(uuid)

        ok, global_vrouter_config_list = self._cassandra._cassandra_global_vrouter_config_list()
        if not ok:
            self.config_log('global vrouter config list returned error: %s' %
                            global_vrouter_config_list)
        else:
            for fq_name, uuid in global_vrouter_config_list:
                GlobalVRouterConfigDM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if not ok:
            self.config_log('virtual network list returned error: %s' %
                            vn_list)
        else:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None and vn.routing_instances is not None:
                    for ri_id in vn.routing_instances:
                        ri_obj = RoutingInstanceDM.locate(ri_id)

        ok, bgp_list = self._cassandra._cassandra_bgp_router_list()
        if not ok:
            self.config_log('bgp router list returned error: %s' %
                            bgp_list)
        else:
            for fq_name, uuid in bgp_list:
                BgpRouterDM.locate(uuid)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)

            ok, ip_list = self._cassandra._cassandra_instance_ip_list()
            if not ok:
                self.config_log('instance ip list returned error: %s' %
                                ip_list)
            else:
                for fq_name, uuid in ip_list:
                    InstanceIpDM.locate(uuid)

            ok, fip_list = self._cassandra._cassandra_floating_ip_list()
            if not ok:
                self.config_log('floating ip list returned error: %s' %
                                fip_list)
            else:
                for fq_name, uuid in fip_list:
                    FloatingIpDM.locate(uuid)

            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None:
                    vn.update_instance_ip_map()

            for pr in PhysicalRouterDM.values():
                pr.set_config_state()
        self._db_resync_done.set()
        while 1:
            # Just wait indefinitely
            time.sleep(5)
Example #25
    def __init__(self,
                 config,
                 type_info,
                 sandesh_instance,
                 unit_names,
                 update_process_list=False):
        self.config = config
        self.type_info = type_info
        self.max_cores = 4
        self.max_old_cores = 3
        self.max_new_cores = 1
        self.all_core_file_list = []
        self.core_dir_modified_time = 0
        self.tick_count = 0
        self.fail_status_bits = 0
        self.prev_fail_status_bits = 1
        self.instance_id = INSTANCE_ID_DEFAULT
        self.collector_addr = self.config.collectors
        self.sandesh_instance = sandesh_instance
        self.curr_build_info = None
        self.new_build_info = None
        self.last_cpu = None
        self.last_time = 0
        self.own_version = None
        self.hostname = socket.gethostname()
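        # Dispatch table: process-manager event name -> handler method.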
        event_handlers = {}
        event_handlers['PROCESS_STATE'] = self.event_process_state
        event_handlers[
            'PROCESS_COMMUNICATION'] = self.event_process_communication
        event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_processes
        ConnectionState.init(
            self.sandesh_instance, self.hostname,
            self.type_info._module_name, self.instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus, self.type_info._object_table)
        self.sandesh_instance.init_generator(
            self.type_info._module_name,
            self.hostname,
            self.type_info._node_type_name,
            self.instance_id,
            self.collector_addr,
            self.type_info._module_name,
            ServiceHttpPortMap[self.type_info._module_name],
            ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
            config=SandeshConfig.from_parser_arguments(self.config))
        self.sandesh_instance.set_logging_params(
            enable_local_log=self.config.log_local,
            category=self.config.log_category,
            level=self.config.log_level,
            file=self.config.log_file,
            enable_syslog=self.config.use_syslog,
            syslog_facility=self.config.syslog_facility)
        self.logger = self.sandesh_instance.logger()

        if DockerProcessInfoManager and (utils.is_running_in_docker()
                                         or utils.is_running_in_kubepod()):
            self.process_info_manager = DockerProcessInfoManager(
                type_info._module_type, unit_names, event_handlers,
                update_process_list)
        else:
            self.msg_log('Node manager could not detect process manager',
                         SandeshLevel.SYS_ERR)
            exit(-1)

        self.process_state_db = self.get_current_processes()
        for group in self.process_state_db:
            self.send_init_info(group)
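Each node-manager variant above wires its callbacks into an event_handlers dict keyed by the event-type string and lets the process-info manager invoke them. A minimal standalone sketch of that dispatch pattern, with hypothetical handler names and payload that are not taken from the examples:

def on_process_state(event):
    print('state change: %s' % event)

def on_process_communication(event):
    print('communication: %s' % event)

event_handlers = {
    'PROCESS_STATE': on_process_state,
    'PROCESS_COMMUNICATION': on_process_communication,
}

def dispatch(event_type, event):
    # Unknown event types are silently ignored; known ones are looked up and called.
    handler = event_handlers.get(event_type)
    if handler is not None:
        handler(event)

dispatch('PROCESS_STATE', {'name': 'contrail-api', 'state': 'RUNNING'})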
Example #26
 def __init__(self,
              type_info,
              rule_file,
              collector_addr,
              sandesh_instance,
              sandesh_config,
              update_process_list=False):
     self.type_info = type_info
     self.stdin = sys.stdin
     self.stdout = sys.stdout
     self.stderr = sys.stderr
     self.rule_file = rule_file
     self.rules_data = {'Rules': []}
     self.max_cores = 4
     self.max_old_cores = 3
     self.max_new_cores = 1
     self.all_core_file_list = []
     self.core_dir_modified_time = 0
     self.tick_count = 0
     self.fail_status_bits = 0
     self.prev_fail_status_bits = 1
     self.instance_id = INSTANCE_ID_DEFAULT
     self.collector_addr = collector_addr
     self.sandesh_instance = sandesh_instance
     self.curr_build_info = None
     self.new_build_info = None
     self.last_cpu = None
     self.last_time = 0
     self.installed_package_version = None
     event_handlers = {}
     event_handlers['PROCESS_STATE'] = self.event_process_state
     event_handlers['PROCESS_COMMUNICATION'] = \
         self.event_process_communication
     event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_process
     if is_systemd_based():
         if not pydbus_present:
             sys.stderr.write('Node manager cannot run without pydbus\n')
             sys.stderr.flush()
             exit(-1)
         self.process_info_manager = SystemdProcessInfoManager(
             self.type_info._unit_names, event_handlers,
             update_process_list)
     else:
         self.process_info_manager = SupervisorProcessInfoManager(
             self.stdin, self.stdout, self.type_info._supervisor_serverurl,
             event_handlers, update_process_list)
     ConnectionState.init(
         self.sandesh_instance, socket.gethostname(),
         self.type_info._module_name, self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
         NodeStatus, self.type_info._object_table)
     self.sandesh_instance.init_generator(
         self.type_info._module_name,
         socket.gethostname(),
         self.type_info._node_type_name,
         self.instance_id,
         self.collector_addr,
         self.type_info._module_name,
         ServiceHttpPortMap[self.type_info._module_name],
         ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
         config=sandesh_config)
     self.sandesh_instance.set_logging_params(enable_local_log=True)
     self.add_current_process()
     for group in self.process_state_db:
         self.send_init_info(group)
     self.third_party_process_dict = self.type_info._third_party_processes
Example #28
 def __init__(self, config, type_info, rule_file, sandesh_instance,
              update_process_list=False):
     self.config = config
     self.type_info = type_info
     self.stdin = sys.stdin
     self.stdout = sys.stdout
     self.stderr = sys.stderr
     self.rule_file = rule_file
     self.rules_data = {'Rules':[]}
     self.max_cores = 4
     self.max_old_cores = 3
     self.max_new_cores = 1
     self.all_core_file_list = []
     self.core_dir_modified_time = 0
     self.tick_count = 0
     self.fail_status_bits = 0
     self.prev_fail_status_bits = 1
     self.instance_id = INSTANCE_ID_DEFAULT
     self.collector_addr = self.config.collectors
     self.sandesh_instance = sandesh_instance
     self.curr_build_info = None
     self.new_build_info = None
     self.last_cpu = None
     self.last_time = 0
     self.installed_package_version = None
     SupervisorEventsReq.handle_request = self.sandesh_supervisor_handle_request
     event_handlers = {}
     event_handlers['PROCESS_STATE'] = self.event_process_state
     event_handlers['PROCESS_COMMUNICATION'] = \
         self.event_process_communication
     event_handlers['PROCESS_LIST_UPDATE'] = self.update_current_process
     ConnectionState.init(self.sandesh_instance, socket.gethostname(),
         self.type_info._module_name, self.instance_id,
         staticmethod(ConnectionState.get_process_state_cb),
         NodeStatusUVE, NodeStatus, self.type_info._object_table)
     self.sandesh_instance.init_generator(
         self.type_info._module_name, socket.gethostname(),
         self.type_info._node_type_name, self.instance_id,
         self.collector_addr, self.type_info._module_name,
         ServiceHttpPortMap[self.type_info._module_name],
         ['nodemgr.common.sandesh'] + self.type_info._sandesh_packages,
         config=SandeshConfig.from_parser_arguments(self.config))
     self.sandesh_instance.set_logging_params(
         enable_local_log=self.config.log_local,
         category=self.config.log_category,
         level=self.config.log_level,
         file=self.config.log_file,
         enable_syslog=self.config.use_syslog,
         syslog_facility=self.config.syslog_facility)
     self.logger = self.sandesh_instance.logger()
     if is_systemd_based():
         if not pydbus_present:
             self.msg_log('Node manager cannot run without pydbus', SandeshLevel.SYS_ERR)
             exit(-1)
         # In docker, systemd notifications via sd_notify do not
         # work, hence we will poll the process status
         self.process_info_manager = SystemdProcessInfoManager(
             self.type_info._unit_names, event_handlers,
             update_process_list, is_running_in_docker())
     else:
         if not 'SUPERVISOR_SERVER_URL' in os.environ:
             self.msg_log('Node manager must be run as a supervisor event listener',
                           SandeshLevel.SYS_ERR)
             exit(-1)
         self.process_info_manager = SupervisorProcessInfoManager(
             self.stdin, self.stdout, self.type_info._supervisor_serverurl,
             event_handlers, update_process_list)
     self.add_current_process()
     for group in self.process_state_db:
         self.send_init_info(group)
     self.third_party_process_dict = self.type_info._third_party_processes
Example #29
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(bool(self._args.push_delay_enable))

        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit( \
                self._args.sandesh_send_rate_limit)
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port, api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log)

        self._cassandra = DMCassandraDB.getInstance(self) 

        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            vn = VirtualNetworkDM.locate(obj['uuid'], obj)
            if vn is not None and vn.routing_instances is not None:
                for ri_id in vn.routing_instances:
                    ri_obj = RoutingInstanceDM.locate(ri_id)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._cassandra.handle_pr_deletes(pr_uuid_set)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
            vmi_set = set()
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()

        self._db_resync_done.set()
        gevent.joinall(self._vnc_kombu.greenlets())
Example #30
    def __init__(self, conf):
        self._conf = conf
        module = Module.ALARM_GENERATOR
        self._moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        self._node_type_name = NodeTypeNames[node_type]
        self._hostname = socket.gethostname()
        self._instance_id = self._conf.worker_id()
        sandesh_global.init_generator(self._moduleid, self._hostname,
                                      self._node_type_name, self._instance_id,
                                      self._conf.collectors(),
                                      self._node_type_name,
                                      self._conf.http_port(),
                                      ['opserver.sandesh', 'sandesh'])
        sandesh_global.set_logging_params(
            enable_local_log=self._conf.log_local(),
            category=self._conf.log_category(),
            level=self._conf.log_level(),
            file=self._conf.log_file(),
            enable_syslog=self._conf.use_syslog(),
            syslog_facility=self._conf.syslog_facility())
        self._logger = sandesh_global._logger

        # Trace buffer list
        self.trace_buf = [{'name': 'DiscoveryMsg', 'size': 1000}]
        # Create trace buffers
        for buf in self.trace_buf:
            sandesh_global.trace_buffer_create(name=buf['name'],
                                               size=buf['size'])

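        # One stevedore HookManager per UVE table; alarm plugins are loaded
        # from the 'contrail.analytics.alarms' entry-point namespace.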
        tables = [
            "ObjectCollectorInfo", "ObjectDatabaseInfo", "ObjectVRouter",
            "ObjectBgpRouter", "ObjectConfigNode"
        ]
        self.mgrs = {}
        self.tab_alarms = {}
        for table in tables:
            self.mgrs[table] = hook.HookManager(
                namespace='contrail.analytics.alarms',
                name=table,
                invoke_on_load=True,
                invoke_args=(),
                on_load_failure_callback=Controller.fail_cb)

            for extn in self.mgrs[table][table]:
                self._logger.info('Loaded extensions for %s: %s,%s doc %s' % \
                    (table, extn.name, extn.entry_point_target, extn.obj.__doc__))

            self.tab_alarms[table] = {}

        ConnectionState.init(
            sandesh_global, self._hostname, self._moduleid, self._instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus)

        self._us = UVEServer(None, self._logger, self._conf.redis_password())

        self._workers = {}

        self.disc = None
        self._libpart_name = self._hostname + ":" + self._instance_id
        self._libpart = None
        self._partset = set()
        if self._conf.discovery()['server']:
            import discoveryclient.client as client
            data = {'ip-address': self._hostname, 'port': self._instance_id}
            self.disc = client.DiscoveryClient(
                self._conf.discovery()['server'],
                self._conf.discovery()['port'],
                ModuleNames[Module.ALARM_GENERATOR])
            self._logger.info("Disc Publish to %s : %s" %
                              (str(self._conf.discovery()), str(data)))
            self.disc.publish(ALARM_GENERATOR_SERVICE_NAME, data)
        else:
            # If there is no discovery service, use fixed redis_uve list
            redis_uve_list = []
            try:
                for redis_uve in self._conf.redis_uve_list():
                    redis_ip_port = redis_uve.split(':')
                    redis_ip_port = (redis_ip_port[0], int(redis_ip_port[1]))
                    redis_uve_list.append(redis_ip_port)
            except Exception as e:
                self._logger.error('Failed to parse redis_uve_list: %s' % e)
            else:
                self._us.update_redis_uve_list(redis_uve_list)

            # If there is no discovery service, use fixed alarmgen list
            self._libpart = self.start_libpart(self._conf.alarmgen_list())

        PartitionOwnershipReq.handle_request = self.handle_PartitionOwnershipReq
        PartitionStatusReq.handle_request = self.handle_PartitionStatusReq
        UVETableAlarmReq.handle_request = self.handle_UVETableAlarmReq
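When no discovery server is configured, the example above falls back to a fixed redis_uve_list of 'host:port' strings and converts them to (host, port) tuples. A self-contained sketch of that parsing step; the sample addresses are made up:

def parse_host_port_list(entries):
    # Turn ['10.0.0.1:6379', ...] into [('10.0.0.1', 6379), ...].
    parsed = []
    for entry in entries:
        host, port = entry.split(':')
        parsed.append((host, int(port)))
    return parsed

print(parse_host_port_list(['127.0.0.1:6379', '10.0.0.2:6380']))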
Example #31
    def __init__(self, args=None):
        self._args = args
        self._fabric_rt_inst_obj = None

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.SCHEMA_TRANSFORMER])

        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                args.sandesh_send_rate_limit)
        sandesh.VnList.handle_request = self.sandesh_vn_handle_request
        sandesh.RoutintInstanceList.handle_request = \
            self.sandesh_ri_handle_request
        sandesh.ServiceChainList.handle_request = \
            self.sandesh_sc_handle_request
        sandesh.StObjectReq.handle_request = \
            self.sandesh_st_object_handle_request
        module = Module.SCHEMA_TRANSFORMER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'schema_transformer.sandesh'], self._disc,
            logger_class=args.logger_class,
            logger_config_file=args.logging_conf)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                    category=args.log_category,
                                    level=args.log_level,
                                    file=args.log_file,
                                    enable_syslog=args.use_syslog,
                                    syslog_facility=args.syslog_facility)
        ConnectionState.init(self._sandesh, hostname, module_name, instance_id,
                staticmethod(ConnectionState.get_process_state_cb),
                NodeStatusUVE, NodeStatus)

        self._sandesh.trace_buffer_create(name="MessageBusNotifyTraceBuf",
                                          size=1000)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'schema_transformer.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log)
        self._cassandra = SchemaTransformerDB(self, _zookeeper_client)
        DBBaseST.init(self, self._sandesh.logger(), self._cassandra)
        DBBaseST._sandesh = self._sandesh
        DBBaseST._vnc_lib = _vnc_lib
        ServiceChain.init()
        self.reinit()
        # create cpu_info object to send periodic updates
        sysinfo_req = False
        cpu_info = vnc_cpu_info.CpuInfo(
            module_name, instance_id, sysinfo_req, self._sandesh, 60)
        self._cpu_info = cpu_info
        self._db_resync_done.set()
Example #32
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(module_name, hostname, node_type_name,
                                     instance_id, self._args.collectors,
                                     'to_bgp_context',
                                     int(args.http_server_port),
                                     ['cfgm_common', 'device_manager.sandesh'],
                                     self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(args.admin_user, args.admin_password,
                                       args.admin_tenant_name,
                                       args.api_server_ip,
                                       args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        DBBase.init(self, self._sandesh.logger(), self._cassandra)
        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            vn_set = set()
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                vn_set |= pr.virtual_networks
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)
                    if vmi:
                        vn_set |= set([vmi.virtual_network])

            for vn_id in vn_set:
                VirtualNetworkDM.locate(vn_id)

            for pr in PhysicalRouterDM.values():
                pr.push_config()
        self._db_resync_done.set()
        while 1:
            # Just wait indefinitely
            time.sleep(5)
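Several examples above retry the VncApi connection until the API server answers, updating the connection state to INIT, DOWN, and finally UP. A distilled, standalone version of that retry loop; connect() and report() are placeholder callables, not part of the Contrail API:

import time

def connect_with_retry(connect, report, delay=3):
    # Keep calling connect() until it succeeds, reporting every failure.
    report('INIT', '')
    while True:
        try:
            conn = connect()
        except Exception as e:  # the real code catches ConnectionError and 503 separately
            report('DOWN', str(e))
            time.sleep(delay)
        else:
            report('UP', '')
            return conn

# Stand-in connect() that fails twice before succeeding.
attempts = {'count': 0}
def fake_connect():
    attempts['count'] += 1
    if attempts['count'] < 3:
        raise RuntimeError('api server not ready')
    return 'connected'

def log_state(state, msg):
    print('connection %s %s' % (state, msg))

print(connect_with_retry(fake_connect, log_state, delay=0))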