def init_cassandra(self, ks_cf_info=None):
    """Connect to Cassandra and cache the column-family accessor.

    :param ks_cf_info: optional read-write keyspace/column-family layout
        forwarded to VncCassandraClient as ``rw_keyspaces``.

    Retries once after 20 seconds on NotFoundException because, in
    certain multi-node setups, keyspaces/CFs are not ready yet when we
    first connect to them.
    """
    # Guarded lookup: fall back to False when the option is absent from
    # the parsed args.  BUG FIX: the original computed this value but
    # then passed self._api_args.cassandra_use_ssl directly to the
    # client, defeating the guard (AttributeError when the option is
    # missing).
    ssl_enabled = (self._api_args.cassandra_use_ssl
                   if 'cassandra_use_ssl' in self._api_args else False)

    def _connect():
        # Single place for the client construction so the retry path
        # cannot drift from the first attempt.
        return VncCassandraClient(
            self._api_args.cassandra_server_list,
            self._api_args.cluster_id,
            rw_keyspaces=ks_cf_info,
            ro_keyspaces=None,
            logger=self.log,
            reset_config=False,
            ssl_enabled=ssl_enabled,
            ca_certs=self._api_args.cassandra_ca_certs)

    try:
        self._cassandra = _connect()
    except NotFoundException:
        # In certain multi-node setups, keyspace/CF are not ready yet
        # when we connect to them; retry later.
        gevent.sleep(20)
        self._cassandra = _connect()
    try:
        self._get_cf = self._cassandra._cassandra_driver.get_cf
    except AttributeError:  # backward compat before R2002
        self._get_cf = self._cassandra.get_cf
    def issu_prepare(self):
        """Collect per-keyspace ISSU info and open old/new DB handles."""
        self._logger(
            "Issu contrail cassandra prepare...",
            level=SandeshLevel.SYS_INFO,
        )
        for issu_func, ks, cflist in self._issu_info:
            # Fall back to the generic migration routine when no
            # keyspace-specific function was registered.
            func = (self._issu_basic_function if issu_func is None
                    else issu_func)
            self._nkeyspaces[ks] = cflist
            self._okeyspaces[ks] = cflist
            self._ks_issu_func_info[ks] = func

        self._oldversion_handle = VncCassandraClient(
            self._oldversion_server_list, self._odb_prefix, None,
            self._okeyspaces, self._logger, credential=self._old_creds)

        self._newversion_handle = VncCassandraClient(
            self._newversion_server_list, self._ndb_prefix,
            self._nkeyspaces, None, self._logger,
            credential=self._new_creds)
 def init_cassandra(self, ks_cf_info=None):
     """Create the VncCassandraClient handle used by this process."""
     client_kwargs = dict(
         rw_keyspaces=ks_cf_info,
         ro_keyspaces=None,
         logger=self.log,
         reset_config=False,
         ssl_enabled=self._api_args.cassandra_use_ssl,
         ca_certs=self._api_args.cassandra_ca_certs,
     )
     self._cassandra = VncCassandraClient(
         self._api_args.cassandra_server_list,
         self._api_args.cluster_id,
         **client_kwargs)
    def issu_prepare(self):
        """Register ISSU info per keyspace and open old/new DB handles."""
        self._logger(
            "Issu contrail cassandra prepare...",
            level=SandeshLevel.SYS_INFO,
        )
        for issu_func, ks, cflist in self._issu_info:
            # Default to the generic migration routine when none is given.
            self._ks_issu_func_info[ks] = (
                issu_func if issu_func is not None
                else self._issu_basic_function)
            self._nkeyspaces[ks] = cflist
            self._okeyspaces[ks] = cflist

        self._oldversion_handle = VncCassandraClient(
            self._oldversion_server_list, self._odb_prefix,
            None, self._okeyspaces, self._logger,
            credential=self._old_creds,
            ssl_enabled=self._odb_use_ssl,
            ca_certs=self._odb_ca_certs)

        self._newversion_handle = VncCassandraClient(
            self._newversion_server_list, self._ndb_prefix,
            self._nkeyspaces, None, self._logger,
            credential=self._new_creds,
            ssl_enabled=self._ndb_use_ssl,
            ca_certs=self._ndb_ca_certs)
Example #5
0
    def __init__(self, force, resources_file, cassandra_servers,
                 cassandra_username, cassandra_password, db_prefix,
                 cassandra_batch_size, zookeeper_servers,
                 rules_per_security_group, keystone_client):
        """Load the resource distribution and open DB/zookeeper handles.

        :param resources_file: open file (or YAML string) describing the
            resource distribution to populate.
        :param cassandra_servers: list of cassandra server addresses.
        :param cassandra_username: optional auth user; credentials are
            only passed to the client when both user and password are set.
        Remaining parameters are stored as-is for later use.
        """
        self._force = force
        # BUG FIX: yaml.load() without an explicit Loader is deprecated
        # since PyYAML 5.1 and a TypeError on PyYAML >= 6; the resource
        # distribution file is plain YAML, so safe_load is sufficient
        # and avoids arbitrary Python object construction.
        self._resource_distribution = yaml.safe_load(resources_file)
        self._cassandra_batch_size = cassandra_batch_size
        self._rules_per_security_group = rules_per_security_group
        self._keystone_client = keystone_client

        # Connect to cassandra database
        logger.debug("Initilizing the cassandra connection on %s",
                     cassandra_servers)
        cassandra_credentials = {}
        if cassandra_username is not None and cassandra_password is not None:
            cassandra_credentials = {
                'username': cassandra_username,
                'password': cassandra_password,
            }

        def vnc_cassandra_client_logger(msg, level=logging.INFO):
            # Bridge VncCassandraClient's logger callback onto the
            # module-level logger.
            logger.log(msg=msg, level=level)

        self._cassandra_db = VncCassandraClient(
            cassandra_servers,
            db_prefix,
            self._UUID_KEYSPACE,
            None,
            vnc_cassandra_client_logger,
            credential=cassandra_credentials)
        self._uuid_cf = self._cassandra_db.get_cf('obj_uuid_table')
        self._fqname_cf = self._cassandra_db.get_cf('obj_fq_name_table')

        # Initialize zookeeper client
        self._zk_client = ZookeeperClient(zookeeper_servers)
Example #6
0
 def __init__(self, args_str):
     """Parse arguments, then connect to cassandra and zookeeper."""
     self._parse_args(args_str)
     self._cassandra = VncCassandraClient(
         self._api_args.cassandra_server_list, False,
         self._api_args.cluster_id, None, logger=self.log)
     self._zookeeper = kazoo.client.KazooClient(
         self._api_args.zk_server_ip)
     self._zookeeper.start()
 def init_cassandra(self, ks_cf_info=None):
     """Open the VncCassandraClient connection for this process."""
     api_args = self._api_args
     self._cassandra = VncCassandraClient(
         api_args.cassandra_server_list,
         api_args.cluster_id,
         rw_keyspaces=ks_cf_info,
         ro_keyspaces=None,
         logger=self.log,
         reset_config=False,
         ssl_enabled=api_args.cassandra_use_ssl,
         ca_certs=api_args.cassandra_ca_certs)
 def __init__(self, args_str):
     """Parse args, open cassandra, and start the zookeeper client."""
     self._parse_args(args_str)
     self._cassandra = VncCassandraClient(
         self._api_args.cassandra_server_list,
         False,
         self._api_args.cluster_id,
         None,
         logger=self.log)
     self._zookeeper = kazoo.client.KazooClient(self._api_args.zk_server_ip)
     self._zookeeper.start()
Example #9
0
    def _connect_rabbit(self):
        """Connect svc_mon to RabbitMQ (HA mode) and Cassandra."""
        args = self._args

        self._db_resync_done = gevent.event.Event()

        q_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            args.rabbit_server, args.rabbit_port, args.rabbit_user,
            args.rabbit_password, args.rabbit_vhost, args.rabbit_ha_mode,
            q_name, self._vnc_subscribe_callback, self.config_log)

        self._cassandra = VncCassandraClient(
            args.cassandra_server_list, args.reset_config,
            args.cluster_id, None, self.config_log)
        DBBase.init(self, self.logger, self._cassandra)
    def issu_prepare(self):
        """Build ISSU bookkeeping and connect to both cassandra versions."""
        self._logger(
            "Issu contrail cassandra prepare...",
            level=SandeshLevel.SYS_INFO,
        )
        for issu_func, keyspace, cf_list in self._issu_info:
            func = self._issu_basic_function if issu_func is None else issu_func
            self._nkeyspaces.update({keyspace: cf_list})
            self._okeyspaces.update({keyspace: cf_list})
            self._ks_issu_func_info.update({keyspace: func})

        # Old DB is opened read-only, new DB read-write; both handles use
        # the thrift driver explicitly.
        self._oldversion_handle = VncCassandraClient(
            self._oldversion_server_list,
            cassandra_driver='thrift',
            db_prefix=self._odb_prefix,
            ro_keyspaces=self._okeyspaces,
            logger=self._logger,
            credential=self._old_creds,
            ssl_enabled=self._odb_use_ssl,
            ca_certs=self._odb_ca_certs)

        self._newversion_handle = VncCassandraClient(
            self._newversion_server_list,
            cassandra_driver='thrift',
            db_prefix=self._ndb_prefix,
            rw_keyspaces=self._nkeyspaces,
            logger=self._logger,
            credential=self._new_creds,
            ssl_enabled=self._ndb_use_ssl,
            ca_certs=self._ndb_ca_certs)
Example #11
0
    def __init__(self, force, resources_file, cassandra_servers,
                 cassandra_username, cassandra_password, db_prefix,
                 cassandra_batch_size, zookeeper_servers,
                 rules_per_security_group, keystone_client,
                 dont_populate_zookeeper):
        """Load the resource distribution and open DB/zookeeper handles.

        :param resources_file: open file (or YAML string) describing the
            resource distribution to populate.
        :param dont_populate_zookeeper: when true, a DummyZookeeperClient
            is used instead of a real connection.
        Credentials are only passed to cassandra when both username and
        password are provided; the other parameters are stored as-is.
        """
        self._force = force
        # BUG FIX: yaml.load() without an explicit Loader is deprecated
        # since PyYAML 5.1 and a TypeError on PyYAML >= 6; the resource
        # distribution file is plain YAML, so safe_load is sufficient
        # and avoids arbitrary Python object construction.
        self._resource_distribution = yaml.safe_load(resources_file)
        self._cassandra_batch_size = cassandra_batch_size
        self._rules_per_security_group = rules_per_security_group
        self._keystone_client = keystone_client
        self._dont_populate_zookeeper = dont_populate_zookeeper

        # Connect to cassandra database
        logger.debug("Initilizing the cassandra connection on %s",
                     cassandra_servers)
        cassandra_credentials = {}
        if (cassandra_username is not None and
                cassandra_password is not None):
            cassandra_credentials = {
                'username': cassandra_username,
                'password': cassandra_password,
            }

        def vnc_cassandra_client_logger(msg, level=logging.INFO):
            # Bridge VncCassandraClient's logger callback onto the
            # module-level logger.
            logger.log(msg=msg, level=level)

        self._cassandra_db = VncCassandraClient(
            cassandra_servers,
            db_prefix,
            self._UUID_KEYSPACE,
            None,
            vnc_cassandra_client_logger,
            credential=cassandra_credentials)
        self._uuid_cf = self._cassandra_db.get_cf('obj_uuid_table')
        self._fqname_cf = self._cassandra_db.get_cf('obj_fq_name_table')

        # Initialize zookeeper client (dummy when population is disabled)
        if self._dont_populate_zookeeper:
            self._zk_client = DummyZookeeperClient()
        else:
            self._zk_client = ZookeeperClient(zookeeper_servers)
    def _connect_rabbit(self):
        """Connect svc_mon to RabbitMQ (no HA mode) and Cassandra."""
        cfg = self._args

        self._db_resync_done = gevent.event.Event()

        q_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            cfg.rabbit_server, cfg.rabbit_port, cfg.rabbit_user,
            cfg.rabbit_password, cfg.rabbit_vhost, q_name,
            self._vnc_subscribe_callback, self.config_log)

        self._cassandra = VncCassandraClient(
            cfg.cassandra_server_list, cfg.reset_config,
            cfg.cluster_id, None, self.config_log)
        DBBase.init(self, self.logger, self._cassandra)
class DeviceManager(object):
    """Config listener that pushes configuration to physical routers.

    NOTE(review): summary inferred from the visible methods only -- it
    connects to the API server, rabbit and cassandra, mirrors config
    objects into the *DM caches, and calls PhysicalRouterDM.push_config()
    when dependencies change.
    """

    # Dependency-propagation map consumed by DependencyTracker in
    # _vnc_subscribe_callback: for each object type, 'self' lists the
    # neighbour types to re-evaluate when an object of that type itself
    # changes, and each other key lists what to re-evaluate when the
    # change arrived via that neighbour type.
    _REACTION_MAP = {
        'physical_router': {
            'self': ['bgp_router', 'physical_interface', 'logical_interface'],
            'bgp_router': [],
            'physical_interface': [],
            'logical_interface': [],
            'virtual_network': [],
        },
        'bgp_router': {
            'self': ['bgp_router', 'physical_router'],
            'physical_router': [],
        },
        'physical_interface': {
            'self': ['physical_router', 'logical_interface'],
            'physical_router': ['logical_interface'],
            'logical_interface': ['physical_router'],
        },
        'logical_interface': {
            'self': ['physical_router', 'physical_interface',
                     'virtual_machine_interface'],
            'physical_interface': ['virtual_machine_interface'],
            'virtual_machine_interface': ['physical_router',
                                          'physical_interface'],
            'physical_router': ['virtual_machine_interface']
        },
        'virtual_machine_interface': {
            'self': ['logical_interface', 'virtual_network'],
            'logical_interface': ['virtual_network'],
            'virtual_network': ['logical_interface']
        },
        'virtual_network': {
            'self': ['physical_router', 'virtual_machine_interface'],
            'routing_instance': ['physical_router',
                                 'virtual_machine_interface'],
            'physical_router': [],
            'virtual_machine_interface': []
        },
        'routing_instance': {
            'self': ['virtual_network'],
            'virtual_network': []
        },
    }

    def __init__(self, args=None):
        """Start the device manager: sandesh, API server, rabbit, DB.

        Blocks forever at the end, keeping the rabbit subscribe greenlet
        alive.  Order of the steps below matters (sandesh before
        ConnectionState, API server before rabbit/cassandra resync).
        """
        self._args = args

        # Initialize discovery client (only when a discovery server is
        # configured).
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh introspect/logging setup for this module instance.
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info and retry after a short delay.
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost

        # Set once the initial cassandra resync below is finished;
        # _vnc_subscribe_callback waits on it before applying updates.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        # Initial resync: load physical routers and everything reachable
        # from them into the DM caches, then push config once.
        DBBaseDM.init(self._sandesh.logger(), self._cassandra)
        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            # NOTE(review): config_log() takes a mandatory `level`
            # argument (see config_log below); this call omits it and
            # would raise TypeError -- confirm and fix upstream.
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            vn_set = set()
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                vn_set |= pr.virtual_networks
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)
                    if vmi:
                        vn_set |= vmi.virtual_networks

            for vn_id in vn_set:
                VirtualNetworkDM.locate(vn_id)

            for pr in PhysicalRouterDM.values():
                pr.push_config()

        self._db_resync_done.set()
        while 1:
            self._vnc_kombu._subscribe_greenlet.join()
            # In case _subscribe_greenlet dies for some reason, it will be
            # respawned. sleep for 1 second to wait for it to be respawned
            time.sleep(1)
    # end __init__

    def connection_state_update(self, status, message=None):
        """Publish the API-server connection status via ConnectionState."""
        api_server_addr = '%s:%s' % (self._args.api_server_ip,
                                     self._args.api_server_port)
        ConnectionState.update(
            conn_type=ConnectionType.APISERVER, name='ApiServer',
            status=status, message=message or '',
            server_addrs=[api_server_addr])
    # end connection_state_update

    def config_log(self, msg, level):
        """Log *msg* via sandesh, translating the sandesh level."""
        py_level = SandeshLogger.get_py_logger_level(level)
        self._sandesh.logger().log(py_level, msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Apply one config-change notification and re-push affected PRs.

        :param oper_info: dict with 'type' and 'oper' keys, plus
            'uuid'/'obj_dict' depending on the operation
            (CREATE/UPDATE/DELETE).
        """
        # Wait for the initial DB resync before applying incremental
        # updates.
        self._db_resync_done.wait()
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBaseDM._OBJ_TYPE_MAP.get(obj_type)
            if obj_class is None:
                # Object type not tracked by the device manager.
                return

            if oper_info['oper'] == 'CREATE':
                obj_dict = oper_info['obj_dict']
                obj_id = obj_dict['uuid']
                obj = obj_class.locate(obj_id, obj_dict)
                dependency_tracker = DependencyTracker(DBBaseDM._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'UPDATE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                dependency_tracker = DependencyTracker(DBBaseDM._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                if obj is not None:
                    # Evaluate against the pre-update state as well, so
                    # dependencies removed by the update are re-pushed.
                    dependency_tracker.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                dependency_tracker = DependencyTracker(DBBaseDM._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # BUG FIX: config_log() takes a mandatory `level`
                # argument; the original call omitted it and raised
                # TypeError instead of logging the error.
                self.config_log('Error while accessing %s uuid %s' % (
                                obj_type, obj_id),
                                level=SandeshLevel.SYS_ERR)
                return

        except Exception:
            string_buf = cStringIO.StringIO()
            cgitb.Hook(file=string_buf, format="text").handle(sys.exc_info())
            self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
            # BUG FIX: when the exception fired before dependency_tracker
            # was bound, falling through to the loop below raised
            # NameError; nothing sensible can be pushed, so stop here.
            return

        for pr_id in dependency_tracker.resources.get('physical_router', []):
            pr = PhysicalRouterDM.get(pr_id)
            if pr is not None:
                pr.push_config()
    def __init__(self, args=None):
        """Start the device manager (HA-rabbit variant): sandesh, API
        server, rabbit, cassandra resync, then block forever.

        Order matters: sandesh before ConnectionState, API server before
        rabbit/cassandra, and the full DB resync before setting
        _db_resync_done (which unblocks _vnc_subscribe_callback).
        """
        self._args = args

        # Initialize discovery client (only when a discovery server is
        # configured).
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh introspect/logging setup for this module instance.
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(module_name, hostname, node_type_name,
                                     instance_id, self._args.collectors,
                                     'to_bgp_context',
                                     int(args.http_server_port),
                                     ['cfgm_common', 'device_manager.sandesh'],
                                     self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(args.admin_user, args.admin_password,
                                       args.admin_tenant_name,
                                       args.api_server_ip,
                                       args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info and retry after a short delay.
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        # Set once the full resync below finishes; the subscribe
        # callback waits on it before applying updates.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        # Initial resync: load global system configs, virtual networks,
        # bgp routers and physical routers into the DM caches, then push
        # config once for every physical router.
        # NOTE(review): the error-path config_log() calls below omit the
        # mandatory `level` argument (see config_log) and would raise
        # TypeError -- confirm and fix upstream.
        DBBase.init(self, self._sandesh.logger(), self._cassandra)
        ok, global_system_config_list = self._cassandra._cassandra_global_system_config_list(
        )
        if not ok:
            self.config_log('global system config list returned error: %s' %
                            global_system_config_list)
        else:
            for fq_name, uuid in global_system_config_list:
                GlobalSystemConfigDM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if not ok:
            self.config_log('virtual network list returned error: %s' %
                            vn_list)
        else:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None and vn.routing_instances is not None:
                    for ri_id in vn.routing_instances:
                        ri_obj = RoutingInstanceDM.locate(ri_id)

        ok, bgp_list = self._cassandra._cassandra_bgp_router_list()
        if not ok:
            self.config_log('bgp router list returned error: %s' % bgp_list)
        else:
            for fq_name, uuid in bgp_list:
                BgpRouterDM.locate(uuid)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)

            for pr in PhysicalRouterDM.values():
                pr.push_config()
        self._db_resync_done.set()
        while 1:
            # Just wait indefinitely
            time.sleep(5)
class DeviceManager(object):
    """Device-manager daemon.

    Mirrors config objects from cassandra into the *DM caches, keeps them
    in sync through rabbit (kombu) notifications, and pushes the resulting
    configuration to physical routers via ``PhysicalRouterDM.push_config``.
    """

    # Dependency reaction map.  Outer key: object type that changed.
    # Inner key: origin of the change ('self' or a neighbour type).
    # Value: list of related object types to re-evaluate for that change.
    _REACTION_MAP = {
        'physical_router': {
            'self': ['bgp_router', 'physical_interface', 'logical_interface'],
            'bgp_router': [],
            'physical_interface': [],
            'logical_interface': [],
            'virtual_network': [],
        },
        'bgp_router': {
            'self': ['bgp_router', 'physical_router'],
            'bgp_router': ['physical_router'],
            'physical_router': [],
        },
        'physical_interface': {
            'self': ['physical_router', 'logical_interface'],
            'physical_router': ['logical_interface'],
            'logical_interface': ['physical_router'],
        },
        'logical_interface': {
            'self': [
                'physical_router', 'physical_interface',
                'virtual_machine_interface'
            ],
            'physical_interface': ['virtual_machine_interface'],
            'virtual_machine_interface':
            ['physical_router', 'physical_interface'],
            'physical_router': ['virtual_machine_interface']
        },
        'virtual_machine_interface': {
            'self': ['logical_interface', 'virtual_network'],
            'logical_interface': ['virtual_network'],
            'virtual_network': ['logical_interface']
        },
        'virtual_network': {
            'self': ['physical_router', 'virtual_machine_interface'],
            'routing_instance':
            ['physical_router', 'virtual_machine_interface'],
            'physical_router': [],
            'virtual_machine_interface': []
        },
        'routing_instance': {
            'self': ['routing_instance', 'virtual_network'],
            'routing_instance': ['virtual_network'],
            'virtual_network': []
        },
    }

    def __init__(self, args=None):
        """Connect to discovery, sandesh, the API server, rabbit and
        cassandra, then run the initial DB resync.

        NOTE(review): this constructor never returns — after the resync it
        parks in an infinite sleep loop at the bottom, so the calling
        greenlet is consumed here.  Confirm this is the intended entry
        point shape.
        """
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh (introspect/logging) generator setup.
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(module_name, hostname, node_type_name,
                                     instance_id, self._args.collectors,
                                     'to_bgp_context',
                                     int(args.http_server_port),
                                     ['cfgm_common', 'device_manager.sandesh'],
                                     self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb), NodeStatusUVE,
            NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(args.admin_user, args.admin_password,
                                       args.admin_tenant_name,
                                       args.api_server_ip,
                                       args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        # Rabbit (kombu) channel for config change notifications.
        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        # Set once the initial resync below completes; notification
        # processing blocks on this event.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        # NOTE(review): the argument order here (servers, reset_config,
        # cluster_id, ...) differs from newer VncCassandraClient
        # signatures — confirm against the installed cfgm_common version.
        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        DBBase.init(self, self._sandesh.logger(), self._cassandra)

        # Initial resync: walk each tracked object listing from cassandra
        # and populate the *DM caches via locate().
        ok, global_system_config_list = self._cassandra._cassandra_global_system_config_list(
        )
        if not ok:
            self.config_log('global system config list returned error: %s' %
                            global_system_config_list)
        else:
            for fq_name, uuid in global_system_config_list:
                GlobalSystemConfigDM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if not ok:
            self.config_log('virtual network list returned error: %s' %
                            vn_list)
        else:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None and vn.routing_instances is not None:
                    # Pull in the routing instances referenced by each VN.
                    for ri_id in vn.routing_instances:
                        ri_obj = RoutingInstanceDM.locate(ri_id)

        ok, bgp_list = self._cassandra._cassandra_bgp_router_list()
        if not ok:
            self.config_log('bgp router list returned error: %s' % bgp_list)
        else:
            for fq_name, uuid in bgp_list:
                BgpRouterDM.locate(uuid)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                # Collect all logical interfaces of the router, both
                # directly attached and under its physical interfaces.
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                # Cache the VMIs referenced by those logical interfaces.
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)

            # Initial push of config to every known physical router.
            for pr in PhysicalRouterDM.values():
                pr.push_config()
        self._db_resync_done.set()
        # NOTE(review): execution blocks here forever — __init__ doubles
        # as the daemon main loop.
        while 1:
            # Just wait indefinitely
            time.sleep(5)

    # end __init__

    def connection_state_update(self, status, message=None):
        """Report API-server connection *status* (and optional *message*)
        to the ConnectionState UVE."""
        ConnectionState.update(
            conn_type=ConnectionType.APISERVER,
            name='ApiServer',
            status=status,
            message=message or '',
            server_addrs=[
                '%s:%s' %
                (self._args.api_server_ip, self._args.api_server_port)
            ])

    # end connection_state_update

    def config_log(self, msg, level):
        """Log *msg* through sandesh, mapping the sandesh *level* to the
        corresponding python logging level."""
        self._sandesh.logger().log(SandeshLogger.get_py_logger_level(level),
                                   msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Handle one rabbit config notification (CREATE/UPDATE/DELETE)
        and push config to every physical router the dependency tracker
        marks as affected."""
        # Block until the initial cassandra resync has completed.
        self._db_resync_done.wait()
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBase._OBJ_TYPE_MAP.get(obj_type)
            if obj_class is None:
                # Not an object type this daemon tracks.
                return

            if oper_info['oper'] == 'CREATE':
                obj_dict = oper_info['obj_dict']
                obj_id = obj_dict['uuid']
                obj = obj_class.locate(obj_id, obj_dict)
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'UPDATE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                # Evaluate against the pre-update state too, so links
                # removed by the update are still part of the affected set.
                old_dt = None
                if obj is not None:
                    old_dt = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                               self._REACTION_MAP)
                    old_dt.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                if old_dt:
                    # Merge old-state resources into the new tracker.
                    for resource, ids in old_dt.resources.items():
                        if resource not in dependency_tracker.resources:
                            dependency_tracker.resources[resource] = ids
                        else:
                            dependency_tracker.resources[resource] = list(
                                set(dependency_tracker.resources[resource])
                                | set(ids))
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # NOTE(review): this call omits config_log's mandatory
                # 'level' argument; if reached it raises TypeError, which
                # the except below turns into an error log.
                self.config_log('Error while accessing %s uuid %s' %
                                (obj_type, obj_id))
                return

        except Exception:
            # Catch-all: log the full cgitb traceback via sandesh.
            string_buf = cStringIO.StringIO()
            cgitb.Hook(file=string_buf, format="text").handle(sys.exc_info())
            self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)

        # NOTE(review): if the except path above ran before
        # 'dependency_tracker' was bound, this loop raises NameError.
        for pr_id in dependency_tracker.resources.get('physical_router', []):
            pr = PhysicalRouterDM.get(pr_id)
            if pr is not None:
                pr.push_config()
# Example #16
class SvcMonitor(object):
    """Service-monitor daemon state.

    Data + methods used/referred to by the ssrc and arc greenlets: caches
    config objects in the *SM classes, reacts to rabbit notifications and
    drives service-instance / loadbalancer provisioning.
    """

    # Dependency reaction map.  Outer key: object type that changed.
    # Inner key: origin of the change ('self' or a neighbour type).
    # Value: list of related object types to re-evaluate for that change.
    _REACTION_MAP = {
        "service_appliance_set": {
            'self': [],
            'service_appliance': []
        },
        "service_appliance": {
            'self': ['service_appliance_set'],
            'service_appliance_set': []
        },
        "loadbalancer_pool": {
            'self': [],
            'virtual_ip': [],
            'loadbalancer_member': [],
            'loadbalancer_healthmonitor': [],
        },
        "loadbalancer_member": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "virtual_ip": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "loadbalancer_healthmonitor": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "service_instance": {
            'self': ['virtual_machine'],
            'virtual_machine': []
        },
        "instance_ip": {
            'self': [],
        },
        "floating_ip": {
            'self': [],
        },
        "service_template": {
            'self': [],
        },
        "physical_router": {
            'self': [],
        },
        "physical_interface": {
            'self': [],
        },
        "logical_interface": {
            'self': [],
        },
        "virtual_network": {
            'self': [],
        },
        "virtual_machine": {
            'self': ['virtual_machine_interface'],
            'service_instance': [],
            'virtual_machine_interface': [],
        },
        "virtual_machine_interface": {
            'self': ['interface_route_table', 'virtual_machine'],
            'interface_route_table': [],
            'virtual_machine': [],
        },
        "interface_route_table": {
            'self': [],
            'virtual_machine_interface': [],
        },
        "project": {
            'self': [],
        },
    }

    def __init__(self, args=None):
        """Set up the database, discovery client, loggers and the
        rabbit/cassandra connections for the service monitor."""
        self._args = args

        # Database holding service-instance state.
        self.si_db = ServiceInstanceDB(args)

        # Discovery client (only when a discovery server is configured).
        self._disc = None
        if args.disc_server_ip and args.disc_server_port:
            self._disc = client.DiscoveryClient(
                args.disc_server_ip, args.disc_server_port,
                ModuleNames[Module.SVC_MONITOR])

        # Main service-monitor logger, shared with the database.
        self.logger = ServiceMonitorLogger(self.si_db, self._disc, args)
        self.si_db.add_logger(self.logger)
        self.si_db.init_database()

        # Dedicated rotating-file logger for catch-all error tracebacks.
        self._err_file = args.trace_file
        err_logger = logging.getLogger('SvcErrLogger')
        err_logger.setLevel(logging.ERROR)
        try:
            # Probe writability first; only attach the handler on success.
            with open(self._err_file, 'a'):
                err_logger.addHandler(
                    logging.handlers.RotatingFileHandler(
                        self._err_file, maxBytes=64 * 1024, backupCount=2))
        except IOError:
            self.logger.log_warning("Failed to open trace file %s" %
                                    self._err_file)
        self._svc_err_logger = err_logger

        # Connect to Rabbit and Initialize cassandra connection
        self._connect_rabbit()

    def _connect_rabbit(self):
        """Open the kombu (rabbit) notification channel and the cassandra
        client, then bind the DBBase caches to them."""
        args = self._args

        # Set once the initial sync completes; notification handling
        # blocks on this event.
        self._db_resync_done = gevent.event.Event()

        queue = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            args.rabbit_server, args.rabbit_port, args.rabbit_user,
            args.rabbit_password, args.rabbit_vhost, args.rabbit_ha_mode,
            queue, self._vnc_subscribe_callback, self.config_log)

        self._cassandra = VncCassandraClient(
            args.cassandra_server_list, args.reset_config,
            args.cluster_id, None, self.config_log)
        DBBase.init(self, self.logger, self._cassandra)

    # end _connect_rabbit

    def config_log(self, msg, level):
        """Log *msg* via the service-monitor logger.

        NOTE(review): *level* is accepted for interface compatibility with
        the rabbit/cassandra callbacks but is ignored here — confirm this
        is intentional.
        """
        self.logger.log(msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Handle one rabbit config notification (CREATE/UPDATE/DELETE).

        Updates the local *SM object cache for the notified object, then
        re-provisions every resource the dependency tracker marked as
        affected (service appliance sets, lb pools, service instances,
        interfaces, floating ips).
        """
        # Do not process notifications until the initial DB sync is done.
        self._db_resync_done.wait()
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBase._OBJ_TYPE_MAP.get(obj_type)
            if obj_class is None:
                # Not an object type this daemon tracks.
                return

            if oper_info['oper'] == 'CREATE' or oper_info['oper'] == 'UPDATE':
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is not None:
                    # Evaluate with the pre-update state so links removed
                    # by the update are still part of the affected set.
                    dependency_tracker.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                                                       self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # BUG FIX: the 'level' argument was missing here, which
                # itself raised a TypeError inside this try block.
                self.config_log('Error while accessing %s uuid %s' %
                                (obj_type, obj_id),
                                level=SandeshLevel.SYS_ERR)
                return

        except Exception:
            # Catch-all: log the full cgitb traceback and bail out.
            # BUG FIX: previously execution fell through to the dependency
            # processing below, where 'dependency_tracker' could be
            # unbound (NameError) or reflect a half-processed change.
            string_buf = cStringIO.StringIO()
            cgitb.Hook(file=string_buf, format="text").handle(sys.exc_info())
            self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)
            return

        # Re-provision everything the tracker marked as affected.
        for sas_id in dependency_tracker.resources.get('service_appliance_set',
                                                       []):
            sas_obj = ServiceApplianceSetSM.get(sas_id)
            if sas_obj is not None:
                sas_obj.add()

        for lb_pool_id in dependency_tracker.resources.get(
                'loadbalancer_pool', []):
            lb_pool = LoadbalancerPoolSM.get(lb_pool_id)
            if lb_pool is not None:
                lb_pool.add()

        for si_id in dependency_tracker.resources.get('service_instance', []):
            si = ServiceInstanceSM.get(si_id)
            if si:
                self._create_service_instance(si)
            else:
                # Service instance is gone: tear down its VMs.
                for vm_id in dependency_tracker.resources.get(
                        'virtual_machine', []):
                    vm = VirtualMachineSM.get(vm_id)
                    self._delete_service_instance(vm)

        for vn_id in dependency_tracker.resources.get('virtual_network', []):
            vn = VirtualNetworkSM.get(vn_id)
            if vn:
                # Re-create any service instance that references this VN
                # by fq_name in its parameters.
                for si_id in ServiceInstanceSM:
                    si = ServiceInstanceSM.get(si_id)
                    if (':').join(vn.fq_name) in si.params.values():
                        self._create_service_instance(si)

        for vmi_id in dependency_tracker.resources.get(
                'virtual_machine_interface', []):
            vmi = VirtualMachineInterfaceSM.get(vmi_id)
            if vmi:
                for vm_id in dependency_tracker.resources.get(
                        'virtual_machine', []):
                    vm = VirtualMachineSM.get(vm_id)
                    if vm:
                        self.check_link_si_to_vm(vm, vmi)
            else:
                # VMI is gone: drop its interface route tables.
                for irt_id in dependency_tracker.resources.get(
                        'interface_route_table', []):
                    self._delete_interface_route_table(irt_id)

        for fip_id in dependency_tracker.resources.get('floating_ip', []):
            fip = FloatingIpSM.get(fip_id)
            if fip:
                for vmi_id in fip.virtual_machine_interfaces:
                    vmi = VirtualMachineInterfaceSM.get(vmi_id)
                    if vmi and vmi.virtual_ip:
                        self.netns_manager.add_fip_to_vip_vmi(vmi, fip)

    def post_init(self, vnc_lib, args=None):
        """Finish initialization once the API-server connection exists.

        Loads the nova client, scheduler, instance managers and the
        loadbalancer agent, syncs state from cassandra, seeds the default
        service templates, runs upgrade handling and launches services.
        """
        # api server
        self._vnc_lib = vnc_lib

        self._nova_client = importutils.import_object(
            'svc_monitor.nova_client.ServiceMonitorNovaClient',
            self._args, self.logger)

        # load vrouter scheduler
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver, self._vnc_lib,
            self._nova_client, self._args)

        # The three instance managers share one constructor signature.
        manager_args = (self._vnc_lib, self.si_db, self.logger,
                        self.vrouter_scheduler, self._nova_client, self._args)

        # load virtual machine instance manager
        self.vm_manager = importutils.import_object(
            'svc_monitor.virtual_machine_manager.VirtualMachineManager',
            *manager_args)

        # load network namespace instance manager
        self.netns_manager = importutils.import_object(
            'svc_monitor.instance_manager.NetworkNamespaceManager',
            *manager_args)

        # load a vrouter instance manager
        self.vrouter_manager = importutils.import_object(
            'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
            *manager_args)

        # load a loadbalancer agent
        self.loadbalancer_agent = LoadbalancerAgent(self, self._vnc_lib,
                                                    self._args)

        # Read the cassandra and populate the entry in ServiceMonitor DB
        self.sync_sm()

        # Built-in service templates, created if absent (order preserved:
        # analyzer, NAT, netns SNAT, haproxy loadbalancer, docker).
        default_templates = [
            ('analyzer-template', 'analyzer',
             dict(flavor='m1.medium', image_name='analyzer')),
            ('nat-template', 'firewall',
             dict(svc_mode='in-network-nat', image_name='analyzer',
                  flavor='m1.medium')),
            ('netns-snat-template', 'source-nat',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace', scaling=True)),
            ('haproxy-loadbalancer-template', 'loadbalancer',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace', scaling=True)),
            ('docker-template', 'firewall',
             dict(svc_mode='transparent', image_name="ubuntu",
                  hypervisor_type='vrouter-instance',
                  vrouter_instance_type='docker',
                  instance_data={"command": "/bin/bash"})),
        ]
        for st_name, svc_type, extra in default_templates:
            self._create_default_template(st_name, svc_type, **extra)

        # upgrade handling
        self.upgrade()

        # check services
        self.launch_services()

        self._db_resync_done.set()

    def upgrade(self):
        """One-shot upgrade pass: rename pre-existing service VMs.

        For every service-instance VM whose cached record predates the
        current naming scheme (no ``virtualization_type``), rename the
        nova server to the computed instance name and mirror the display
        name into the vnc virtual-machine object.
        """
        for si in ServiceInstanceSM.values():
            st = ServiceTemplateSM.get(si.service_template)
            if not st:
                continue
            for vm_id in si.virtual_machines:
                vm = VirtualMachineSM.get(vm_id)
                # Already carries a virtualization type: nothing to do.
                if vm.virtualization_type:
                    continue
                nova_vm = self._nova_client.oper('servers',
                                                 'get',
                                                 si.proj_name,
                                                 id=vm_id)
                if not nova_vm:
                    continue

                # Build a minimal ServiceInstance shell just to compute
                # the expected instance name.
                si_obj = ServiceInstance()
                si_obj.name = si.name
                si_obj.fq_name = si.fq_name
                instance_name = self.vm_manager._get_instance_name(
                    si_obj, vm.index)
                if vm.name == instance_name:
                    continue
                # Rename on the nova side first, then update the vnc
                # virtual-machine display name to match.
                nova_vm.update(name=instance_name)
                vm_obj = VirtualMachine()
                vm_obj.uuid = vm_id
                vm_obj.fq_name = [vm_id]
                vm_obj.set_display_name(instance_name + '__' +
                                        st.virtualization_type)
                try:
                    self._vnc_lib.virtual_machine_update(vm_obj)
                except Exception:
                    # Best-effort: a failed vnc update is deliberately
                    # ignored (the nova rename has already happened).
                    pass

    def launch_services(self):
        """Kick off provisioning for every known service instance."""
        for instance in ServiceInstanceSM.values():
            self._create_service_instance(instance)

    def sync_sm(self):
        """Seed the in-memory *SM caches from cassandra at start-up.

        Walks every tracked object listing; ``locate`` both caches the
        object and loads its state.  Listings that fail are silently
        skipped (they will be picked up later via notifications).
        """
        # NOTE(review): vn_set, vmi_set and iip_set are accumulated below
        # but never consumed in this method — confirm whether they are
        # dead state.
        vn_set = set()
        vmi_set = set()
        iip_set = set()

        ok, pools = self._cassandra._cassandra_loadbalancer_pool_list()
        if ok:
            for _fq_name, obj_uuid in pools:
                pool = LoadbalancerPoolSM.locate(obj_uuid)
                if pool.virtual_machine_interface:
                    vmi_set.add(pool.virtual_machine_interface)

        ok, members = self._cassandra._cassandra_loadbalancer_member_list()
        if ok:
            for _fq_name, obj_uuid in members:
                LoadbalancerMemberSM.locate(obj_uuid)

        ok, vips = self._cassandra._cassandra_virtual_ip_list()
        if ok:
            for _fq_name, obj_uuid in vips:
                vip = VirtualIpSM.locate(obj_uuid)
                if vip.virtual_machine_interface:
                    vmi_set.add(vip.virtual_machine_interface)

        ok, monitors = self._cassandra._cassandra_loadbalancer_healthmonitor_list(
        )
        if ok:
            for _fq_name, obj_uuid in monitors:
                HealthMonitorSM.locate(obj_uuid)

        ok, instances = self._cassandra._cassandra_service_instance_list()
        if ok:
            for _fq_name, obj_uuid in instances:
                ServiceInstanceSM.locate(obj_uuid)

        ok, templates = self._cassandra._cassandra_service_template_list()
        if ok:
            for _fq_name, obj_uuid in templates:
                ServiceTemplateSM.locate(obj_uuid)

        ok, networks = self._cassandra._cassandra_virtual_network_list()
        if ok:
            for _fq_name, obj_uuid in networks:
                network = VirtualNetworkSM.locate(obj_uuid)
                vmi_set |= network.virtual_machine_interfaces

        ok, phys_ifs = self._cassandra._cassandra_physical_interface_list()
        if ok:
            for _fq_name, obj_uuid in phys_ifs:
                PhysicalInterfaceSM.locate(obj_uuid)

        ok, log_ifs = self._cassandra._cassandra_logical_interface_list()
        if ok:
            for _fq_name, obj_uuid in log_ifs:
                log_if = LogicalInterfaceSM.locate(obj_uuid)
                if log_if.virtual_machine_interface:
                    vmi_set.add(log_if.virtual_machine_interface)

        ok, routers = self._cassandra._cassandra_physical_router_list()
        if ok:
            for _fq_name, obj_uuid in routers:
                PhysicalRouterSM.locate(obj_uuid)

        ok, vrouters = self._cassandra._cassandra_virtual_router_list()
        if ok:
            for _fq_name, obj_uuid in vrouters:
                VirtualRouterSM.locate(obj_uuid)

        ok, vmis = self._cassandra._cassandra_virtual_machine_interface_list(
        )
        if ok:
            for _fq_name, obj_uuid in vmis:
                vmi = VirtualMachineInterfaceSM.locate(obj_uuid)
                if vmi.instance_ip:
                    iip_set.add(vmi.instance_ip)

        ok, route_tables = self._cassandra._cassandra_interface_route_table_list()
        if ok:
            for _fq_name, obj_uuid in route_tables:
                InterfaceRouteTableSM.locate(obj_uuid)

        ok, projects = self._cassandra._cassandra_project_list()
        if ok:
            for _fq_name, obj_uuid in projects:
                ProjectSM.locate(obj_uuid)

        ok, appliance_sets = self._cassandra._cassandra_service_appliance_set_list()
        if ok:
            for _fq_name, obj_uuid in appliance_sets:
                ServiceApplianceSetSM.locate(obj_uuid)

        ok, appliances = self._cassandra._cassandra_service_appliance_list()
        if ok:
            for _fq_name, obj_uuid in appliances:
                ServiceApplianceSM.locate(obj_uuid)

        ok, domains = self._cassandra._cassandra_domain_list()
        if ok:
            for _fq_name, obj_uuid in domains:
                DomainSM.locate(obj_uuid)

        ok, instance_ips = self._cassandra._cassandra_instance_ip_list()
        if ok:
            for _fq_name, obj_uuid in instance_ips:
                InstanceIpSM.locate(obj_uuid)

        ok, floating_ips = self._cassandra._cassandra_floating_ip_list()
        if ok:
            for _fq_name, obj_uuid in floating_ips:
                FloatingIpSM.locate(obj_uuid)

        ok, groups = self._cassandra._cassandra_security_group_list()
        if ok:
            for _fq_name, obj_uuid in groups:
                SecurityGroupSM.locate(obj_uuid)

        ok, machines = self._cassandra._cassandra_virtual_machine_list()
        if ok:
            for _fq_name, obj_uuid in machines:
                vm = VirtualMachineSM.locate(obj_uuid)
                if vm.service_instance:
                    continue
                # Orphan VM: try to re-link it to its service instance
                # through each of its interfaces.
                for vmi_id in vm.virtual_machine_interfaces:
                    vmi = VirtualMachineInterfaceSM.get(vmi_id)
                    if not vmi:
                        continue
                    self.check_link_si_to_vm(vm, vmi)

        # Load the loadbalancer driver
        self.loadbalancer_agent.load_drivers()

        for pool in LoadbalancerPoolSM.values():
            pool.add()

        # Audit the lb pools
        self.loadbalancer_agent.audit_lb_pools()

    # end sync_sm

    # create service template
    def _create_default_template(self,
                                 st_name,
                                 svc_type,
                                 svc_mode=None,
                                 hypervisor_type='virtual-machine',
                                 image_name=None,
                                 flavor=None,
                                 scaling=False,
                                 vrouter_instance_type=None,
                                 instance_data=None):
        """Create a default service template under default-domain.

        :param st_name: template name (fq name is [default-domain, st_name])
        :param svc_type: service type, e.g. 'analyzer'
        :param svc_mode: service mode stored in the template properties
        :param hypervisor_type: virtualization type of the template
        :param image_name: image used to launch the service VM
        :param flavor: nova flavor for the service VM
        :param scaling: whether the service supports scaling
        :param vrouter_instance_type: optional vrouter instance type
        :param instance_data: optional dict serialized into instance data
        :returns: None; logs and returns early if the domain is missing,
                  the template already exists, or creation fails.
        """
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self.logger.log_info("Creating %s %s hypervisor %s" %
                             (domain_name, st_name, hypervisor_type))

        # Build a vnc Domain object from the cached DomainSM entry.
        domain_obj = None
        for domain in DomainSM.values():
            if domain.fq_name == domain_fq_name:
                domain_obj = Domain()
                domain_obj.uuid = domain.uuid
                domain_obj.fq_name = domain_fq_name
                break
        if not domain_obj:
            self.logger.log_error("%s domain not found" % (domain_name))
            return

        # Nothing to do if the template already exists.
        for st in ServiceTemplateSM.values():
            if st.fq_name == st_fq_name:
                self.logger.log_info("%s exists uuid %s" %
                                     (st.name, str(st.uuid)))
                return

        svc_properties = ServiceTemplateType()
        svc_properties.set_service_type(svc_type)
        svc_properties.set_service_mode(svc_mode)
        svc_properties.set_service_virtualization_type(hypervisor_type)
        svc_properties.set_image_name(image_name)
        svc_properties.set_flavor(flavor)
        svc_properties.set_ordered_interfaces(True)
        svc_properties.set_service_scaling(scaling)

        # set interface list per service/virtualization type
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        elif hypervisor_type == 'network-namespace':
            if_list = [['right', True], ['left', True]]
        else:
            if_list = [['management', False], ['left', False],
                       ['right', False]]

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        if vrouter_instance_type is not None:
            svc_properties.set_vrouter_instance_type(vrouter_instance_type)

        if instance_data is not None:
            svc_properties.set_instance_data(
                json.dumps(instance_data, separators=(',', ':')))

        # BUG FIX: pass the constructed Domain object (domain_obj), not the
        # DomainSM cache entry left over in the loop variable 'domain'.
        st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
        st_obj.set_service_template_properties(svc_properties)
        try:
            st_uuid = self._vnc_lib.service_template_create(st_obj)
        except Exception as e:
            # BUG FIX: previously printed the error and fell through to
            # reference the undefined st_uuid, raising NameError.
            self.logger.log_error("template create failed for %s: %s" %
                                  (st_name, str(e)))
            return

        self.logger.log_info("%s created with uuid %s" %
                             (st_name, str(st_uuid)))

    # end _create_default_template

    def check_link_si_to_vm(self, vm, vmi):
        """Re-link a service VM to its service instance.

        The SI fq name and the VM index are recovered from the interface
        name, which is encoded as '<dom>__<proj>__<si>__<index>__...'.
        """
        # Skip VMs that are already linked and interfaces without a
        # service role.
        if vm.service_instance or not vmi.if_type:
            return

        name_parts = vmi.name.split('__')
        target_fq_name = name_parts[0:3]
        vm_index = int(name_parts[3]) - 1

        matching_si = next((si for si in ServiceInstanceSM.values()
                            if si.fq_name == target_fq_name), None)
        if matching_si is None:
            return
        st = ServiceTemplateSM.get(matching_si.service_template)
        self.vm_manager.link_si_to_vm(matching_si, st, vm_index, vm.uuid)

    def _create_service_instance(self, si):
        """Launch the service instance via the manager matching its
        template's virtualization type; failures are captured via cgitb.
        """
        if si.state == 'active':
            return
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            self.logger.log_error("template not found for %s" %
                                  ((':').join(si.fq_name)))
            return

        # Dispatch table: virtualization type -> service manager.
        managers = {
            'virtual-machine': self.vm_manager,
            'network-namespace': self.netns_manager,
            'vrouter-instance': self.vrouter_manager,
        }
        try:
            manager = managers.get(st.virtualization_type)
            if manager is not None:
                manager.create_service(st, si)
            else:
                self.logger.log_error("Unknown virt type: %s" %
                                      st.virtualization_type)
        except Exception:
            cgitb_error_log(self)

    def _delete_service_instance(self, vm):
        """Tear down a service VM through its type-specific manager and
        emit a DELETE UVE for the owning service instance.

        Always returns True; manager failures are logged via cgitb.
        """
        self.logger.log_info("Deleting VM %s %s" %
                             ((':').join(vm.proj_fq_name), vm.uuid))

        try:
            virt_type = vm.virtualization_type
            if virt_type == svc_info.get_vm_instance_type():
                self.vm_manager.delete_service(vm)
            elif virt_type == svc_info.get_netns_instance_type():
                self.netns_manager.delete_service(vm)
            elif virt_type == 'vrouter-instance':
                self.vrouter_manager.delete_service(vm)
        except Exception:
            cgitb_error_log(self)

        # The SI fq name is encoded in the VM display name as
        # '<fq>__<index>__<type>'; strip the last two components.
        si_fq_str = (':').join(vm.display_name.split('__')[:-2])
        self.logger.uve_svc_instance(si_fq_str,
                                     status='DELETE',
                                     vms=[{'uuid': vm.uuid}])
        return True

    def _relaunch_service_instance(self, si):
        """Flag an active service instance for relaunch and recreate it."""
        if si.state != 'active':
            return
        si.state = 'relaunch'
        self._create_service_instance(si)

    def _check_service_running(self, si):
        """Return the health-check status of an active service instance.

        :param si: service instance cache object
        :returns: the manager's check_service() result, or None when the
                  instance is not active, its template is missing, or the
                  virtualization type is unrecognized.
        """
        if si.state != 'active':
            return None
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            # BUG FIX: a missing template previously caused an
            # AttributeError on st.virtualization_type.
            return None

        # BUG FIX: status was unbound (NameError) for unknown virt types;
        # default to None instead.
        status = None
        if st.virtualization_type == 'virtual-machine':
            status = self.vm_manager.check_service(si)
        elif st.virtualization_type == 'network-namespace':
            status = self.netns_manager.check_service(si)
        elif st.virtualization_type == 'vrouter-instance':
            status = self.vrouter_manager.check_service(si)

        return status

    def _delete_interface_route_table(self, irt_uuid):
        """Delete an interface route table by uuid, silently ignoring
        missing objects and objects that are still referenced."""
        try:
            self._vnc_lib.interface_route_table_delete(id=irt_uuid)
        except (NoIdError, RefsExistError):
            pass

    def _delete_shared_vn(self, vn_uuid):
        """Best-effort delete of a shared virtual network; missing or
        still-referenced networks are ignored."""
        try:
            self.logger.log_info("Deleting vn %s" % (vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
        except (NoIdError, RefsExistError):
            return

    @staticmethod
    def reset():
        """Reset every object cache class registered with DBBase."""
        for obj_cls in list(DBBase._OBJ_TYPE_MAP.values()):
            obj_cls.reset()
Exemple #17
0
class DatabaseExim(object):
    """Import the contrail config database (obj_fq_name/obj_uuid cassandra
    tables plus the zookeeper tree) from a JSON dump.

    Import refuses to run against a non-empty database.  db_export is not
    implemented in this variant.
    """
    def __init__(self, args_str):
        """Parse args, connect to cassandra and start a zookeeper session."""
        self._parse_args(args_str)
        self._cassandra = VncCassandraClient(
            self._api_args.cassandra_server_list,
            False,
            self._api_args.cluster_id,
            None,
            logger=self.log)
        self._zookeeper = kazoo.client.KazooClient(self._api_args.zk_server_ip)
        self._zookeeper.start()

    # end __init__

    def log(self, msg, level):
        """No-op logger handed to VncCassandraClient."""
        pass

    # end log

    def _parse_args(self, args_str):
        """Parse CLI options; unrecognized options are forwarded to the
        contrail-api config parser (results kept in self._api_args)."""
        parser = argparse.ArgumentParser()

        help = "Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument("--api-conf",
                            help=help,
                            default="/etc/contrail/contrail-api.conf")
        parser.add_argument("--verbose",
                            help="Run in verbose/INFO mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--debug",
                            help="Run in debug mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--import-from",
                            help="Import from this json file to database",
                            metavar='FILE',
                            default='db.json')
        parser.add_argument("--export-to",
                            help="Export from database to this json file",
                            metavar='FILE')

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        self._args = args_obj

        self._api_args = utils.parse_args(
            '-c %s %s' % (self._args.api_conf, ' '.join(remaining_argv)))[0]

    # end _parse_args

    def db_import(self):
        """Seed the cassandra fqn/uuid tables and zookeeper from the dump.

        Raises:
            CassandraNotEmptyError: a target CF already has entries.
            ZookeeperNotEmptyError: zookeeper already has non-system nodes.
        """
        # BUG FIX: use context managers; the old try/finally referenced f
        # in the finally clause even when gzip.open itself raised, turning
        # the real error into a NameError.
        if self._args.import_from.endswith('.gz'):
            with gzip.open(self._args.import_from, 'rb') as f:
                self.import_data = json.loads(f.read())
        else:
            with open(self._args.import_from, 'r') as f:
                self.import_data = json.loads(f.read())

        # refuse import if db already has data
        if len(
                list(
                    self._cassandra.get_cf('obj_uuid_table').get_range(
                        column_count=0))) > 0:
            raise CassandraNotEmptyError('obj_uuid_table has entries')
        if len(
                list(
                    self._cassandra.get_cf('obj_fq_name_table').get_range(
                        column_count=0))) > 0:
            raise CassandraNotEmptyError('obj_fq_name_table has entries')
        zk_nodes = self._zookeeper.get_children('/')
        zk_nodes.remove('zookeeper')
        if len(zk_nodes) > 0:
            raise ZookeeperNotEmptyError('Zookeeper has entries')

        # seed cassandra
        for cf_name in ['obj_fq_name_table', 'obj_uuid_table']:
            for row, column in self.import_data['cassandra'][cf_name].items():
                self._cassandra.add(cf_name, row, column)

        # seed zookeeper; skip internal /zookeeper nodes and strip any
        # trailing '/' so create() gets canonical paths
        for path_value_ts in json.loads(self.import_data['zookeeper']):
            path = path_value_ts[0]
            if path.endswith('/'):
                path = path[:-1]
            if path.startswith('/zookeeper'):
                continue
            value = path_value_ts[1][0]
            self._zookeeper.create(path, str(value), makepath=True)

    # end db_import

    def db_export(self):
        """Not implemented in this variant."""
        pass
Exemple #18
0
class DatabaseExim(object):
    """Export/import the contrail config database.

    Covers the cassandra keyspaces in KEYSPACES (minus --omit-keyspaces)
    and the zookeeper tree under the configured cluster id.  _parse_args
    enforces that at most one of --import-from / --export-to is given.
    """
    def __init__(self, args_str):
        """Parse arguments and start a gevent-friendly zookeeper session."""
        self._parse_args(args_str)

        self._zookeeper = kazoo.client.KazooClient(
            self._api_args.zk_server_ip,
            timeout=400,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())
        self._zookeeper.start()

    # end __init__

    def init_cassandra(self, ks_cf_info=None):
        """Connect to cassandra with the given keyspace/CF layout writable."""
        self._cassandra = VncCassandraClient(
            self._api_args.cassandra_server_list,
            self._api_args.cluster_id,
            rw_keyspaces=ks_cf_info,
            ro_keyspaces=None,
            logger=self.log,
            reset_config=False)

    # end init_cassandra

    def log(self, msg, level):
        """No-op logger handed to VncCassandraClient."""
        pass

    # end log

    def _parse_args(self, args_str):
        """Parse CLI options; leftovers go to the contrail-api arg parser.

        Raises:
            InvalidArguments: both --import-from and --export-to given.
        """
        parser = argparse.ArgumentParser()

        help = "Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument("--api-conf",
                            help=help,
                            default="/etc/contrail/contrail-api.conf")
        parser.add_argument("--verbose",
                            help="Run in verbose/INFO mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--debug",
                            help="Run in debug mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--import-from",
                            help="Import from this json file to database",
                            metavar='FILE')
        parser.add_argument("--export-to",
                            help="Export from database to this json file",
                            metavar='FILE')
        parser.add_argument("--omit-keyspaces",
                            nargs='*',
                            help="List of keyspaces to omit in export/import",
                            metavar='FILE')

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        if ((args_obj.import_from is not None)
                and (args_obj.export_to is not None)):
            raise InvalidArguments(
                'Both --import-from and --export-to cannot be specified %s' %
                (args_obj))
        self._args = args_obj

        self._api_args = utils.parse_args(
            '-c %s %s' % (self._args.api_conf, ' '.join(remaining_argv)))[0]

    # end _parse_args

    def db_import(self):
        """Seed cassandra and zookeeper from the JSON dump.

        Raises:
            CassandraNotEmptyError: some target CF already has entries.
            ZookeeperNotEmptyError: zookeeper dirs in the dump already exist.
        """
        # BUG FIX: use context managers; the old try/finally referenced f
        # in the finally clause even when gzip.open itself raised, turning
        # the real error into a NameError.
        if self._args.import_from.endswith('.gz'):
            with gzip.open(self._args.import_from, 'rb') as f:
                self.import_data = json.loads(f.read())
        else:
            with open(self._args.import_from, 'r') as f:
                self.import_data = json.loads(f.read())

        # Make every keyspace/CF present in the dump writable.
        ks_cf_info = dict((ks, dict((c, {}) for c in cf.keys()))
                          for ks, cf in self.import_data['cassandra'].items())
        self.init_cassandra(ks_cf_info)

        # refuse import if db already has data
        non_empty_errors = []
        for ks in self.import_data['cassandra'].keys():
            for cf in self.import_data['cassandra'][ks].keys():
                if len(
                        list(
                            self._cassandra.get_cf(cf).get_range(
                                column_count=0))) > 0:
                    non_empty_errors.append(
                        'Keyspace %s CF %s already has entries.' % (ks, cf))

        if non_empty_errors:
            raise CassandraNotEmptyError('\n'.join(non_empty_errors))

        non_empty_errors = []
        existing_zk_dirs = set(
            self._zookeeper.get_children(self._api_args.cluster_id + '/'))
        # BUG FIX: the zookeeper dump is a JSON list of (path, (value, ts))
        # pairs, so the empty-dump fallback must be "[]", not "{}".
        import_zk_dirs = set([
            p_v_ts[0].split('/')[1]
            for p_v_ts in json.loads(self.import_data['zookeeper'] or "[]")
        ])

        for non_empty in ((existing_zk_dirs & import_zk_dirs) -
                          set(['zookeeper'])):
            non_empty_errors.append('Zookeeper has entries at /%s.' %
                                    (non_empty))

        if non_empty_errors:
            raise ZookeeperNotEmptyError('\n'.join(non_empty_errors))

        # seed cassandra: replay each exported column with its value
        for ks_name in self.import_data['cassandra'].keys():
            for cf_name in self.import_data['cassandra'][ks_name].keys():
                cf = self._cassandra.get_cf(cf_name)
                for row, cols in self.import_data['cassandra'][ks_name][
                        cf_name].items():
                    for col_name, col_val_ts in cols.items():
                        cf.insert(row, {col_name: col_val_ts[0]})
        # end seed cassandra

        # Top-level paths owned by kafka/other daemons; never re-create.
        zk_ignore_list = [
            'consumers', 'config', 'controller', 'isr_change_notification',
            'admin', 'brokers', 'zookeeper', 'controller_epoch',
            'api-server-election', 'schema-transformer', 'device-manager',
            'svc-monitor', 'contrail_cs', 'lockpath'
        ]
        # seed zookeeper (same "[]" fallback fix as above)
        for path_value_ts in json.loads(self.import_data['zookeeper'] or "[]"):
            path = path_value_ts[0]
            if path.endswith('/'):
                path = path[:-1]
            if path.split('/')[1] in zk_ignore_list:
                continue
            value = path_value_ts[1][0]
            self._zookeeper.create(path, str(value), makepath=True)

    # end db_import

    def db_export(self):
        """Dump the selected cassandra keyspaces and the zookeeper tree
        under the cluster id to the --export-to JSON file."""
        db_contents = {'cassandra': {}, 'zookeeper': {}}

        # Credentials and the schema manager do not depend on the keyspace;
        # set them up once instead of per loop iteration.
        creds = None
        if (self._api_args.cassandra_user
                and self._api_args.cassandra_password):
            creds = {
                'username': self._api_args.cassandra_user,
                'password': self._api_args.cassandra_password
            }
        sys_mgr = SystemManager(self._api_args.cassandra_server_list[0],
                                credentials=creds)

        cassandra_contents = db_contents['cassandra']
        for ks_name in (set(KEYSPACES) - set(self._args.omit_keyspaces or [])):
            # Keyspaces are prefixed with the cluster id when one is set.
            if self._api_args.cluster_id:
                full_ks_name = '%s_%s' % (self._api_args.cluster_id, ks_name)
            else:
                full_ks_name = ks_name
            cassandra_contents[ks_name] = {}

            pool = pycassa.ConnectionPool(full_ks_name,
                                          self._api_args.cassandra_server_list,
                                          pool_timeout=120,
                                          max_retries=-1,
                                          timeout=5)

            for cf_name in sys_mgr.get_keyspace_column_families(full_ks_name):
                cassandra_contents[ks_name][cf_name] = {}
                cf = pycassa.ColumnFamily(pool, cf_name)
                for r, c in cf.get_range(column_count=10000000,
                                         include_timestamp=True):
                    cassandra_contents[ks_name][cf_name][r] = c

        def get_nodes(path):
            # Leaf: record (path, (value, stat)); otherwise recurse into
            # every child.
            if not zk.get_children(path):
                return [(path, zk.get(path))]

            nodes = []
            for child in zk.get_children(path):
                nodes.extend(get_nodes('%s%s/' % (path, child)))

            return nodes

        zk = kazoo.client.KazooClient(self._api_args.zk_server_ip)
        zk.start()
        nodes = get_nodes(self._api_args.cluster_id + '/')
        zk.stop()
        db_contents['zookeeper'] = json.dumps(nodes)

        with open(self._args.export_to, 'w') as f:
            f.write(json.dumps(db_contents))
    # end db_export
    def __init__(self, args=None):
        """Device-manager daemon init.

        Sets up discovery/sandesh logging, connects (with retries) to the
        API server, subscribes to rabbit config updates, reads the config
        objects from cassandra into the DM caches, then idles forever.
        """
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh (introspect/logging) generator setup for this module.
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        # Rabbit (AMQP) connection parameters for the kombu client below.
        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        # Signalled once the initial cassandra resync below is complete.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log)

        # Cassandra client used for the bulk config reads below.
        cass_server_list = self._args.cassandra_server_list
        self._cassandra = VncCassandraClient(cass_server_list,
                                             self._args.cluster_id,
                                             None,
                                             self.config_log)

        # Resync: populate the DM in-memory caches from cassandra.
        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        ok, global_system_config_list = self._cassandra._cassandra_global_system_config_list()
        if not ok:
            self.config_log('global system config list returned error: %s' %
                            global_system_config_list)
        else:
            for fq_name, uuid in global_system_config_list:
                GlobalSystemConfigDM.locate(uuid)

        ok, global_vrouter_config_list = self._cassandra._cassandra_global_vrouter_config_list()
        if not ok:
            self.config_log('global vrouter config list returned error: %s' %
                            global_vrouter_config_list)
        else:
            for fq_name, uuid in global_vrouter_config_list:
                GlobalVRouterConfigDM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if not ok:
            self.config_log('virtual network list returned error: %s' %
                            vn_list)
        else:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None and vn.routing_instances is not None:
                    for ri_id in vn.routing_instances:
                        ri_obj = RoutingInstanceDM.locate(ri_id)

        ok, bgp_list = self._cassandra._cassandra_bgp_router_list()
        if not ok:
            self.config_log('bgp router list returned error: %s' %
                            bgp_list)
        else:
            for fq_name, uuid in bgp_list:
                BgpRouterDM.locate(uuid)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                # Collect logical interfaces directly on the PR and on its
                # physical interfaces, then the VMIs they reference.
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)

            # NOTE(review): everything from here to set_config_state() is
            # indented inside the pr_list 'else' branch, so instance-ip /
            # floating-ip loading and the PR config push are skipped
            # whenever the physical-router read fails — confirm this extra
            # indentation level is intentional.
            ok, ip_list = self._cassandra._cassandra_instance_ip_list()
            if not ok:
                self.config_log('instance ip list returned error: %s' %
                            ip_list)
            else:
                for fq_name, uuid in ip_list:
                    InstanceIpDM.locate(uuid)

            ok, fip_list = self._cassandra._cassandra_floating_ip_list()
            if not ok:
                self.config_log('floating ip list returned error: %s' %
                            fip_list)
            else:
                for fq_name, uuid in fip_list:
                    FloatingIpDM.locate(uuid)

            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None:
                    vn.update_instance_ip_map()

            for pr in PhysicalRouterDM.values():
                pr.set_config_state()
        self._db_resync_done.set()
        while 1:
            # Just wait indefinitely
            time.sleep(5)
Exemple #20
0
class DatabaseExim(object):
    """Export/import the config database (fixed keyspace list variant).

    db_import understands both the pre-3.1 export format (only the
    config_db_uuid CFs at top level) and the newer per-keyspace format.
    """
    def __init__(self, args_str):
        """Parse arguments and start a gevent-friendly zookeeper session."""
        self._parse_args(args_str)

        self._zookeeper = kazoo.client.KazooClient(
            self._api_args.zk_server_ip,
            timeout=400,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())
        self._zookeeper.start()

    # end __init__

    def init_cassandra(self, ks_cf_info=None):
        """Connect to cassandra with the given keyspace/CF layout writable."""
        self._cassandra = VncCassandraClient(
            self._api_args.cassandra_server_list,
            self._api_args.cluster_id,
            rw_keyspaces=ks_cf_info,
            ro_keyspaces=None,
            logger=self.log,
            reset_config=False)

    # end init_cassandra

    def log(self, msg, level):
        """No-op logger handed to VncCassandraClient."""
        pass

    # end log

    def _parse_args(self, args_str):
        """Parse CLI options; leftovers go to the contrail-api arg parser.

        Raises:
            InvalidArguments: both --import-from and --export-to given.
        """
        parser = argparse.ArgumentParser()

        help = "Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument("--api-conf",
                            help=help,
                            default="/etc/contrail/contrail-api.conf")
        parser.add_argument("--verbose",
                            help="Run in verbose/INFO mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--debug",
                            help="Run in debug mode, default False",
                            action='store_true',
                            default=False)
        parser.add_argument("--import-from",
                            help="Import from this json file to database",
                            metavar='FILE')
        parser.add_argument("--export-to",
                            help="Export from database to this json file",
                            metavar='FILE')

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        if ((args_obj.import_from is not None)
                and (args_obj.export_to is not None)):
            raise InvalidArguments(
                'Both --import-from and --export-to cannot be specified %s' %
                (args_obj))
        self._args = args_obj

        self._api_args = utils.parse_args(
            '-c %s %s' % (self._args.api_conf, ' '.join(remaining_argv)))[0]

    # end _parse_args

    def db_import(self):
        """Seed cassandra and zookeeper from the JSON dump, supporting both
        the old (CFs at top level) and new (per-keyspace) export formats.

        Raises:
            CassandraNotEmptyError: a target CF already has entries.
            ZookeeperNotEmptyError: zookeeper has non-system nodes.
        """
        # BUG FIX: use context managers; the old try/finally referenced f
        # in the finally clause even when gzip.open itself raised, turning
        # the real error into a NameError.
        if self._args.import_from.endswith('.gz'):
            with gzip.open(self._args.import_from, 'rb') as f:
                self.import_data = json.loads(f.read())
        else:
            with open(self._args.import_from, 'r') as f:
                self.import_data = json.loads(f.read())

        # check older format export file which had only config_db_uuid
        # CF names at top-level
        if set(['obj_uuid_table', 'obj_fq_name_table'
                ]) == set(self.import_data['cassandra'].keys()):
            self.init_cassandra()
        else:
            try:
                # in pre 3.1 releases, tuple for cf_info not dict
                ks_cf_info = dict(
                    (ks, [(c, None) for c in cf.keys()])
                    for ks, cf in self.import_data['cassandra'].items())
                self.init_cassandra(ks_cf_info)
            except TypeError as e:
                # BUG FIX: the membership test must be on str(e); testing
                # 'x' in e on an exception object itself raises TypeError.
                if 'list indices must be integers, not tuple' not in str(e):
                    raise
                ks_cf_info = dict(
                    (ks, dict((c, {}) for c in cf.keys()))
                    for ks, cf in self.import_data['cassandra'].items())
                self.init_cassandra(ks_cf_info)

        # refuse import if db already has data
        if len(
                list(
                    self._cassandra.get_cf('obj_uuid_table').get_range(
                        column_count=0))) > 0:
            raise CassandraNotEmptyError('obj_uuid_table has entries')
        if len(
                list(
                    self._cassandra.get_cf('obj_fq_name_table').get_range(
                        column_count=0))) > 0:
            raise CassandraNotEmptyError('obj_fq_name_table has entries')
        zk_nodes = self._zookeeper.get_children('/')

        # Top-level paths owned by kafka/zookeeper themselves do not count
        # as config data.
        zk_ignore_list = [
            'consumers', 'config', 'controller', 'isr_change_notification',
            'admin', 'brokers', 'zookeeper', 'controller_epoch'
        ]
        for ignore in zk_ignore_list:
            try:
                zk_nodes.remove(ignore)
            except ValueError:
                pass
        if len(zk_nodes) > 0:
            raise ZookeeperNotEmptyError('Zookeeper has entries')

        # seed cassandra
        if 'obj_uuid_table' in self.import_data['cassandra']:
            # old format only fqn and uuid table were exported at top-level
            for cf_name in ['obj_fq_name_table', 'obj_uuid_table']:
                cf = self._cassandra.get_cf(cf_name)
                for row, cols in self.import_data['cassandra'][cf_name].items(
                ):
                    for col_name, col_val_ts in cols.items():
                        cf.insert(row, {col_name: col_val_ts[0]})
        else:
            for ks_name in self.import_data['cassandra'].keys():
                for cf_name in self.import_data['cassandra'][ks_name].keys():
                    cf = self._cassandra.get_cf(cf_name)
                    for row, cols in self.import_data['cassandra'][ks_name][
                            cf_name].items():
                        for col_name, col_val_ts in cols.items():
                            cf.insert(row, {col_name: col_val_ts[0]})
        # end seed cassandra

        # seed zookeeper
        # BUG FIX: the dump is a JSON list of (path, (value, ts)) pairs, so
        # the empty-dump fallback must be "[]", not "{}".
        for path_value_ts in json.loads(self.import_data['zookeeper'] or "[]"):
            path = path_value_ts[0]
            if path.endswith('/'):
                path = path[:-1]
            if path.split('/')[1] in zk_ignore_list:
                continue
            value = path_value_ts[1][0]
            self._zookeeper.create(path, str(value), makepath=True)

    # end db_import

    def db_export(self):
        """Dump the fixed set of contrail keyspaces and the whole zookeeper
        tree to the --export-to JSON file."""
        db_contents = {'cassandra': {}, 'zookeeper': {}}

        # The schema manager does not depend on the keyspace; create once.
        sys_mgr = SystemManager(self._api_args.cassandra_server_list[0],
                                credentials={
                                    'username':
                                    self._api_args.cassandra_user,
                                    'password':
                                    self._api_args.cassandra_password
                                })

        cassandra_contents = db_contents['cassandra']
        for ks_name in [
                'config_db_uuid',
                'useragent',
                'to_bgp_keyspace',
                'svc_monitor_keyspace',
                'DISCOVERY_SERVER',
        ]:
            cassandra_contents[ks_name] = {}
            if ks_name == 'DISCOVERY_SERVER':
                # stringify key as composite column is used
                stringify_col_name = True
            else:
                stringify_col_name = False

            # BUG FIX: cassandra_server_list is already a list (note the
            # [0] indexing above); wrapping it in another list handed
            # pycassa a list-of-lists instead of host addresses.
            pool = pycassa.ConnectionPool(
                ks_name, self._api_args.cassandra_server_list,
                pool_timeout=120,
                max_retries=-1,
                timeout=5)
            for cf_name in sys_mgr.get_keyspace_column_families(ks_name):
                cassandra_contents[ks_name][cf_name] = {}
                cf = pycassa.ColumnFamily(pool, cf_name)
                for r, c in cf.get_range(column_count=10000000,
                                         include_timestamp=True):
                    if stringify_col_name:
                        cassandra_contents[ks_name][cf_name][r] = dict(
                            (str(k), v) for k, v in c.items())
                    else:
                        cassandra_contents[ks_name][cf_name][r] = c

        def get_nodes(path):
            # Leaf: record (path, (value, stat)); otherwise recurse into
            # every child.
            if not zk.get_children(path):
                return [(path, zk.get(path))]

            nodes = []
            for child in zk.get_children(path):
                nodes.extend(get_nodes('%s%s/' % (path, child)))

            return nodes

        zk = kazoo.client.KazooClient(self._api_args.zk_server_ip)
        zk.start()
        nodes = get_nodes('/')
        zk.stop()
        db_contents['zookeeper'] = json.dumps(nodes)

        with open(self._args.export_to, 'w') as f:
            f.write(json.dumps(db_contents))
    # end db_export
    def __init__(self, args=None):
        """Bootstrap the device manager.

        Sets up sandesh logging and discovery, connects (with retry) to the
        API server, rabbit and cassandra, resyncs the device model from the
        database, pushes config to every known physical router, then loops
        forever monitoring the rabbit consumer greenlet.

        NOTE(review): this constructor never returns — it ends in an
        infinite ``while 1`` loop.
        """
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh instrumentation: generator identity, collectors, logging
        # parameters and connection-state reporting.
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost

        # Set at the end of this constructor, once the DB resync below has
        # completed.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)

        # Resync: walk every physical router in cassandra and pull the
        # related model objects (BGP routers, physical/logical interfaces,
        # VMIs and their virtual networks) into the in-memory model.
        DBBaseDM.init(self._sandesh.logger(), self._cassandra)
        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            vn_set = set()
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                vn_set |= pr.virtual_networks
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)
                    if vmi:
                        vn_set |= vmi.virtual_networks

            for vn_id in vn_set:
                VirtualNetworkDM.locate(vn_id)

            # Push the freshly resynced configuration to every router.
            for pr in PhysicalRouterDM.values():
                pr.push_config()

        self._db_resync_done.set()
        while 1:
            self._vnc_kombu._subscribe_greenlet.join()
            # In case _subscribe_greenlet dies for some reason, it will be
            # respawned. sleep for 1 second to wait for it to be respawned
            time.sleep(1)
Exemple #22
0
class LoadDataBase(object):
    """Bulk-load synthetic Contrail resources straight into Cassandra and
    zookeeper for scale testing.

    The resource distribution is read from a YAML file mapping resource
    type (see ``_SUPPORTED_RESOURCES``) to the amount to create per
    project.
    """

    # Cassandra keyspace / column families used by the config API server.
    _UUID_KEYSPACE_NAME = 'config_db_uuid'
    _OBJ_UUID_CF_NAME = 'obj_uuid_table'
    _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
    _OBJ_SHARED_CF_NAME = 'obj_shared_table'
    _UUID_KEYSPACE = {
        _UUID_KEYSPACE_NAME: {
            _OBJ_UUID_CF_NAME: {
                'cf_args': {
                    'autopack_names': False,
                    'autopack_values': False,
                    },
                },
            _OBJ_FQ_NAME_CF_NAME: {
                'cf_args': {
                    'autopack_values': False,
                    },
                },
            _OBJ_SHARED_CF_NAME: {}
            }
        }
    # Resources supported by that script.
    # The order of that list is important, it defines the resources
    # creation order.
    _SUPPORTED_RESOURCES = [
        'project',
        'security-group',
        'virtual-network',
        'virtual-machine-interface',
    ]
    # Default perms2 property stamped on created resources.
    _PERMS2 = {
        'owner': None,
        'owner_access': 7,
        'global_access': 0,
        'share': [],
    }

    BATCH_QUEUE_SIZE = 1000
    RULES_PER_SG = 4

    def __init__(self, force, resources_file, cassandra_servers,
                 cassandra_username, cassandra_password, db_prefix,
                 cassandra_batch_size, zookeeper_servers,
                 rules_per_security_group, keystone_client,
                 dont_populate_zookeeper):
        """Read the distribution file and open the database connections.

        :param force: skip the interactive confirmation prompt
        :param resources_file: YAML file describing how many resources of
            each type to create
        :param dont_populate_zookeeper: use a no-op zookeeper client
            instead of a real one
        """
        self._force = force
        # NOTE(security): yaml.load without an explicit Loader can execute
        # arbitrary code embedded in the file; only feed this tool trusted
        # resource files (yaml.safe_load is the safe alternative).
        self._resource_distribution = yaml.load(resources_file)
        self._cassandra_batch_size = cassandra_batch_size
        self._rules_per_security_group = rules_per_security_group
        self._keystone_client = keystone_client
        self._dont_populate_zookeeper = dont_populate_zookeeper

        # Connect to cassandra database
        logger.debug("Initilizing the cassandra connection on %s",
                     cassandra_servers)
        cassandra_credentials = {}
        if (cassandra_username is not None and
                cassandra_password is not None):
            cassandra_credentials = {
                'username': cassandra_username,
                'password': cassandra_password,
            }

        def vnc_cassandra_client_logger(msg, level=logging.INFO):
            # Adapt VncCassandraClient's (msg, level) callback to the
            # module-level logger.
            logger.log(msg=msg, level=level)

        self._cassandra_db = VncCassandraClient(
            cassandra_servers,
            db_prefix,
            self._UUID_KEYSPACE,
            None,
            vnc_cassandra_client_logger,
            credential=cassandra_credentials)
        self._uuid_cf = self._cassandra_db.get_cf('obj_uuid_table')
        self._fqname_cf = self._cassandra_db.get_cf('obj_fq_name_table')

        # Initilize zookeeper client
        if self._dont_populate_zookeeper:
            self._zk_client = DummyZookeeperClient()
        else:
            self._zk_client = ZookeeperClient(zookeeper_servers)

    def sanitize_resources(self):
        """Instantiate a loader object per supported resource type and warn
        about unsupported types present in the distribution file."""
        logger.debug("Santizing resources distribution")
        self._resource_map = OrderedDict()
        for resource_type in self._SUPPORTED_RESOURCES:
            object_path = 'contrail_db_loader.resources.%s.%s' %\
                          (resource_type.replace('-', '_'),
                           camel_case(resource_type))
            kwargs = {
                'db_manager': self._cassandra_db,
                'batch_size': self._cassandra_batch_size,
                'zk_client': self._zk_client,
                'project_amount': self._resource_distribution.get('project',
                                                                  0),
                'amount_per_project': self._resource_distribution.get(
                    resource_type, 0),
            }
            self._resource_map[resource_type] = import_object(object_path,
                                                              **kwargs)

        resources_not_supported = (set(self._resource_distribution.keys()) -
                                   set(self._SUPPORTED_RESOURCES))
        if resources_not_supported:
            logger.warning('Loading resources %s are not supported' %
                           ', '.join(resources_not_supported))

    def summarize_resources_to_create(self):
        """Log a summary of what will be created and, unless --force was
        given, ask the user for confirmation."""
        msg = """Will populate %(project)d projects with:
    - security groups:           %(sg)d
    - access control lists:      %(acl)d
    - virtual networks:          %(vn)d
    - routing instances:         %(ri)d
    - route targets:             %(rt)d
    - virtual machine interface: %(vmi)d
    - virtual machine:           %(vm)d
    - intance ip:                %(iip)d
That will load %(sum)d resources into database."""
        # Renamed from 'dict', which shadowed the builtin.
        counts = {
            'project': self._resource_map['project'].total_amount,
            'sg': self._resource_map['security-group'].amount_per_project + 1,
            'acl': (self._resource_map['security-group'].amount_per_project +
                    1) * 2,
            'vn': self._resource_map['virtual-network'].amount_per_project,
            'ri': self._resource_map['virtual-network'].amount_per_project,
            'rt': self._resource_map['virtual-network'].amount_per_project,
            'vmi': self._resource_map['virtual-machine-interface'].
            amount_per_project,
            'vm': self._resource_map['virtual-machine-interface'].
            amount_per_project,
            'iip': self._resource_map['virtual-machine-interface'].
            amount_per_project,
        }
        counts['sum'] = sum(resource.total_amount
                            for resource in self._resource_map.values())
        # A single dict argument makes logging do named %-interpolation.
        logger.warning(msg, counts)
        if (not self._force and
                not prompt('Do you want to load that amount of resources?')):
            exit(0)

    def create_resources(self):
        """Create every resource batch, in _SUPPORTED_RESOURCES order."""
        self._zk_client.connect()
        for resource in self._resource_map.values():
            logger.info("Loading '%s' resources into the database...",
                        resource.type)
            # Some resource types need extra context to be created.
            if resource.type == 'project':
                _, time_elapsed = resource.create_resources(
                    self._keystone_client)
            elif resource.type == 'security-group':
                _, time_elapsed = resource.create_resources(
                    self._rules_per_security_group)
            elif resource.type == 'virtual-machine-interface':
                _, time_elapsed = resource.create_resources(
                    self._resource_map['virtual-network'].amount_per_project)
            else:
                _, time_elapsed = resource.create_resources()
            logger.info("%d resources were created to load %d '%s' in "
                        "%2.2f seconds.", resource.total_amount,
                        resource.amount_per_project, resource.type,
                        time_elapsed)
        self._zk_client.disconnect()
class ICCassandraClient:
    """Copy/merge Contrail config data from an old-version Cassandra
    cluster to a new-version one during In-Service Software Upgrade (ISSU).

    *issu_info* is an iterable of ``(transform_func, keyspace, cf_list)``
    tuples; when ``transform_func`` is None the identity transform
    (:meth:`_issu_basic_function`) is used for that keyspace.
    """

    def _issu_basic_function(self, kspace, cfam, cols):
        """Default per-row transform: return the columns unchanged."""
        return dict(cols)

    # end

    def __init__(self, oldversion_server_list, newversion_server_list,
                 odb_prefix, ndb_prefix, issu_info, logger):
        self._oldversion_server_list = oldversion_server_list
        self._newversion_server_list = newversion_server_list
        self._odb_prefix = odb_prefix
        self._ndb_prefix = ndb_prefix
        self._issu_info = issu_info
        self._logger = logger
        # keyspace -> transform function, and keyspace -> CF list maps,
        # populated by issu_prepare().
        self._ks_issu_func_info = {}
        self._nkeyspaces = {}
        self._okeyspaces = {}
        self._logger("Issu contrail cassandra initialized...",
                     level=SandeshLevel.SYS_INFO)
        self.issu_prepare()

    # end

    def issu_prepare(self):
        """Build the keyspace maps and open handles to both clusters."""
        self._logger("Issu contrail cassandra prepare...",
                     level=SandeshLevel.SYS_INFO)
        for issu_func, ks, cflist in self._issu_info:
            if issu_func is None:
                issu_func = self._issu_basic_function
            self._nkeyspaces[ks] = cflist
            self._okeyspaces[ks] = cflist
            self._ks_issu_func_info[ks] = issu_func

        # Old cluster keyspaces are passed in the read-only position, new
        # cluster keyspaces in the read-write position.
        self._oldversion_handle = VncCassandraClient(
            self._oldversion_server_list, self._odb_prefix,
            None, self._okeyspaces, self._logger)

        self._newversion_handle = VncCassandraClient(
            self._newversion_server_list, self._ndb_prefix,
            self._nkeyspaces, None, self._logger)

    # end

    def _fetch_issu_func(self, ks):
        """Return the transform function registered for keyspace *ks*."""
        return self._ks_issu_func_info[ks]

    # end

    # Overwrite what is seen in the newer with the old version.
    def _merge_overwrite(self, new, current):
        """Merge *new* into *current* in place (old-version data wins) and
        return the mutated *current* mapping."""
        updated = current
        updated.update(new)
        return updated

    # end

    #  For now this function should be called for only config_db_uuid.
    #  If we separate out config_db_uuid keyspace from VncCassandraClient,
    #  then we don't need to pass keyspaces here.
    def issu_merge_copy(self, keyspaces):
        """Copy rows old -> new, merging with rows already present in the
        new cluster and pruning new-cluster rows/columns that have no
        old-cluster counterpart."""
        for ks, cflist in keyspaces.items():
            self._logger("Issu contrail cassandra merge copy, keyspace: " +
                         str(ks), level=SandeshLevel.SYS_INFO)
            issu_funct = self._fetch_issu_func(ks)
            for cf in cflist:
                newversion_result = self._newversion_handle.get_range(cf)
                self._logger("Building New DB memory for columnfamily: " +
                             str(cf), level=SandeshLevel.SYS_INFO)
                new_db = dict(newversion_result)

                oldversion_result = self._oldversion_handle.get_range(cf)
                self._logger("Doing ISSU copy for columnfamily: " +
                             str(cf), level=SandeshLevel.SYS_INFO)
                for rows, columns in oldversion_result:
                    out = issu_funct(ks, cf, columns)
                    current = new_db.pop(rows, None)
                    if current is not None:
                        updated = self._merge_overwrite(out, dict(current))
                        self._newversion_handle.add(cf, rows, updated)
                    else:
                        updated = []
                        self._newversion_handle.add(cf, rows, out)
                    # Columns present in the new cluster but absent from the
                    # (transformed) old data are stale: remove them.
                    stale = set(updated) - set(out)
                    self._newversion_handle.get_cf(cf).remove(rows, stale)
                self._logger(
                    "Pruning New DB if entires don't exist in old DB column "
                    "family: " + str(cf),
                    level=SandeshLevel.SYS_INFO,
                )
                # Whatever is left in new_db was never seen in the old DB.
                for item in new_db:
                    # TBD should be catch exception and fail ISSU
                    self._newversion_handle.delete(cf, item)

    # end

    #  This is issu_copy function.
    def issu_copy(self, keyspaces):
        """Plain copy old -> new for the given keyspaces (no merge)."""
        for ks, cflist in keyspaces.items():
            issu_funct = self._fetch_issu_func(ks)
            for cf in cflist:
                self._logger(
                    "Issu Copy KeySpace: " + str(ks) +
                    " Column Family: " + str(cf),
                    level=SandeshLevel.SYS_INFO)
                for rows, columns in self._oldversion_handle.get_range(cf):
                    out = issu_funct(ks, cf, columns)
                    # TBD If failure to add, fail ISSU
                    self._newversion_handle.add(cf, rows, out)

    # end

    def issu_sync_row(self, msg, cf):
        """Apply one live notification *msg* (CREATE/UPDATE/DELETE) to the
        new cluster while the upgrade is in progress."""
        if msg["oper"] == "CREATE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            self._newversion_handle.object_create(
                msg["type"].replace("-", "_"), msg["uuid"], msg["obj_dict"])
        elif msg["oper"] == "UPDATE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            uuid_list = [msg["uuid"]]
            _, current = self._newversion_handle.object_read(
                msg["type"].replace("-", "_"), uuid_list)
            _, new = self._oldversion_handle.object_read(
                msg["type"].replace("-", "_"), uuid_list)
            # Old-version content wins over what the new cluster holds.
            updated = self._merge_overwrite(dict(new.pop()),
                                            dict(current.pop()))
            #  New object dictionary should be created, for now passing as is
            self._newversion_handle.object_update(
                msg["type"].replace("-", "_"), msg["uuid"], updated)
        elif msg["oper"] == "DELETE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            self._newversion_handle.object_delete(
                msg["type"].replace("-", "_"), msg["uuid"])
Exemple #24
0
class LoadDataBase(object):
    """Bulk-load synthetic Contrail resources straight into Cassandra and
    zookeeper for scale testing.

    The resource distribution is read from a YAML file mapping resource
    type (see ``_SUPPORTED_RESOURCES``) to the amount to create per
    project.
    """

    # Cassandra keyspace / column families used by the config API server.
    _UUID_KEYSPACE_NAME = 'config_db_uuid'
    _OBJ_UUID_CF_NAME = 'obj_uuid_table'
    _OBJ_FQ_NAME_CF_NAME = 'obj_fq_name_table'
    _OBJ_SHARED_CF_NAME = 'obj_shared_table'
    _UUID_KEYSPACE = {
        _UUID_KEYSPACE_NAME: [
            (_OBJ_UUID_CF_NAME, None),
            (_OBJ_FQ_NAME_CF_NAME, None),
            (_OBJ_SHARED_CF_NAME, None),
        ],
    }
    # Resources supported by that script.
    # The order of that list is important, it defines the resources
    # creation order.
    _SUPPORTED_RESOURCES = [
        'project',
        'security-group',
        'virtual-network',
        'virtual-machine-interface',
    ]
    # Default perms2 property stamped on created resources.
    _PERMS2 = {
        'owner': None,
        'owner_access': 7,
        'global_access': 0,
        'share': [],
    }

    BATCH_QUEUE_SIZE = 1000
    RULES_PER_SG = 4

    def __init__(self, force, resources_file, cassandra_servers,
                 cassandra_username, cassandra_password, db_prefix,
                 cassandra_batch_size, zookeeper_servers,
                 rules_per_security_group, keystone_client):
        """Read the distribution file and open the database connections.

        :param force: skip the interactive confirmation prompt
        :param resources_file: YAML file describing how many resources of
            each type to create
        """
        self._force = force
        # NOTE(security): yaml.load without an explicit Loader can execute
        # arbitrary code embedded in the file; only feed this tool trusted
        # resource files (yaml.safe_load is the safe alternative).
        self._resource_distribution = yaml.load(resources_file)
        self._cassandra_batch_size = cassandra_batch_size
        self._rules_per_security_group = rules_per_security_group
        self._keystone_client = keystone_client

        # Connect to cassandra database
        logger.debug("Initilizing the cassandra connection on %s",
                     cassandra_servers)
        cassandra_credentials = {}
        if (cassandra_username is not None and cassandra_password is not None):
            cassandra_credentials = {
                'username': cassandra_username,
                'password': cassandra_password,
            }

        def vnc_cassandra_client_logger(msg, level=logging.INFO):
            # Adapt VncCassandraClient's (msg, level) callback to the
            # module-level logger.
            logger.log(msg=msg, level=level)

        self._cassandra_db = VncCassandraClient(
            cassandra_servers,
            db_prefix,
            self._UUID_KEYSPACE,
            None,
            vnc_cassandra_client_logger,
            credential=cassandra_credentials)
        self._uuid_cf = self._cassandra_db.get_cf('obj_uuid_table')
        self._fqname_cf = self._cassandra_db.get_cf('obj_fq_name_table')

        # Initilize zookeeper client
        self._zk_client = ZookeeperClient(zookeeper_servers)

    def sanitize_resources(self):
        """Instantiate a loader object per supported resource type and warn
        about unsupported types present in the distribution file."""
        logger.debug("Santizing resources distribution")
        self._resource_map = OrderedDict()
        for resource_type in self._SUPPORTED_RESOURCES:
            object_path = 'contrail_db_loader.resources.%s.%s' %\
                          (resource_type.replace('-', '_'),
                           camel_case(resource_type))
            kwargs = {
                'db_manager': self._cassandra_db,
                'batch_size': self._cassandra_batch_size,
                'zk_client': self._zk_client,
                'project_amount': self._resource_distribution.get('project',
                                                                  0),
                'amount_per_project': self._resource_distribution.get(
                    resource_type, 0),
            }
            self._resource_map[resource_type] = import_object(
                object_path, **kwargs)

        resources_not_supported = (set(self._resource_distribution.keys()) -
                                   set(self._SUPPORTED_RESOURCES))
        if resources_not_supported:
            logger.warning('Loading resources %s are not supported' %
                           ', '.join(resources_not_supported))

    def summarize_resources_to_create(self):
        """Log a summary of what will be created and, unless --force was
        given, ask the user for confirmation."""
        msg = """Will populate %(project)d projects with:
    - security groups:           %(sg)d
    - access control lists:      %(acl)d
    - virtual networks:          %(vn)d
    - routing instances:         %(ri)d
    - route targets:             %(rt)d
    - virtual machine interface: %(vmi)d
    - virtual machine:           %(vm)d
    - intance ip:                %(iip)d
That will load %(sum)d resources into database."""
        # Renamed from 'dict', which shadowed the builtin.
        counts = {
            'project':
            self._resource_map['project'].total_amount,
            'sg':
            self._resource_map['security-group'].amount_per_project + 1,
            'acl':
            (self._resource_map['security-group'].amount_per_project + 1) * 2,
            'vn':
            self._resource_map['virtual-network'].amount_per_project,
            'ri':
            self._resource_map['virtual-network'].amount_per_project,
            'rt':
            self._resource_map['virtual-network'].amount_per_project,
            'vmi':
            self._resource_map['virtual-machine-interface'].amount_per_project,
            'vm':
            self._resource_map['virtual-machine-interface'].amount_per_project,
            'iip':
            self._resource_map['virtual-machine-interface'].amount_per_project,
        }
        counts['sum'] = sum(resource.total_amount
                            for resource in self._resource_map.values())
        # A single dict argument makes logging do named %-interpolation.
        logger.warning(msg, counts)
        if (not self._force and
                not prompt('Do you want to load that amount of resources?')):
            exit(0)

    def create_resources(self):
        """Create every resource batch, in _SUPPORTED_RESOURCES order."""
        self._zk_client.connect()
        for resource in self._resource_map.values():
            logger.info("Loading '%s' resources into the database...",
                        resource.type)
            # Some resource types need extra context to be created.
            if resource.type == 'project':
                _, time_elapsed = resource.create_resources(
                    self._keystone_client)
            elif resource.type == 'security-group':
                _, time_elapsed = resource.create_resources(
                    self._rules_per_security_group)
            elif resource.type == 'virtual-machine-interface':
                _, time_elapsed = resource.create_resources(
                    self._resource_map['virtual-network'].amount_per_project)
            else:
                _, time_elapsed = resource.create_resources()
            logger.info(
                "%d resources were created to load %d '%s' in "
                "%2.2f seconds.", resource.total_amount,
                resource.amount_per_project, resource.type, time_elapsed)
        self._zk_client.disconnect()
class SvcMonitor(object):

    """
    data + methods used/referred to by ssrc and arc greenlets
    """

    # Reaction map consumed by DependencyTracker (see
    # _vnc_subscribe_actions): for each object type, maps the source of a
    # change ('self' or a linked type) to the list of dependent object
    # types whose instances must be re-evaluated when that change occurs.
    # An empty list means the change does not propagate further.
    _REACTION_MAP = {
        "service_appliance_set": {
            'self': [],
            'service_appliance': []
        },
        "service_appliance": {
            'self': ['service_appliance_set'],
            'service_appliance_set': []
        },
        "loadbalancer_pool": {
            'self': [],
            'virtual_ip': [],
            'loadbalancer_member': [],
            'loadbalancer_healthmonitor': [],
        },
        "loadbalancer_member": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "virtual_ip": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "loadbalancer_healthmonitor": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "service_instance": {
            'self': ['virtual_machine'],
            'virtual_machine': []
        },
        "instance_ip": {
            'self': [],
        },
        "floating_ip": {
            'self': [],
        },
        "service_template": {
            'self': [],
        },
        "physical_router": {
            'self': [],
        },
        "physical_interface": {
            'self': [],
        },
        "logical_interface": {
            'self': [],
        },
        "virtual_network": {
            'self': [],
        },
        "virtual_machine": {
            'self': ['virtual_machine_interface'],
            'service_instance': [],
            'virtual_machine_interface': [],
        },
        "virtual_machine_interface": {
            'self': ['interface_route_table', 'virtual_machine'],
            'interface_route_table': [],
            'virtual_machine': [],
        },
        "interface_route_table": {
            'self': [],
            'virtual_machine_interface': [],
        },
        "project": {
            'self': [],
        },
    }

    def __init__(self, args=None):
        """Bootstrap the service monitor: database/logger setup, optional
        discovery client, error trace file, then rabbit and cassandra
        connections (via _connect_rabbit)."""
        self._args = args

        # create database and logger
        self.si_db = ServiceInstanceDB(args)

        # initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(self._args.disc_server_ip,
                                                self._args.disc_server_port,
                                                ModuleNames[Module.SVC_MONITOR])

        # initialize logger
        self.logger = ServiceMonitorLogger(self.si_db, self._disc, args)
        self.si_db.add_logger(self.logger)
        self.si_db.init_database()

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            # Opening in append mode first verifies the trace file is
            # writable before the rotating handler is attached.
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.log_warning("Failed to open trace file %s" %
                self._err_file)

        # Connect to Rabbit and Initialize cassandra connection
        self._connect_rabbit()

    def _connect_rabbit(self):
        """Open the rabbit notification channel and the cassandra client,
        then bind the DBBase model layer to this instance."""
        args = self._args

        # Set once the initial DB resync completes; notification handling
        # blocks on it (see _vnc_subscribe_callback).
        self._db_resync_done = gevent.event.Event()

        queue_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(
            args.rabbit_server, args.rabbit_port, args.rabbit_user,
            args.rabbit_password, args.rabbit_vhost, args.rabbit_ha_mode,
            queue_name, self._vnc_subscribe_callback, self.config_log)

        self._cassandra = VncCassandraClient(
            args.cassandra_server_list, args.reset_config,
            args.cluster_id, None, self.config_log)
        DBBase.init(self, self.logger, self._cassandra)
    # end _connect_rabbit

    def config_log(self, msg, level):
        """Forward a config message to the service monitor logger.

        NOTE(review): *level* is accepted for interface compatibility with
        the VncKombuClient/VncCassandraClient callback but is currently
        ignored — the underlying log call receives only *msg*.
        """
        self.logger.log(msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Entry point for rabbit notifications.

        Waits for the initial database resync to finish, then dispatches to
        _vnc_subscribe_actions; any failure is logged instead of killing
        the consumer.
        """
        self._db_resync_done.wait()
        try:
            self._vnc_subscribe_actions(oper_info)
        except Exception:
            # Must stay inside the except block: the error logger reads the
            # in-flight exception context.
            cgitb_error_log(self)

    def _vnc_subscribe_actions(self, oper_info):
        """Apply one CREATE/UPDATE/DELETE notification to the in-memory
        model, then re-evaluate every dependent resource collected by the
        dependency tracker (appliance sets, LB pools, service instances,
        networks, VMIs, floating IPs)."""
        # Initialized up front so the post-try processing can tell whether
        # the try block got far enough to gather dependencies.  The
        # original left this unbound, raising NameError below whenever an
        # early exception was swallowed by the except clause.
        dependency_tracker = None
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBase._OBJ_TYPE_MAP.get(obj_type)
            if obj_class is None:
                # Not an object type this service tracks.
                return

            if oper_info['oper'] == 'CREATE' or oper_info['oper'] == 'UPDATE':
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                    self._REACTION_MAP)
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is not None:
                    # Existing object: capture dependencies based on its
                    # pre-update references.
                    dependency_tracker.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                # Re-evaluate against the refreshed references.
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                dependency_tracker = DependencyTracker(DBBase._OBJ_TYPE_MAP,
                    self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # Fixed: config_log requires a level argument; the original
                # call omitted it and raised TypeError when reached.
                self.config_log('Error while accessing %s uuid %s' % (
                                obj_type, obj_id),
                                level=SandeshLevel.SYS_ERR)
                return

        except Exception:
            cgitb_error_log(self)

        if dependency_tracker is None:
            # The notification was filtered out or failed before any
            # dependencies were gathered; nothing to re-evaluate.
            return

        for sas_id in dependency_tracker.resources.get('service_appliance_set', []):
            sas_obj = ServiceApplianceSetSM.get(sas_id)
            if sas_obj is not None:
                sas_obj.add()

        for lb_pool_id in dependency_tracker.resources.get('loadbalancer_pool', []):
            lb_pool = LoadbalancerPoolSM.get(lb_pool_id)
            if lb_pool is not None:
                lb_pool.add()

        for si_id in dependency_tracker.resources.get('service_instance', []):
            si = ServiceInstanceSM.get(si_id)
            if si:
                self._create_service_instance(si)
            else:
                # SI is gone: tear down its virtual machines instead.
                for vm_id in dependency_tracker.resources.get(
                        'virtual_machine', []):
                    vm = VirtualMachineSM.get(vm_id)
                    self._delete_service_instance(vm)

        for vn_id in dependency_tracker.resources.get('virtual_network', []):
            vn = VirtualNetworkSM.get(vn_id)
            if vn:
                # Re-create any service instance that refers to this
                # network by fq_name in its parameters.
                for si_id in ServiceInstanceSM:
                    si = ServiceInstanceSM.get(si_id)
                    if (':').join(vn.fq_name) in si.params.values():
                        self._create_service_instance(si)

        for vmi_id in dependency_tracker.resources.get('virtual_machine_interface', []):
            vmi = VirtualMachineInterfaceSM.get(vmi_id)
            if vmi:
                for vm_id in dependency_tracker.resources.get(
                        'virtual_machine', []):
                    vm = VirtualMachineSM.get(vm_id)
                    if vm:
                        self.check_link_si_to_vm(vm, vmi)
            else:
                # VMI deleted: clean up its interface route tables.
                for irt_id in dependency_tracker.resources.get(
                        'interface_route_table', []):
                    self._delete_interface_route_table(irt_id)

        for fip_id in dependency_tracker.resources.get('floating_ip', []):
            fip = FloatingIpSM.get(fip_id)
            if fip:
                for vmi_id in fip.virtual_machine_interfaces:
                    vmi = VirtualMachineInterfaceSM.get(vmi_id)
                    if vmi and vmi.virtual_ip:
                        self.netns_manager.add_fip_to_vip_vmi(vmi, fip)

    def post_init(self, vnc_lib, args=None):
        """Finish initialization once an API-server connection exists.

        Wires up the nova client, the vrouter scheduler and the three
        instance managers, resyncs the in-memory DB from cassandra, seeds
        the default service templates, runs the upgrade fixup and finally
        launches any pending service instances.
        """
        self._vnc_lib = vnc_lib

        self._nova_client = importutils.import_object(
            'svc_monitor.nova_client.ServiceMonitorNovaClient',
            self._args, self.logger)

        # load vrouter scheduler
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver,
            self._vnc_lib, self._nova_client,
            self._args)

        # the three instance managers share one constructor signature
        manager_args = (self._vnc_lib, self.si_db, self.logger,
                        self.vrouter_scheduler, self._nova_client, self._args)
        # virtual machine instance manager
        self.vm_manager = importutils.import_object(
            'svc_monitor.virtual_machine_manager.VirtualMachineManager',
            *manager_args)
        # network namespace instance manager
        self.netns_manager = importutils.import_object(
            'svc_monitor.instance_manager.NetworkNamespaceManager',
            *manager_args)
        # vrouter instance manager
        self.vrouter_manager = importutils.import_object(
            'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
            *manager_args)

        # load a loadbalancer agent
        self.loadbalancer_agent = LoadbalancerAgent(self, self._vnc_lib, self._args)

        # Read the cassandra and populate the entry in ServiceMonitor DB
        self.sync_sm()

        # seed the default service templates (name, service type, kwargs)
        default_templates = [
            ('analyzer-template', 'analyzer',
             dict(flavor='m1.medium',
                  image_name='analyzer')),
            ('nat-template', 'firewall',
             dict(svc_mode='in-network-nat',
                  image_name='analyzer',
                  flavor='m1.medium')),
            ('netns-snat-template', 'source-nat',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace',
                  scaling=True)),
            ('haproxy-loadbalancer-template', 'loadbalancer',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace',
                  scaling=True)),
            ('docker-template', 'firewall',
             dict(svc_mode='transparent',
                  image_name="ubuntu",
                  hypervisor_type='vrouter-instance',
                  vrouter_instance_type='docker',
                  instance_data={"command": "/bin/bash"})),
        ]
        for st_name, svc_type, st_kwargs in default_templates:
            self._create_default_template(st_name, svc_type, **st_kwargs)

        # upgrade handling
        self.upgrade()

        # check services
        self.launch_services()

        self._db_resync_done.set()

    def upgrade(self):
        """One-time upgrade fixup: rename legacy service VMs.

        Older releases created service VMs whose names did not carry the
        expected instance-name/virtualization-type encoding; detect such
        VMs (no virtualization_type in the cache), rename them in nova and
        patch their display name in the API server.
        """
        for si in ServiceInstanceSM.values():
            st = ServiceTemplateSM.get(si.service_template)
            if not st:
                continue

            for vm_id in si.virtual_machines:
                vm = VirtualMachineSM.get(vm_id)
                # BUGFIX: vm may be None (stale uuid) -- previously this
                # raised AttributeError on vm.virtualization_type
                if vm is None or vm.virtualization_type:
                    continue
                nova_vm = self._nova_client.oper('servers', 'get',
                    si.proj_name, id=vm_id)
                if not nova_vm:
                    continue

                # build a minimal SI object just to derive the proper name
                si_obj = ServiceInstance()
                si_obj.name = si.name
                si_obj.fq_name = si.fq_name
                instance_name = self.vm_manager._get_instance_name(
                    si_obj, vm.index)
                if vm.name == instance_name:
                    continue
                nova_vm.update(name=instance_name)
                vm_obj = VirtualMachine()
                vm_obj.uuid = vm_id
                vm_obj.fq_name = [vm_id]
                vm_obj.set_display_name(instance_name + '__' +
                    st.virtualization_type)
                try:
                    self._vnc_lib.virtual_machine_update(vm_obj)
                except Exception:
                    # best effort; a failed update is retried on next restart
                    pass

    def launch_services(self):
        """Ask _create_service_instance to (re)start every cached SI."""
        for svc_instance in ServiceInstanceSM.values():
            self._create_service_instance(svc_instance)

    def sync_sm(self):
        """Resync the in-memory service-monitor caches from cassandra.

        Walks every relevant object list and locates (instantiates) the
        corresponding *SM cache entry per uuid, re-links orphan service
        VMs to their service instances, then primes the loadbalancer
        agent and audits its pools.
        """
        # NOTE(review): these sets are populated but never read afterwards;
        # kept for parity with the original code -- confirm before removing.
        vn_set = set()
        vmi_set = set()
        iip_set = set()

        # Every _cassandra_*_list() helper returns (ok, [(fq_name, uuid)...]).
        # A failed read is skipped so the rest of the sync still proceeds.
        ok, lb_pool_list = self._cassandra._cassandra_loadbalancer_pool_list()
        if ok:
            for fq_name, uuid in lb_pool_list:
                lb_pool = LoadbalancerPoolSM.locate(uuid)
                if lb_pool.virtual_machine_interface:
                    vmi_set.add(lb_pool.virtual_machine_interface)

        ok, lb_pool_member_list = \
            self._cassandra._cassandra_loadbalancer_member_list()
        if ok:
            for fq_name, uuid in lb_pool_member_list:
                LoadbalancerMemberSM.locate(uuid)

        ok, lb_vip_list = self._cassandra._cassandra_virtual_ip_list()
        if ok:
            for fq_name, uuid in lb_vip_list:
                virtual_ip = VirtualIpSM.locate(uuid)
                if virtual_ip.virtual_machine_interface:
                    vmi_set.add(virtual_ip.virtual_machine_interface)

        ok, lb_hm_list = \
            self._cassandra._cassandra_loadbalancer_healthmonitor_list()
        if ok:
            for fq_name, uuid in lb_hm_list:
                HealthMonitorSM.locate(uuid)

        ok, si_list = self._cassandra._cassandra_service_instance_list()
        if ok:
            for fq_name, uuid in si_list:
                ServiceInstanceSM.locate(uuid)

        ok, st_list = self._cassandra._cassandra_service_template_list()
        if ok:
            for fq_name, uuid in st_list:
                ServiceTemplateSM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if ok:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkSM.locate(uuid)
                vmi_set |= vn.virtual_machine_interfaces

        ok, ifd_list = self._cassandra._cassandra_physical_interface_list()
        if ok:
            for fq_name, uuid in ifd_list:
                PhysicalInterfaceSM.locate(uuid)

        ok, ifl_list = self._cassandra._cassandra_logical_interface_list()
        if ok:
            for fq_name, uuid in ifl_list:
                ifl = LogicalInterfaceSM.locate(uuid)
                if ifl.virtual_machine_interface:
                    vmi_set.add(ifl.virtual_machine_interface)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if ok:
            for fq_name, uuid in pr_list:
                PhysicalRouterSM.locate(uuid)

        ok, vr_list = self._cassandra._cassandra_virtual_router_list()
        if ok:
            for fq_name, uuid in vr_list:
                VirtualRouterSM.locate(uuid)

        ok, vmi_list = \
            self._cassandra._cassandra_virtual_machine_interface_list()
        if ok:
            for fq_name, uuid in vmi_list:
                vmi = VirtualMachineInterfaceSM.locate(uuid)
                if vmi.instance_ip:
                    iip_set.add(vmi.instance_ip)

        ok, irt_list = self._cassandra._cassandra_interface_route_table_list()
        if ok:
            for fq_name, uuid in irt_list:
                InterfaceRouteTableSM.locate(uuid)

        ok, project_list = self._cassandra._cassandra_project_list()
        if ok:
            for fq_name, uuid in project_list:
                ProjectSM.locate(uuid)

        ok, sas_list = self._cassandra._cassandra_service_appliance_set_list()
        if ok:
            for fq_name, uuid in sas_list:
                ServiceApplianceSetSM.locate(uuid)

        ok, sa_list = self._cassandra._cassandra_service_appliance_list()
        if ok:
            for fq_name, uuid in sa_list:
                ServiceApplianceSM.locate(uuid)

        ok, domain_list = self._cassandra._cassandra_domain_list()
        if ok:
            for fq_name, uuid in domain_list:
                DomainSM.locate(uuid)

        ok, iip_list = self._cassandra._cassandra_instance_ip_list()
        if ok:
            for fq_name, uuid in iip_list:
                InstanceIpSM.locate(uuid)

        ok, fip_list = self._cassandra._cassandra_floating_ip_list()
        if ok:
            for fq_name, uuid in fip_list:
                FloatingIpSM.locate(uuid)

        ok, sg_list = self._cassandra._cassandra_security_group_list()
        if ok:
            for fq_name, uuid in sg_list:
                SecurityGroupSM.locate(uuid)

        ok, vm_list = self._cassandra._cassandra_virtual_machine_list()
        if ok:
            for fq_name, uuid in vm_list:
                vm = VirtualMachineSM.locate(uuid)
                # already linked to its service instance -- nothing to fix
                if vm.service_instance:
                    continue
                for vmi_id in vm.virtual_machine_interfaces:
                    vmi = VirtualMachineInterfaceSM.get(vmi_id)
                    if not vmi:
                        continue
                    self.check_link_si_to_vm(vm, vmi)

        # Load the loadbalancer driver
        self.loadbalancer_agent.load_drivers()

        for lb_pool in LoadbalancerPoolSM.values():
            lb_pool.add()

        # Audit the lb pools
        self.loadbalancer_agent.audit_lb_pools()

    # end sync_sm

    # create service template
    def _create_default_template(self, st_name, svc_type, svc_mode=None,
                                 hypervisor_type='virtual-machine',
                                 image_name=None, flavor=None, scaling=False,
                                 vrouter_instance_type=None,
                                 instance_data=None):
        """Create a default service template under default-domain.

        No-op when the template already exists or the default domain is
        not present in the cache.  On success the new template is also
        located into the local ServiceTemplateSM cache.
        """
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self.logger.log_info("Creating %s %s hypervisor %s" %
            (domain_name, st_name, hypervisor_type))

        # build a vnc-api Domain object for the cached default domain
        domain_obj = None
        for domain in DomainSM.values():
            if domain.fq_name == domain_fq_name:
                domain_obj = Domain()
                domain_obj.uuid = domain.uuid
                domain_obj.fq_name = domain_fq_name
                break
        if not domain_obj:
            self.logger.log_error("%s domain not found" % (domain_name))
            return

        for st in ServiceTemplateSM.values():
            if st.fq_name == st_fq_name:
                self.logger.log_info("%s exists uuid %s" %
                    (st.name, str(st.uuid)))
                return

        svc_properties = ServiceTemplateType()
        svc_properties.set_service_type(svc_type)
        svc_properties.set_service_mode(svc_mode)
        svc_properties.set_service_virtualization_type(hypervisor_type)
        svc_properties.set_image_name(image_name)
        svc_properties.set_flavor(flavor)
        svc_properties.set_ordered_interfaces(True)
        svc_properties.set_service_scaling(scaling)

        # set interface list
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        elif hypervisor_type == 'network-namespace':
            if_list = [['right', True], ['left', True]]
        else:
            if_list = [
                ['management', False], ['left', False], ['right', False]]

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        if vrouter_instance_type is not None:
            svc_properties.set_vrouter_instance_type(vrouter_instance_type)

        if instance_data is not None:
            svc_properties.set_instance_data(
                json.dumps(instance_data, separators=(',', ':')))

        # BUGFIX: pass the vnc-api Domain object built above, not the
        # DomainSM cache entry left in the loop variable 'domain'
        st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
        st_obj.set_service_template_properties(svc_properties)
        try:
            st_uuid = self._vnc_lib.service_template_create(st_obj)
        except Exception as e:
            self.logger.log_error("%s create failed with error %s" %
                (st_name, str(e)))
            return

        # Create the service template in local db
        ServiceTemplateSM.locate(st_uuid)

        self.logger.log_info("%s created with uuid %s" %
            (st_name, str(st_uuid)))
    # end _create_default_template

    def check_link_si_to_vm(self, vm, vmi):
        """Re-link an orphan service VM to its service instance.

        The VMI name is expected to encode
        'domain__project__si-name__<1-based index>'; use it to find the
        owning SI and re-establish the link via the VM manager.
        """
        if vm.service_instance:
            return
        if not vmi.if_type:
            return

        # BUGFIX: split once and guard the length -- previously an
        # unconventionally named VMI raised IndexError on parts[3]
        parts = vmi.name.split('__')
        if len(parts) < 4:
            return
        si_fq_name = parts[0:3]
        index = int(parts[3]) - 1
        for si in ServiceInstanceSM.values():
            if si.fq_name != si_fq_name:
                continue
            st = ServiceTemplateSM.get(si.service_template)
            self.vm_manager.link_si_to_vm(si, st, index, vm.uuid)
            return

    def _create_service_instance(self, si):
        """Launch *si* through the manager matching its template's
        virtualization type; bumps launch_count even on failure."""
        if si.state == 'active':
            return
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            self.logger.log_error("template not found for %s" %
                ((':').join(si.fq_name)))
            return

        # dispatch on the template's virtualization type
        managers = {
            'virtual-machine': self.vm_manager,
            'network-namespace': self.netns_manager,
            'vrouter-instance': self.vrouter_manager,
        }
        try:
            manager = managers.get(st.virtualization_type)
            if manager is not None:
                manager.create_service(st, si)
            else:
                self.logger.log_error("Unknown virt type: %s" %
                    st.virtualization_type)
        except Exception:
            cgitb_error_log(self)
        si.launch_count += 1

    def _delete_service_instance(self, vm):
        """Tear down the service VM and emit a DELETE UVE for its SI."""
        self.logger.log_info("Deleting VM %s %s" %
            ((':').join(vm.proj_fq_name), vm.uuid))

        try:
            virt_type = vm.virtualization_type
            if virt_type == svc_info.get_vm_instance_type():
                self.vm_manager.delete_service(vm)
            elif virt_type == svc_info.get_netns_instance_type():
                self.netns_manager.delete_service(vm)
            elif virt_type == 'vrouter-instance':
                self.vrouter_manager.delete_service(vm)
        except Exception:
            cgitb_error_log(self)

        # generate UVE; the SI fq_name is the display name minus the
        # trailing '__<index>__<type>' components
        si_fq_str = (':').join(vm.display_name.split('__')[:-2])
        self.logger.uve_svc_instance(si_fq_str, status='DELETE',
                                     vms=[{'uuid': vm.uuid}])
        return True

    def _relaunch_service_instance(self, si):
        # Force a relaunch: reset the state so _create_service_instance
        # does not short-circuit on state == 'active'.
        si.state = 'relaunch'
        self._create_service_instance(si)

    def _check_service_running(self, si):
        """Return the running status of *si* as reported by its manager.

        Returns False when the template is missing or its virtualization
        type is unknown (previously those paths raised AttributeError /
        UnboundLocalError on 'status').
        """
        status = False
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            return status
        if st.virtualization_type == 'virtual-machine':
            status = self.vm_manager.check_service(si)
        elif st.virtualization_type == 'network-namespace':
            status = self.netns_manager.check_service(si)
        elif st.virtualization_type == 'vrouter-instance':
            status = self.vrouter_manager.check_service(si)

        return status

    def _delete_interface_route_table(self, irt_uuid):
        """Best-effort delete of an interface route table."""
        try:
            self._vnc_lib.interface_route_table_delete(id=irt_uuid)
        except (NoIdError, RefsExistError):
            # already gone, or still referenced -- nothing more to do
            pass

    def _delete_shared_vn(self, vn_uuid):
        """Best-effort delete of a shared service virtual network."""
        try:
            self.logger.log_info("Deleting vn %s" % (vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
        except (NoIdError, RefsExistError):
            # already deleted, or still referenced -- ignore
            pass

    @staticmethod
    def reset():
        """Clear every cached object type registered with the DB layer."""
        for obj_cls in DBBase._OBJ_TYPE_MAP.values():
            obj_cls.reset()
class ICCassandraClient():
    """ISSU (In-Service Software Upgrade) cassandra copier.

    Opens handles to both the old- and new-version cassandra clusters and
    copies / merges the configured keyspaces from the old DB to the new.
    """

    def _issu_basic_function(self, kspace, cfam, cols):
        """Default per-row transform: return the columns unchanged."""
        return dict(cols)

    # end

    def __init__(self, oldversion_server_list, newversion_server_list,
                 old_user, old_password, new_user, new_password, odb_prefix,
                 ndb_prefix, issu_info, logger):
        """Record endpoints/credentials and open both cluster handles.

        issu_info: iterable of (issu_func, keyspace, cf_list) tuples; a
        None issu_func falls back to _issu_basic_function.
        """
        self._oldversion_server_list = oldversion_server_list
        self._newversion_server_list = newversion_server_list
        self._odb_prefix = odb_prefix
        self._ndb_prefix = ndb_prefix
        self._issu_info = issu_info
        self._logger = logger
        self._ks_issu_func_info = {}   # keyspace -> row transform function
        self._nkeyspaces = {}          # new-version keyspace -> CF list
        self._okeyspaces = {}          # old-version keyspace -> CF list

        # credentials are only used when both user and password are set
        self._old_creds = None
        if old_user and old_password:
            self._old_creds = {
                'username': old_user,
                'password': old_password,
            }
        self._new_creds = None
        if new_user and new_password:
            self._new_creds = {
                'username': new_user,
                'password': new_password,
            }

        self._logger(
            "Issu contrail cassandra initialized...",
            level=SandeshLevel.SYS_INFO,
        )
        self.issu_prepare()

    # end

    def issu_prepare(self):
        """Build the keyspace maps and connect to both clusters."""
        self._logger(
            "Issu contrail cassandra prepare...",
            level=SandeshLevel.SYS_INFO,
        )
        for issu_func, ks, cflist in self._issu_info:
            if issu_func is None:
                issu_func = self._issu_basic_function
            self._nkeyspaces.update({ks: cflist})
            self._okeyspaces.update({ks: cflist})
            self._ks_issu_func_info.update({ks: issu_func})

        # old cluster: keyspaces passed read-only; new cluster: read-write
        self._oldversion_handle = VncCassandraClient(
            self._oldversion_server_list,
            self._odb_prefix,
            None,
            self._okeyspaces,
            self._logger,
            credential=self._old_creds)

        self._newversion_handle = VncCassandraClient(
            self._newversion_server_list,
            self._ndb_prefix,
            self._nkeyspaces,
            None,
            self._logger,
            credential=self._new_creds)

    # end

    def _fetch_issu_func(self, ks):
        """Return the row-transform function registered for keyspace ks."""
        return self._ks_issu_func_info[ks]

    # end

    # Overwrite what is seen in the newer with the old version.
    def _merge_overwrite(self, new, current):
        """Merge dicts in place: entries of 'new' win over 'current'.

        NOTE: mutates and returns the 'current' dict.
        """
        updated = current
        updated.update(new)
        return updated

    # end

    #  For now this function should be called for only config_db_uuid.
    #  If we separate out config_db_uuid keyspace from VncCassandraClient,
    #  then we don't need to pass keyspaces here.
    def issu_merge_copy(self, keyspaces):
        """Merge every old-DB row into the new DB, pruning rows and
        columns that exist only in the new DB."""
        for ks, cflist in keyspaces.items():
            self._logger("Issu contrail cassandra merge copy, keyspace: " +
                         str(ks),
                         level=SandeshLevel.SYS_INFO)
            issu_funct = self._fetch_issu_func(ks)
            for cf in cflist:
                newversion_result = self._newversion_handle.get_range(cf) or {}
                self._logger("Building New DB memory for columnfamily: " +
                             str(cf),
                             level=SandeshLevel.SYS_INFO)
                new_db = dict(newversion_result)

                oldversion_result = self._oldversion_handle.get_range(cf) or {}
                self._logger("Doing ISSU copy for columnfamily: " + str(cf),
                             level=SandeshLevel.SYS_INFO)
                for rows, columns in oldversion_result:
                    out = issu_funct(ks, cf, columns)
                    current = new_db.pop(rows, None)
                    if current is not None:
                        updated = self._merge_overwrite(out, dict(current))
                        self._newversion_handle.add(cf, rows, updated)
                    else:
                        updated = []
                        self._newversion_handle.add(cf, rows, out)
                    # columns present only in the new DB are removed
                    diff = set(updated) - set(out)
                    self._newversion_handle.get_cf(cf).remove(rows, diff)
                self._logger(
                    "Pruning New DB if entires don't exist in old DB column "
                    "family: " + str(cf),
                    level=SandeshLevel.SYS_INFO)
                for item in new_db:
                    # TODO: should catch exceptions and fail the ISSU
                    self._newversion_handle.delete(cf, item)

    # end

    #  This is issu_copy function.
    def issu_copy(self, keyspaces):
        """Plain copy: transform and write every old-DB row to the new DB."""
        for ks, cflist in keyspaces.items():
            issu_funct = self._fetch_issu_func(ks)
            for cf in cflist:
                self._logger("Issu Copy KeySpace: " + str(ks) +
                             " Column Family: " + str(cf),
                             level=SandeshLevel.SYS_INFO)
                oldversion_result = self._oldversion_handle.get_range(cf) or {}

                for rows, columns in oldversion_result:
                    out = issu_funct(ks, cf, columns)
                    # TODO: if the add fails, the ISSU should be failed
                    self._newversion_handle.add(cf, rows, out)

    # end

    def issu_sync_row(self, msg, cf):
        """Apply one live rabbit message (CREATE/UPDATE/DELETE) to the
        new DB while the upgrade is in progress."""
        if msg['oper'] == "CREATE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            try:
                self._newversion_handle.object_create(msg['type'], msg['uuid'],
                                                      msg['obj_dict'])
            except Exception as e:
                self._logger(str(e), level=SandeshLevel.SYS_ERR)

        elif msg['oper'] == "UPDATE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            uuid_list = [msg['uuid']]
            try:
                bool1, current = self._newversion_handle.object_read(
                    msg['type'], uuid_list)
                bool2, new = self._oldversion_handle.object_read(
                    msg['type'], uuid_list)
            except Exception as e:
                self._logger(str(e), level=SandeshLevel.SYS_ERR)
                return
            updated = self._merge_overwrite(dict(new.pop()),
                                            dict(current.pop()))
            #  New object dictionary should be created, for now passing as is
            try:
                self._newversion_handle.object_update(msg['type'], msg['uuid'],
                                                      updated)
            except Exception as e:
                self._logger(str(e), level=SandeshLevel.SYS_ERR)

        elif msg['oper'] == "DELETE":
            self._logger(msg, level=SandeshLevel.SYS_INFO)
            try:
                self._newversion_handle.object_delete(msg['type'], msg['uuid'])
            except Exception as e:
                self._logger(str(e), level=SandeshLevel.SYS_ERR)
        return
class SvcMonitor(object):

    """
    data + methods used/referred to by ssrc and arc greenlets
    """
    # Maps an object type to the related types whose cached entries must be
    # re-evaluated when an object of that type changes; consumed by
    # DependencyTracker when processing rabbit notifications.
    _REACTION_MAP = {
        "loadbalancer_pool": {
            'self': [],
            'virtual_ip': [],
            'loadbalancer_member': [],
            'loadbalancer_healthmonitor': [],
        },
        "loadbalancer_member": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "virtual_ip": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "loadbalancer_healthmonitor": {
            'self': ['loadbalancer_pool'],
            'loadbalancer_pool': []
        },
        "service_instance": {
            'self': [],
        },
        "service_template": {
            'self': [],
        }
    }

    def __init__(self, args=None):
        """Set up the DB, discovery client, logger and error trace file."""
        self._args = args

        # create database and logger
        self.db = ServiceMonitorDB(args)

        # initialize discovery client (only when both ip and port are set)
        self._disc = None
        disc_ip = self._args.disc_server_ip
        disc_port = self._args.disc_server_port
        if disc_ip and disc_port:
            self._disc = client.DiscoveryClient(
                disc_ip, disc_port, ModuleNames[Module.SVC_MONITOR])

        # initialize logger
        self.logger = ServiceMonitorLogger(self.db, self._disc, args)
        self.db.add_logger(self.logger)
        self.db.init_database()

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            # touch the file first so an open failure surfaces here
            with open(self._err_file, 'a'):
                rotating = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64 * 1024, backupCount=2)
                self._svc_err_logger.addHandler(rotating)
        except IOError:
            self.logger.log("Failed to open trace file %s" % self._err_file)

        # Connect to Rabbit and Initialize cassandra connection
        # TODO activate this code
        # self._connect_rabbit()

    def _connect_rabbit(self):
        """Connect to rabbit for config notifications and open cassandra.

        Currently not called -- see the TODO in __init__.
        """
        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost

        self._db_resync_done = gevent.event.Event()

        # per-host queue name so concurrent monitors don't share a queue
        q_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, q_name,
                                         self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        reset_config = self._args.reset_config
        # NOTE(review): the positional order (server_list, reset_config,
        # cluster_id, ...) does not match the keyword usage of
        # VncCassandraClient elsewhere in this file -- verify the intended
        # signature before enabling this code path.
        self._cassandra = VncCassandraClient(cass_server_list, reset_config,
                                             self._args.cluster_id, None,
                                             self.config_log)
        DBBase.init(self, self.logger, self._cassandra)
    # end _connect_rabbit

    def config_log(self, msg, level):
        """Log msg via the service-monitor logger.

        NOTE: 'level' is required by callers' signature but is currently
        not forwarded to the underlying logger.
        """
        self.logger.log(msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Handle a rabbit notification for a config object.

        Creates/updates/deletes the cached object, then re-evaluates
        dependent loadbalancer pools via the dependency tracker.  Waits
        for the initial DB resync before processing anything.
        """
        # BUGFIX: removed leftover 'import pdb;pdb.set_trace()' breakpoint
        self._db_resync_done.wait()
        # BUGFIX: initialize so an early exception cannot leave the name
        # unbound for the loop after the try block (NameError)
        dependency_tracker = None
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBase._OBJ_TYPE_MAP.get(obj_type)
            if obj_class is None:
                return

            if oper_info['oper'] == 'CREATE' or oper_info['oper'] == 'UPDATE':
                dependency_tracker = DependencyTracker(
                    DBBase._OBJ_TYPE_MAP, self._REACTION_MAP)
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is not None:
                    dependency_tracker.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                dependency_tracker = DependencyTracker(
                    DBBase._OBJ_TYPE_MAP, self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # BUGFIX: config_log requires 'level'; the old call
                # omitted it and raised TypeError
                self.config_log('Error while accessing %s uuid %s' % (
                                obj_type, obj_id),
                                level=SandeshLevel.SYS_ERR)
                return

        except Exception:
            string_buf = cStringIO.StringIO()
            cgitb.Hook(file=string_buf, format="text").handle(sys.exc_info())
            self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)

        if dependency_tracker is None:
            return

        for lb_pool_id in dependency_tracker.resources.get('loadbalancer_pool', []):
            lb_pool = LoadbalancerPoolSM.get(lb_pool_id)
            if lb_pool is not None:
                lb_pool.add()
    # end _vnc_subscribe_callback


    def post_init(self, vnc_lib, args=None):
        """Finish initialization once the VNC API library is available.

        Stores the API handle, seeds the default service templates,
        instantiates the nova client, the vrouter scheduler and one
        instance manager per virtualization type, then resyncs the DB.
        """
        # api server handle used by everything below
        self._vnc_lib = vnc_lib

        # default service templates, created in this fixed order
        default_templates = [
            ('analyzer-template', 'analyzer',
             dict(flavor='m1.medium', image_name='analyzer')),
            ('nat-template', 'firewall',
             dict(svc_mode='in-network-nat', image_name='analyzer',
                  flavor='m1.medium')),
            ('netns-snat-template', 'source-nat',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace', scaling=True)),
            ('haproxy-loadbalancer-template', 'loadbalancer',
             dict(svc_mode='in-network-nat',
                  hypervisor_type='network-namespace', scaling=True)),
            ('docker-template', 'firewall',
             dict(svc_mode='transparent', image_name="ubuntu",
                  hypervisor_type='vrouter-instance',
                  vrouter_instance_type='docker',
                  instance_data={"command": "/bin/bash"})),
        ]
        for tmpl_name, tmpl_type, tmpl_kwargs in default_templates:
            self._create_default_template(tmpl_name, tmpl_type, **tmpl_kwargs)

        self._nova_client = importutils.import_object(
            'svc_monitor.nova_client.ServiceMonitorNovaClient',
            self._args, self.logger)

        # scheduler that places netns/vrouter service instances on vrouters
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver,
            self._vnc_lib, self._nova_client,
            self._args)

        # one instance manager per virtualization type, all built with the
        # same constructor arguments
        manager_specs = [
            ('vm_manager',
             'svc_monitor.virtual_machine_manager.VirtualMachineManager'),
            ('netns_manager',
             'svc_monitor.instance_manager.NetworkNamespaceManager'),
            ('vrouter_manager',
             'svc_monitor.vrouter_instance_manager.VRouterInstanceManager'),
        ]
        for attr_name, cls_path in manager_specs:
            setattr(self, attr_name, importutils.import_object(
                cls_path, self._vnc_lib, self.db, self.logger,
                self.vrouter_scheduler, self._nova_client, self._args))

        # load a loadbalancer agent
        # TODO : activate the code
        # self.loadbalancer_agent = LoadbalancerAgent(self._vnc_lib, self._args)

        # Read the cassandra and populate the entry in ServiceMonitor DB
        # TODO : activate the code
        # self.sync_sm()

        # resync db
        self.db_resync()

    def db_resync(self):
        """Re-populate per-interface info in the SI DB from the API server.

        For every stored service instance, look up each launched VM and
        record the uuid of its service VMIs, keyed by interface type.
        """
        for si_fq_str, si_info in self.db.service_instance_list() or []:
            max_inst = int(si_info.get('max-instances', '0'))
            for inst_idx in range(0, max_inst):
                db_prefix = self.db.get_vm_db_prefix(inst_idx)
                vm_uuid_key = db_prefix + 'uuid'
                if vm_uuid_key not in si_info.keys():
                    continue

                try:
                    vm_obj = self._vnc_lib.virtual_machine_read(
                        id=si_info[vm_uuid_key])
                except NoIdError:
                    # VM no longer exists; skip this slot
                    continue

                vmi_refs = vm_obj.get_virtual_machine_interface_back_refs()
                for vmi_ref in vmi_refs or []:
                    try:
                        vmi_obj = \
                            self._vnc_lib.virtual_machine_interface_read(
                                id=vmi_ref['uuid'])
                    except NoIdError:
                        continue
                    vmi_props = \
                        vmi_obj.get_virtual_machine_interface_properties()
                    if not vmi_props:
                        continue
                    if_type = vmi_props.get_service_interface_type()
                    if not if_type:
                        continue
                    # record e.g. '<prefix>if-left' -> vmi uuid
                    self.db.service_instance_insert(
                        si_fq_str,
                        {db_prefix + 'if-' + if_type: vmi_obj.uuid})

    def sync_sm(self):
        """Prime the in-memory service-monitor caches from cassandra.

        Walks the loadbalancer/service object listings, locates each uuid
        in its corresponding SM cache class, triggers add() on every
        loadbalancer pool, then signals that the resync is done.
        """
        vmi_set = set()
        iip_set = set()

        ok, lb_pool_list = self._cassandra._cassandra_loadbalancer_pool_list()
        if ok:
            for fq_name, uuid in lb_pool_list:
                lb_pool = LoadbalancerPoolSM.locate(uuid)
                if lb_pool.virtual_machine_interface:
                    vmi_set.add(lb_pool.virtual_machine_interface)

        ok, lb_pool_member_list = \
            self._cassandra._cassandra_loadbalancer_member_list()
        if ok:
            for fq_name, uuid in lb_pool_member_list:
                LoadbalancerMemberSM.locate(uuid)

        ok, lb_vip_list = self._cassandra._cassandra_virtual_ip_list()
        if ok:
            for fq_name, uuid in lb_vip_list:
                virtual_ip = VirtualIpSM.locate(uuid)
                if virtual_ip.virtual_machine_interface:
                    vmi_set.add(virtual_ip.virtual_machine_interface)

        ok, lb_hm_list = \
            self._cassandra._cassandra_loadbalancer_healthmonitor_list()
        if ok:
            for fq_name, uuid in lb_hm_list:
                HealthMonitorSM.locate(uuid)

        ok, si_list = self._cassandra._cassandra_service_instance_list()
        if ok:
            for fq_name, uuid in si_list:
                ServiceInstanceSM.locate(uuid)

        ok, st_list = self._cassandra._cassandra_service_template_list()
        if ok:
            # BUG FIX: this loop previously iterated si_list and located
            # service instances a second time, so templates were never
            # cached.  Iterate the template list instead.
            # NOTE(review): assumes ServiceTemplateSM is imported alongside
            # the other *SM cache classes — confirm against the imports.
            for fq_name, uuid in st_list:
                ServiceTemplateSM.locate(uuid)

        # pull in the instance IPs referenced by the collected VMIs
        for vmi_id in vmi_set:
            vmi = VirtualMachineInterfaceSM.locate(vmi_id)
            if vmi.instance_ip:
                iip_set.add(vmi.instance_ip)

        for iip_id in iip_set:
            InstanceIpSM.locate(iip_id)

        for lb_pool in LoadbalancerPoolSM.values():
            lb_pool.add()

        self._db_resync_done.set()
    # end sync_sm

    # create service template
    def _create_default_template(self, st_name, svc_type, svc_mode=None,
                                 hypervisor_type='virtual-machine',
                                 image_name=None, flavor=None, scaling=False,
                                 vrouter_instance_type=None,
                                 instance_data=None):
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self.logger.log("Creating %s %s hypervisor %s" %
                         (domain_name, st_name, hypervisor_type))

        try:
            st_obj = self._vnc_lib.service_template_read(fq_name=st_fq_name)
            st_uuid = st_obj.uuid
            self.logger.log("%s exists uuid %s" % (st_name, str(st_uuid)))
            return
        except NoIdError:
            domain = self._vnc_lib.domain_read(fq_name=domain_fq_name)
            st_obj = ServiceTemplate(name=st_name, domain_obj=domain)
            st_uuid = self._vnc_lib.service_template_create(st_obj)

        svc_properties = ServiceTemplateType()
        svc_properties.set_service_type(svc_type)
        svc_properties.set_service_mode(svc_mode)
        svc_properties.set_service_virtualization_type(hypervisor_type)
        svc_properties.set_image_name(image_name)
        svc_properties.set_flavor(flavor)
        svc_properties.set_ordered_interfaces(True)
        svc_properties.set_service_scaling(scaling)

        # set interface list
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        elif hypervisor_type == 'network-namespace':
            if_list = [['right', True], ['left', True]]
        else:
            if_list = [
                ['management', False], ['left', False], ['right', False]]

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        if vrouter_instance_type is not None:
            svc_properties.set_vrouter_instance_type(vrouter_instance_type)

        if instance_data is not None:
            svc_properties.set_instance_data(
                json.dumps(instance_data, separators=(',', ':')))

        try:
            st_obj.set_service_template_properties(svc_properties)
            self._vnc_lib.service_template_update(st_obj)
        except Exception as e:
            print e

        self.logger.log("%s created with uuid %s" % (st_name, str(st_uuid)))
    #_create_default_analyzer_template

    def cleanup(self):
        """Shutdown hook; nothing needs releasing currently."""
        return
    # end cleanup

    def _get_proj_name_from_si_fq_str(self, si_fq_str):
        return si_fq_str.split(':')[1]
    # enf _get_si_fq_str_to_proj_name

    def _get_virtualization_type(self, st_props):
        return st_props.get_service_virtualization_type() or 'virtual-machine'
    # end _get_virtualization_type

    def _check_store_si_info(self, st_obj, si_obj):
        """Validate SI config against its template and persist it in the DB.

        Records instance type, uuid and the per-interface virtual networks
        of the service instance into the service-monitor DB.

        Returns:
            True when every referenced VN already exists (config complete),
            False when at least one VN is still pending,
            None on interface-count mismatch (error logged, nothing stored).
        """
        config_complete = True
        st_props = st_obj.get_service_template_properties()
        st_if_list = st_props.get_interface_type()
        si_props = si_obj.get_service_instance_properties()
        si_if_list = si_props.get_interface_list()
        # for lb relax the check because vip and pool could be in same net
        if (st_props.get_service_type() != svc_info.get_lb_service_type()) \
                and si_if_list and (len(si_if_list) != len(st_if_list)):
            self.logger.log("Error: IF mismatch template %s instance %s" %
                             (len(st_if_list), len(si_if_list)))
            return

        # read existing si_entry
        si_entry = self.db.service_instance_get(si_obj.get_fq_name_str())
        if not si_entry:
            si_entry = {}
        si_entry['instance_type'] = self._get_virtualization_type(st_props)
        si_entry['uuid'] = si_obj.uuid

        # walk the interface list
        for idx in range(0, len(st_if_list)):
            st_if = st_if_list[idx]
            itf_type = st_if.service_interface_type

            si_if = None
            if si_if_list and st_props.get_ordered_interfaces():
                # ordered templates: match SI interfaces positionally
                try:
                    si_if = si_if_list[idx]
                except IndexError:
                    continue
                si_vn_str = si_if.get_virtual_network()
            else:
                # legacy templates: use the per-type getter on SI properties,
                # e.g. get_left_virtual_network()
                funcname = "get_" + itf_type + "_virtual_network"
                func = getattr(si_props, funcname)
                si_vn_str = func()

            if not si_vn_str:
                continue

            si_entry[itf_type + '-vn'] = si_vn_str
            try:
                vn_obj = self._vnc_lib.virtual_network_read(
                    fq_name_str=si_vn_str)
                # refresh stored uuid only if it changed
                if vn_obj.uuid != si_entry.get(si_vn_str, None):
                    si_entry[si_vn_str] = vn_obj.uuid
            except NoIdError:
                # VN not created yet; mark pending so a later VN-add event
                # can re-trigger the launch
                self.logger.log("Warn: VN %s add is pending" % si_vn_str)
                si_entry[si_vn_str] = 'pending'
                config_complete = False

        if config_complete:
            self.logger.log("SI %s info is complete" %
                             si_obj.get_fq_name_str())
            si_entry['state'] = 'config_complete'
        else:
            self.logger.log("Warn: SI %s info is not complete" %
                             si_obj.get_fq_name_str())
            si_entry['state'] = 'pending_config'

        #insert entry
        self.db.service_instance_insert(si_obj.get_fq_name_str(), si_entry)
        return config_complete
    #end _check_store_si_info

    def _restart_svc(self, si_fq_str):
        si_obj = self._vnc_lib.service_instance_read(fq_name_str=si_fq_str)
        st_list = si_obj.get_service_template_refs()
        if st_list is not None:
            fq_name = st_list[0]['to']
            st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)
            self._create_svc_instance(st_obj, si_obj)
    # end _restart_svc

    def _create_svc_instance(self, st_obj, si_obj):
        #check if all config received before launch
        if not self._check_store_si_info(st_obj, si_obj):
            return

        st_props = st_obj.get_service_template_properties()
        if st_props is None:
            self.logger.log("Cannot find service template associated to "
                             "service instance %s" % si_obj.get_fq_name_str())
        virt_type = self._get_virtualization_type(st_props)

        if virt_type == 'virtual-machine':
            self.vm_manager.create_service(st_obj, si_obj)
        elif virt_type == 'network-namespace':
            self.netns_manager.create_service(st_obj, si_obj)
        elif virt_type == 'vrouter-instance':
            self.vrouter_manager.create_service(st_obj, si_obj)
        else:
            self.logger.log("Unkown virtualization type: %s" % virt_type)

    def _delete_svc_instance(self, vm_uuid, proj_name,
                             si_fq_str=None, virt_type=None):
        """Delete one launched service VM via its instance manager.

        Returns:
            True when the manager raised KeyError (the instance is no
            longer tracked — presumably already gone; verify against the
            managers), False once the delete was issued and the DELETE
            UVE was generated.  Callers treat False as "cleanup still in
            progress".
        """
        self.logger.log("Deleting VM %s %s" % (proj_name, vm_uuid))

        try:
            if virt_type == svc_info.get_vm_instance_type():
                self.vm_manager.delete_iip(vm_uuid)
                self.vm_manager.delete_service(si_fq_str, vm_uuid, proj_name)
            elif virt_type == svc_info.get_netns_instance_type():
                self.netns_manager.delete_service(si_fq_str, vm_uuid)
            elif virt_type == 'vrouter-instance':
                self.vrouter_manager.delete_service(si_fq_str, vm_uuid)
        except KeyError:
            # NOTE(review): KeyError is used as "instance unknown to the
            # manager" — confirm the managers really raise it for that case
            return True

        # generate UVE
        self.logger.uve_svc_instance(si_fq_str, status='DELETE',
                                     vms=[{'uuid': vm_uuid}])
        return False

    def _delete_shared_vn(self, vn_uuid, proj_name):
        try:
            vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid)
        except NoIdError:
            self.logger.log("Deleted VN %s %s" % (proj_name, vn_uuid))
            return True

        iip_back_refs = vn_obj.get_instance_ip_back_refs()
        for iip_back_ref in iip_back_refs or []:
            try:
                self._vnc_lib.instance_ip_delete(id=iip_back_ref['uuid'])
            except (NoIdError, RefsExistError):
                continue

        try:
            self.logger.log("Deleting VN %s %s" % (proj_name, vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
        except RefsExistError:
            pass
        except NoIdError:
            self.logger.log("Deleted VN %s %s" % (proj_name, vn_uuid))
            return True
        return False

    def _cleanup_si(self, si_fq_str):
        """Tear down all VMs and shared networks of a service instance.

        Marks the DB entry 'deleting' first; the entry itself is removed
        only after every VM delete and the SNAT VN delete succeeded.
        """
        si_info = self.db.service_instance_get(si_fq_str)
        if not si_info:
            return
        cleaned_up = True
        state = {}
        state['state'] = 'deleting'
        self.db.service_instance_insert(si_fq_str, state)
        proj_name = self._get_proj_name_from_si_fq_str(si_fq_str)

        # delete every launched VM recorded for this SI
        for idx in range(0, int(si_info.get('max-instances', '0'))):
            prefix = self.db.get_vm_db_prefix(idx)
            vm_key = prefix + 'uuid'
            if vm_key in si_info.keys():
                if not self._delete_svc_instance(
                        si_info[vm_key], proj_name, si_fq_str=si_fq_str,
                        virt_type=si_info['instance_type']):
                    cleaned_up = False

        # the per-SI SNAT left network is named after the SI itself
        if cleaned_up:
            vn_name = 'snat-si-left_%s' % si_fq_str.split(':')[-1]
            if vn_name in si_info.keys():
                if not self._delete_shared_vn(si_info[vn_name], proj_name):
                    cleaned_up = False

        # only forget the SI once everything above went through; shared
        # VN deletion here is best-effort (result intentionally ignored)
        if cleaned_up:
            for vn_name in svc_info.get_shared_vn_list():
                if vn_name in si_info.keys():
                    self._delete_shared_vn(si_info[vn_name], proj_name)
            self.db.service_instance_remove(si_fq_str)

    def _check_si_status(self, si_fq_name_str, si_info):
        try:
            si_obj = self._vnc_lib.service_instance_read(id=si_info['uuid'])
        except NoIdError:
            # cleanup service instance
            return 'DELETE'

        # check status only if service is active
        if si_info['state'] != 'active':
            return ''

        if si_info['instance_type'] == 'virtual-machine':
            proj_name = self._get_proj_name_from_si_fq_str(si_fq_name_str)
            status = self.vm_manager.check_service(si_obj, proj_name)
        elif si_info['instance_type'] == 'network-namespace':
            status = self.netns_manager.check_service(si_obj)
        elif si_info['instance_type'] == 'vrouter-instance':
            status = self.vrouter_manager.check_service(si_obj)

        return status 

    def _delmsg_virtual_machine_service_instance(self, idents):
        vm_fq_str = idents['virtual-machine']
        si_fq_str = idents['service-instance']
        self.db.remove_vm_info(si_fq_str, vm_fq_str)

    def _delmsg_virtual_machine_interface_virtual_network(self, idents):
        """On a VMI<->VN unlink, remove the VN if it is a shared service VN."""
        # the VMI ident is currently unused but kept so a missing key still
        # surfaces as KeyError, matching prior behavior
        vmi_fq_str = idents['virtual-machine-interface']
        vn_fq_str = idents['virtual-network']
        vn_fq_name = vn_fq_str.split(':')
        for shared_name in svc_info.get_shared_vn_list():
            if shared_name != vn_fq_name[2]:
                continue
            try:
                vn_id = self._vnc_lib.fq_name_to_id(
                    'virtual-network', vn_fq_name)
            except NoIdError:
                continue
            self._delete_shared_vn(vn_id, vn_fq_name[1])

    def _delmsg_service_instance_service_template(self, idents):
        self._cleanup_si(idents['service-instance'])

    def _delmsg_virtual_machine_interface_route_table(self, idents):
        rt_fq_str = idents['interface-route-table']

        try:
            rt_obj = self._vnc_lib.interface_route_table_read(
                fq_name_str=rt_fq_str)
        except NoIdError:
            return

        try:
            vmi_list = rt_obj.get_virtual_machine_interface_back_refs()
            if vmi_list is None:
                self._vnc_lib.interface_route_table_delete(id=rt_obj.uuid)
        except NoIdError:
            return

    def _addmsg_service_instance_service_template(self, idents):
        st_fq_str = idents['service-template']
        si_fq_str = idents['service-instance']

        try:
            st_obj = self._vnc_lib.service_template_read(
                fq_name_str=st_fq_str)
            si_obj = self._vnc_lib.service_instance_read(
                fq_name_str=si_fq_str)
        except NoIdError:
            self.logger.log("No template or service instance with ids: %s, %s"
                            % (st_fq_str, si_fq_str))
            return

        #launch VMs
        self._create_svc_instance(st_obj, si_obj)
    # end _addmsg_service_instance_service_template

    def _addmsg_service_instance_properties(self, idents):
        si_fq_str = idents['service-instance']

        try:
            si_obj = self._vnc_lib.service_instance_read(
                fq_name_str=si_fq_str)
        except NoIdError:
            return

        #update static routes
        self.vm_manager.update_static_routes(si_obj)

    def _addmsg_project_virtual_network(self, idents):
        """A VN was created: launch any SIs that were waiting on that VN."""
        vn_fq_str = idents['virtual-network']

        si_list = self.db.service_instance_list()
        if not si_list:
            return

        for si_fq_str, si_info in si_list:
            # only SIs whose stored config references this VN are candidates
            if vn_fq_str not in si_info.keys():
                continue

            try:
                si_obj = self._vnc_lib.service_instance_read(
                    fq_name_str=si_fq_str)
                # skip SIs that already have VMs launched
                if si_obj.get_virtual_machine_back_refs():
                    continue

                st_refs = si_obj.get_service_template_refs()
                fq_name = st_refs[0]['to']
                st_obj = self._vnc_lib.service_template_read(fq_name=fq_name)

                #launch VMs
                self._create_svc_instance(st_obj, si_obj)
            except Exception:
                # deliberate best-effort: a failure (missing refs, API
                # error) for one SI must not block launching the rest
                continue

    def _addmsg_floating_ip_virtual_machine_interface(self, idents):
        """Propagate a FIP association to all VMIs sharing the VIP's IIP.

        When a floating IP gets attached to a VMI that backs a virtual IP,
        attach the same FIP to every other VMI referenced by that VMI's
        first instance IP, then push a single floating-ip update.
        """
        fip_fq_str = idents['floating-ip']
        vmi_fq_str = idents['virtual-machine-interface']

        try:
            fip_obj = self._vnc_lib.floating_ip_read(
                fq_name_str=fip_fq_str)
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                fq_name_str=vmi_fq_str)
        except NoIdError:
            return

        # handle only if VIP back ref exists
        vip_back_refs = vmi_obj.get_virtual_ip_back_refs()
        if vip_back_refs is None:
            return

        # associate fip to all VMIs
        # NOTE(review): only the first instance-ip back-ref is considered;
        # also raises TypeError if iip_back_refs is None — confirm callers
        # guarantee at least one IIP here
        iip_back_refs = vmi_obj.get_instance_ip_back_refs()
        try:
            iip_obj = self._vnc_lib.instance_ip_read(
                id=iip_back_refs[0]['uuid'])
        except NoIdError:
            return

        fip_updated = False
        vmi_refs_iip = iip_obj.get_virtual_machine_interface_refs()
        vmi_refs_fip = fip_obj.get_virtual_machine_interface_refs()
        for vmi_ref_iip in vmi_refs_iip:
            # skip VMIs the FIP already points at
            if vmi_ref_iip in vmi_refs_fip:
                continue
            try:
                vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                    id=vmi_ref_iip['uuid'])
            except NoIdError:
                continue
            fip_obj.add_virtual_machine_interface(vmi_obj)
            fip_updated = True

        # single API update at the end batches all newly added refs
        if fip_updated:
            self._vnc_lib.floating_ip_update(fip_obj)


    def process_poll_result(self, poll_result_str):
        """Dispatch ifmap poll results to _addmsg_*/_delmsg_* handlers.

        Each meta tag is mapped to a handler method name by stripping its
        namespace and replacing '-' with '_'; missing handlers are ignored.
        """
        result_list = parse_poll_result(poll_result_str)

        # process ifmap message
        for (result_type, idents, metas) in result_list:
            if 'ERROR' in idents.values():
                continue
            for meta in metas:
                # strip the xml namespace prefix, e.g. '{ns}foo-bar' -> 'foo-bar'
                meta_name = re.sub('{.*}', '', meta.tag)
                if result_type == 'deleteResult':
                    funcname = "_delmsg_" + meta_name.replace('-', '_')
                elif result_type in ['searchResult', 'updateResult']:
                    funcname = "_addmsg_" + meta_name.replace('-', '_')
                else:
                    # BUG FIX: an unknown result type previously left
                    # `funcname` unbound (NameError) or reused the stale
                    # value from the previous iteration
                    continue
                try:
                    func = getattr(self, funcname)
                except AttributeError:
                    # no handler registered for this meta; ignore
                    pass
                else:
                    self.logger.log("%s with %s/%s"
                                     % (funcname, meta_name, idents))
                    func(idents)
class DatabaseExim(object):
    """Export contrail config from cassandra/zookeeper to a json file,
    or import such a json dump into an empty cluster."""

    def __init__(self, args_str):
        self._parse_args(args_str)

        # generous timeout: bulk import can hold the session for a while
        self._zookeeper = kazoo.client.KazooClient(
            self._api_args.zk_server_ip,
            timeout=400,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())
        self._zookeeper.start()
    # end __init__

    def init_cassandra(self, ks_cf_info=None):
        """Connect to cassandra with write access to the given keyspaces."""
        self._cassandra = VncCassandraClient(
            self._api_args.cassandra_server_list, self._api_args.cluster_id,
            rw_keyspaces=ks_cf_info, ro_keyspaces=None, logger=self.log,
            reset_config=False,
            ssl_enabled=self._api_args.cassandra_use_ssl,
            ca_certs=self._api_args.cassandra_ca_certs)
    # end init_cassandra

    def log(self, msg, level):
        """No-op logger handed to VncCassandraClient."""
        pass
    # end log

    def _parse_args(self, args_str):
        """Parse exim options; leftover args go to the api-conf parser.

        Raises:
            InvalidArguments: when both --import-from and --export-to are
                given at once.
        """
        parser = argparse.ArgumentParser()

        help="Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument(
            "--api-conf", help=help, default="/etc/contrail/contrail-api.conf")
        parser.add_argument(
            "--verbose", help="Run in verbose/INFO mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--debug", help="Run in debug mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--import-from", help="Import from this json file to database",
            metavar='FILE')
        parser.add_argument(
            "--export-to", help="Export from database to this json file",
            metavar='FILE')
        parser.add_argument(
            "--omit-keyspaces",
            nargs='*',
            help="List of keyspaces to omit in export/import",
            metavar='FILE')

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        if ((args_obj.import_from is not None) and
            (args_obj.export_to is not None)):
            raise InvalidArguments(
                'Both --import-from and --export-to cannot be specified %s' %(
                args_obj))
        self._args = args_obj

        # everything exim did not consume is handed to the api arg parser
        self._api_args = utils.parse_args('-c %s %s'
            %(self._args.api_conf, ' '.join(remaining_argv)))[0]
    # end _parse_args

    def db_import(self):
        """Seed cassandra and zookeeper from the json dump.

        Refuses to run when any target CF or zookeeper path already holds
        data, so an import never overwrites an existing cluster.

        Raises:
            CassandraNotEmptyError: target column families not empty.
            ZookeeperNotEmptyError: target zookeeper paths not empty.
        """
        # BUG FIX: use context managers; the old try/finally referenced
        # `f` in the finally clause even when gzip.open itself failed,
        # masking the real error with an UnboundLocalError
        if self._args.import_from.endswith('.gz'):
            with gzip.open(self._args.import_from, 'rb') as f:
                self.import_data = json.loads(f.read())
        else:
            with open(self._args.import_from, 'r') as f:
                self.import_data = json.loads(f.read())

        ks_cf_info = dict((ks, dict((c, {}) for c in cf.keys()))
            for ks,cf in self.import_data['cassandra'].items())
        self.init_cassandra(ks_cf_info)

        # refuse import if db already has data
        non_empty_errors = []
        for ks in self.import_data['cassandra'].keys():
            for cf in self.import_data['cassandra'][ks].keys():
                if len(list(self._cassandra.get_cf(cf).get_range(
                    column_count=0))) > 0:
                    non_empty_errors.append(
                        'Keyspace %s CF %s already has entries.' %(ks, cf))

        if non_empty_errors:
            raise CassandraNotEmptyError('\n'.join(non_empty_errors))

        non_empty_errors = []
        existing_zk_dirs = set(
            self._zookeeper.get_children(self._api_args.cluster_id+'/'))
        import_zk_dirs = set([p_v_ts[0].split('/')[1]
            for p_v_ts in json.loads(self.import_data['zookeeper'] or "[]")])

        for non_empty in ((existing_zk_dirs & import_zk_dirs) -
                          set(['zookeeper'])):
            non_empty_errors.append(
                'Zookeeper has entries at /%s.' %(non_empty))

        if non_empty_errors:
            raise ZookeeperNotEmptyError('\n'.join(non_empty_errors))

        # seed cassandra
        for ks_name in self.import_data['cassandra'].keys():
            for cf_name in self.import_data['cassandra'][ks_name].keys():
                cf = self._cassandra.get_cf(cf_name)
                for row,cols in self.import_data['cassandra'][ks_name][cf_name].items():
                    for col_name, col_val_ts in cols.items():
                        # col_val_ts is (value, timestamp); only the value
                        # is re-inserted, cassandra assigns a new timestamp
                        cf.insert(row, {col_name: col_val_ts[0]})
        # end seed cassandra

        # paths owned by other services; never re-created on import
        zk_ignore_list = ['consumers', 'config', 'controller',
                          'isr_change_notification', 'admin', 'brokers',
                          'zookeeper', 'controller_epoch',
                          'api-server-election', 'schema-transformer',
                          'device-manager', 'svc-monitor', 'contrail_cs',
                          'lockpath']
        # seed zookeeper
        # CONSISTENCY FIX: the dump stores zookeeper data as a json list
        # (see db_export), so the empty default is "[]" as above, not "{}"
        for path_value_ts in json.loads(self.import_data['zookeeper'] or "[]"):
            path = path_value_ts[0]
            if path.endswith('/'):
                path = path[:-1]
            if path.split('/')[1] in zk_ignore_list:
                continue
            value = path_value_ts[1][0]
            self._zookeeper.create(path, str(value), makepath=True)
    # end db_import

    def db_export(self):
        """Dump all non-omitted keyspaces and the zookeeper tree to json."""
        db_contents = {'cassandra': {},
                       'zookeeper': {}}

        cassandra_contents = db_contents['cassandra']
        for ks_name in (set(KEYSPACES) -
                        set(self._args.omit_keyspaces or [])):
            # keyspaces are prefixed with the cluster id when one is set
            if self._api_args.cluster_id:
                full_ks_name = '%s_%s' %(self._api_args.cluster_id, ks_name)
            else:
                full_ks_name = ks_name
            cassandra_contents[ks_name] = {}

            socket_factory = pycassa.connection.default_socket_factory
            if self._api_args.cassandra_use_ssl:
                socket_factory = pycassa.connection.make_ssl_socket_factory(
                    self._api_args.cassandra_ca_certs, validate=False)
            pool = pycassa.ConnectionPool(
                full_ks_name, self._api_args.cassandra_server_list,
                pool_timeout=120, max_retries=-1, timeout=5,
                socket_factory=socket_factory)

            creds = None
            if (self._api_args.cassandra_user and
                self._api_args.cassandra_password):
                creds = {'username': self._api_args.cassandra_user,
                         'password': self._api_args.cassandra_password}
            sys_mgr = SystemManager(self._api_args.cassandra_server_list[0],
                credentials=creds)
            for cf_name in sys_mgr.get_keyspace_column_families(full_ks_name):
                cassandra_contents[ks_name][cf_name] = {}
                cf = pycassa.ColumnFamily(pool, cf_name)
                for r,c in cf.get_range(column_count=10000000, include_timestamp=True):
                    cassandra_contents[ks_name][cf_name][r] = c

        def get_nodes(path):
            # depth-first walk; leaves carry their (value, stat) payload
            if not zk.get_children(path):
                return [(path, zk.get(path))]

            nodes = []
            for child in zk.get_children(path):
                nodes.extend(get_nodes('%s%s/' %(path, child)))

            return nodes

        zk = kazoo.client.KazooClient(self._api_args.zk_server_ip)
        zk.start()
        nodes = get_nodes(self._api_args.cluster_id+'/')
        zk.stop()
        db_contents['zookeeper'] = json.dumps(nodes)

        # with-statement replaces the manual try/finally close
        with open(self._args.export_to, 'w') as f:
            f.write(json.dumps(db_contents))
    # end db_export
class DeviceManager(object):
    """Device-manager daemon.

    Mirrors config objects relevant to physical devices (physical routers,
    interfaces, BGP routers, virtual networks, ...) from the API server's
    Cassandra DB into in-memory DM objects, then reacts to RabbitMQ change
    notifications so that device configuration can be re-evaluated and
    pushed.
    """

    # Drives DependencyTracker: for each object type, maps the type of the
    # object that changed ('self' = an object of this type itself) to the
    # list of related types whose objects must be re-evaluated.
    _REACTION_MAP = {
        'physical_router': {
            'self': ['bgp_router', 'physical_interface', 'logical_interface'],
            'bgp_router': [],
            'physical_interface': [],
            'logical_interface': [],
            'virtual_network': [],
            'global_system_config': [],
        },
        'global_system_config': {
            'self': ['physical_router'],
            'physical_router': [],
        },
        'bgp_router': {
            'self': ['bgp_router', 'physical_router'],
            'bgp_router': ['physical_router'],
            'physical_router': [],
        },
        'physical_interface': {
            'self': ['physical_router', 'logical_interface'],
            'physical_router': ['logical_interface'],
            'logical_interface': ['physical_router'],
        },
        'logical_interface': {
            'self': ['physical_router', 'physical_interface',
                     'virtual_machine_interface'],
            'physical_interface': ['virtual_machine_interface'],
            'virtual_machine_interface': ['physical_router',
                                          'physical_interface'],
            'physical_router': ['virtual_machine_interface']
        },
        'virtual_machine_interface': {
            'self': ['logical_interface', 'virtual_network', 'floating_ip', 'instance_ip'],
            'logical_interface': ['virtual_network'],
            'virtual_network': ['logical_interface'],
            'floating_ip': ['virtual_network'],
            'instance_ip': ['virtual_network'],
        },
        'virtual_network': {
            'self': ['physical_router', 'virtual_machine_interface'],
            'routing_instance': ['physical_router',
                                 'virtual_machine_interface'],
            'physical_router': [],
            'virtual_machine_interface': ['physical_router'],
        },
        'routing_instance': {
            'self': ['routing_instance', 'virtual_network'],
            'routing_instance': ['virtual_network'],
            'virtual_network': []
        },
        'floating_ip': {
            'self': ['virtual_machine_interface'],
            'virtual_machine_interface': [],
        },
        'instance_ip': {
            'self': ['virtual_machine_interface'],
            'virtual_machine_interface': [],
        },
    }

    def __init__(self, args=None):
        """Connect to discovery/sandesh/API-server/rabbit/cassandra, do a
        full resync of the config DB into DM memory, then block forever
        (notifications are serviced via the kombu callback on greenlets).

        NOTE: this constructor intentionally never returns.
        """
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])

        # Sandesh (introspect / logging) initialization
        self._sandesh = Sandesh()
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        # Set once the initial Cassandra resync below is complete;
        # _vnc_subscribe_callback waits on it before processing updates.
        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log)

        cass_server_list = self._args.cassandra_server_list
        cred = None
        if self._args.cassandra_user is not None and \
           self._args.cassandra_password is not None:
            cred={'username':self._args.cassandra_user,
                  'password':self._args.cassandra_password}
        self._cassandra = VncCassandraClient(cass_server_list,
                                             self._args.cluster_id,
                                             None,
                                             self.config_log,credential=cred)

        # Full resync: read each object list from Cassandra and locate()
        # (load + cache) the corresponding DM objects. Each section logs
        # and skips only itself if its read fails.
        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        ok, global_system_config_list = self._cassandra._cassandra_global_system_config_list()
        if not ok:
            self.config_log('global system config list returned error: %s' %
                            global_system_config_list)
        else:
            for fq_name, uuid in global_system_config_list:
                GlobalSystemConfigDM.locate(uuid)

        ok, global_vrouter_config_list = self._cassandra._cassandra_global_vrouter_config_list()
        if not ok:
            self.config_log('global vrouter config list returned error: %s' %
                            global_vrouter_config_list)
        else:
            for fq_name, uuid in global_vrouter_config_list:
                GlobalVRouterConfigDM.locate(uuid)

        ok, vn_list = self._cassandra._cassandra_virtual_network_list()
        if not ok:
            self.config_log('virtual network list returned error: %s' %
                            vn_list)
        else:
            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None and vn.routing_instances is not None:
                    for ri_id in vn.routing_instances:
                        ri_obj = RoutingInstanceDM.locate(ri_id)

        ok, bgp_list = self._cassandra._cassandra_bgp_router_list()
        if not ok:
            self.config_log('bgp router list returned error: %s' %
                            bgp_list)
        else:
            for fq_name, uuid in bgp_list:
                BgpRouterDM.locate(uuid)

        ok, pr_list = self._cassandra._cassandra_physical_router_list()
        if not ok:
            self.config_log('physical router list returned error: %s' %
                            pr_list)
        else:
            for fq_name, uuid in pr_list:
                pr = PhysicalRouterDM.locate(uuid)
                if pr is None:
                    # locate() may fail to read the object; skip it the
                    # way the virtual-network loop above does instead of
                    # crashing on pr.bgp_router below
                    continue
                if pr.bgp_router:
                    BgpRouterDM.locate(pr.bgp_router)
                # collect all logical interfaces hanging off this router,
                # both directly and via its physical interfaces
                li_set = pr.logical_interfaces
                for pi_id in pr.physical_interfaces:
                    pi = PhysicalInterfaceDM.locate(pi_id)
                    if pi:
                        li_set |= pi.logical_interfaces
                vmi_set = set()
                for li_id in li_set:
                    li = LogicalInterfaceDM.locate(li_id)
                    if li and li.virtual_machine_interface:
                        vmi_set |= set([li.virtual_machine_interface])
                for vmi_id in vmi_set:
                    vmi = VirtualMachineInterfaceDM.locate(vmi_id)

            # NOTE(review): everything below is nested inside this 'else'
            # and is therefore skipped entirely when the physical-router
            # list read fails, while the sibling sections above each skip
            # only themselves. This looks like unintended indentation --
            # confirm before dedenting.
            ok, ip_list = self._cassandra._cassandra_instance_ip_list()
            if not ok:
                self.config_log('instance ip list returned error: %s' %
                            ip_list)
            else:
                for fq_name, uuid in ip_list:
                    InstanceIpDM.locate(uuid)

            ok, fip_list = self._cassandra._cassandra_floating_ip_list()
            if not ok:
                self.config_log('floating ip list returned error: %s' %
                            fip_list)
            else:
                for fq_name, uuid in fip_list:
                    FloatingIpDM.locate(uuid)

            for fq_name, uuid in vn_list:
                vn = VirtualNetworkDM.locate(uuid)
                if vn is not None:
                    vn.update_instance_ip_map()

            for pr in PhysicalRouterDM.values():
                pr.set_config_state()
        # Unblock notification processing now that the resync is done.
        self._db_resync_done.set()
        while True:
            # Just wait indefinitely; all real work happens in the kombu
            # notification callback on greenlets
            time.sleep(5)
    # end __init__

    def connection_state_update(self, status, message=None):
        """Publish the API-server connection status to ConnectionState."""
        ConnectionState.update(
            conn_type=ConnectionType.APISERVER, name='ApiServer',
            status=status, message=message or '',
            server_addrs=['%s:%s' % (self._args.api_server_ip,
                                     self._args.api_server_port)])
    # end connection_state_update

    def config_log(self, msg, level):
        """Log *msg* at the given sandesh *level* via the sandesh logger."""
        self._sandesh.logger().log(SandeshLogger.get_py_logger_level(level),
                                   msg)

    def _vnc_subscribe_callback(self, oper_info):
        """Handle one rabbit notification (CREATE/UPDATE/DELETE of a config
        object): refresh the in-memory DM object, compute the set of
        affected resources via DependencyTracker, and re-push config for
        any affected virtual networks and physical routers.
        """
        # Do not process notifications until the initial resync is done.
        self._db_resync_done.wait()
        dependency_tracker = None
        try:
            msg = "Notification Message: %s" % (pformat(oper_info))
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)
            obj_type = oper_info['type'].replace('-', '_')
            obj_class = DBBaseDM.get_obj_type_map().get(obj_type)
            if obj_class is None:
                # object type DM does not track; nothing to do
                return

            if oper_info['oper'] == 'CREATE':
                obj_dict = oper_info['obj_dict']
                obj_id = obj_dict['uuid']
                obj = obj_class.locate(obj_id, obj_dict)
                dependency_tracker = DependencyTracker(
                    DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
            elif oper_info['oper'] == 'UPDATE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                old_dt = None
                if obj is not None:
                    # capture dependencies of the pre-update state so
                    # resources the object is leaving also get refreshed
                    old_dt = DependencyTracker(
                        DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
                    old_dt.evaluate(obj_type, obj)
                else:
                    obj = obj_class.locate(obj_id)
                obj.update()
                dependency_tracker = DependencyTracker(
                    DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                if old_dt:
                    # merge old and new dependency sets
                    for resource, ids in old_dt.resources.items():
                        if resource not in dependency_tracker.resources:
                            dependency_tracker.resources[resource] = ids
                        else:
                            dependency_tracker.resources[resource] = list(
                                set(dependency_tracker.resources[resource]) |
                                set(ids))
            elif oper_info['oper'] == 'DELETE':
                obj_id = oper_info['uuid']
                obj = obj_class.get(obj_id)
                if obj is None:
                    return
                # evaluate dependencies before removing the object
                dependency_tracker = DependencyTracker(
                    DBBaseDM.get_obj_type_map(), self._REACTION_MAP)
                dependency_tracker.evaluate(obj_type, obj)
                obj_class.delete(obj_id)
            else:
                # unknown operation
                self.config_log('Unknown operation %s' % oper_info['oper'],
                                level=SandeshLevel.SYS_ERR)
                return

            if obj is None:
                # BUG FIX: 'level' was missing here, so this call raised
                # TypeError which was silently swallowed by the except below
                self.config_log('Error while accessing %s uuid %s' % (
                                obj_type, obj_id), level=SandeshLevel.SYS_ERR)
                return

        except Exception:
            # log full traceback; notification processing must not die
            string_buf = cStringIO.StringIO()
            cgitb.Hook(file=string_buf, format="text").handle(sys.exc_info())
            self.config_log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR)

        if not dependency_tracker:
            return

        # refresh derived state on every affected VN and push config to
        # every affected physical router
        for vn_id in dependency_tracker.resources.get('virtual_network', []):
            vn = VirtualNetworkDM.get(vn_id)
            if vn is not None:
                vn.update_instance_ip_map()

        for pr_id in dependency_tracker.resources.get('physical_router', []):
            pr = PhysicalRouterDM.get(pr_id)
            if pr is not None:
                pr.set_config_state()
class DatabaseExim(object):
    def __init__(self, args_str):
        """Parse command-line args and connect to Cassandra and zookeeper.

        :param args_str: exim-tool argument string; unrecognized options are
            forwarded to the contrail-api argument parser.
        """
        self._parse_args(args_str)
        # Pass cluster_id as the second argument and reset_config/keyspaces
        # by keyword, matching the VncCassandraClient signature used
        # elsewhere in this file; the previous call passed False
        # (reset_config) where cluster_id belongs.
        self._cassandra = VncCassandraClient(
            self._api_args.cassandra_server_list,
            self._api_args.cluster_id,
            rw_keyspaces=None,
            ro_keyspaces=None,
            logger=self.log,
            reset_config=False)
        self._zookeeper = kazoo.client.KazooClient(
            self._api_args.zk_server_ip)
        self._zookeeper.start()
    # end __init__

    def log(self, msg, level):
        """Discard *msg*: this tool has no log sink, so the logger handed
        to VncCassandraClient is deliberately a no-op."""
    # end log

    def _parse_args(self, args_str):
        """Parse *args_str* into exim-tool options (stored in self._args);
        remaining options plus the api conf file are handed to the
        contrail-api argument parser (result stored in self._api_args).
        """
        parser = argparse.ArgumentParser()

        # renamed from 'help' to avoid shadowing the builtin
        conf_help = "Path to contrail-api conf file, default /etc/contrail-api.conf"
        parser.add_argument(
            "--api-conf", help=conf_help, default="/etc/contrail/contrail-api.conf")
        parser.add_argument(
            "--verbose", help="Run in verbose/INFO mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--debug", help="Run in debug mode, default False",
            action='store_true', default=False)
        parser.add_argument(
            "--import-from", help="Import from this json file to database",
            metavar='FILE', default='db.json')
        parser.add_argument(
            "--export-to", help="Export from database to this json file",
            metavar='FILE')

        args_obj, remaining_argv = parser.parse_known_args(args_str.split())
        self._args = args_obj

        # forward everything we did not consume to the api-server parser,
        # together with the conf file
        self._api_args = utils.parse_args('-c %s %s'
            %(self._args.api_conf, ' '.join(remaining_argv)))[0]
    # end _parse_args

    def db_import(self):
        """Import the JSON dump named by --import-from into Cassandra and
        zookeeper. Refuses to run when either store already holds data.

        Raises:
            CassandraNotEmptyError: obj_uuid/obj_fq_name tables have rows.
            ZookeeperNotEmptyError: zookeeper has non-system nodes.
        """
        if self._args.import_from.endswith('.gz'):
            # 'with' guarantees close on any error; the previous
            # try/finally raised NameError when gzip.open itself failed
            # because 'f' was never bound.
            with gzip.open(self._args.import_from, 'rb') as f:
                self.import_data = json.loads(f.read())
        else:
            with open(self._args.import_from, 'r') as f:
                self.import_data = json.loads(f.read())

        # refuse import if db already has data; any() stops at the first
        # row instead of materializing the whole range just to count it
        if any(self._cassandra.get_cf('obj_uuid_table').get_range(column_count=0)):
            raise CassandraNotEmptyError('obj_uuid_table has entries')
        if any(self._cassandra.get_cf('obj_fq_name_table').get_range(column_count=0)):
            raise CassandraNotEmptyError('obj_fq_name_table has entries')
        zk_nodes = self._zookeeper.get_children('/')
        zk_nodes.remove('zookeeper')
        if zk_nodes:
            raise ZookeeperNotEmptyError('Zookeeper has entries')

        # seed cassandra
        for cf_name in ['obj_fq_name_table', 'obj_uuid_table']:
            for row, column in self.import_data['cassandra'][cf_name].items():
                self._cassandra.add(cf_name, row, column)

        # seed zookeeper; entries are (path, (value, stat)) pairs, and
        # zookeeper's own /zookeeper subtree must not be recreated
        for path_value_ts in json.loads(self.import_data['zookeeper']):
            path = path_value_ts[0]
            if path.endswith('/'):
                path = path[:-1]
            if path.startswith('/zookeeper'):
                continue
            value = path_value_ts[1][0]
            self._zookeeper.create(path, str(value), makepath=True)

    # end db_import

    def db_export(self):
        pass