Example #1
    def start(self):
        # Connect to rabbitmq for config update notifications
        rabbitmq_qname = self._service_id
        self._vnc_amqp = VncAmqpHandle(self._sandesh, self._logger,
                                       self._db_cls, self._reaction_map,
                                       self._service_id, self._rabbitmq_cfg)
        self._vnc_amqp.establish()
        cassandra_credential = {
            'username': self._cassandra_cfg['user'],
            'password': self._cassandra_cfg['password']
        }
        if not all(cassandra_credential.values()):
            cassandra_credential = None
        try:
            self._vnc_db = VncObjectDBClient(self._cassandra_cfg['servers'],
                                             self._cassandra_cfg['cluster_id'],
                                             logger=self._logger.log,
                                             credential=cassandra_credential)
        except Exception as e:
            template = 'Exception {0} connecting to Config DB. Arguments:\n{1!r}'
            msg = template.format(type(e).__name__, e.args)
            self._logger.error('%s: %s' % (msg, traceback.format_exc()))
            exit()
        self._db_cls.init(self, self._logger, self._vnc_db)
        self._sync_config_db()
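The example above establishes a common startup order: open the AMQP channel before reading the config DB, so that no update published during the initial sync window is missed, and only pass Cassandra credentials when both user and password are set. Below is a minimal sketch of that sequence; the cfgm_common import paths follow contrail's source tree (verify against yours), and `owner` plus the *_cfg dicts are illustrative placeholders, not guaranteed config keys.

from cfgm_common.vnc_amqp import VncAmqpHandle
from cfgm_common.vnc_object_db import VncObjectDBClient

def start_config_client(owner, sandesh, logger, db_cls, reaction_map,
                        service_id, rabbitmq_cfg, cassandra_cfg):
    # Subscribe to config-update notifications first, then read the DB,
    # so nothing published during the sync window is lost.
    amqp = VncAmqpHandle(sandesh, logger, db_cls, reaction_map,
                         service_id, rabbitmq_cfg)
    amqp.establish()

    # Credentials are all-or-nothing: fall back to None if either is unset.
    credential = {'username': cassandra_cfg['user'],
                  'password': cassandra_cfg['password']}
    if not all(credential.values()):
        credential = None

    vnc_db = VncObjectDBClient(cassandra_cfg['servers'],
                               cassandra_cfg['cluster_id'],
                               logger=logger.log, credential=credential)
    db_cls.init(owner, logger, vnc_db)  # point the object cache at the DB
    return amqp, vnc_db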
Example #2
    def __init__(self, sm_logger=None, args=None):
        self._args = args
        self._args.analytics_api_ssl_params = analytics_api_ssl_params(args)
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        rabbitmq_cfg = get_rabbitmq_cfg(args)
        if 'host_ip' in self._args:
            host_ip = self._args.host_ip
        else:
            host_ip = socket.gethostbyname(socket.getfqdn())
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseSM, REACTION_MAP, 'svc_monitor',
                                    rabbitmq_cfg, host_ip,
                                    self._args.trace_file)
        self.rabbit.establish()
Example #3
    def __init__(self, args=None):
        self._args = args

        # initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.SVC_MONITOR])
        # initialize logger
        self.logger = ServiceMonitorLogger(self._disc, args)

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.warning("Failed to open trace file %s" %
                                    self._err_file)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseSM,
                REACTION_MAP, 'svc_monitor', args=self._args)
        self.rabbit.establish()
Example #4
    def __init__(self, sm_logger=None, args=None):
        self._args = args
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.warning("Failed to open trace file %s" %
                                self._err_file)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseSM,
                                    REACTION_MAP,
                                    'svc_monitor',
                                    args=self._args)
        self.rabbit.establish()
Example #5
class VncMesos(object):
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseMM,
                                    REACTION_MAP,
                                    'mesos_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
            except requests.exceptions.ConnectionError as e:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
            except Empty:
                gevent.sleep(0)
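The `_vnc_connect` method above simply retries every 3 seconds until VncApi stops raising ConnectionError or ResourceExhaustionError. The same retry-forever idea as a standalone sketch; the `connect` callable, the exception tuple, and the delay are illustrative defaults, not part of the original source.

import time

import requests

def retry_until_connected(connect,
                          retryable=(requests.exceptions.ConnectionError,),
                          delay=3):
    # Call `connect` until it succeeds, sleeping between attempts,
    # mirroring the loop in _vnc_connect above.
    while True:
        try:
            return connect()
        except retryable:
            time.sleep(delay)

# Hypothetical usage:
# vnc_lib = retry_until_connected(
#     lambda: VncApi(user, password, tenant, api_ip, api_port))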
Example #6
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseKM,
                                    REACTION_MAP,
                                    'kube_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            vnc_lib=self.vnc_lib,
            cluster_pod_subnets=self.args.pod_subnets)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.vnc_lib,
            self.label_cache, self.args, self.logger, self.kube)
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy',
            self.vnc_lib, self.label_cache, self.logger)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod',
            self.vnc_lib,
            self.label_cache,
            self.service_mgr,
            self.network_policy_mgr,
            self.q,
            svc_fip_pool=self._get_cluster_service_fip_pool())
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints', self.vnc_lib,
            self.logger, self.kube)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.args, self.vnc_lib,
            self.label_cache, self.logger, self.kube)
Example #7
def main(args_str=None, kube_api_skip=False, event_queue=None,
         vnc_kubernetes_config_dict=None):
    _zookeeper_client = None

    args = kube_args.parse_args(args_str)
    if 'kube_timer_interval' not in args:
        args.kube_timer_interval = '60'

    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    km_logger = logger.KubeManagerLogger(args, http_server_port=-1)

    if args.nested_mode == '0':
        # Initialize the AMQP handler, then close it to make sure any queue
        # left over from a previous run is cleaned up
        rabbitmq_cfg = kube_args.rabbitmq_args(args)
        try:
            vnc_amqp = VncAmqpHandle(km_logger._sandesh, km_logger, DBBaseKM,
                                     REACTION_MAP, 'kube_manager',
                                     rabbitmq_cfg)
            vnc_amqp.establish()
            vnc_amqp.close()
        except Exception:
            pass
        finally:
            km_logger.debug("Removed leftover AMQP queue")
 
        # Ensure zookeeper is up and running before starting kube-manager
        _zookeeper_client = ZookeeperClient(client_pfx+"kube-manager",
                                            args.zk_server_ip)

        km_logger.notice("Waiting to be elected as master...")
        _zookeeper_client.master_election(zk_path_pfx+"/kube-manager",
                                          os.getpid(), run_kube_manager,
                                          km_logger, args, kube_api_skip,
                                          event_queue, vnc_kubernetes_config_dict)

    else:  # nested mode, skip zookeeper mastership check
        run_kube_manager(km_logger, args, kube_api_skip, event_queue,
                         vnc_kubernetes_config_dict)
Example #8
    def __init__(self, sm_logger=None, args=None):
        self._args = args

        # initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            dss_kwargs = {}
            if self._args.disc_server_ssl:
                if self._args.disc_server_cert:
                    dss_kwargs.update({'cert': self._args.disc_server_cert})
                if self._args.disc_server_key:
                    dss_kwargs.update({'key': self._args.disc_server_key})
                if self._args.disc_server_cacert:
                    dss_kwargs.update(
                        {'cacert': self._args.disc_server_cacert})
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                ModuleNames[Module.SVC_MONITOR], **dss_kwargs)
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(self._disc, args)

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.warning("Failed to open trace file %s" %
                                self._err_file)

        # init cassandra
        self._cassandra = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._cassandra)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseSM,
                                    REACTION_MAP,
                                    'svc_monitor',
                                    args=self._args)
        self.rabbit.establish()
Example #9
    def __init__(self, args=None):
        self._args = args

        # initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.SVC_MONITOR])
        # initialize logger
        self.logger = ServiceMonitorLogger(self._disc, args)

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.warning("Failed to open trace file %s" %
                                    self._err_file)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseSM,
                REACTION_MAP, 'svc_monitor', args=self._args)
        self.rabbit.establish()
Example #10
    def start(self):
        # Connect to rabbitmq for config update notifications
        rabbitmq_qname = self._service_id
        self._vnc_amqp = VncAmqpHandle(self._sandesh, self._logger,
                                       self._db_cls, self._reaction_map,
                                       self._service_id, self._rabbitmq_cfg)
        self._vnc_amqp.establish()
        cassandra_credential = {
            'username': self._cassandra_cfg['user'],
            'password': self._cassandra_cfg['password']
        }
        if not all(cassandra_credential.values()):
            cassandra_credential = None
        self._vnc_db = VncObjectDBClient(self._cassandra_cfg['servers'],
                                         self._cassandra_cfg['cluster_id'],
                                         logger=self._logger.log,
                                         credential=cassandra_credential)
        self._db_cls.init(self, self._logger, self._vnc_db)
        self._sync_config_db()
Example #11
    def __init__(self, args=None, logger=None):
        self.args = args
        self.logger = logger

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseMM, REACTION_MAP,
                                    'mesos_manager', args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()
Example #12
    def __init__(self, sm_logger=None, args=None):
        self._args = args
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        rabbitmq_cfg = get_rabbitmq_cfg(args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseSM, REACTION_MAP, 'svc_monitor',
                                    rabbitmq_cfg, self._args.trace_file)
        self.rabbit.establish()
Example #13
def main(args_str=None):
    global _zookeeper_client

    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    args = parse_args(args_str)
    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    # Initialize logger without introspect thread
    sm_logger = ServiceMonitorLogger(args, http_server_port=-1)

    # Initialize the AMQP handler, then close it to make sure any queue
    # left over from a previous run is cleaned up
    rabbitmq_cfg = get_rabbitmq_cfg(args)
    try:
        vnc_amqp = VncAmqpHandle(sm_logger._sandesh, sm_logger, DBBaseSM,
                                 REACTION_MAP, 'svc_monitor', rabbitmq_cfg,
                                 args.trace_file)
        vnc_amqp.establish()
        vnc_amqp.close()
    except Exception:
        pass
    finally:
        sm_logger.debug("Removed leftover AMQP queue")

    # Waiting to be elected as master node
    _zookeeper_client = ZookeeperClient(
        client_pfx+"svc-monitor", args.zk_server_ip)
    sm_logger.notice("Waiting to be elected as master...")
    _zookeeper_client.master_election(zk_path_pfx+"/svc-monitor", os.getpid(),
                                      run_svc_monitor, sm_logger, args)
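Several of the main() functions on this page repeat the same establish-then-close step purely to purge a queue left over from a previous run. A hedged sketch of that pattern as a reusable helper; the argument order follows the seven-argument constructor used in this example, the broad except mirrors the originals, and VncAmqpHandle is imported as in the surrounding examples (cfgm_common.vnc_amqp).

def purge_stale_amqp_queue(sandesh, logger, db_cls, reaction_map,
                           service_name, rabbitmq_cfg, trace_file=None):
    # Open and immediately close the handle so the broker drops any
    # queue remaining from a previous run; failures are non-fatal here.
    try:
        vnc_amqp = VncAmqpHandle(sandesh, logger, db_cls, reaction_map,
                                 service_name, rabbitmq_cfg, trace_file)
        vnc_amqp.establish()
        vnc_amqp.close()
    except Exception:
        pass
    finally:
        logger.debug("Removed leftover AMQP queue")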
Example #14
    def test_close(self):
        vnc = VncAmqpHandle(*(7 * [mock.MagicMock()]))

        # Should not raise anything
        vnc.close()

        # Pretends call of establish()
        vnc._vnc_kombu = mock.MagicMock()
        vnc.close()
        vnc._vnc_kombu.shutdown.assert_called_once_with()
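The test above builds the handle from seven identical MagicMock positional arguments. Judging from the call sites elsewhere on this page (e.g. Examples #13 and #15), those seven slots line up roughly as sketched below; this mapping is an inference from those examples, not a documented signature.

import mock

from cfgm_common.vnc_amqp import VncAmqpHandle

sandesh, logger, db_cls, reaction_map = (mock.MagicMock() for _ in range(4))
vnc = VncAmqpHandle(sandesh, logger, db_cls, reaction_map,
                    'svc_monitor',     # service/queue name
                    mock.MagicMock(),  # rabbitmq config
                    mock.MagicMock())  # host ip or trace file, per version
vnc.close()  # as the test shows, close() before establish() must not raise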
Example #15
def main(args_str=None,
         kube_api_skip=False,
         event_queue=None,
         vnc_kubernetes_config_dict=None):
    _zookeeper_client = None

    args = kube_args.parse_args(args_str)

    client_pfx = ''
    zk_path_pfx = ''
    if args.cluster_id:
        client_pfx += args.cluster_id + '-'
        zk_path_pfx += args.cluster_id + '/'
    client_pfx += args.cluster_name + '-'
    zk_path_pfx += args.cluster_name + '/'

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    km_logger = common_logger.KubeManagerLogger(args, http_server_port=-1)

    if args.nested_mode == '0':
        # Initialize the AMQP handler, then close it to make sure any queue
        # left over from a previous run is cleaned up
        rabbitmq_cfg = kube_args.rabbitmq_args(args)
        try:
            vnc_amqp = VncAmqpHandle(km_logger._sandesh, km_logger,
                                     config_db.DBBaseKM,
                                     reaction_map.REACTION_MAP,
                                     client_pfx + 'kube_manager', rabbitmq_cfg,
                                     args.host_ip)
            vnc_amqp.establish()
            vnc_amqp.close()
        except Exception:  # FIXME: Except clause is too broad
            pass
        finally:
            km_logger.debug("Removed leftover AMQP queue")

        # Ensure zookeeper is up and running before starting kube-manager
        _zookeeper_client = ZookeeperClient(client_pfx + "kube-manager",
                                            args.zk_server_ip, args.host_ip)

        km_logger.notice("Waiting to be elected as master...")
        _zookeeper_client.master_election(zk_path_pfx + "kube-manager",
                                          os.getpid(), run_kube_manager,
                                          km_logger, args, kube_api_skip,
                                          event_queue,
                                          vnc_kubernetes_config_dict)

    else:  # nested mode, skip zookeeper mastership check
        run_kube_manager(km_logger, args, kube_api_skip, event_queue,
                         vnc_kubernetes_config_dict)
Example #16
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP, "kube_manager", args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.namespace_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_namespace.VncNamespace",
            vnc_lib=self.vnc_lib,
            cluster_pod_subnets=self.args.pod_subnets,
        )
        self.service_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_service.VncService", self.vnc_lib, self.label_cache, self.args, self.logger, self.kube
        )
        self.pod_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_pod.VncPod",
            self.vnc_lib,
            self.label_cache,
            self.service_mgr,
            svc_fip_pool=self._get_cluster_service_fip_pool(),
        )
        self.network_policy_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_network_policy.VncNetworkPolicy", self.vnc_lib, self.label_cache, self.logger
        )
        self.endpoints_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_endpoints.VncEndpoints", self.vnc_lib, self.label_cache
        )
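The manager objects here come from oslo.utils' importutils.import_object, which imports the dotted class path and instantiates it with the remaining arguments. A tiny self-contained illustration, using collections.OrderedDict as a stand-in for a real manager class:

from oslo_utils import importutils

# Equivalent to: from collections import OrderedDict; OrderedDict(a=1, b=2)
obj = importutils.import_object('collections.OrderedDict', a=1, b=2)
print(obj)  # e.g. OrderedDict([('a', 1), ('b', 2)])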
Example #17
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseMM, REACTION_MAP, "mesos_manager", args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()
Example #18
    def __init__(self, sm_logger=None, args=None):
        self._args = args
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        rabbitmq_cfg = get_rabbitmq_cfg(args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                DBBaseSM, REACTION_MAP, 'svc_monitor', rabbitmq_cfg,
                self._args.trace_file)
        self.rabbit.establish()
Example #19
def main(args_str=None):
    global _zookeeper_client

    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    args = parse_args(args_str)
    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # initialize discovery client
    discovery_client = None
    if args.disc_server_ip and args.disc_server_port:
        dss_kwargs = {}
        if args.disc_server_ssl:
            if args.disc_server_cert:
                dss_kwargs.update({'cert': args.disc_server_cert})
            if args.disc_server_key:
                dss_kwargs.update({'key': args.disc_server_key})
            if args.disc_server_cacert:
                dss_kwargs.update({'cacert': args.disc_server_cacert})
        discovery_client = client.DiscoveryClient(
            args.disc_server_ip, args.disc_server_port,
            ModuleNames[Module.SVC_MONITOR], **dss_kwargs)
    # Initialize logger
    sm_logger = ServiceMonitorLogger(discovery_client,
                                     args,
                                     http_server_port=-1)

    # Initialize the AMQP handler, then close it to make sure any queue
    # left over from a previous run is cleaned up
    vnc_amqp = VncAmqpHandle(sm_logger,
                             DBBaseSM,
                             REACTION_MAP,
                             'svc_monitor',
                             args=args)
    vnc_amqp.establish()
    vnc_amqp.close()
    sm_logger.debug("Removed leftover AMQP queue")

    # Waiting to be elected as master node
    _zookeeper_client = ZookeeperClient(client_pfx + "svc-monitor",
                                        args.zk_server_ip)
    sm_logger.notice("Waiting to be elected as master...")
    _zookeeper_client.master_election(zk_path_pfx + "/svc-monitor",
                                      os.getpid(), run_svc_monitor, sm_logger,
                                      args)
Example #20
def main(args_str=None, kube_api_skip=False, event_queue=None):
    _zookeeper_client = None

    args = kube_args.parse_args(args_str)
    if 'kube_timer_interval' not in args:
        args.kube_timer_interval = '60'

    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    km_logger = logger.KubeManagerLogger(args)

    if args.nested_mode == '0':
        # Initialize the AMQP handler, then close it to make sure any queue
        # left over from a previous run is cleaned up
        rabbitmq_cfg = kube_args.rabbitmq_args(args)
        try:
            vnc_amqp = VncAmqpHandle(km_logger._sandesh, km_logger, DBBaseKM,
                                     REACTION_MAP, 'kube_manager',
                                     rabbitmq_cfg)
            vnc_amqp.establish()
            vnc_amqp.close()
        except Exception:
            pass
        finally:
            km_logger.debug("Removed leftover AMQP queue")

        # Ensure zookeeper is up and running before starting kube-manager
        _zookeeper_client = ZookeeperClient(client_pfx + "kube-manager",
                                            args.zk_server_ip)

        km_logger.notice("Waiting to be elected as master...")
        _zookeeper_client.master_election(zk_path_pfx + "/kube-manager",
                                          os.getpid(), run_kube_manager,
                                          km_logger, args, kube_api_skip,
                                          event_queue)
        km_logger.notice("Elected master kube-manager node. Initializing...")

    else:  # nested mode, skip zookeeper mastership check
        run_kube_manager(km_logger, args, kube_api_skip, event_queue)
Example #21
    def __init__(self, args=None, logger=None):
        self.args = args
        self.logger = logger

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        rabbitmq_cfg = mesos_args.rabbitmq_args(self.args)
        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseMM,
                                    REACTION_MAP, 'mesos_manager', rabbitmq_cfg)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

        VncService._vnc_mesos = self
Example #22
def main(args_str=None):
    global _zookeeper_client

    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    args = parse_args(args_str)
    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'
    else:
        client_pfx = ''
        zk_path_pfx = ''

    # randomize collector list
    args.random_collectors = args.collectors
    if args.collectors:
        args.random_collectors = random.sample(args.collectors,
                                               len(args.collectors))

    # Initialize logger without introspect thread
    sm_logger = ServiceMonitorLogger(args, http_server_port=-1)

    # Initialize the AMQP handler, then close it to make sure any queue
    # left over from a previous run is cleaned up
    try:
        vnc_amqp = VncAmqpHandle(sm_logger,
                                 DBBaseSM,
                                 REACTION_MAP,
                                 'svc_monitor',
                                 args=args)
        vnc_amqp.establish()
        vnc_amqp.close()
    except Exception:
        pass
    finally:
        sm_logger.debug("Removed leftover AMQP queue")

    # Waiting to be elected as master node
    _zookeeper_client = ZookeeperClient(client_pfx + "svc-monitor",
                                        args.zk_server_ip)
    sm_logger.notice("Waiting to be elected as master...")
    _zookeeper_client.master_election(zk_path_pfx + "/svc-monitor",
                                      os.getpid(), run_svc_monitor, sm_logger,
                                      args)
Example #23
class SvcMonitor(object):
    def __init__(self, sm_logger=None, args=None):
        self._args = args
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        rabbitmq_cfg = get_rabbitmq_cfg(args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseSM, REACTION_MAP, 'svc_monitor',
                                    rabbitmq_cfg, self._args.trace_file)
        self.rabbit.establish()

    def post_init(self, vnc_lib, args=None):
        # api server
        self._vnc_lib = vnc_lib

        try:
            self._nova_client = importutils.import_object(
                'svc_monitor.nova_client.ServiceMonitorNovaClient', self._args,
                self.logger)
        except Exception as e:
            self._nova_client = None

        # agent manager
        self._agent_manager = AgentManager()

        # load vrouter scheduler
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver, self._vnc_lib,
            self._nova_client, None, self.logger, self._args)

        # load virtual machine instance manager
        self.vm_manager = importutils.import_object(
            'svc_monitor.virtual_machine_manager.VirtualMachineManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load network namespace instance manager
        self.netns_manager = importutils.import_object(
            'svc_monitor.instance_manager.NetworkNamespaceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load a vrouter instance manager
        self.vrouter_manager = importutils.import_object(
            'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load PNF instance manager
        self.ps_manager = importutils.import_object(
            'svc_monitor.physical_service_manager.PhysicalServiceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load a loadbalancer agent
        self.loadbalancer_agent = LoadbalancerAgent(self, self._vnc_lib,
                                                    self._object_db,
                                                    self._args)
        self._agent_manager.register_agent(self.loadbalancer_agent)

        # load a snat agent
        self.snat_agent = SNATAgent(self, self._vnc_lib, self._object_db,
                                    self._args,
                                    ServiceMonitorModuleLogger(self.logger))
        self._agent_manager.register_agent(self.snat_agent)

        # load port tuple agent
        self.port_tuple_agent = PortTupleAgent(
            self, self._vnc_lib, self._object_db, self._args,
            ServiceMonitorModuleLogger(self.logger))
        self._agent_manager.register_agent(self.port_tuple_agent)

        # Read the object_db and populate the entry in ServiceMonitor DB
        self.sync_sm()

        # create default analyzer template
        self._create_default_template('analyzer-template',
                                      'analyzer',
                                      flavor='m1.medium',
                                      image_name='analyzer')
        # create default NAT template
        self._create_default_template('nat-template',
                                      'firewall',
                                      svc_mode='in-network-nat',
                                      image_name='analyzer',
                                      flavor='m1.medium')
        # create default netns SNAT template
        self._create_default_template('netns-snat-template',
                                      'source-nat',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        # create default loadbalancer template
        self._create_default_template('haproxy-loadbalancer-template',
                                      'loadbalancer',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        self._create_default_template('docker-template',
                                      'firewall',
                                      svc_mode='transparent',
                                      image_name="ubuntu",
                                      hypervisor_type='vrouter-instance',
                                      vrouter_instance_type='docker',
                                      instance_data={"command": "/bin/bash"})

        # upgrade handling
        self.upgrade()

        # check services
        self.vrouter_scheduler.vrouters_running()
        self.launch_services()

        self.rabbit._db_resync_done.set()

    def _upgrade_instance_ip(self, vm):
        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceSM.get(vmi_id)
            if not vmi:
                continue

            for iip_id in vmi.instance_ips:
                iip = InstanceIpSM.get(iip_id)
                if not iip or iip.service_instance_ip:
                    continue
                iip_obj = InstanceIp()
                iip_obj.name = iip.name
                iip_obj.uuid = iip.uuid
                iip_obj.set_service_instance_ip(True)
                try:
                    self._vnc_lib.instance_ip_update(iip_obj)
                except NoIdError:
                    self.logger.error(
                        "upgrade instance ip to service ip failed %s" %
                        (iip.name))
                    continue

    def _upgrade_auto_policy(self, si, st):
        if st.name != 'netns-snat-template':
            return
        if not si.params['auto_policy']:
            return

        si_obj = ServiceInstance()
        si_obj.uuid = si.uuid
        si_obj.fq_name = si.fq_name
        si_props = ServiceInstanceType(**si.params)
        si_props.set_auto_policy(False)
        si_obj.set_service_instance_properties(si_props)
        try:
            self._vnc_lib.service_instance_update(si_obj)
            self.logger.notice("snat policy upgraded for %s" % (si.name))
        except NoIdError:
            self.logger.error("snat policy upgrade failed for %s" % (si.name))
            return

    def upgrade(self):
        for lr in LogicalRouterSM.values():
            self.snat_agent.upgrade(lr)

        for si in ServiceInstanceSM.values():
            st = ServiceTemplateSM.get(si.service_template)
            if not st:
                continue

            self._upgrade_auto_policy(si, st)

            vm_id_list = list(si.virtual_machines)
            for vm_id in vm_id_list:
                vm = VirtualMachineSM.get(vm_id)
                if not vm:
                    continue
                self._upgrade_instance_ip(vm)
                if vm.virtualization_type:
                    continue

                try:
                    nova_vm = self._nova_client.oper('servers',
                                                     'get',
                                                     si.proj_name,
                                                     id=vm_id)
                except nc_exc.NotFound:
                    nova_vm = None

                if nova_vm:
                    vm_name = nova_vm.name
                    vm.proj_fq_name = nova_vm.name.split('__')[0:2]
                else:
                    vm_name = vm.name

                if not vm_name.split('__')[-1].isdigit():
                    continue

                vm.virtualization_type = st.virtualization_type
                self.delete_service_instance(vm)

    def launch_services(self):
        for si in ServiceInstanceSM.values():
            self.create_service_instance(si)

    def sync_sm(self):
        # Read and Sync all DBase
        for cls in DBBaseSM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

        # Link SI and VM
        for vm in VirtualMachineSM.values():
            if vm.service_instance:
                continue
            for vmi_id in vm.virtual_machine_interfaces:
                vmi = VirtualMachineInterfaceSM.get(vmi_id)
                if not vmi:
                    continue
                self.port_delete_or_si_link(vm, vmi)

        # invoke port tuple handling
        try:
            self.port_tuple_agent.update_port_tuples()
        except Exception:
            cgitb_error_log(self)

        # Load the loadbalancer driver
        self.loadbalancer_agent.load_drivers()

        # Invoke the health monitors
        for hm in HealthMonitorSM.values():
            hm.sync()

        # Invoke the loadbalancers
        for lb in LoadbalancerSM.values():
            lb.sync()

        # Invoke the loadbalancer listeners
        for lb_listener in LoadbalancerListenerSM.values():
            lb_listener.sync()

        # Invoke the loadbalancer pools
        for lb_pool in LoadbalancerPoolSM.values():
            lb_pool.sync()

        # Audit the lb pools
        self.loadbalancer_agent.audit_lb_pools()

        # Audit the SNAT instances
        self.snat_agent.audit_snat_instances()

    # end sync_sm

    # create service template
    def _create_default_template(self,
                                 st_name,
                                 svc_type,
                                 svc_mode=None,
                                 hypervisor_type='virtual-machine',
                                 image_name=None,
                                 flavor=None,
                                 scaling=False,
                                 vrouter_instance_type=None,
                                 instance_data=None):
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self.logger.info("Creating %s %s hypervisor %s" %
                         (domain_name, st_name, hypervisor_type))

        domain_obj = None
        for domain in DomainSM.values():
            if domain.fq_name == domain_fq_name:
                domain_obj = Domain()
                domain_obj.uuid = domain.uuid
                domain_obj.fq_name = domain_fq_name
                break
        if not domain_obj:
            self.logger.error("%s domain not found" % (domain_name))
            return

        for st in ServiceTemplateSM.values():
            if st.fq_name == st_fq_name:
                self.logger.info("%s exists uuid %s" % (st.name, str(st.uuid)))
                return

        svc_properties = ServiceTemplateType()
        svc_properties.set_service_type(svc_type)
        svc_properties.set_service_mode(svc_mode)
        svc_properties.set_service_virtualization_type(hypervisor_type)
        svc_properties.set_image_name(image_name)
        svc_properties.set_flavor(flavor)
        svc_properties.set_ordered_interfaces(True)
        svc_properties.set_service_scaling(scaling)

        # set interface list
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        elif hypervisor_type == 'network-namespace':
            if_list = [['right', True], ['left', True]]
        else:
            if_list = [['management', False], ['left', False],
                       ['right', False]]

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        if vrouter_instance_type is not None:
            svc_properties.set_vrouter_instance_type(vrouter_instance_type)

        if instance_data is not None:
            svc_properties.set_instance_data(
                json.dumps(instance_data, separators=(',', ':')))

        st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
        st_obj.set_service_template_properties(svc_properties)
        try:
            st_uuid = self._vnc_lib.service_template_create(st_obj)
        except Exception as e:
            self.logger.error("%s create failed with error %s" %
                              (st_name, str(e)))
            return

        # Create the service template in local db
        ServiceTemplateSM.locate(st_uuid)

        self.logger.info("%s created with uuid %s" % (st_name, str(st_uuid)))

    #_create_default_analyzer_template

    def port_delete_or_si_link(self, vm, vmi):
        if vmi.port_tuples:
            return
        if vmi.service_instances and vmi.virtual_machine is None:
            self.vm_manager.cleanup_svc_vm_ports([vmi.uuid])
            return

        if not vm or vm.service_instance:
            return
        if not vmi.if_type:
            return

        if len(vmi.name.split('__')) < 4:
            return
        si_fq_name = vmi.name.split('__')[0:3]
        index = int(vmi.name.split('__')[3]) - 1
        for si in ServiceInstanceSM.values():
            if si.fq_name != si_fq_name:
                continue
            st = ServiceTemplateSM.get(si.service_template)
            self.vm_manager.link_si_to_vm(si, st, index, vm.uuid)
            return

    def create_service_instance(self, si):
        if si.state == 'active':
            return
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            self.logger.error("template not found for %s" %
                              ((':').join(si.fq_name)))
            return
        if st.params and st.params.get('version', 1) == 2:
            return

        self.logger.info("Creating SI %s (%s)" %
                         ((':').join(si.fq_name), st.virtualization_type))
        try:
            if st.virtualization_type == 'virtual-machine':
                self.vm_manager.create_service(st, si)
            elif st.virtualization_type == 'network-namespace':
                self.netns_manager.create_service(st, si)
            elif st.virtualization_type == 'vrouter-instance':
                self.vrouter_manager.create_service(st, si)
            elif st.virtualization_type == 'physical-device':
                self.ps_manager.create_service(st, si)
            else:
                self.logger.error("Unknown virt type: %s" %
                                  st.virtualization_type)
        except Exception:
            cgitb_error_log(self)
        si.launch_count += 1
        self.logger.info("SI %s creation success" % (':').join(si.fq_name))

    def delete_service_instance(self, vm):
        self.logger.info("Deleting VM %s %s for SI %s" %
                         ((':').join(vm.fq_name), vm.uuid, vm.service_id))

        try:
            if vm.virtualization_type == svc_info.get_vm_instance_type():
                self.vm_manager.delete_service(vm)
            elif vm.virtualization_type == svc_info.get_netns_instance_type():
                self.netns_manager.delete_service(vm)
            elif vm.virtualization_type == 'vrouter-instance':
                self.vrouter_manager.delete_service(vm)
            elif vm.virtualization_type == 'physical-device':
                self.ps_manager.delete_service(vm)
            self.logger.info("Deleted VM %s %s for SI %s" %
                             ((':').join(vm.fq_name), vm.uuid, vm.service_id))
        except Exception:
            cgitb_error_log(self)

        # generate UVE
        si_fq_name = vm.display_name.split('__')[:-2]
        si_fq_str = (':').join(si_fq_name)
        self.logger.uve_svc_instance(si_fq_str,
                                     status='DELETE',
                                     vms=[{
                                         'uuid': vm.uuid
                                     }])
        return True

    def _relaunch_service_instance(self, si):
        si.state = 'relaunch'
        self.create_service_instance(si)

    def _check_service_running(self, si):
        # Initialize so unknown virtualization types return None below
        status = None
        st = ServiceTemplateSM.get(si.service_template)
        if st.params and st.params.get('version', 1) == 2:
            return
        if st.virtualization_type == 'virtual-machine':
            status = self.vm_manager.check_service(si)
        elif st.virtualization_type == 'network-namespace':
            status = self.netns_manager.check_service(si)
        elif st.virtualization_type == 'vrouter-instance':
            status = self.vrouter_manager.check_service(si)
        elif st.virtualization_type == 'physical-device':
            status = self.ps_manager.check_service(si)
        return status

    def delete_interface_route_table(self, irt_uuid):
        try:
            self._vnc_lib.interface_route_table_delete(id=irt_uuid)
            InterfaceRouteTableSM.delete(irt_uuid)
        except (NoIdError, RefsExistError):
            return

    def _delete_shared_vn(self, vn_uuid):
        try:
            self.logger.info("Deleting vn %s" % (vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
            VirtualNetworkSM.delete(vn_uuid)
        except (NoIdError, RefsExistError):
            pass

    @staticmethod
    def reset():
        for cls in DBBaseSM.get_obj_type_map().values():
            cls.reset()

    def sighup_handler(self):
        if self._conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read(self._conf_file)
            if 'DEFAULTS' in config.sections():
                try:
                    collectors = config.get('DEFAULTS', 'collectors')
                    if type(collectors) is str:
                        collectors = collectors.split()
                        new_chksum = hashlib.md5(
                            "".join(collectors)).hexdigest()
                        if new_chksum != self._chksum:
                            self._chksum = new_chksum
                            config.random_collectors = random.sample(
                                collectors, len(collectors))
                        # Reconnect to achieve load-balance irrespective of list
                        self.logger.sandesh_reconfig_collectors(config)
                except ConfigParser.NoOptionError as e:
                    pass
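The sighup_handler above reshuffles the collector list only when it actually changed, comparing an md5 checksum of the concatenated entries. Here is the change-detection core as a standalone sketch; note that Python 3 needs the explicit encode that the Python 2 original omits.

import hashlib
import random

def reshuffle_if_changed(collectors, old_chksum):
    # Return (new_chksum, shuffled list or None); shuffle only on change.
    new_chksum = hashlib.md5("".join(collectors).encode('utf-8')).hexdigest()
    if new_chksum == old_chksum:
        return old_chksum, None
    return new_chksum, random.sample(collectors, len(collectors))

chksum, shuffled = reshuffle_if_changed(['10.0.0.1:8086', '10.0.0.2:8086'], None)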
Example #24
class VncKubernetes(VncCommon):

    _vnc_kubernetes = None

    def __init__(self, args=None, logger=None, q=None, kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
            vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

        #
        # In nested mode, kube-manager connects to contrail components running
        # in underlay via global link local services. TCP flows established on
        # link local services will be torn down by vrouter, if there is no
        # activity for configured(or default) timeout. So disable flow timeout
        # on these connections, so these flows will persist.
        #
        # Note: The way to disable flow timeout is to set timeout to max
        #       possible value.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
                    "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

            if self.args.vnc_endpoint_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.vnc_endpoint_port, 2147483647)

            for collector in self.args.collectors:
                collector_port = collector.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
                    "tcp", collector_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseKM,
            REACTION_MAP, 'kube_manager', rabbitmq_cfg, self.args.host_ip)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
        XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=self._get_cluster_service_ipam_fq_name(),
                cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.network_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network.VncNetwork')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()

    def connection_state_update(self, status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER, name='ApiServer',
            status=status, message=message or '',
            server_addrs=['%s:%s' % (self.args.vnc_endpoint_ip,
                                     self.args.vnc_endpoint_port)])
    # end connection_state_update

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = self.args.vnc_endpoint_ip.split(',')
        while not connected:
            try:
                vnc_lib = VncApi(self.args.auth_user,
                    self.args.auth_password, self.args.auth_tenant,
                    api_server_list, self.args.vnc_endpoint_port,
                    auth_token_url=self.args.auth_token_url)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            vn_obj.add_network_policy(policy,
                VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self.vnc_lib.virtual_network_update(vn_obj)
        for policy in policies or []:
            self.vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj, src_np_obj=None):
        if src_vn_obj:
            src_addresses = [
                AddressType(virtual_network=src_vn_obj.get_fq_name_str())
            ]
        else:
            src_addresses = [
                AddressType(network_policy=src_np_obj.get_fq_name_str())
            ]
        return PolicyRuleType(
            direction='<>',
            action_list=ActionListType(simple_action='pass'),
            protocol='any',
            src_addresses=src_addresses,
            src_ports=[PortType(-1, -1)],
            dst_addresses=[
                AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
            ],
            dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name, proj_obj, *vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        total_vn = len(vn_obj)
        for i in range(0, total_vn):
            for j in range(i+1, total_vn):
                policy_entry = self._create_policy_entry(vn_obj[i], vn_obj[j])
                network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_np_vn_policy(self, policy_name, proj_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(None, dst_vn_obj, policy)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
            pod_vn_obj, service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(policy_name,
            proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj,
            ip_fabric_policy, cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
            cluster_service_network_policy, cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(policy_name,
                                         proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(
                fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnets, proj_obj,
            type='flat-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        if not ipam_subnets:
            self.logger.error("%s - %s subnet is empty for %s" %
                              (self._name, ipam_name, subnets))

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        ipam_update = False
        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
            ipam_update = True
        except RefsExistError:
            curr_ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = curr_ipam_obj.get_uuid()
            if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
                self.vnc_lib.network_ipam_update(ipam_obj)
                ipam_update = True

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_update, ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    if subnet:
                        # Subnet is specified.
                        # Validate that we are able to match the subnet as well.
                        if ipam_ref['attr'].ipam_subnets and \
                                subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
                            return True
                    else:
                        # Subnet is not specified.
                        # So ipam-fq-name match will suffice.
                        return True
        return False

    def _allocate_fabric_snat_port_translation_pools(self):
        global_vrouter_fq_name = \
            ['default-global-system-config', 'default-global-vrouter-config']
        count = 0
        while True:
            try:
                global_vrouter_obj = \
                    self.vnc_lib.global_vrouter_config_read(
                        fq_name=global_vrouter_fq_name)
                break
            except NoIdError:
                if count == 20:
                    return
                time.sleep(3)
                count += 1
        snat_port_range = PortType(start_port=56000, end_port=57023)
        port_pool_tcp = PortTranslationPool(
            protocol="tcp", port_count='1024', port_range=snat_port_range)
        snat_port_range = PortType(start_port=57024, end_port=58047)
        port_pool_udp = PortTranslationPool(
            protocol="udp", port_count='1024', port_range=snat_port_range)
        port_pools = PortTranslationPools([port_pool_tcp, port_pool_udp])
        global_vrouter_obj.set_port_translation_pools(port_pools)
        try:
            self.vnc_lib.global_vrouter_config_update(global_vrouter_obj)
        except NoIdError:
            pass

    def _provision_cluster(self):
        # Pre creating default project before namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre creating kube-system project before namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                ip_fabric_ipam_obj, ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
            cluster_pod_vn_obj, cluster_service_vn_obj, cluster_vn_obj)

    def _create_network(self, vn_name, vn_type, proj_obj,
            ipam_obj, ipam_update, provider=None):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name, parent_obj=proj_obj,
                 address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Attach IPAM to virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if ipam_update or \
           not self._is_ipam_exists(vn_obj, ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(
             VirtualNetworkType(forwarding_mode='l3'))

        fabric_snat = False
        if vn_type == 'pod-network':
            fabric_snat = True

        if not vn_exists:
            if self.args.ip_fabric_forwarding:
                if provider:
                    #enable ip_fabric_forwarding
                    vn_obj.add_virtual_network(provider)
            elif fabric_snat and self.args.ip_fabric_snat:
                #enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                #disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)
        else:
            self.vnc_lib.virtual_network_update(vn_obj)

        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())
        VirtualNetworkKM.locate(vn_obj.uuid)

        return vn_obj

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def _get_cluster_service_ipam_fq_name(self):
        return self._cluster_service_ipam_fq_name

    def _get_cluster_ip_fabric_ipam_fq_name(self):
        return self._cluster_ip_fabric_ipam_fq_name

    def vnc_timer(self):
        try:
            self.network_policy_mgr.network_policy_timer()
            self.ingress_mgr.ingress_timer()
            self.service_mgr.service_timer()
            self.pod_mgr.pod_timer()
            self.namespace_mgr.namespace_timer()
        except Exception:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self.logger.error("vnc_timer: %s - %s" % (self._name, err_msg))

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                elif kind == 'NetworkAttachmentDefinition':
                    self.network_mgr.process(event)
                else:
                    print("%s - Event %s %s %s:%s:%s not handled"
                        %(self._name, event_type, kind, namespace, name, uid))
                    self.logger.error("%s - Event %s %s %s:%s:%s not handled"
                        %(self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" % (self._name, err_msg))

    @classmethod
    def get_instance(cls):
        return VncKubernetes._vnc_kubernetes

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in DBBaseKM.get_obj_type_map().values():
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None

    @classmethod
    def create_tags(cls, type, value):
        if cls._vnc_kubernetes:
            cls.get_instance().tags_mgr.create(type, value)

    @classmethod
    def delete_tags(cls, type, value):
        if cls._vnc_kubernetes:
            cls.get_instance().tags_mgr.delete(type, value)

    @classmethod
    def get_tags(cls, kv_dict, create=False):
        if cls._vnc_kubernetes:
            return cls.get_instance().tags_mgr.get_tags_fq_name(kv_dict, create)
        return None
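
The example above only defines the manager; the surrounding kube-manager process is what drives it. A minimal sketch of that wiring, assuming VncKubernetes is importable and that args, logger and kube_client are pre-built objects (all names here are illustrative, not taken from the example):

import gevent
from gevent.queue import Queue

def run_vnc_kubernetes(args, logger, kube_client):
    # Watch events from the Kubernetes monitors are expected on this queue.
    q = Queue()
    vnc = VncKubernetes(args=args, logger=logger, q=q, kube=kube_client)

    def timer_loop():
        # vnc_timer() is the periodic reconciliation hook; a 60s cadence
        # is an assumption for this sketch.
        while True:
            gevent.sleep(60)
            vnc.vnc_timer()

    # vnc_process() blocks, draining the event queue forever.
    gevent.joinall([gevent.spawn(vnc.vnc_process), gevent.spawn(timer_loop)])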
Example #25
0
    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        #
        # In nested mode, kube-manager connects to contrail components running
        # in underlay via global link local services. TCP flows established on
        # link local services will be torn down by vrouter, if there is no
        # activity for configured(or default) timeout. So disable flow timeout
        # on these connections, so these flows will persist.
        #
        # Note: The way to disable flow timeout is to set timeout to max
        #       possible value.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

            if self.args.vnc_endpoint_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                    2147483647)

            for collector in self.args.collectors:
                collector_port = collector.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", collector_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(
            self.logger._sandesh, self.logger, DBBaseKM,
            reaction_map.REACTION_MAP, self.args.cluster_id + '-' +
            self.args.cluster_name + '-kube_manager', rabbitmq_cfg,
            self.args.host_ip)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        label_cache.XLabelCache.register_label_add_callback(
            VncKubernetes.create_tags)
        label_cache.XLabelCache.register_label_delete_callback(
            VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=self._get_cluster_service_ipam_fq_name(),
                cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.network_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network.VncNetwork')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()
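
Note the consumer identity passed to VncAmqpHandle in this variant: it is derived from the cluster rather than being a fixed 'kube_manager' string, so multiple clusters can share one RabbitMQ. A small illustration with assumed config values:

cluster_id, cluster_name = 'k8s1', 'dev'   # hypothetical config values
service_name = cluster_id + '-' + cluster_name + '-kube_manager'
assert service_name == 'k8s1-dev-kube_manager'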
Example #26
0
class VncKubernetes(object):
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseKM,
                                    REACTION_MAP,
                                    'kube_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace', self.vnc_lib)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.vnc_lib,
            self.label_cache, self.args, self.logger)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.vnc_lib, self.label_cache,
            self.service_mgr)
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy',
            self.vnc_lib, self.label_cache, self.logger)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints', self.vnc_lib,
            self.label_cache)

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
            except requests.exceptions.ConnectionError:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = ['default-domain', project_name]
        proj_obj = Project(name=project_name, fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self,
                     ipam_name,
                     subnets,
                     proj_obj,
                     type='user-defined-subnet'):
        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        try:
            self.vnc_lib.network_ipam_create(ipam_obj)
        except RefsExistError:
            ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
        return ipam_obj, ipam_subnets

    def _create_cluster_network(self, vn_name, proj_obj):
        vn_obj = VirtualNetwork(
            name=vn_name,
            parent_obj=proj_obj,
            address_allocation_mode='user-defined-subnet-only')

        ipam_obj, ipam_subnets = self._create_ipam('pod-ipam',
                                                   self.args.pod_subnets,
                                                   proj_obj)
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(ipam_subnets))

        ipam_obj, ipam_subnets = self._create_ipam('service-ipam',
                                                   self.args.service_subnets,
                                                   proj_obj,
                                                   type='flat-subnet')
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))
        try:
            self.vnc_lib.virtual_network_create(vn_obj)
        except RefsExistError:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn_obj.get_fq_name())
        VirtualNetworkKM.locate(vn_obj.uuid)

        return vn_obj.uuid

    def _provision_cluster(self):
        self._create_project('kube-system')
        proj_obj = self._create_project('default')
        self._create_cluster_network('cluster-network', proj_obj)

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print("\tGot %s %s %s:%s" %
                      (event['type'], event['object'].get('kind'),
                       event['object']['metadata'].get('namespace'),
                       event['object']['metadata'].get('name')))
                if event['object'].get('kind') == 'Pod':
                    self.pod_mgr.process(event)
                elif event['object'].get('kind') == 'Service':
                    self.service_mgr.process(event)
                elif event['object'].get('kind') == 'Namespace':
                    self.namespace_mgr.process(event)
                elif event['object'].get('kind') == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif event['object'].get('kind') == 'Endpoints':
                    self.endpoints_mgr.process(event)
            except Empty:
                gevent.sleep(0)
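
vnc_process() above dispatches on raw Kubernetes watch events pulled from the queue. A minimal hand-built event that would reach pod_mgr.process() might look like this; all values are made up for illustration:

from gevent.queue import Queue

q = Queue()  # stands in for the queue handed to VncKubernetes
event = {
    'type': 'ADDED',
    'object': {
        'kind': 'Pod',
        'metadata': {
            'namespace': 'default',
            'name': 'nginx-0',
            'uid': 'hypothetical-uid',
        },
    },
}
q.put(event)  # vnc_process() would route this to pod_mgr.process(event)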
Example #27
0
    def __init__(self, args=None, logger=None, q=None, kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
            vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

        #
        # In nested mode, kube-manager connects to contrail components running
        # in underlay via global link local services. TCP flows established on
        # link local services will be torn down by vrouter, if there is no
        # activity for configured(or default) timeout. So disable flow timeout
        # on these connections, so these flows will persist.
        #
        # Note: The way to disable flow timeout is to set timeout to max
        #       possible value.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
                    "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

            if self.args.vnc_endpoint_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.vnc_endpoint_port, 2147483647)

            for collector in self.args.collectors:
                collector_port = collector.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
                    "tcp", collector_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseKM,
            REACTION_MAP, 'kube_manager', rabbitmq_cfg, self.args.host_ip)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
        XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=self._get_cluster_service_ipam_fq_name(),
                cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.network_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network.VncNetwork')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()
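
Two details of the nested-mode block above are worth calling out: ports are extracted from 'host:port' endpoint strings, and the flow-aging timeout is pinned to 2147483647, the maximum signed 32-bit value, which in effect disables aging. A small illustration with assumed endpoint values:

endpoints = ['10.0.0.1:9161', '10.0.0.2:8086']   # hypothetical host:port pairs
ports = [ep.split(':')[-1] for ep in endpoints]  # -> ['9161', '8086']
MAX_FLOW_TIMEOUT = 2 ** 31 - 1                   # the "max possible value"
assert MAX_FLOW_TIMEOUT == 2147483647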
Example #28
0
class VncKubernetes(vnc_common.VncCommon):

    _vnc_kubernetes = None

    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        #
        # In nested mode, kube-manager connects to contrail components running
        # in underlay via global link local services. TCP flows established on
        # link local services will be torn down by vrouter, if there is no
        # activity for configured(or default) timeout. So disable flow timeout
        # on these connections, so these flows will persist.
        #
        # Note: The way to disable flow timeout is to set timeout to max
        #       possible value.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

            if self.args.vnc_endpoint_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                    2147483647)

            for collector in self.args.collectors:
                collector_port = collector.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", collector_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(
            self.logger._sandesh, self.logger, DBBaseKM,
            reaction_map.REACTION_MAP, self.args.cluster_id + '-' +
            self.args.cluster_name + '-kube_manager', rabbitmq_cfg,
            self.args.host_ip)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        label_cache.XLabelCache.register_label_add_callback(
            VncKubernetes.create_tags)
        label_cache.XLabelCache.register_label_delete_callback(
            VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=self._get_cluster_service_ipam_fq_name(),
                cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.network_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network.VncNetwork')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()

    def connection_state_update(self, status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER,
            name='ApiServer',
            status=status,
            message=message or '',
            server_addrs=[
                '%s:%s' %
                (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port)
            ])

    # end connection_state_update

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = self.args.vnc_endpoint_ip.split(',')
        while not connected:
            try:
                vnc_lib = VncApi(self.args.auth_user,
                                 self.args.auth_password,
                                 self.args.auth_tenant,
                                 api_server_list,
                                 self.args.vnc_endpoint_port,
                                 auth_token_url=self.args.auth_token_url,
                                 api_health_check=True)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in list(DBBaseKM.get_obj_type_map().values()):
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in list(DBBaseKM.get_obj_type_map().values()):
            cls.reset()

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            vn_obj.add_network_policy(
                policy, VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self.vnc_lib.virtual_network_update(vn_obj)
        for policy in policies or []:
            self.vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj, src_np_obj=None):
        if src_vn_obj:
            src_addresses = [
                AddressType(virtual_network=src_vn_obj.get_fq_name_str())
            ]
        else:
            src_addresses = [
                AddressType(network_policy=src_np_obj.get_fq_name_str())
            ]
        return PolicyRuleType(
            direction='<>',
            action_list=ActionListType(simple_action='pass'),
            protocol='any',
            src_addresses=src_addresses,
            src_ports=[PortType(-1, -1)],
            dst_addresses=[
                AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
            ],
            dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name, proj_obj, *vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        total_vn = len(vn_obj)
        for i in range(0, total_vn):
            for j in range(i + 1, total_vn):
                policy_entry = self._create_policy_entry(vn_obj[i], vn_obj[j])
                network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_np_vn_policy(self, policy_name, proj_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(None, dst_vn_obj, policy)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj, pod_vn_obj,
                              service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(
            policy_name, proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj, ip_fabric_policy,
                            cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
                            cluster_service_network_policy,
                            cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(
                policy_name, proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnets, proj_obj, type='flat-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        if not ipam_subnets:
            self.logger.error("%s - %s subnet is empty for %s" %
                              (self._name, ipam_name, subnets))

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        ipam_update = False
        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
            ipam_update = True
        except RefsExistError:
            curr_ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = curr_ipam_obj.get_uuid()
            if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
                self.vnc_lib.network_ipam_update(ipam_obj)
                ipam_update = True

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_update, ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    if subnet:
                        # Subnet is specified.
                        # Validate that we are able to match the subnet as well.
                        if ipam_ref['attr'].ipam_subnets and \
                                subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
                            return True
                    else:
                        # Subnet is not specified.
                        # So ipam-fq-name match will suffice.
                        return True
        return False

    def _allocate_fabric_snat_port_translation_pools(self):
        global_vrouter_fq_name = \
            ['default-global-system-config', 'default-global-vrouter-config']
        count = 0
        while True:
            try:
                global_vrouter_obj = \
                    self.vnc_lib.global_vrouter_config_read(
                        fq_name=global_vrouter_fq_name)
                break
            except NoIdError:
                if count == 20:
                    return
                time.sleep(3)
                count += 1
        port_count = 1024
        start_port = 56000
        end_port = start_port + port_count - 1
        snat_port_range = PortType(start_port=start_port, end_port=end_port)
        port_pool_tcp = PortTranslationPool(protocol="tcp",
                                            port_range=snat_port_range)

        start_port = end_port + 1
        end_port = start_port + port_count - 1
        snat_port_range = PortType(start_port=start_port, end_port=end_port)
        port_pool_udp = PortTranslationPool(protocol="udp",
                                            port_range=snat_port_range)
        port_pools = PortTranslationPools([port_pool_tcp, port_pool_udp])
        global_vrouter_obj.set_port_translation_pools(port_pools)
        try:
            self.vnc_lib.global_vrouter_config_update(global_vrouter_obj)
        except NoIdError:
            pass

    def _provision_cluster(self):
        # Pre creating default project before namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre creating kube-system project before namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, ip_fabric_ipam_obj,
                ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, pod_ipam_obj, pod_ipam_update,
                ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                                   cluster_pod_vn_obj, cluster_service_vn_obj,
                                   cluster_vn_obj)

    def _create_network(self,
                        vn_name,
                        vn_type,
                        proj_obj,
                        ipam_obj,
                        ipam_update,
                        provider=None):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Attach IPAM to virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if ipam_update or \
           not self._is_ipam_exists(vn_obj, ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))

        fabric_snat = False
        if vn_type == 'pod-network':
            fabric_snat = True

        if not vn_exists:
            if self.args.ip_fabric_forwarding:
                if provider:
                    # enable ip_fabric_forwarding
                    vn_obj.add_virtual_network(provider)
            elif fabric_snat and self.args.ip_fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)
        else:
            self.vnc_lib.virtual_network_update(vn_obj)

        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())
        VirtualNetworkKM.locate(vn_obj.uuid)

        return vn_obj

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def _get_cluster_service_ipam_fq_name(self):
        return self._cluster_service_ipam_fq_name

    def _get_cluster_ip_fabric_ipam_fq_name(self):
        return self._cluster_ip_fabric_ipam_fq_name

    def vnc_timer(self):
        try:
            self.network_policy_mgr.network_policy_timer()
            self.ingress_mgr.ingress_timer()
            self.service_mgr.service_timer()
            self.pod_mgr.pod_timer()
            self.namespace_mgr.namespace_timer()
        except Exception:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self.logger.error("vnc_timer: %s - %s" % (self._name, err_msg))

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                elif kind == 'NetworkAttachmentDefinition':
                    self.network_mgr.process(event)
                else:
                    print("%s - Event %s %s %s:%s:%s not handled" %
                          (self._name, event_type, kind, namespace, name, uid))
                    self.logger.error(
                        "%s - Event %s %s %s:%s:%s not handled" %
                        (self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" % (self._name, err_msg))

    @classmethod
    def get_instance(cls):
        return VncKubernetes._vnc_kubernetes

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in list(DBBaseKM.get_obj_type_map().values()):
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None

    @classmethod
    def create_tags(cls, type, value):
        if cls._vnc_kubernetes:
            cls.get_instance().tags_mgr.create(type, value)

    @classmethod
    def delete_tags(cls, type, value):
        if cls._vnc_kubernetes:
            cls.get_instance().tags_mgr.delete(type, value)

    @classmethod
    def get_tags(cls, kv_dict, create=False):
        if cls._vnc_kubernetes:
            return cls.get_instance().tags_mgr.get_tags_fq_name(
                kv_dict, create)
        return None
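The tag helpers above are classmethods that route through the VncKubernetes singleton, so callers never need an instance handle. A minimal usage sketch, assuming the manager was already constructed elsewhere (the 'app'/'frontend' label pair is purely illustrative):

# Hypothetical caller: register a tag for a k8s label, then resolve a
# label dict to tag FQ-names, creating any missing tags on the fly.
VncKubernetes.create_tags('app', 'frontend')
fq_names = VncKubernetes.get_tags({'app': 'frontend'}, create=True)
# get_tags() returns None until a VncKubernetes instance exists.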
Example #29
0
class VncMesos(object):
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseMM,
                                    REACTION_MAP,
                                    'mesos_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
                self.logger.info(
                    "Connected to API-server %s:%s." %
                    (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port))
            except requests.exceptions.ConnectionError as e:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def process_q_event(self, event):
        labels = event['labels']
        # subnet = event['ipam']['subnet'] if event['ipam'] else None

        for k, v in labels.items():
            if k == mesos_consts.MESOS_LABEL_PRIVATE_NETWORK:
                print(v)
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_NETWORK:
                print(v)
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_SUBNET:
                print(v)
            else:
                pass

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print(event)
                self.logger.info("VNC: Handle CNI Data for ContainerId: %s." %
                                 (event['cid']))
                self.process_q_event(event)
            except Empty:
                gevent.sleep(0)
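VncMesos pulls CNI events off a gevent queue: vnc_process logs the container id, and process_q_event reacts to the Mesos network labels. A minimal driver sketch, assuming args and logger are prepared as in the surrounding examples (the event payload is illustrative):

import gevent
from gevent.queue import Queue

q = Queue()
vnc = VncMesos(args=args, logger=logger, q=q)  # args/logger assumed to exist
gevent.spawn(vnc.vnc_process)                  # consumer greenlet
q.put({'cid': 'container-1234',                # container id (illustrative)
       'labels': {},                           # Mesos labels to act on
       'ipam': None})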
Example #30
0
class VncKubernetes(VncCommon):

    _vnc_kubernetes = None

    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        # HACK ALERT.
        # Till we have an alternate means to get config objects, we will
        # directly connect to cassandra. Such a persistent connection is
        # discouraged, but is the only option we have for now.
        #
        # Disable flow timeout on this connection, so the flow persists.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseKM, REACTION_MAP, 'kube_manager',
                                    rabbitmq_cfg)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_fip_pool=self._get_cluster_service_fip_pool())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')

        VncKubernetes._vnc_kubernetes = self

    def connection_state_update(self, status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER,
            name='ApiServer',
            status=status,
            message=message or '',
            server_addrs=[
                '%s:%s' %
                (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port)
            ])

    # end connection_state_update

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = self.args.vnc_endpoint_ip.split(',')
        while not connected:
            try:
                vnc_lib = VncApi(self.args.auth_user,
                                 self.args.auth_password,
                                 self.args.auth_tenant,
                                 api_server_list,
                                 self.args.vnc_endpoint_port,
                                 auth_token_url=self.args.auth_token_url)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_attach_ip_fabric_security_group(self, ip_fabric_vn_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(security_group=proj_obj.get_fq_name_str() +
                                   ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        sg_dict = {}
        sg_name = 'ip-fabric-default'
        DEFAULT_SECGROUP_DESCRIPTION = "Default ip-fabric security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)
        rules = []
        rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(True, None, '::', 'IPv6'))
        rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        proj_fq_name = ['default-domain', 'default-project']
        proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)
        try:
            self.vnc_lib.security_group_create(sg_obj)
            self.vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self.vnc_lib.security_group_update(sg_obj)

        vmi_back_refs = \
            ip_fabric_vn_obj.get_virtual_machine_interface_back_refs()
        sg_uuid = sg_obj.get_uuid()
        for vmi_back_ref in vmi_back_refs or []:
            self.vnc_lib.ref_update('virtual-machine-interface',
                                    vmi_back_ref['uuid'], 'security-group',
                                    sg_uuid, None, 'ADD')
            # Set virtual_machine_interface_disable_policy to False.
            vmi_obj = self.vnc_lib.virtual_machine_interface_read(
                id=vmi_back_ref['uuid'])
            vmi_obj.virtual_machine_interface_disable_policy = False
            self.vnc_lib.virtual_machine_interface_update(vmi_obj)

    def _create_policy(self, policy_name, proj_obj, src_vn_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType([
            PolicyRuleType(
                direction='<>',
                action_list=ActionListType(simple_action='pass'),
                protocol='any',
                src_addresses=[
                    AddressType(virtual_network=src_vn_obj.get_fq_name_str())
                ],
                src_ports=[PortType(-1, -1)],
                dst_addresses=[
                    AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
                ],
                dst_ports=[PortType(-1, -1)])
        ])
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _attach_policy(self, vn_obj, policy):
        vn_obj.add_network_policy(policy, \
            VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self.vnc_lib.virtual_network_update(vn_obj)
        self.vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
                              cluster_network_vn_obj):
        policy_name = '%s-%s-default' \
            %(ip_fabric_vn_obj.name, cluster_network_vn_obj.name)
        network_policy = self._create_policy(policy_name, proj_obj, \
            ip_fabric_vn_obj, cluster_network_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, network_policy)
        self._attach_policy(cluster_network_vn_obj, network_policy)

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self,
                     ipam_name,
                     subnets,
                     proj_obj,
                     type='user-defined-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        if not ipam_subnets:
            self.logger.error("%s - subnet list for ipam %s is empty (%s)"
                              % (self._name, ipam_name, subnets))

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        ipam_update = False
        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
            ipam_update = True
        except RefsExistError:
            curr_ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = curr_ipam_obj.get_uuid()
            if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
                self.vnc_lib.network_ipam_update(ipam_obj)
                ipam_update = True

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_update, ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    if subnet:
                        # Subnet is specified.
                        # Validate that we are able to match the subnet as well.
                        if len(ipam_ref['attr'].ipam_subnets) and \
                            subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
                            return True
                    else:
                        # Subnet is not specified.
                        # So ipam-fq-name match will suffice.
                        return True
        return False

    def _create_cluster_network(self, vn_name, proj_obj):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            address_allocation_mode='user-defined-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Create Pod IPAM.
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam('pod-ipam',
                self.args.pod_subnets, proj_obj, type='flat-subnet')

        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

        # Attach Pod IPAM to cluster virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if pod_ipam_update or \
           not self._is_ipam_exists(vn_obj, pod_ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(pod_ipam_obj, VnSubnetsType([]))

        #
        # Create Service IPAM.
        #
        svc_ipam_update, svc_ipam_obj, svc_ipam_subnets = \
            self._create_ipam('service-ipam', self.args.service_subnets, proj_obj)

        # Attach Service IPAM to virtual-network.
        svc_subnet = None
        if svc_ipam_subnets:
            svc_subnet = svc_ipam_subnets[0].subnet
        if svc_ipam_update or \
           not self._is_ipam_exists(vn_obj, svc_ipam_obj.get_fq_name(), svc_subnet):
            vn_obj.add_network_ipam(svc_ipam_obj,
                                    VnSubnetsType(svc_ipam_subnets))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))

        if vn_exists:
            # Update VN.
            self.vnc_lib.virtual_network_update(vn_obj)
        else:
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)

        # FIP pool creation requires a vnc object. Get it.
        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())

        # Create service floating ip pool.
        self._create_cluster_service_fip_pool(vn_obj, pod_ipam_obj)

        VirtualNetworkKM.locate(vn_obj.uuid)
        return vn_obj

    def _get_cluster_service_fip_pool_name(self, vn_name):
        """
        Return fip pool name of cluster service network.
        """
        return 'svc-fip-pool-%s' % (vn_name)

    def _get_cluster_service_fip_pool(self):
        """
        Get floating ip pool of cluster service network.
        """
        vn_obj = self._get_cluster_network()
        return FloatingIpPoolKM.find_by_name_or_uuid(
            self._get_cluster_service_fip_pool_name(vn_obj.name))

    def _create_cluster_service_fip_pool(self, vn_obj, ipam_obj):
        # Create a floating-ip-pool in cluster service network.
        #
        # Service IPs in the k8s cluster are allocated from pod
        # IPAM in the cluster network. All pods spawned in isolated
        # virtual networks will be allocated an IP from this floating-ip-
        # pool. These pods, in those isolated virtual networks, will use this
        # floating-ip for outbound traffic to services in the k8s cluster.

        # Get IPAM refs from virtual-network.
        ipam_refs = vn_obj.get_network_ipam_refs()
        svc_subnet_uuid = None
        for ipam_ref in ipam_refs:
            if ipam_ref['to'] == ipam_obj.get_fq_name():
                ipam_subnets = ipam_ref['attr'].get_ipam_subnets()
                if not ipam_subnets:
                    continue
                # We will use the first subnet in the matching IPAM.
                svc_subnet_uuid = ipam_subnets[0].get_subnet_uuid()
                break

        fip_subnets = FloatingIpPoolSubnetType(subnet_uuid=[svc_subnet_uuid])
        fip_pool_obj = FloatingIpPool(self._get_cluster_service_fip_pool_name(
            vn_obj.name),
                                      floating_ip_pool_subnets=fip_subnets,
                                      parent_obj=vn_obj)

        try:
            # Create floating ip pool for cluster service network.
            self.vnc_lib.floating_ip_pool_create(fip_pool_obj)
        except RefsExistError:
            # Floating-ip-pool exists.
            #
            # Validate that existing floating-ip-pool has the service subnet
            # uuid as one of its subnets. If not raise an exception, as the
            # floating-ip-pool cannot be created, as one with the same name but
            # different attributes exists in the system.
            fip_pool_db_obj = self._get_cluster_service_fip_pool()
            svc_subnet_found = False
            fip_subnets = None

            if hasattr(fip_pool_db_obj, 'floating_ip_pool_subnets'):
                fip_subnets = fip_pool_db_obj.floating_ip_pool_subnets

            if fip_subnets:
                for subnet in fip_subnets['subnet_uuid']:
                    if subnet == svc_subnet_uuid:
                        svc_subnet_found = True
                        break

            if not svc_subnet_found:
                # Requested service subnet was not found in existing fip pool.
                # Update existing fip pool entry with desired subnet.
                self.logger.debug("Failed to create floating-ip-pool %s for "
                    "subnet %s. A floating-ip-pool with the same name exists. "
                    "Updating existing entry." %
                    (":".join(fip_pool_db_obj.fq_name), svc_subnet_uuid))

                # Update vnc.
                self.vnc_lib.floating_ip_pool_update(fip_pool_obj)
                # Update subnet info in local cache.
                fip_subnets['subnet_uuid'] = [svc_subnet_uuid]

        else:
            # Read and update local cache.
            fip_pool_obj = self.vnc_lib.floating_ip_pool_read(
                fq_name=fip_pool_obj.fq_name)
            FloatingIpPoolKM.locate(fip_pool_obj.get_uuid())

        return

    def _provision_cluster(self):
        self._create_project('kube-system')
        proj_obj = self._create_project(\
            vnc_kube_config.cluster_default_project_name())
        cluster_network_vn_obj = self._create_cluster_network(\
            vnc_kube_config.cluster_default_network_name(), proj_obj)
        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        try:
            ip_fabric_vn_obj = self.vnc_lib. \
                virtual_network_read(fq_name=ip_fabric_fq_name)
            self._create_attach_policy(proj_obj, ip_fabric_vn_obj, \
                cluster_network_vn_obj)
            self._create_attach_ip_fabric_security_group(ip_fabric_vn_obj)
        except NoIdError:
            # unit-test may not have ip-fabric-network
            pass

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def vnc_timer(self):
        try:
            self.network_policy_mgr.network_policy_timer()
            self.ingress_mgr.ingress_timer()
            self.service_mgr.service_timer()
            self.pod_mgr.pod_timer()
            self.namespace_mgr.namespace_timer()
        except Exception:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self.logger.error("vnc_timer: %s - %s" % (self._name, err_msg))

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                else:
                    print("%s - Event %s %s %s:%s:%s not handled" %
                          (self._name, event_type, kind, namespace, name, uid))
                    self.logger.error(
                        "%s - Event %s %s %s:%s:%s not handled" %
                        (self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" % (self._name, err_msg))

    @classmethod
    def get_instance(cls):
        return VncKubernetes._vnc_kubernetes

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in DBBaseKM.get_obj_type_map().values():
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None
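vnc_process in this example dispatches purely on the kind field of a Kubernetes watch event, so the queue payload mirrors the raw watch stream. A sketch of the minimal event shape it expects (all field values are illustrative):

event = {
    'type': 'ADDED',            # or MODIFIED / DELETED
    'object': {
        'kind': 'Pod',          # routed to pod_mgr.process(event)
        'metadata': {
            'namespace': 'default',
            'name': 'web-0',
            'uid': '0f1d...',   # placeholder uid
        },
    },
}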
Example #31
0
class VncKubernetes(object):
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseKM,
                                    REACTION_MAP,
                                    'kube_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            vnc_lib=self.vnc_lib,
            cluster_pod_subnets=self.args.pod_subnets)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.vnc_lib,
            self.label_cache, self.args, self.logger, self.kube)
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy',
            self.vnc_lib, self.label_cache, self.logger)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod',
            self.vnc_lib,
            self.label_cache,
            self.service_mgr,
            self.network_policy_mgr,
            self.q,
            svc_fip_pool=self._get_cluster_service_fip_pool())
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints', self.vnc_lib,
            self.logger, self.kube)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.args, self.vnc_lib,
            self.label_cache, self.logger, self.kube)

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
            except requests.exceptions.ConnectionError as e:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = ['default-domain', project_name]
        proj_obj = Project(name=project_name, fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        try:
            self._create_default_security_group(proj_obj)
        except RefsExistError:
            pass
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_default_security_group(self, proj_obj):
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"

        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(security_group=proj_obj.get_fq_name_str() +
                                   ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        rules = [
            _get_rule(True, 'default', None, 'IPv4'),
            _get_rule(True, 'default', None, 'IPv6'),
            _get_rule(False, None, '0.0.0.0', 'IPv4'),
            _get_rule(False, None, '::', 'IPv6')
        ]
        sg_rules = PolicyEntriesType(rules)

        # create security group
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)
        sg_obj = SecurityGroup(name='default',
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        self.vnc_lib.security_group_create(sg_obj)
        self.vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())

    def _create_ipam(self,
                     ipam_name,
                     subnets,
                     proj_obj,
                     type='user-defined-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        try:
            self.vnc_lib.network_ipam_create(ipam_obj)
        except RefsExistError:
            ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
        return ipam_obj, ipam_subnets

    def _create_cluster_network(self, vn_name, proj_obj):
        vn_obj = VirtualNetwork(
            name=vn_name,
            parent_obj=proj_obj,
            address_allocation_mode='user-defined-subnet-only')

        # Create Pod IPAM.
        ipam_obj, ipam_subnets = self._create_ipam('pod-ipam',
                                                   self.args.pod_subnets,
                                                   proj_obj)

        # Attach Pod IPAM to virtual-network.
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(ipam_subnets))

        #
        # Create Service IPAM.
        #
        svc_ipam_obj, ipam_subnets = self._create_ipam(
            'service-ipam',
            self.args.service_subnets,
            proj_obj,
            type='flat-subnet')

        # Attach Service IPAM to virtual-network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        vn_obj.add_network_ipam(svc_ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))
        try:
            self.vnc_lib.virtual_network_create(vn_obj)
        except RefsExistError:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn_obj.get_fq_name())

        VirtualNetworkKM.locate(vn_obj.uuid)

        # Create service floating ip pool.
        self._create_cluster_service_fip_pool(vn_obj, svc_ipam_obj)

        return vn_obj.uuid

    def _get_cluster_service_fip_pool_name(self, vn_name):
        """
        Return fip pool name of cluster service network.
        """
        return 'svc-fip-pool-%s' % (vn_name)

    def _get_cluster_service_fip_pool(self):
        """
        Get floating ip pool of cluster service network.
        """
        vn_obj = self._get_cluster_network()
        return FloatingIpPoolKM.find_by_name_or_uuid(
            self._get_cluster_service_fip_pool_name(vn_obj.name))

    def _create_cluster_service_fip_pool(self, vn_obj, ipam_obj):
        # Create a floating-ip-pool in cluster service network.
        #
        # Service IPs in the k8s cluster are allocated from service
        # IPAM in the cluster network. All pods spawned in isolated
        # virtual networks will be allocated an IP from this floating-ip-
        # pool. These pods, in those isolated virtual networks, will use this
        # floating-ip for outbound traffic to services in the k8s cluster.

        # Get IPAM refs from virtual-network.
        ipam_refs = vn_obj.get_network_ipam_refs()
        svc_subnet_uuid = None
        for ipam_ref in ipam_refs:
            if ipam_ref['to'] == ipam_obj.get_fq_name():
                ipam_subnets = ipam_ref['attr'].get_ipam_subnets()
                if not ipam_subnets:
                    continue
                # We will use the first subnet in the matching IPAM.
                svc_subnet_uuid = ipam_subnets[0].get_subnet_uuid()
                break

        fip_subnets = FloatingIpPoolSubnetType(subnet_uuid=[svc_subnet_uuid])
        fip_pool_obj = FloatingIpPool(self._get_cluster_service_fip_pool_name(
            vn_obj.name),
                                      floating_ip_pool_subnets=fip_subnets,
                                      parent_obj=vn_obj)
        try:
            # Create floating ip pool for cluster service network.
            fip_pool_vnc_obj =\
                self.vnc_lib.floating_ip_pool_create(fip_pool_obj)

        except RefsExistError:
            # Floating-ip-pool exists.
            #
            # Validate that existing floating-ip-pool has the service subnet
            # uuid as one of its subnets. If not raise an exception, as the
            # floating-ip-pool cannot be created, as one with the same name but
            # different attributes exists in the system.
            fip_pool_vnc_obj = self._get_cluster_service_fip_pool()
            svc_subnet_found = False
            fip_subnets = None

            if hasattr(fip_pool_vnc_obj, 'floating_ip_pool_subnets'):
                fip_subnets = fip_pool_vnc_obj.floating_ip_pool_subnets

            if fip_subnets:
                for subnet in fip_subnets['subnet_uuid']:
                    if subnet == svc_subnet_uuid:
                        svc_subnet_found = True
                        break

            if not svc_subnet_found:
                self.logger.error("Failed to create floating-ip-pool %s for "
                    "subnet %s. A floating-ip-pool with the same name exists." %
                    (":".join(fip_pool_vnc_obj.fq_name), svc_subnet_uuid))

        else:
            # Update local cache.
            FloatingIpPoolKM.locate(fip_pool_vnc_obj)

        return

    def _provision_cluster(self):
        self._create_project('kube-system')
        proj_obj = self._create_project('default')
        self._create_cluster_network('cluster-network', proj_obj)

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid('cluster-network')

    def vnc_timer(self):
        self.pod_mgr.pod_timer()

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print("\tGot %s %s %s:%s" %
                      (event['type'], event['object'].get('kind'),
                       event['object']['metadata'].get('namespace'),
                       event['object']['metadata'].get('name')))
                if event['object'].get('kind') == 'Pod':
                    self.pod_mgr.process(event)
                elif event['object'].get('kind') == 'Service':
                    self.service_mgr.process(event)
                elif event['object'].get('kind') == 'Namespace':
                    self.namespace_mgr.process(event)
                elif event['object'].get('kind') == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif event['object'].get('kind') == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif event['object'].get('kind') == 'Ingress':
                    self.ingress_mgr.process(event)
            except Empty:
                gevent.sleep(0)
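Note how _create_cluster_network in this example attaches its two IPAMs differently: a flat-subnet IPAM keeps the prefixes on the IPAM object itself (via set_ipam_subnets()), so the network-to-IPAM link carries an empty subnet list, whereas a user-defined-subnet IPAM carries its subnets on the link. A condensed sketch of the distinction, reusing the objects built above:

# user-defined-subnet IPAM: subnets ride on the VN-to-IPAM link
vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(ipam_subnets))

# flat-subnet IPAM: subnets were already set on the IPAM itself,
# so the link gets an empty VnSubnetsType
vn_obj.add_network_ipam(svc_ipam_obj, VnSubnetsType([]))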
Example #32
0
class VncKubernetes(VncCommon):

    _vnc_kubernetes = None

    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        # HACK ALERT.
        # Till we have an alternate means to get config objects, we will
        # directly connect to cassandra. Such a persistent connection is
        # discouraged, but is the only option we have for now.
        #
        # Disable flow timeout on this connection, so the flow persists.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseKM, REACTION_MAP, 'kube_manager',
                                    rabbitmq_cfg)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=(
                    self._get_cluster_service_ipam_fq_name()))

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')

        VncKubernetes._vnc_kubernetes = self

    def connection_state_update(self, status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER,
            name='ApiServer',
            status=status,
            message=message or '',
            server_addrs=[
                '%s:%s' %
                (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port)
            ])

    # end connection_state_update

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = self.args.vnc_endpoint_ip.split(',')
        while not connected:
            try:
                vnc_lib = VncApi(self.args.auth_user,
                                 self.args.auth_password,
                                 self.args.auth_tenant,
                                 api_server_list,
                                 self.args.vnc_endpoint_port,
                                 auth_token_url=self.args.auth_token_url)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_policy(self, policy_name, proj_obj, src_vn_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self.vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType([
            PolicyRuleType(
                direction='<>',
                action_list=ActionListType(simple_action='pass'),
                protocol='any',
                src_addresses=[
                    AddressType(virtual_network=src_vn_obj.get_fq_name_str())
                ],
                src_ports=[PortType(-1, -1)],
                dst_addresses=[
                    AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
                ],
                dst_ports=[PortType(-1, -1)])
        ])
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self.vnc_lib.network_policy_update(policy_obj)
        else:
            self.vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            vn_obj.add_network_policy(policy, \
                VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self.vnc_lib.virtual_network_update(vn_obj)
        for policy in policies or []:
            self.vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj, \
            cluster_pod_vn_obj, cluster_service_vn_obj):
        policy_name = '%s-%s-default' \
            %(ip_fabric_vn_obj.name, cluster_pod_vn_obj.name)
        ip_fabric_pod_policy = self._create_policy(policy_name, proj_obj, \
            ip_fabric_vn_obj, cluster_pod_vn_obj)
        policy_name = '%s-%s-default'\
            %(ip_fabric_vn_obj.name, cluster_service_vn_obj.name)
        ip_fabric_service_policy = self._create_policy(policy_name, proj_obj, \
            ip_fabric_vn_obj, cluster_service_vn_obj)
        policy_name = '%s-%s-default'\
            %(cluster_service_vn_obj.name, cluster_pod_vn_obj.name)
        service_pod_policy = self._create_policy(policy_name, proj_obj, \
            cluster_service_vn_obj, cluster_pod_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, \
            ip_fabric_pod_policy, ip_fabric_service_policy)
        self._attach_policy(cluster_pod_vn_obj, \
            ip_fabric_pod_policy, service_pod_policy)
        self._attach_policy(cluster_service_vn_obj, \
            ip_fabric_service_policy, service_pod_policy)

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnets, proj_obj, type='flat-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        if not ipam_subnets:
            self.logger.error("%s - subnet list for ipam %s is empty (%s)"
                              % (self._name, ipam_name, subnets))

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        ipam_update = False
        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
            ipam_update = True
        except RefsExistError:
            curr_ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = curr_ipam_obj.get_uuid()
            if type == 'flat-subnet' and not curr_ipam_obj.get_ipam_subnets():
                self.vnc_lib.network_ipam_update(ipam_obj)
                ipam_update = True

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_update, ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    if subnet:
                        # Subnet is specified.
                        # Validate that we are able to match the subnet as well.
                        if len(ipam_ref['attr'].ipam_subnets) and \
                            subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
                            return True
                    else:
                        # Subnet is not specified.
                        # So ipam-fq-name match will suffice.
                        return True
        return False

    def _provision_cluster(self):
        proj_obj = self._create_project(\
            vnc_kube_config.cluster_default_project_name())
        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._create_project('kube-system')
        # Create Pod IPAM.
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam('pod-ipam', self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(), proj_obj, \
            pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
        # Create Service IPAM.
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam('service-ipam', self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(), proj_obj, \
            service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj, \
            cluster_pod_vn_obj, cluster_service_vn_obj)

    def _create_network(self, vn_name, proj_obj, \
            ipam_obj, ipam_update, provider=None):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            address_allocation_mode='user-defined-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Attach IPAM to virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if ipam_update or \
           not self._is_ipam_exists(vn_obj, ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))

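        # Link this VN to its provider network (the ip-fabric VN passed in by
        # _provision_cluster) when one is given; otherwise drop any stale
        # provider refs left behind by an earlier configuration.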
        if provider:
            vn_obj.add_virtual_network(provider)
        else:
            vn_refs = vn_obj.get_virtual_network_refs()
            for vn_ref in vn_refs or []:
                vn_ref_obj = self.vnc_lib.virtual_network_read(
                    id=vn_ref['uuid'])
                vn_obj.del_virtual_network(vn_ref_obj)

        if vn_exists:
            # Update VN.
            self.vnc_lib.virtual_network_update(vn_obj)
        else:
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)

        # FIP pool creation requires a vnc object. Get it.
        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())

        VirtualNetworkKM.locate(vn_obj.uuid)
        return vn_obj

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def _get_cluster_service_ipam_fq_name(self):
        return self._cluster_service_ipam_fq_name

    def vnc_timer(self):
        try:
            self.network_policy_mgr.network_policy_timer()
            self.ingress_mgr.ingress_timer()
            self.service_mgr.service_timer()
            self.pod_mgr.pod_timer()
            self.namespace_mgr.namespace_timer()
        except Exception:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self.logger.error("vnc_timer: %s - %s" % (self._name, err_msg))

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                else:
                    print("%s - Event %s %s %s:%s:%s not handled" %
                          (self._name, event_type, kind, namespace, name, uid))
                    self.logger.error(
                        "%s - Event %s %s %s:%s:%s not handled" %
                        (self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception as e:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" % (self._name, err_msg))

    @classmethod
    def get_instance(cls):
        return VncKubernetes._vnc_kubernetes

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in DBBaseKM.get_obj_type_map().values():
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None
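
The IPAM-attach logic above hinges on the two vnc_api IPAM modes: a flat-subnet IPAM carries its subnets on the IPAM object itself, while a user-defined-subnet IPAM receives them on the virtual-network link. A minimal sketch of the difference, assuming the vnc_api schema types used throughout these snippets (the names and the subnet value are illustrative):

from vnc_api.vnc_api import (NetworkIpam, IpamSubnets, IpamSubnetType,
                             SubnetType, VnSubnetsType)

subnet = IpamSubnetType(subnet=SubnetType('10.32.0.0', 12))

# flat-subnet: the subnet lives on the IPAM object itself ...
flat_ipam = NetworkIpam(name='pod-ipam')
flat_ipam.set_ipam_subnet_method('flat-subnet')
flat_ipam.set_ipam_subnets(IpamSubnets([subnet]))
# ... so the VN -> IPAM link carries an empty subnet list:
# vn_obj.add_network_ipam(flat_ipam, VnSubnetsType([]))

# user-defined-subnet: the subnet travels on the VN -> IPAM link instead:
user_ipam = NetworkIpam(name='service-ipam')
# vn_obj.add_network_ipam(user_ipam, VnSubnetsType([subnet]))
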
class VncKubernetes(VncCommon):

    _vnc_kubernetes = None

    def __init__(self, args=None, logger=None, q=None, kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
            vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

        # HACK ALERT.
        # Until we have an alternate means to get config objects, we will
        # directly connect to cassandra. Such a persistent connection is
        # discouraged, but is the only option we have for now.
        #
        # Disable flow timeout on this connection, so the flow persists.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(self.vnc_lib,
                    "tcp", cassandra_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseKM,
            REACTION_MAP, 'kube_manager', rabbitmq_cfg)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_fip_pool=self._get_cluster_service_fip_pool())

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')

        VncKubernetes._vnc_kubernetes = self

    def connection_state_update(self, status, message=None):
        ConnectionState.update(
            conn_type=ConnType.APISERVER, name='ApiServer',
            status=status, message=message or '',
            server_addrs=['%s:%s' % (self.args.vnc_endpoint_ip,
                                     self.args.vnc_endpoint_port)])
    # end connection_state_update

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = self.args.vnc_endpoint_ip.split(',')
        while not connected:
            try:
                vnc_lib = VncApi(self.args.auth_user,
                    self.args.auth_password, self.args.auth_tenant,
                    api_server_list, self.args.vnc_endpoint_port,
                    auth_token_url=self.args.auth_token_url)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(
                fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnets, proj_obj,
            type='user-defined-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        if not ipam_subnets:
            self.logger.error("%s - no subnets were derived for IPAM %s from %s"
                              % (self._name, ipam_name, subnets))

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        ipam_update = False
        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
            ipam_update = True
        except RefsExistError:
            ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = ipam_obj.get_uuid()
            if type == 'flat-subnet' and not ipam_obj.get_ipam_subnets():
                self.vnc_lib.network_ipam_update(ipam_obj)
                ipam_update = True

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_update, ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name, subnet=None):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    if subnet:
                        # Subnet is specified.
                        # Validate that we can match the subnet as well.
                        if len(ipam_ref['attr'].ipam_subnets) and \
                           subnet == ipam_ref['attr'].ipam_subnets[0].subnet:
                            return True
                    else:
                        # Subnet is not specified.
                        # An ipam-fq-name match will suffice.
                        return True
        return False

    def _create_cluster_network(self, vn_name, proj_obj):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name, parent_obj=proj_obj,
                 address_allocation_mode='user-defined-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Create Pod IPAM.
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam('pod-ipam',
                self.args.pod_subnets, proj_obj, type='flat-subnet')

        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

        # Attach Pod IPAM to cluster virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if pod_ipam_update or \
           not self._is_ipam_exists(vn_obj, pod_ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(pod_ipam_obj, VnSubnetsType([]))

        #
        # Create Service IPAM.
        #
        svc_ipam_update, svc_ipam_obj, svc_ipam_subnets = \
            self._create_ipam('service-ipam', self.args.service_subnets, proj_obj)

        # Attach Service IPAM to virtual-network.
        svc_subnet = None
        if svc_ipam_subnets:
            svc_subnet = svc_ipam_subnets[0].subnet
        if svc_ipam_update or \
           not self._is_ipam_exists(vn_obj, svc_ipam_obj.get_fq_name(), svc_subnet):
            vn_obj.add_network_ipam(svc_ipam_obj, VnSubnetsType(svc_ipam_subnets))

        vn_obj.set_virtual_network_properties(
             VirtualNetworkType(forwarding_mode='l3'))

        if vn_exists:
            # Update VN.
            self.vnc_lib.virtual_network_update(vn_obj)
        else:
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)

        # FIP pool creation requires a vnc object. Get it.
        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())

        # Create service floating ip pool.
        self._create_cluster_service_fip_pool(vn_obj, pod_ipam_obj)

        VirtualNetworkKM.locate(vn_obj.uuid)
        return vn_obj.uuid

    def _get_cluster_service_fip_pool_name(self, vn_name):
        """
        Return fip pool name of cluster service network.
        """
        return 'svc-fip-pool-%s' %(vn_name)

    def _get_cluster_service_fip_pool(self):
        """
        Get floating ip pool of cluster service network.
        """
        vn_obj = self._get_cluster_network()
        return FloatingIpPoolKM.find_by_name_or_uuid(
            self._get_cluster_service_fip_pool_name(vn_obj.name))

    def _create_cluster_service_fip_pool(self, vn_obj, ipam_obj):
        # Create a floating-ip-pool in cluster service network.
        #
        # Service IP's in the k8s cluster are allocated from pod
        # IPAM in the cluster network. All pods spawned in isolated
        # virtual networks will be allocated an IP from this floating-ip-
        # pool. These pods, in those isolated virtual networks, will use this
        # floating-ip for outbound traffic to services in the k8s cluster.

        # Get IPAM refs from virtual-network.
        ipam_refs = vn_obj.get_network_ipam_refs()
        svc_subnet_uuid = None
        for ipam_ref in ipam_refs:
            if ipam_ref['to'] == ipam_obj.get_fq_name():
                ipam_subnets = ipam_ref['attr'].get_ipam_subnets()
                if not ipam_subnets:
                    continue
                # We will use the first subnet in the matching IPAM.
                svc_subnet_uuid = ipam_subnets[0].get_subnet_uuid()
                break

        fip_subnets = FloatingIpPoolSubnetType(subnet_uuid=[svc_subnet_uuid])
        fip_pool_obj = FloatingIpPool(
            self._get_cluster_service_fip_pool_name(vn_obj.name),
            floating_ip_pool_subnets=fip_subnets,
            parent_obj=vn_obj)

        try:
            # Create floating ip pool for cluster service network.
            self.vnc_lib.floating_ip_pool_create(fip_pool_obj)
        except RefsExistError:
            # Floating-ip-pool exists.
            #
            # Validate that existing floating-ip-pool has the service subnet
            # uuid as one of its subnets. If not raise an exception, as the
            # floating-ip-pool cannot be created, as one with the same name but
            # different attributes exists in the system.
            fip_pool_db_obj = self._get_cluster_service_fip_pool()
            svc_subnet_found = False
            fip_subnets = None

            if hasattr(fip_pool_db_obj, 'floating_ip_pool_subnets'):
                fip_subnets = fip_pool_db_obj.floating_ip_pool_subnets

            if fip_subnets:
                for subnet in fip_subnets['subnet_uuid']:
                    if subnet == svc_subnet_uuid:
                        svc_subnet_found = True
                        break

            if not svc_subnet_found:
                # Requested service subnet was not found in existing fip pool.
                # Update existing fip pool entry with desired subnet.
                self.logger.debug("Failed to create floating-ip-pool %s for"\
                    "subnet %s. A floating-ip-pool with same name exists."\
                    " Update existing entry." %\
                    (":".join(fip_pool_db_obj.fq_name), svc_subnet_uuid))

                # Update vnc.
                self.vnc_lib.floating_ip_pool_update(fip_pool_obj)
                # Update subnet info in local cache.
                fip_subnets['subnet_uuid'] = [svc_subnet_uuid]

        else:
            # Read and update local cache.
            fip_pool_obj = self.vnc_lib.floating_ip_pool_read(
                fq_name=fip_pool_obj.fq_name)
            FloatingIpPoolKM.locate(fip_pool_obj.get_uuid())

        return

    def _provision_cluster(self):
        self._create_project('kube-system')
        proj_obj = self._create_project(
            vnc_kube_config.cluster_default_project_name())
        self._create_cluster_network(
            vnc_kube_config.cluster_default_network_name(), proj_obj)

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def vnc_timer(self):
        try:
            self.network_policy_mgr.network_policy_timer()
            self.ingress_mgr.ingress_timer()
            self.service_mgr.service_timer()
            self.pod_mgr.pod_timer()
            self.namespace_mgr.namespace_timer()
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self.logger.error("vnc_timer: %s - %s" %(self._name, err_msg))

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                else:
                    print("%s - Event %s %s %s:%s:%s not handled"
                        %(self._name, event_type, kind, namespace, name, uid))
                    self.logger.error("%s - Event %s %s %s:%s:%s not handled"
                        %(self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception as e:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" %(self._name, err_msg))

    @classmethod
    def get_instance(cls):
        return VncKubernetes._vnc_kubernetes

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in DBBaseKM.get_obj_type_map().values():
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None
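
The vnc_process loop above fans each Kubernetes watch event out to a per-kind manager through an if/elif chain. A hypothetical dict-based variant of the same routing (a sketch, not code from the source tree; it assumes the manager attributes created in __init__ and the same Empty/gevent imports as the snippets here):

    def vnc_process(self):
        # Route events via a dispatch table keyed by Kubernetes object kind.
        dispatch = {
            'Pod': self.pod_mgr.process,
            'Service': self.service_mgr.process,
            'Namespace': self.namespace_mgr.process,
            'NetworkPolicy': self.network_policy_mgr.process,
            'Endpoints': self.endpoints_mgr.process,
            'Ingress': self.ingress_mgr.process,
        }
        while True:
            try:
                event = self.q.get()
                kind = event['object'].get('kind')
                handler = dispatch.get(kind)
                if handler:
                    handler(event)
                else:
                    self.logger.error("%s - Event %s not handled"
                                      % (self._name, kind))
            except Empty:
                gevent.sleep(0)
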
Example #35
0
    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        # HACK ALERT.
        # Until we have an alternate means to get config objects, we will
        # directly connect to cassandra and rabbitmq. Such a persistent
        # connection is discouraged, but is the only option we have for now.
        #
        # Disable flow timeout on this connection, so the flow persists.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                    DBBaseKM, REACTION_MAP, 'kube_manager',
                                    rabbitmq_cfg)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
        XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=(
                    self._get_cluster_service_ipam_fq_name()),
                cluster_ip_fabric_ipam_fq_name=(
                    self._get_cluster_ip_fabric_ipam_fq_name()))

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()
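
The snippet above additionally registers classmethod callbacks with XLabelCache so tag creation and deletion can track label changes. A minimal sketch of that register-then-notify pattern (a hypothetical stand-in; the real XLabelCache lives in the kube_manager sources):

class LabelCallbackRegistry(object):
    """Hypothetical registry illustrating the callback wiring above."""
    _add_cb = None
    _delete_cb = None

    @classmethod
    def register_label_add_callback(cls, cb):
        cls._add_cb = cb

    @classmethod
    def register_label_delete_callback(cls, cb):
        cls._delete_cb = cb

    @classmethod
    def notify_add(cls, labels):
        # Invoked by the cache when new labels appear.
        if cls._add_cb:
            cls._add_cb(labels)
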
class VncKubernetes(VncCommon):
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseKM,
                                    REACTION_MAP,
                                    'kube_manager',
                                    args=self.args)
        self.rabbit.establish()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()

        # Update common config.
        self.vnc_kube_config.update(
            label_cache=self.label_cache,
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_fip_pool=self._get_cluster_service_fip_pool())

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService')
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port,
                                 auth_token_url=self.args.auth_token_url)
                connected = True
            except requests.exceptions.ConnectionError as e:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self,
                     ipam_name,
                     subnets,
                     proj_obj,
                     type='user-defined-subnet'):
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split('/')
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)

        if type == 'flat-subnet':
            ipam_obj.set_ipam_subnet_method('flat-subnet')
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        try:
            ipam_uuid = self.vnc_lib.network_ipam_create(ipam_obj)
        except RefsExistError:
            ipam_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
            ipam_uuid = ipam_obj.get_uuid()

        # Cache ipam info.
        NetworkIpamKM.locate(ipam_uuid)

        return ipam_obj, ipam_subnets

    def _is_ipam_exists(self, vn_obj, ipam_fq_name):
        curr_ipam_refs = vn_obj.get_network_ipam_refs()
        if curr_ipam_refs:
            for ipam_ref in curr_ipam_refs:
                if ipam_fq_name == ipam_ref['to']:
                    return True
        return False

    def _create_cluster_network(self, vn_name, proj_obj):
        # Check if the VN already exists.
        # If yes, update existing VN object with k8s config.
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            address_allocation_mode='user-defined-subnet-only')
        try:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        # Create Pod IPAM.
        pod_ipam_obj, pod_ipam_subnets = self._create_ipam(
            'pod-ipam', self.args.pod_subnets, proj_obj, type='flat-subnet')

        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

        # Attach Pod IPAM to cluster virtual network.
        #
        # For flat-subnets, the subnets are specified on the IPAM and
        # not on the virtual-network to IPAM link. So pass an empty
        # list of VnSubnetsType.
        if not self._is_ipam_exists(vn_obj, pod_ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(pod_ipam_obj, VnSubnetsType([]))

        #
        # Create Service IPAM.
        #
        svc_ipam_obj, svc_ipam_subnets = self._create_ipam(
            'service-ipam', self.args.service_subnets, proj_obj)

        # Attach Service IPAM to virtual-network.
        if not self._is_ipam_exists(vn_obj, svc_ipam_obj.get_fq_name()):
            vn_obj.add_network_ipam(svc_ipam_obj,
                                    VnSubnetsType(svc_ipam_subnets))

        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))

        if vn_exists:
            # Update VN.
            self.vnc_lib.virtual_network_update(vn_obj)
        else:
            # Create VN.
            self.vnc_lib.virtual_network_create(vn_obj)

        # FIP pool creation requires a vnc object. Get it.
        vn_obj = self.vnc_lib.virtual_network_read(
            fq_name=vn_obj.get_fq_name())

        # Create service floating ip pool.
        self._create_cluster_service_fip_pool(vn_obj, pod_ipam_obj)

        VirtualNetworkKM.locate(vn_obj.uuid)
        return vn_obj.uuid

    def _get_cluster_service_fip_pool_name(self, vn_name):
        """
        Return fip pool name of cluster service network.
        """
        return 'svc-fip-pool-%s' % (vn_name)

    def _get_cluster_service_fip_pool(self):
        """
        Get floating ip pool of cluster service network.
        """
        vn_obj = self._get_cluster_network()
        return FloatingIpPoolKM.find_by_name_or_uuid(
            self._get_cluster_service_fip_pool_name(vn_obj.name))

    def _create_cluster_service_fip_pool(self, vn_obj, ipam_obj):
        # Create a floating-ip-pool in cluster service network.
        #
        # Service IP's in the k8s cluster are allocated from pod
        # IPAM in the cluster network. All pods spawned in isolated
        # virtual networks will be allocated an IP from this floating-ip-
        # pool. These pods, in those isolated virtual networks, will use this
        # floating-ip for outbound traffic to services in the k8s cluster.

        # Get IPAM refs from virtual-network.
        ipam_refs = vn_obj.get_network_ipam_refs()
        svc_subnet_uuid = None
        for ipam_ref in ipam_refs:
            if ipam_ref['to'] == ipam_obj.get_fq_name():
                ipam_subnets = ipam_ref['attr'].get_ipam_subnets()
                if not ipam_subnets:
                    continue
                # We will use the first subnet in the matching IPAM.
                svc_subnet_uuid = ipam_subnets[0].get_subnet_uuid()
                break

        fip_subnets = FloatingIpPoolSubnetType(subnet_uuid=[svc_subnet_uuid])
        fip_pool_obj = FloatingIpPool(self._get_cluster_service_fip_pool_name(
            vn_obj.name),
                                      floating_ip_pool_subnets=fip_subnets,
                                      parent_obj=vn_obj)
        try:
            # Create floating ip pool for cluster service network.
            fip_pool_vnc_obj =\
                self.vnc_lib.floating_ip_pool_create(fip_pool_obj)

        except RefsExistError:
            # Floating-ip-pool exists.
            #
            # Validate that existing floating-ip-pool has the service subnet
            # uuid as one of its subnets. If not raise an exception, as the
            # floating-ip-pool cannot be created, as one with the same name but
            # different attributes exists in the system.
            fip_pool_vnc_obj = self._get_cluster_service_fip_pool()
            svc_subnet_found = False
            fip_subnets = None

            if hasattr(fip_pool_vnc_obj, 'floating_ip_pool_subnets'):
                fip_subnets = fip_pool_vnc_obj.floating_ip_pool_subnets

            if fip_subnets:
                for subnet in fip_subnets['subnet_uuid']:
                    if subnet == svc_subnet_uuid:
                        svc_subnet_found = True
                        break

            if not svc_subnet_found:
                self.logger.error("Failed to create floating-ip-pool %s for"\
                    "subnet %s. A floating-ip-pool with same name exists." %\
                    (":".join(fip_pool_vnc_obj.fq_name), svc_subnet_uuid))

        else:
            # Update local cache.
            FloatingIpPoolKM.locate(fip_pool_vnc_obj)

        return

    def _provision_cluster(self):
        self._create_project('kube-system')
        proj_obj = self._create_project(
            vnc_kube_config.cluster_default_project_name())
        self._create_cluster_network(
            vnc_kube_config.cluster_default_network_name(), proj_obj)

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid(
            vnc_kube_config.cluster_default_network_name())

    def _get_cluster_pod_ipam_fq_name(self):
        return self._cluster_pod_ipam_fq_name

    def vnc_timer(self):
        self.network_policy_mgr.network_policy_timer()
        self.ingress_mgr.ingress_timer()
        self.service_mgr.service_timer()
        self.pod_mgr.pod_timer()
        self.namespace_mgr.namespace_timer()

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                event_type = event['type']
                kind = event['object'].get('kind')
                metadata = event['object']['metadata']
                namespace = metadata.get('namespace')
                name = metadata.get('name')
                uid = metadata.get('uid')
                if kind == 'Pod':
                    self.pod_mgr.process(event)
                elif kind == 'Service':
                    self.service_mgr.process(event)
                elif kind == 'Namespace':
                    self.namespace_mgr.process(event)
                elif kind == 'NetworkPolicy':
                    self.network_policy_mgr.process(event)
                elif kind == 'Endpoints':
                    self.endpoints_mgr.process(event)
                elif kind == 'Ingress':
                    self.ingress_mgr.process(event)
                else:
                    print("$s - Event %s %s %s:%s:%s not handled" %
                          (self._name, event_type, kind, namespace, name, uid))
                    self.logger.error(
                        "%s - Event %s %s %s:%s:%s not handled" %
                        (self._name, event_type, kind, namespace, name, uid))
            except Empty:
                gevent.sleep(0)
            except Exception as e:
                string_buf = StringIO()
                cgitb_hook(file=string_buf, format="text")
                err_msg = string_buf.getvalue()
                self.logger.error("%s - %s" % (self._name, err_msg))
Example #37
0
class VncService(object):
    """"Client to handle vnc api server interactions"""

    _vnc_mesos = None

    def __init__(self, args=None, logger=None):
        self.args = args
        self.logger = logger

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        rabbitmq_cfg = mesos_args.rabbitmq_args(self.args)
        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseMM,
                                    REACTION_MAP, 'mesos_manager', rabbitmq_cfg)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

        VncService._vnc_mesos = self

    def _vnc_connect(self):
        """Retry till API server connection is up"""
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
                self.logger.info("Connected to API-server {}{}."
                                 .format(self.args.vnc_endpoint_ip,
                                         self.args.vnc_endpoint_port))
            except requests.exceptions.ConnectionError:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @classmethod
    def get_instance(cls):
        return VncService._vnc_mesos

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in DBBaseMM.get_obj_type_map().values():
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncService._vnc_mesos = None

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def add_mesos_task_and_define_network(self, obj_labels):
        """Adds task and network references to VNC db"""
        # Project creation
        vnc_project = VncProject(self.vnc_lib, self.logger)
        project_obj = vnc_project.create_project(obj_labels.domain_name,
                                                 obj_labels.project_name)
        # Network creation
        vnc_network = VncMesosNetwork(self.vnc_lib, self.logger)
        network_obj = vnc_network.create_network(project_obj,
                                                 obj_labels.networks,
                                                 obj_labels.app_subnets)
        # Register a task
        vnc_task = VncMesosTask(self.vnc_lib, self.logger)
        task_obj = vnc_task.register_task(obj_labels.task_uuid)

        # Create floating-ip, apply security groups, instance-ip and interface
        common_operations = VncMesosCommonOperations(self.vnc_lib, self.logger)
        vmi_obj = common_operations.create_vmi(project_obj, network_obj,
                                               task_obj)
        common_operations.add_security_groups(vmi_obj,
                                              obj_labels.security_groups)
        common_operations.add_floating_ip(vmi_obj, project_obj,
                                          obj_labels.floating_ips,
                                          task_obj.name)
        common_operations.create_instance_ip(task_obj.name, network_obj,
                                             vmi_obj)
        common_operations.link_task_to_vrouter(task_obj,
                                               obj_labels.cluster_name)

    def del_mesos_task_and_remove_network(self, obj_labels):
        """Deletes task and network references to VNC db"""
        task_obj = VirtualMachineMM.get(obj_labels.task_uuid)
        if not task_obj:
            self.logger.error("Delete operation: task entry does not exist")
        else:
            # Unlink from vrouter; delete instance-ip, floating-ip,
            # security groups and VMI.
            common_operations = VncMesosCommonOperations(self.vnc_lib,
                                                         self.logger)
            common_operations.unlink_task_from_vrouter(task_obj)
            for vmi_id in list(task_obj.virtual_machine_interfaces):
                vmi_obj = VirtualMachineInterfaceMM.get(vmi_id)
                if vmi_obj:
                    common_operations.remove_instance_ip(vmi_obj)
                    common_operations.remove_floating_ip(vmi_obj)
                    common_operations.remove_security_groups(vmi_obj)
                    common_operations.remove_vmi(vmi_obj)
            # Unregister a task
            vnc_task = VncMesosTask(self.vnc_lib, self.logger)
            task_obj = vnc_task.unregister_task(obj_labels.task_uuid)
            # Remove network
            vnc_network = VncMesosNetwork(self.vnc_lib, self.logger)
            vnc_network.delete_network(obj_labels.task_uuid)
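
Several of these managers (VncKubernetes, VncService) keep a module-level singleton with paired get_instance/destroy_instance classmethods so a restart or test can tear down the AMQP connection and cached DB state. A minimal sketch of that lifecycle (hypothetical Service name; the teardown steps mirror destroy_instance above):

class Service(object):
    _instance = None

    def __init__(self):
        self.rabbit = None   # would be a VncAmqpHandle
        self._db = None      # would be an object-db client
        Service._instance = self

    @classmethod
    def get_instance(cls):
        return cls._instance

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        if inst.rabbit:
            inst.rabbit.close()  # stop the AMQP consumer
        inst._db = None
        cls._instance = None
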
Example #39
0
class VncService(object):
    """"Client to handle vnc api server interactions"""
    def __init__(self, args=None, logger=None):
        self.args = args
        self.logger = logger

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseMM,
                                    REACTION_MAP,
                                    'mesos_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

    def _vnc_connect(self):
        """Retry till API server connection is up"""
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
                self.logger.info("Connected to API-server {}{}.".format(
                    self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port))
            except requests.exceptions.ConnectionError:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def add_mesos_task_and_define_network(self, obj_labels):
        """Adds task and network references to VNC db"""
        # Project creation
        vnc_project = VncProject(self.vnc_lib, self.logger)
        project_obj = vnc_project.create_project(obj_labels.domain_name,
                                                 obj_labels.project_name)
        # Network creation
        vnc_network = VncMesosNetwork(self.vnc_lib, self.logger)
        network_obj = vnc_network.create_network(project_obj,
                                                 obj_labels.networks)
        # Register a task
        vnc_task = VncMesosTask(self.vnc_lib, self.logger)
        task_obj = vnc_task.register_task(obj_labels.task_uuid)

        # Create floating-ip, apply security groups, instance-ip and interface
        common_operations = VncMesosCommonOperations(self.vnc_lib, self.logger)
        vmi_obj = common_operations.create_vmi(project_obj, network_obj,
                                               task_obj)
        common_operations.add_security_groups(vmi_obj,
                                              obj_labels.security_groups)
        common_operations.add_floating_ip(vmi_obj, project_obj,
                                          obj_labels.floating_ips,
                                          task_obj.name)
        common_operations.create_instance_ip(task_obj.name, network_obj,
                                             vmi_obj)
        common_operations.link_task_to_vrouter(task_obj,
                                               obj_labels.cluster_name)

    def del_mesos_task_and_remove_network(self, obj_labels):
        """Deletes task and network references to VNC db"""
        task_obj = VirtualMachineMM.get(obj_labels.task_uuid)
        if not task_obj:
            self.logger.error("Delete operation: task entry does not exist")
        else:
            # Unlink from vrouter; delete instance-ip, floating-ip,
            # security groups and VMI.
            common_operations = VncMesosCommonOperations(
                self.vnc_lib, self.logger)
            common_operations.unlink_task_from_vrouter(task_obj)
            for vmi_id in list(task_obj.virtual_machine_interfaces):
                vmi_obj = VirtualMachineInterfaceMM.get(vmi_id)
                if vmi_obj:
                    common_operations.remove_instance_ip(vmi_obj)
                    common_operations.remove_floating_ip(vmi_obj)
                    common_operations.remove_security_groups(vmi_obj)
                    common_operations.remove_vmi(vmi_obj)
            # Unregister a task
            vnc_task = VncMesosTask(self.vnc_lib, self.logger)
            task_obj = vnc_task.unregister_task(obj_labels.task_uuid)
            # Remove network
            vnc_network = VncMesosNetwork(self.vnc_lib, self.logger)
            vnc_network.delete_network(obj_labels.task_uuid)
Example #40
0
class VncMesos(object):
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseMM, REACTION_MAP, "mesos_manager", args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(
                    self.args.admin_user,
                    self.args.admin_password,
                    self.args.admin_tenant,
                    self.args.vnc_endpoint_ip,
                    self.args.vnc_endpoint_port,
                )
                connected = True
                self.logger.info(
                    "Connected to API-server %s:%s." % (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port)
                )
            except requests.exceptions.ConnectionError as e:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj["uuid"], obj)

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def process_q_event(self, event):
        labels = event["labels"]
        # subnet = event['ipam']['subnet'] if event['ipam'] else None

        for k, v in labels.items():
            if k == mesos_consts.MESOS_LABEL_PRIVATE_NETWORK:
                print(v)
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_NETWORK:
                print(v)
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_SUBNET:
                print(v)
            else:
                pass

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print(event)
                self.logger.info("VNC: Handle CNI Data for ContainerId: %s." % (event["cid"]))
                self.process_q_event(event)
            except Empty:
                gevent.sleep(0)

class ConfigHandler(object):
    def __init__(self, sandesh, service_id, rabbitmq_cfg, cassandra_cfg,
                 db_cls, reaction_map):
        self._sandesh = sandesh
        self._logger = AnalyticsLogger(self._sandesh)
        self._service_id = service_id
        self._rabbitmq_cfg = rabbitmq_cfg
        self._cassandra_cfg = cassandra_cfg
        self._db_cls = db_cls
        self._reaction_map = reaction_map
        self._vnc_amqp = None
        self._vnc_db = None

    # end __init__

    # Public methods

    def start(self):
        # Connect to rabbitmq for config update notifications
        rabbitmq_qname = self._service_id
        while True:
            try:
                self._vnc_amqp = VncAmqpHandle(self._sandesh, self._logger,
                                               self._db_cls,
                                               self._reaction_map,
                                               self._service_id,
                                               self._rabbitmq_cfg)
                self._vnc_amqp.establish()
            except Exception as e:
                template = 'Exception {0} connecting to Rabbitmq. Arguments:\n{1!r}'
                msg = template.format(type(e).__name__, e.args)
                self._logger.error('%s: %s' % (msg, traceback.format_exc()))
                gevent.sleep(2)
            else:
                break
        cassandra_credential = {
            'username': self._cassandra_cfg['user'],
            'password': self._cassandra_cfg['password']
        }
        if not all(cassandra_credential.values()):
            cassandra_credential = None
        try:
            self._vnc_db = VncObjectDBClient(self._cassandra_cfg['servers'],
                                             self._cassandra_cfg['cluster_id'],
                                             logger=self._logger.log,
                                             credential=cassandra_credential)
        except Exception as e:
            template = 'Exception {0} connecting to Config DB. Arguments:\n{1!r}'
            msg = template.format(type(e).__name__, e.args)
            self._logger.error('%s: %s' % (msg, traceback.format_exc()))
            exit(2)
        self._db_cls.init(self, self._logger, self._vnc_db)
        self._sync_config_db()

    # end start

    def stop(self):
        self._vnc_amqp.close()
        self._vnc_db = None
        self._db_cls.clear()

    # end stop

    def obj_to_dict(self, obj):
        def to_json(obj):
            if hasattr(obj, 'serialize_to_json'):
                return obj.serialize_to_json()
            else:
                return dict((k, v) for k, v in obj.__dict__.items())

        return json.loads(json.dumps(obj, default=to_json))

    # end obj_to_dict

    # Private methods

    def _fqname_to_str(self, fq_name):
        return ':'.join(fq_name)

    # end _fqname_to_str

    def _sync_config_db(self):
        for cls in self._db_cls.get_obj_type_map().values():
            cls.reinit()
        self._handle_config_sync()
        self._vnc_amqp._db_resync_done.set()

    # end _sync_config_db

    # Should be overridden by the derived class
    def _handle_config_sync(self):
        pass
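
A derived class plugs in at _handle_config_sync, which runs after every object
type has been re-read from the config DB. A minimal sketch of such a subclass
and its lifecycle (the class name and the walk over the cached object map are
illustrative assumptions, not part of the example above):

class MyConfigHandler(ConfigHandler):
    def _handle_config_sync(self):
        # Walk the freshly re-initialized config cache; a real handler
        # would rebuild its derived state here instead of logging.
        for cls in self._db_cls.get_obj_type_map().values():
            for obj in cls.values():
                self._logger.debug('synced %s' %
                                   self._fqname_to_str(obj.fq_name))

handler = MyConfigHandler(sandesh, 'my-service', rabbitmq_cfg,
                          cassandra_cfg, DBBaseSM, REACTION_MAP)
handler.start()   # retries rabbitmq forever, exits on config DB failure
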
Example #42
class SvcMonitor(object):

    def __init__(self, sm_logger=None, args=None):
        self._args = args
        # initialize logger
        if sm_logger is not None:
            self.logger = sm_logger
        else:
            # Initialize logger
            self.logger = ServiceMonitorLogger(args)

        # init object_db
        self._object_db = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._object_db)

        # init rabbit connection
        rabbitmq_cfg = get_rabbitmq_cfg(args)
        self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                DBBaseSM, REACTION_MAP, 'svc_monitor', rabbitmq_cfg,
                self._args.trace_file)
        self.rabbit.establish()

    def post_init(self, vnc_lib, args=None):
        # api server
        self._vnc_lib = vnc_lib

        try:
            self._nova_client = importutils.import_object(
                'svc_monitor.nova_client.ServiceMonitorNovaClient',
                self._args, self.logger)
        except Exception:
            self._nova_client = None

        # agent manager
        self._agent_manager = AgentManager()

        # load vrouter scheduler
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver,
            self._vnc_lib, self._nova_client,
            None, self.logger, self._args)

        # load virtual machine instance manager
        self.vm_manager = importutils.import_object(
            'svc_monitor.virtual_machine_manager.VirtualMachineManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load network namespace instance manager
        self.netns_manager = importutils.import_object(
            'svc_monitor.instance_manager.NetworkNamespaceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load a vrouter instance manager
        self.vrouter_manager = importutils.import_object(
            'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client,
            self._agent_manager, self._args)

        # load PNF instance manager
        self.ps_manager = importutils.import_object(
            'svc_monitor.physical_service_manager.PhysicalServiceManager',
            self._vnc_lib, self._object_db, self.logger,
            self.vrouter_scheduler, self._nova_client,
            self._agent_manager, self._args)

        # load a loadbalancer agent
        self.loadbalancer_agent = LoadbalancerAgent(
            self, self._vnc_lib,
            self._object_db, self._args)
        self._agent_manager.register_agent(self.loadbalancer_agent)

        # load a snat agent
        self.snat_agent = SNATAgent(self, self._vnc_lib,
                                    self._object_db, self._args,
                                    ServiceMonitorModuleLogger(self.logger))
        self._agent_manager.register_agent(self.snat_agent)

        # load port tuple agent
        self.port_tuple_agent = PortTupleAgent(self, self._vnc_lib,
            self._object_db, self._args, ServiceMonitorModuleLogger(self.logger))
        self._agent_manager.register_agent(self.port_tuple_agent)

        # Read the object_db and populate the entry in ServiceMonitor DB
        self.sync_sm()

        # create default analyzer template
        self._create_default_template('analyzer-template', 'analyzer',
                                      flavor='m1.medium',
                                      image_name='analyzer')
        # create default NAT template
        self._create_default_template('nat-template', 'firewall',
                                      svc_mode='in-network-nat',
                                      image_name='analyzer',
                                      flavor='m1.medium')
        # create default netns SNAT template
        self._create_default_template('netns-snat-template', 'source-nat',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        # create default loadbalancer template
        self._create_default_template('haproxy-loadbalancer-template',
                                      'loadbalancer',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        self._create_default_template('docker-template', 'firewall',
                                      svc_mode='transparent',
                                      image_name="ubuntu",
                                      hypervisor_type='vrouter-instance',
                                      vrouter_instance_type='docker',
                                      instance_data={
                                          "command": "/bin/bash"
                                      })

        # upgrade handling
        self.upgrade()

        # check services
        self.vrouter_scheduler.vrouters_running()
        self.launch_services()

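        # Unblock the AMQP event handler: initial DB resync is done.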
        self.rabbit._db_resync_done.set()

    def _upgrade_instance_ip(self, vm):
        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceSM.get(vmi_id)
            if not vmi:
                continue

            for iip_id in vmi.instance_ips:
                iip = InstanceIpSM.get(iip_id)
                if not iip or iip.service_instance_ip:
                    continue
                iip_obj = InstanceIp()
                iip_obj.name = iip.name
                iip_obj.uuid = iip.uuid
                iip_obj.set_service_instance_ip(True)
                try:
                    self._vnc_lib.instance_ip_update(iip_obj)
                except NoIdError:
                    self.logger.error("upgrade instance ip to service ip failed %s" % (iip.name))
                    continue

    def _upgrade_auto_policy(self, si, st):
        if st.name != 'netns-snat-template':
            return
        if not si.params['auto_policy']:
            return

        si_obj = ServiceInstance()
        si_obj.uuid = si.uuid
        si_obj.fq_name = si.fq_name
        si_props = ServiceInstanceType(**si.params)
        si_props.set_auto_policy(False)
        si_obj.set_service_instance_properties(si_props)
        try:
            self._vnc_lib.service_instance_update(si_obj)
            self.logger.notice("snat policy upgraded for %s" % (si.name))
        except NoIdError:
            self.logger.error("snat policy upgrade failed for %s" % (si.name))
            return

    def upgrade(self):
        for lr in LogicalRouterSM.values():
            self.snat_agent.upgrade(lr)

        for si in ServiceInstanceSM.values():
            st = ServiceTemplateSM.get(si.service_template)
            if not st:
                continue

            self._upgrade_auto_policy(si, st)

            vm_id_list = list(si.virtual_machines)
            for vm_id in vm_id_list:
                vm = VirtualMachineSM.get(vm_id)
                if not vm:
                    continue
                self._upgrade_instance_ip(vm)
                if vm.virtualization_type:
                    continue

                try:
                    nova_vm = self._nova_client.oper('servers', 'get',
                        si.proj_name, id=vm_id)
                except nc_exc.NotFound:
                    nova_vm = None

                if nova_vm:
                    vm_name = nova_vm.name
                    vm.proj_fq_name = nova_vm.name.split('__')[0:2]
                else:
                    vm_name = vm.name

                if not vm_name.split('__')[-1].isdigit():
                    continue

                vm.virtualization_type = st.virtualization_type
                self.delete_service_instance(vm)

    def launch_services(self):
        for si in ServiceInstanceSM.values():
            self.create_service_instance(si)

    def sync_sm(self):
        # Read and Sync all DBase
        for cls in DBBaseSM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

        # Link SI and VM
        for vm in VirtualMachineSM.values():
            if vm.service_instance:
                continue
            for vmi_id in vm.virtual_machine_interfaces:
                vmi = VirtualMachineInterfaceSM.get(vmi_id)
                if not vmi:
                    continue
                self.port_delete_or_si_link(vm, vmi)

        # invoke port tuple handling
        try:
            self.port_tuple_agent.update_port_tuples()
        except Exception:
            cgitb_error_log(self)

        # Load the loadbalancer driver
        self.loadbalancer_agent.load_drivers()

        # Invoke the health monitors
        for hm in HealthMonitorSM.values():
            hm.sync()

        # Invoke the loadbalancers
        for lb in LoadbalancerSM.values():
            lb.sync()

        # Invoke the loadbalancer listeners
        for lb_listener in LoadbalancerListenerSM.values():
            lb_listener.sync()

        # Invoke the loadbalancer pools
        for lb_pool in LoadbalancerPoolSM.values():
            lb_pool.sync()

        # Audit the lb pools
        self.loadbalancer_agent.audit_lb_pools()

        # Audit the SNAT instances
        self.snat_agent.audit_snat_instances()
    # end sync_sm

    # create service template
    def _create_default_template(self, st_name, svc_type, svc_mode=None,
                                 hypervisor_type='virtual-machine',
                                 image_name=None, flavor=None, scaling=False,
                                 vrouter_instance_type=None,
                                 instance_data=None):
        domain_name = 'default-domain'
        domain_fq_name = [domain_name]
        st_fq_name = [domain_name, st_name]
        self.logger.info("Creating %s %s hypervisor %s" %
                             (domain_name, st_name, hypervisor_type))

        domain_obj = None
        for domain in DomainSM.values():
            if domain.fq_name == domain_fq_name:
                domain_obj = Domain()
                domain_obj.uuid = domain.uuid
                domain_obj.fq_name = domain_fq_name
                break
        if not domain_obj:
            self.logger.error("%s domain not found" % (domain_name))
            return

        for st in ServiceTemplateSM.values():
            if st.fq_name == st_fq_name:
                self.logger.info("%s exists uuid %s" %
                                     (st.name, str(st.uuid)))
                return

        svc_properties = ServiceTemplateType()
        svc_properties.set_service_type(svc_type)
        svc_properties.set_service_mode(svc_mode)
        svc_properties.set_service_virtualization_type(hypervisor_type)
        svc_properties.set_image_name(image_name)
        svc_properties.set_flavor(flavor)
        svc_properties.set_ordered_interfaces(True)
        svc_properties.set_service_scaling(scaling)

        # set interface list
        if svc_type == 'analyzer':
            if_list = [['left', False]]
        elif hypervisor_type == 'network-namespace':
            if_list = [['right', True], ['left', True]]
        else:
            if_list = [
                ['management', False], ['left', False], ['right', False]]

        for itf in if_list:
            if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
            if_type.set_service_interface_type(itf[0])
            svc_properties.add_interface_type(if_type)

        if vrouter_instance_type is not None:
            svc_properties.set_vrouter_instance_type(vrouter_instance_type)

        if instance_data is not None:
            svc_properties.set_instance_data(
                json.dumps(instance_data, separators=(',', ':')))

        st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
        st_obj.set_service_template_properties(svc_properties)
        try:
            st_uuid = self._vnc_lib.service_template_create(st_obj)
        except Exception as e:
            self.logger.error("%s create failed with error %s" %
                                  (st_name, str(e)))
            return

        # Create the service template in local db
        ServiceTemplateSM.locate(st_uuid)

        self.logger.info("%s created with uuid %s" %
                             (st_name, str(st_uuid)))
    # end _create_default_template

    def port_delete_or_si_link(self, vm, vmi):
        if vmi.port_tuples:
            return
        if vmi.service_instances and vmi.virtual_machine is None:
            self.vm_manager.cleanup_svc_vm_ports([vmi.uuid])
            return

        if not vm or vm.service_instance:
            return
        if not vmi.if_type:
            return

        if len(vmi.name.split('__')) < 4:
            return
        si_fq_name = vmi.name.split('__')[0:3]
        index = int(vmi.name.split('__')[3]) - 1
        for si in ServiceInstanceSM.values():
            if si.fq_name != si_fq_name:
                continue
            st = ServiceTemplateSM.get(si.service_template)
            self.vm_manager.link_si_to_vm(si, st, index, vm.uuid)
            return

    def create_service_instance(self, si):
        if si.state == 'active':
            return
        st = ServiceTemplateSM.get(si.service_template)
        if not st:
            self.logger.error("template not found for %s" %
                                  ((':').join(si.fq_name)))
            return
        if st.params and st.params.get('version', 1) == 2:
            return

        self.logger.info("Creating SI %s (%s)" %
                             ((':').join(si.fq_name), st.virtualization_type))
        try:
            if st.virtualization_type == 'virtual-machine':
                self.vm_manager.create_service(st, si)
            elif st.virtualization_type == 'network-namespace':
                self.netns_manager.create_service(st, si)
            elif st.virtualization_type == 'vrouter-instance':
                self.vrouter_manager.create_service(st, si)
            elif st.virtualization_type == 'physical-device':
                self.ps_manager.create_service(st, si)
            else:
                self.logger.error("Unknown virt type: %s" %
                                      st.virtualization_type)
        except Exception:
            cgitb_error_log(self)
        si.launch_count += 1
        self.logger.info("SI %s creation success" % (':').join(si.fq_name))

    def delete_service_instance(self, vm):
        self.logger.info("Deleting VM %s %s for SI %s" %
            ((':').join(vm.fq_name), vm.uuid, vm.service_id))

        try:
            if vm.virtualization_type == svc_info.get_vm_instance_type():
                self.vm_manager.delete_service(vm)
            elif vm.virtualization_type == svc_info.get_netns_instance_type():
                self.netns_manager.delete_service(vm)
            elif vm.virtualization_type == 'vrouter-instance':
                self.vrouter_manager.delete_service(vm)
            elif vm.virtualization_type == 'physical-device':
                self.ps_manager.delete_service(vm)
            self.logger.info("Deleted VM %s %s for SI %s" %
                ((':').join(vm.fq_name), vm.uuid, vm.service_id))
        except Exception:
            cgitb_error_log(self)

        # generate UVE
        si_fq_name = vm.display_name.split('__')[:-2]
        si_fq_str = ':'.join(si_fq_name)
        self.logger.uve_svc_instance(si_fq_str, status='DELETE',
                                     vms=[{'uuid': vm.uuid}])
        return True

    def _relaunch_service_instance(self, si):
        si.state = 'relaunch'
        self.create_service_instance(si)

    def _check_service_running(self, si):
        st = ServiceTemplateSM.get(si.service_template)
        if st.params and st.params.get('version', 1) == 2:
            return
        status = None
        if st.virtualization_type == 'virtual-machine':
            status = self.vm_manager.check_service(si)
        elif st.virtualization_type == 'network-namespace':
            status = self.netns_manager.check_service(si)
        elif st.virtualization_type == 'vrouter-instance':
            status = self.vrouter_manager.check_service(si)
        elif st.virtualization_type == 'physical-device':
            status = self.ps_manager.check_service(si)
        return status

    def delete_interface_route_table(self, irt_uuid):
        try:
            self._vnc_lib.interface_route_table_delete(id=irt_uuid)
            InterfaceRouteTableSM.delete(irt_uuid)
        except (NoIdError, RefsExistError):
            return

    def _delete_shared_vn(self, vn_uuid):
        try:
            self.logger.info("Deleting vn %s" % (vn_uuid))
            self._vnc_lib.virtual_network_delete(id=vn_uuid)
            VirtualNetworkSM.delete(vn_uuid)
        except (NoIdError, RefsExistError):
            pass

    @staticmethod
    def reset():
        for cls in DBBaseSM.get_obj_type_map().values():
            cls.reset()

    def sighup_handler(self):
        if self._conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read(self._conf_file)
            if 'DEFAULTS' in config.sections():
                try:
                    collectors = config.get('DEFAULTS', 'collectors')
                    if isinstance(collectors, str):
                        collectors = collectors.split()
                        new_chksum = hashlib.md5("".join(collectors)).hexdigest()
                        if new_chksum != self._chksum:
                            self._chksum = new_chksum
                            config.random_collectors = random.sample(collectors, len(collectors))
                        # Reconnect to achieve load-balance irrespective of list
                        self.logger.sandesh_reconfig_collectors(config)
                except ConfigParser.NoOptionError:
                    pass
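
sighup_handler only takes effect once it is registered for SIGHUP, and it
reads _conf_file and _chksum, neither of which is set in the __init__ shown,
so both must be initialized elsewhere. One way to wire it up (a sketch; the
conf_file attribute on args is an assumption):

import signal

monitor = SvcMonitor(args=args)
monitor._conf_file = args.conf_file   # assumed to carry the config path
monitor._chksum = ''
# Re-read the collector list from the config file on SIGHUP.
signal.signal(signal.SIGHUP, lambda signum, frame: monitor.sighup_handler())
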
Example #43
class VncMesos(object):
    def __init__(self, args=None, logger=None, q=None):
        self.args = args
        self.logger = logger
        self.q = q

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.MesosNetworkManagerDB(self.args, self.logger)
        DBBaseMM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseMM,
                                    REACTION_MAP,
                                    'mesos_manager',
                                    args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_sm()
        self.rabbit._db_resync_done.set()

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(self.args.admin_user,
                                 self.args.admin_password,
                                 self.args.admin_tenant,
                                 self.args.vnc_endpoint_ip,
                                 self.args.vnc_endpoint_port)
                connected = True
                self.logger.info(
                    "Connected to API-server %s:%s." %
                    (self.args.vnc_endpoint_ip, self.args.vnc_endpoint_port))
            except requests.exceptions.ConnectionError:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_sm(self):
        for cls in DBBaseMM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)

    @staticmethod
    def reset():
        for cls in DBBaseMM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = ['default-domain', project_name]
        proj_obj = Project(name=project_name, fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectMM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnet, proj_obj):
        ipam_subnets = []
        pfx, pfx_len = subnet.split('/')
        ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
        ipam_subnets.append(ipam_subnet)
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)
        try:
            self.vnc_lib.network_ipam_create(ipam_obj)
        except RefsExistError:
            vn_obj = self.vnc_lib.network_ipam_read(
                fq_name=ipam_obj.get_fq_name())
        return ipam_obj, ipam_subnets

    def _create_network(self, labels, mesos_proj_obj):
        vn_obj = VirtualNetwork(
            name=labels['network'],
            parent_obj=mesos_proj_obj,
            address_allocation_mode='user-defined-subnet-only')
        ipam_obj, ipam_subnets = self._create_ipam(labels['public'],
                                                   labels['public_subnet'],
                                                   mesos_proj_obj)
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(ipam_subnets))
        vn_obj.set_virtual_network_properties(
            VirtualNetworkType(forwarding_mode='l3'))
        try:
            self.vnc_lib.virtual_network_create(vn_obj)
        except RefsExistError:
            vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vn_obj.get_fq_name())
        return vn_obj

    def _create_vm(self, pod_id, pod_name):
        vm_obj = VirtualMachine(name=pod_name)
        vm_obj.uuid = pod_id
        try:
            self.vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self.vnc_lib.virtual_machine_read(id=pod_id)
        vm = VirtualMachineMM.locate(vm_obj.uuid)
        return vm_obj

    def _create_vmi(self, pod_name, pod_namespace, vm_obj, vn_obj):
        proj_fq_name = ['default-domain', pod_namespace]
        proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_obj = VirtualMachineInterface(name=pod_name, parent_obj=proj_obj)
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        try:
            self.vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            self.vnc_lib.virtual_machine_interface_update(vmi_obj)
        VirtualMachineInterfaceMM.locate(vmi_obj.uuid)
        return vmi_obj

    def _create_iip(self, pod_name, vn_obj, vmi_obj):
        iip_obj = InstanceIp(name=pod_name)
        iip_obj.add_virtual_network(vn_obj)
        iip_obj.add_virtual_machine_interface(vmi_obj)
        try:
            self.vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self.vnc_lib.instance_ip_update(iip_obj)
        InstanceIpMM.locate(iip_obj.uuid)
        return iip_obj

    def _link_vm_to_node(self, vm_obj, pod_node):
        vrouter_fq_name = ['default-global-system-config', pod_node]
        try:
            vrouter_obj = self.vnc_lib.virtual_router_read(
                fq_name=vrouter_fq_name)
        except Exception:
            return

        self.vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
                                'virtual-machine', vm_obj.uuid, None, 'ADD')
        vm = VirtualMachineMM.get(vm_obj.uuid)
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _setup_all(self, labels, pod_name, pod_id):
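        # NOTE: the namespace and node name below are hard-coded
        # placeholders in this example.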
        pod_namespace = 'meso-system'
        #pod_name = pod_id
        pod_node = 'a2s33'
        mesos_proj_obj = self._create_project(pod_namespace)
        vn_obj = self._create_network(labels, mesos_proj_obj)
        vm_obj = self._create_vm(pod_id, pod_name)
        vmi_obj = self._create_vmi(pod_name, pod_namespace, vm_obj, vn_obj)
        self._create_iip(pod_name, vn_obj, vmi_obj)
        self._link_vm_to_node(vm_obj, pod_node)

    def process_q_event(self, event):
        labels = event['labels']
        #subnet = event['ipam']['subnet'] if event['ipam'] else None
        for k, v in labels.items():
            if k == mesos_consts.MESOS_LABEL_PRIVATE_NETWORK:
                print(v)
                print("Cid for this is %s" % event['cid'])
                self._setup_all(labels, str(event['cid']), event['cid'])
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_NETWORK:
                print(v)
            elif k == mesos_consts.MESOS_LABEL_PUBLIC_SUBNET:
                print(v)
            else:
                pass

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print(event)
                self.logger.info("VNC: Handle CNI Data for ContainerId: %s." %
                                 (event['cid']))
                self.process_q_event(event)
            except Empty:
                gevent.sleep(0)
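
vnc_process blocks on the shared queue, so it normally runs in its own
greenlet while another component (the CNI subscriber, say) feeds events into
q. A minimal usage sketch, assuming args and logger are already built; the
event shape follows process_q_event above and the label value is made up:

import gevent
from gevent.queue import Queue

q = Queue()
vnc = VncMesos(args=args, logger=logger, q=q)
gevent.spawn(vnc.vnc_process)
q.put({'cid': 'container-1234',
       'labels': {mesos_consts.MESOS_LABEL_PRIVATE_NETWORK: 'net-blue'}})
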
Example #44
class VncKubernetes(object):
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # HACK ALERT.
        # Till we have an alternate means to get config objects, we will
        # directly connect to cassandra. Such a persistent connection is
        # discouraged, but is the only option we have for now.
        #
        # Disable flow timeout on this connection, so the flow persists.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger,
                                    DBBaseKM,
                                    REACTION_MAP,
                                    'kube_manager',
                                    args=self.args)
        self.rabbit.establish()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()

        # Update common config.
        self.vnc_kube_config.update(
            label_cache=self.label_cache,
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_fip_pool=self._get_cluster_service_fip_pool())

        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress')
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')

        VncKubernetes._vnc_kubernetes = self
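
The final assignment stores the instance on the class itself, a simple
singleton: other kube_manager modules can reach the running manager through
VncKubernetes._vnc_kubernetes instead of having it threaded through every
constructor.
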
Example #45
class VncKubernetes(object):
    def __init__(self, args=None, logger=None, q=None, kube=None):
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # init rabbit connection
        self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP, "kube_manager", args=self.args)
        self.rabbit.establish()

        # sync api server db in local cache
        self._sync_km()
        self.rabbit._db_resync_done.set()

        # provision cluster
        self._provision_cluster()

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.namespace_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_namespace.VncNamespace",
            vnc_lib=self.vnc_lib,
            cluster_pod_subnets=self.args.pod_subnets,
        )
        self.service_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_service.VncService", self.vnc_lib, self.label_cache, self.args, self.logger, self.kube
        )
        self.pod_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_pod.VncPod",
            self.vnc_lib,
            self.label_cache,
            self.service_mgr,
            svc_fip_pool=self._get_cluster_service_fip_pool(),
        )
        self.network_policy_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_network_policy.VncNetworkPolicy", self.vnc_lib, self.label_cache, self.logger
        )
        self.endpoints_mgr = importutils.import_object(
            "kube_manager.vnc.vnc_endpoints.VncEndpoints", self.vnc_lib, self.label_cache
        )

    def _vnc_connect(self):
        # Retry till API server connection is up
        connected = False
        while not connected:
            try:
                vnc_lib = VncApi(
                    self.args.admin_user,
                    self.args.admin_password,
                    self.args.admin_tenant,
                    self.args.vnc_endpoint_ip,
                    self.args.vnc_endpoint_port,
                )
                connected = True
            except requests.exceptions.ConnectionError:
                time.sleep(3)
            except ResourceExhaustionError:
                time.sleep(3)
        return vnc_lib

    def _sync_km(self):
        for cls in DBBaseKM.get_obj_type_map().values():
            for obj in cls.list_obj():
                cls.locate(obj["uuid"], obj)

    @staticmethod
    def reset():
        for cls in DBBaseKM.get_obj_type_map().values():
            cls.reset()

    def _create_project(self, project_name):
        proj_fq_name = ["default-domain", project_name]
        proj_obj = Project(name=project_name, fq_name=proj_fq_name)
        try:
            self.vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
        ProjectKM.locate(proj_obj.uuid)
        return proj_obj

    def _create_ipam(self, ipam_name, subnets, proj_obj, subnet_method="user-defined-subnet"):
        ipam_subnets = []
        for subnet in subnets:
            pfx, pfx_len = subnet.split("/")
            ipam_subnet = IpamSubnetType(subnet=SubnetType(pfx, int(pfx_len)))
            ipam_subnets.append(ipam_subnet)
        ipam_obj = NetworkIpam(name=ipam_name, parent_obj=proj_obj)

        if subnet_method == "flat-subnet":
            ipam_obj.set_ipam_subnet_method("flat-subnet")
            ipam_obj.set_ipam_subnets(IpamSubnets(ipam_subnets))

        try:
            self.vnc_lib.network_ipam_create(ipam_obj)
        except RefsExistError:
            ipam_obj = self.vnc_lib.network_ipam_read(fq_name=ipam_obj.get_fq_name())
        return ipam_obj, ipam_subnets

    def _create_cluster_network(self, vn_name, proj_obj):
        vn_obj = VirtualNetwork(name=vn_name, parent_obj=proj_obj, address_allocation_mode="user-defined-subnet-only")

        ipam_obj, ipam_subnets = self._create_ipam("pod-ipam", self.args.pod_subnets, proj_obj)
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(ipam_subnets))

        ipam_obj, ipam_subnets = self._create_ipam(
            "service-ipam", self.args.service_subnets, proj_obj, subnet_method="flat-subnet"
        )
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        vn_obj.set_virtual_network_properties(VirtualNetworkType(forwarding_mode="l3"))
        try:
            self.vnc_lib.virtual_network_create(vn_obj)
        except RefsExistError:
            vn_obj = self.vnc_lib.virtual_network_read(fq_name=vn_obj.get_fq_name())

        VirtualNetworkKM.locate(vn_obj.uuid)

        # Create service floating ip pool.
        self._create_cluster_service_fip_pool(vn_obj)

        return vn_obj.uuid

    def _get_cluster_service_fip_pool_name(self, vn_name):
        """
        Return fip pool name of cluster service network.
        """
        return "svc-fip-pool-%s" % (vn_name)

    def _get_cluster_service_fip_pool(self):
        """
        Get floating ip pool of cluster service network.
        """
        vn_obj = self._get_cluster_network()
        return FloatingIpPoolKM.find_by_name_or_uuid(self._get_cluster_service_fip_pool_name(vn_obj.name))

    def _create_cluster_service_fip_pool(self, vn_obj):
        # Create a floating IP pool in cluster service network.
        #
        # Service IP's in the cluster are allocated from service
        # IPAM in the cluster network. All pods spawned in isolated
        # virtual networks will be allocated an IP from this floating IP
        # pool. These pods in those isolated virtual networks will use this
        # floating IP for traffic to services in the cluster.
        fip_pool_obj = FloatingIpPool(self._get_cluster_service_fip_pool_name(vn_obj.name), parent_obj=vn_obj)
        try:
            # Create floating ip pool for cluster service network.
            self.vnc_lib.floating_ip_pool_create(fip_pool_obj)
        except Exception:
            self.logger.error("Service floating-IP-pool create failed for Virtual Network[%s]" % vn_obj.name)
            return None
        else:
            # Update local cache.
            FloatingIpPoolKM.locate(fip_pool_obj.uuid)

        return

    def _provision_cluster(self):
        self._create_project("kube-system")
        proj_obj = self._create_project("default")
        self._create_cluster_network("cluster-network", proj_obj)

    def _get_cluster_network(self):
        return VirtualNetworkKM.find_by_name_or_uuid("cluster-network")

    def vnc_process(self):
        while True:
            try:
                event = self.q.get()
                print(
                    "\tGot %s %s %s:%s"
                    % (
                        event["type"],
                        event["object"].get("kind"),
                        event["object"]["metadata"].get("namespace"),
                        event["object"]["metadata"].get("name"),
                    )
                )
                if event["object"].get("kind") == "Pod":
                    self.pod_mgr.process(event)
                    self.network_policy_mgr.process(event)
                elif event["object"].get("kind") == "Service":
                    self.service_mgr.process(event)
                elif event["object"].get("kind") == "Namespace":
                    self.namespace_mgr.process(event)
                elif event["object"].get("kind") == "NetworkPolicy":
                    self.network_policy_mgr.process(event)
                elif event["object"].get("kind") == "Endpoints":
                    self.endpoints_mgr.process(event)
            except Empty:
                gevent.sleep(0)
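
The floating IP pool created by _create_cluster_service_fip_pool is consumed
when a pod in an isolated network needs to reach cluster services. A hedged
sketch of allocating one such floating IP with the vnc_api objects used
throughout these examples (the helper name and the pod VMI lookup are
assumptions):

from vnc_api.vnc_api import FloatingIp

def allocate_service_fip(vnc_lib, fip_pool_obj, vmi_obj, name):
    # Allocate a floating IP from the cluster service FIP pool and bind
    # it to the pod interface, mirroring the create-or-read
    # RefsExistError pattern used above.
    fip_obj = FloatingIp(name=name, parent_obj=fip_pool_obj)
    fip_obj.add_virtual_machine_interface(vmi_obj)
    try:
        vnc_lib.floating_ip_create(fip_obj)
    except RefsExistError:
        fip_obj = vnc_lib.floating_ip_read(fq_name=fip_obj.get_fq_name())
    return fip_obj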