def __init__(self, args=None, logger=None, q=None):
    """Bootstrap the kube network manager.

    Connects to the VNC API server, initializes DB and rabbit access,
    syncs the API server DB into the local cache, provisions the
    cluster, then instantiates one handler per watched k8s resource.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param q: event queue shared with the kubernetes monitors.
    """
    self.args = args
    self.logger = logger
    self.q = q
    # init vnc connection
    self.vnc_lib = self._vnc_connect()
    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)
    # init rabbit connection
    self.rabbit = rabbit.RabbitConnection(self.logger, self.args)
    # sync api server db in local cache (must precede provisioning so
    # existing objects are known locally)
    self._sync_sm()
    # provision cluster
    self._provision_cluster()
    # handle events: one manager object per kubernetes resource type
    self.label_cache = label_cache.LabelCache()
    self.namespace_mgr = importutils.import_object(
        'vnc.vnc_namespace.VncNamespace', self.vnc_lib)
    self.service_mgr = importutils.import_object(
        'vnc.vnc_service.VncService', self.vnc_lib, self.label_cache)
    self.pod_mgr = importutils.import_object(
        'vnc.vnc_pod.VncPod', self.vnc_lib, self.label_cache,
        self.service_mgr)
    self.network_policy_mgr = importutils.import_object(
        'vnc.vnc_network_policy.VncNetworkPolicy', self.vnc_lib)
def __init__(self, vnc_lib=None, label_cache=None, args=None, logger=None):
    """Initialize the service handler.

    Caches kubernetes API server coordinates, decides whether the
    link-local service should be created, and wires up the four
    loadbalancer helper managers.

    :param vnc_lib: VNC API client handle.
    :param label_cache: shared label cache.
    :param args: parsed configuration options.
    :param logger: manager logger instance.
    """
    self._vnc_lib = vnc_lib
    self._label_cache = label_cache
    self.logger = logger

    # Cache kubernetes API server params.
    self._kubernetes_api_secure_ip = args.kubernetes_api_secure_ip
    self._kubernetes_api_secure_port = int(args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = args.kubernetes_service_name

    # Config knob to control enable/disable of link local service.
    if args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    # BUG FIX: the original tested the IP twice and never validated the
    # port; check the port in the second clause.
    if not self._kubernetes_api_secure_ip or\
            not self._kubernetes_api_secure_port:
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager', vnc_lib)
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager', vnc_lib)
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager', vnc_lib)
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager', vnc_lib)
def post_init(self, vnc_lib, args=None):
    """Second-stage initialization run once the API server is reachable.

    Creates the default service templates, then loads the nova client,
    the vrouter scheduler and the per-hypervisor instance managers.

    :param vnc_lib: connected VNC API client handle.
    :param args: unused here; kept for interface compatibility.
    """
    # api server
    self._vnc_lib = vnc_lib
    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # default docker firewall template (transparent mode)
    self._create_default_template('docker-template', 'firewall',
                                  svc_mode='transparent',
                                  image_name="ubuntu",
                                  hypervisor_type='vrouter-instance',
                                  vrouter_instance_type='docker',
                                  instance_data={"command": "/bin/bash"})
    self._nova_client = importutils.import_object(
        'svc_monitor.nova_client.ServiceMonitorNovaClient', self._args)
    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._nova_client, self._args)
    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.virtual_machine_manager.VirtualMachineManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
    # load a vrouter instance manager
    self.vrouter_manager = importutils.import_object(
        'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
def post_init(self, vnc_lib, args=None):
    """Second-stage initialization run once the API server is reachable.

    Creates the default service templates, then loads the nova client,
    the vrouter scheduler and the per-hypervisor instance managers.

    :param vnc_lib: connected VNC API client handle.
    :param args: unused here; kept for interface compatibility.
    """
    # api server
    self._vnc_lib = vnc_lib
    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # default docker firewall template (transparent mode)
    self._create_default_template('docker-template', 'firewall',
                                  svc_mode='transparent',
                                  image_name="ubuntu",
                                  hypervisor_type='vrouter-instance',
                                  vrouter_instance_type='docker',
                                  instance_data={
                                      "command": "/bin/bash"
                                  })
    self._nova_client = importutils.import_object(
        'svc_monitor.nova_client.ServiceMonitorNovaClient', self._args)
    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._nova_client, self._args)
    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.virtual_machine_manager.VirtualMachineManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
    # load a vrouter instance manager
    self.vrouter_manager = importutils.import_object(
        'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._nova_client, self._args)
def __init__(self):
    """Pull shared singletons from vnc_kube_config and create the
    loadbalancer pool/member helper managers."""
    self._name = type(self).__name__
    # Shared handles published by the kube config singleton.
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    # Instantiate helper managers by dotted path.
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_pool_mgr = importutils.import_object(pool_cls_path)
    self.service_lb_member_mgr = importutils.import_object(member_cls_path)
def __init__(self):
    """Register as the 'Endpoint' event handler and wire up the
    loadbalancer pool/member helper managers."""
    super(VncEndpoints, self).__init__('Endpoint')
    self._name = type(self).__name__
    # Shared handles published by the kube config singleton.
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    # Instantiate helper managers by dotted path.
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_pool_mgr = importutils.import_object(pool_cls_path)
    self.service_lb_member_mgr = importutils.import_object(member_cls_path)
def __init__(self, vnc_lib=None, logger=None, kube=None):
    """Store the injected handles and create the loadbalancer
    pool/member helper managers.

    :param vnc_lib: VNC API client handle.
    :param logger: manager logger instance.
    :param kube: kubernetes API client handle.
    """
    self._vnc_lib = vnc_lib
    self._logger = logger
    self._kube = kube
    # Instantiate helper managers by dotted path; each gets the same
    # vnc_lib/logger pair this handler was constructed with.
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_pool_mgr = importutils.import_object(
        pool_cls_path, vnc_lib, logger)
    self.service_lb_member_mgr = importutils.import_object(
        member_cls_path, vnc_lib, logger)
def __init__(self, vnc_lib=None, label_cache=None):
    """Store the injected handles and create the four loadbalancer
    helper managers (LB, listener, pool, member).

    :param vnc_lib: VNC API client handle.
    :param label_cache: shared label cache.
    """
    self._vnc_lib = vnc_lib
    self._label_cache = label_cache
    # Instantiate each helper manager by dotted path with vnc_lib.
    lb_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbManager'
    listener_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbListenerManager'
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_mgr = importutils.import_object(lb_cls_path, vnc_lib)
    self.service_ll_mgr = importutils.import_object(listener_cls_path, vnc_lib)
    self.service_lb_pool_mgr = importutils.import_object(pool_cls_path, vnc_lib)
    self.service_lb_member_mgr = importutils.import_object(
        member_cls_path, vnc_lib)
def __init__(self):
    """Register as the 'Endpoint' event handler, capture shared config
    handles and label cache, and create the loadbalancer pool/member
    helper managers."""
    super(VncEndpoints, self).__init__('Endpoint')
    self._name = type(self).__name__
    # Shared handles published by the kube config singleton.
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    self._labels = XLabelCache('Endpoint')
    self._args = vnc_kube_config.args()
    # Instantiate helper managers by dotted path.
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_pool_mgr = importutils.import_object(pool_cls_path)
    self.service_lb_member_mgr = importutils.import_object(member_cls_path)
def post_init(self, vnc_lib, args=None):
    """Second-stage initialization run once the API server is reachable.

    Creates the default service templates, then loads the vrouter
    scheduler and the per-hypervisor instance managers.

    :param vnc_lib: connected VNC API client handle.
    :param args: unused here; kept for interface compatibility.
    """
    # api server
    self._vnc_lib = vnc_lib
    # create default analyzer template
    self._create_default_template("analyzer-template", "analyzer",
                                  flavor="m1.medium",
                                  image_name="analyzer")
    # create default NAT template
    self._create_default_template(
        "nat-template", "firewall", svc_mode="in-network-nat",
        image_name="analyzer", flavor="m1.medium"
    )
    # create default netns SNAT template
    self._create_default_template(
        "netns-snat-template", "source-nat", svc_mode="in-network-nat",
        hypervisor_type="network-namespace", scaling=True,
    )
    # create default loadbalancer template
    self._create_default_template(
        "haproxy-loadbalancer-template", "loadbalancer",
        svc_mode="in-network-nat", hypervisor_type="network-namespace",
        scaling=True,
    )
    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver, self._vnc_lib, self._args
    )
    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        "svc_monitor.instance_manager.VirtualMachineManager",
        self._vnc_lib, self.db, self.logger, self.vrouter_scheduler,
        self._args,
    )
    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        "svc_monitor.instance_manager.NetworkNamespaceManager",
        self._vnc_lib, self.db, self.logger, self.vrouter_scheduler,
        self._args,
    )
def __init__(self, ingress_mgr):
    """Initialize the 'Service' event handler.

    Captures shared config handles, caches kubernetes API server
    parameters, decides whether a link-local service should be created
    and wires up the loadbalancer helper managers.

    :param ingress_mgr: ingress manager this handler collaborates with.
    """
    self._k8s_event_type = 'Service'
    super(VncService,self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._ingress_mgr = ingress_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache(self._k8s_event_type)
    self._labels.reset_resource()
    self._args = vnc_kube_config.args()
    self.logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self.kube = vnc_kube_config.kube()
    self._fip_pool_obj = None

    # Cache kubernetes API server params.
    self._kubernetes_api_server = self._args.kubernetes_api_server
    self._kubernetes_api_secure_port =\
        int(self._args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = self._args.kubernetes_service_name

    # Config knob to control enable/disable of link local service.
    if self._args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    if not self._kubernetes_api_server:
        self._create_linklocal = False
    elif vnc_kube_config.is_cluster_network_configured() and\
            DBBaseKM.is_nested():
        # In nested mode, if cluster network is configured, then the k8s api
        # server is in the same network as the k8s cluster. So there is no
        # need for link local.
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager')
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def __init__(self, ingress_mgr):
    """Initialize the 'Service' event handler.

    Captures shared config handles, caches kubernetes API server
    parameters, decides whether a link-local service should be created
    and wires up the loadbalancer helper managers.

    :param ingress_mgr: ingress manager this handler collaborates with.
    """
    self._k8s_event_type = 'Service'
    super(VncService, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._ingress_mgr = ingress_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache(self._k8s_event_type)
    self._labels.reset_resource()
    self._args = vnc_kube_config.args()
    self.logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self.kube = vnc_kube_config.kube()
    self._fip_pool_obj = None

    # Cache kubernetes API server params.
    self._kubernetes_api_server = self._args.kubernetes_api_server
    self._kubernetes_api_secure_port =\
        int(self._args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = self._args.kubernetes_service_name

    # Config knob to control enable/disable of link local service.
    if self._args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    if not self._kubernetes_api_server:
        self._create_linklocal = False
    elif vnc_kube_config.is_cluster_network_configured() and\
            DBBaseKM.is_nested():
        # In nested mode, if cluster network is configured, then the k8s api
        # server is in the same network as the k8s cluster. So there is no
        # need for link local.
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager')
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def sanitize_resources(self):
    """Build the ordered map of resource loaders and warn about any
    requested resource types that are not supported.

    Populates ``self._resource_map`` with one loader object per
    supported resource type, each configured with the DB/ZK handles and
    the requested amounts from the resource-distribution config.
    """
    # Typo fix in log message: was "Santizing".
    logger.debug("Sanitizing resources distribution")
    self._resource_map = OrderedDict()
    for resource_type in self._SUPPORTED_RESOURCES:
        # e.g. 'virtual-network' -> contrail_db_loader.resources.
        # virtual_network.VirtualNetwork
        object_path = 'contrail_db_loader.resources.%s.%s' %\
            (resource_type.replace('-', '_'), camel_case(resource_type))
        kwargs = {
            'db_manager': self._cassandra_db,
            'batch_size': self._cassandra_batch_size,
            'zk_client': self._zk_client,
            'project_amount': self._resource_distribution.get('project', 0),
            'amount_per_project': self._resource_distribution.get(
                resource_type, 0),
        }
        self._resource_map[resource_type] = import_object(
            object_path, **kwargs)
    # Anything asked for in the distribution but not supported is only
    # warned about, never loaded.
    resources_not_supported = (set(self._resource_distribution.keys()) -
                               set(self._SUPPORTED_RESOURCES))
    if resources_not_supported:
        logger.warning('Loading resources %s are not supported' %
                       ', '.join(resources_not_supported))
def __init__(self, args=None, logger=None, queue=None, sync_queue=None):
    """Bootstrap the mesos network manager.

    Connects to the VNC API server, caches common config, initializes
    DB access, syncs the API server DB into the local cache, provisions
    the cluster and records the IPAM fq-names in the shared config.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param queue: event queue shared with the mesos monitors.
    :param sync_queue: queue used for synchronous responses.
    """
    self.args = args
    self.logger = logger
    self.queue = queue
    self.sync_queue = sync_queue
    self._cluster_pod_task_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None
    # Initialize vnc connection.
    self.vnc_lib = self._vnc_connect()
    # Cache common config.
    self.vnc_mesos_config = vnc_mesos_config(logger=self.logger,
                                             vnc_lib=self.vnc_lib,
                                             args=self.args,
                                             queue=self.queue,
                                             sync_queue=self.sync_queue)
    # init access to db
    self._db = db.MesosNetworkManagerDB(self.args, self.logger)
    DBBaseMM.init(self, self.logger, self._db)
    # sync api server db in local cache
    self._sync_mm()
    # provision cluster
    self._provision_cluster()
    # publish the IPAM fq-names discovered during provisioning
    self.vnc_mesos_config.update(
        cluster_pod_task_ipam_fq_name=self._get_cluster_pod_task_ipam_fq_name(),
        cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())
    self.pod_task_mgr = importutils.import_object(
        'mesos_manager.vnc.vnc_pod_task.VncPodTask')
    # record the singleton instance
    VncMesos._vnc_mesos = self
def __init__(self, args=None, logger=None, queue=None, sync_queue=None):
    """Bootstrap the mesos network manager.

    Connects to the VNC API server, caches common config, initializes
    DB access, syncs the API server DB into the local cache, provisions
    the cluster and records the IPAM fq-names in the shared config.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param queue: event queue shared with the mesos monitors.
    :param sync_queue: queue used for synchronous responses.
    """
    self.args = args
    self.logger = logger
    self.queue = queue
    self.sync_queue = sync_queue
    self._cluster_pod_task_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None
    # Initialize vnc connection.
    self.vnc_lib = self._vnc_connect()
    # Cache common config.
    self.vnc_mesos_config = vnc_mesos_config(logger=self.logger,
                                             vnc_lib=self.vnc_lib,
                                             args=self.args,
                                             queue=self.queue,
                                             sync_queue=self.sync_queue)
    # init access to db
    self._db = db.MesosNetworkManagerDB(self.args, self.logger)
    DBBaseMM.init(self, self.logger, self._db)
    # sync api server db in local cache
    self._sync_mm()
    # provision cluster
    self._provision_cluster()
    # publish the IPAM fq-names discovered during provisioning
    self.vnc_mesos_config.update(
        cluster_pod_task_ipam_fq_name=self._get_cluster_pod_task_ipam_fq_name(),
        cluster_ip_fabric_ipam_fq_name=self._get_cluster_ip_fabric_ipam_fq_name())
    self.pod_task_mgr = importutils.import_object(
        'mesos_manager.vnc.vnc_pod_task.VncPodTask')
    # record the singleton instance
    VncMesos._vnc_mesos = self
def __init__(self, args=None, logger=None, q=None, kube=None):
    """Bootstrap the kube network manager.

    Connects to the VNC API server, initializes DB and AMQP access,
    syncs the API server DB into the local cache, provisions the
    cluster, then instantiates one handler per watched k8s resource.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param q: event queue shared with the kubernetes monitors.
    :param kube: kubernetes API client handle.
    """
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    # init vnc connection
    self.vnc_lib = self._vnc_connect()
    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)
    # init rabbit connection
    self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP,
                                "kube_manager", args=self.args)
    self.rabbit.establish()
    # sync api server db in local cache, then unblock AMQP consumers
    self._sync_km()
    self.rabbit._db_resync_done.set()
    # provision cluster
    self._provision_cluster()
    # handle events: one manager object per kubernetes resource type
    self.label_cache = label_cache.LabelCache()
    self.namespace_mgr = importutils.import_object(
        "kube_manager.vnc.vnc_namespace.VncNamespace",
        vnc_lib=self.vnc_lib,
        cluster_pod_subnets=self.args.pod_subnets,
    )
    self.service_mgr = importutils.import_object(
        "kube_manager.vnc.vnc_service.VncService", self.vnc_lib,
        self.label_cache, self.args, self.logger, self.kube
    )
    self.pod_mgr = importutils.import_object(
        "kube_manager.vnc.vnc_pod.VncPod", self.vnc_lib,
        self.label_cache, self.service_mgr,
        svc_fip_pool=self._get_cluster_service_fip_pool(),
    )
    self.network_policy_mgr = importutils.import_object(
        "kube_manager.vnc.vnc_network_policy.VncNetworkPolicy",
        self.vnc_lib, self.label_cache, self.logger
    )
    self.endpoints_mgr = importutils.import_object(
        "kube_manager.vnc.vnc_endpoints.VncEndpoints",
        self.vnc_lib, self.label_cache
    )
def __init__(self):
    """Initialize the 'Service' event handler.

    Captures shared config handles, caches kubernetes API server
    parameters, decides whether a link-local service should be created
    and wires up the loadbalancer helper managers.
    """
    self._k8s_event_type = 'Service'
    super(VncService,self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._args = vnc_kube_config.args()
    self.logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self.kube = vnc_kube_config.kube()
    self._fip_pool_obj = None

    # Cache kubernetes API server params.
    self._kubernetes_api_secure_ip = self._args.kubernetes_api_secure_ip
    self._kubernetes_api_secure_port =\
        int(self._args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = self._args.kubernetes_service_name

    # Config knob to control enable/disable of link local service.
    if self._args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    # BUG FIX: the original tested the IP twice and never validated the
    # port; check the port in the second clause.
    if not self._kubernetes_api_secure_ip or\
            not self._kubernetes_api_secure_port:
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager')
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def post_init(self, vnc_lib, args=None):
    """Second-stage initialization run once the API server is reachable.

    Creates the default service templates, then loads the vrouter
    scheduler and the per-hypervisor instance managers.

    :param vnc_lib: connected VNC API client handle.
    :param args: unused here; kept for interface compatibility.
    """
    # api server
    self._vnc_lib = vnc_lib
    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    # BUG FIX: 'in-network' was passed as image_name; it is a service
    # mode, not an image. Use svc_mode like the sibling post_init
    # variants and the other netns-based templates.
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._args)
    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.instance_manager.VirtualMachineManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._args)
    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._args)
def __init__(self, args=None, logger=None, q=None):
    """Bootstrap the kube network manager.

    Connects to the VNC API server, initializes DB and AMQP access,
    syncs the API server DB into the local cache, provisions the
    cluster, then instantiates one handler per watched k8s resource.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param q: event queue shared with the kubernetes monitors.
    """
    self.args = args
    self.logger = logger
    self.q = q
    # init vnc connection
    self.vnc_lib = self._vnc_connect()
    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)
    # init rabbit connection
    self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP,
                                'kube_manager', args=self.args)
    self.rabbit.establish()
    # sync api server db in local cache, then unblock AMQP consumers
    self._sync_sm()
    self.rabbit._db_resync_done.set()
    # provision cluster
    self._provision_cluster()
    # handle events: one manager object per kubernetes resource type
    self.label_cache = label_cache.LabelCache()
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace', self.vnc_lib)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.vnc_lib,
        self.label_cache, self.args, self.logger)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.vnc_lib,
        self.label_cache, self.service_mgr)
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy',
        self.vnc_lib, self.label_cache, self.logger)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints',
        self.vnc_lib, self.label_cache)
def __init__(self, args=None, vnc_lib=None,
             label_cache=None, logger=None, kube=None):
    """Store the injected handles, reset cached network state and
    create the four loadbalancer helper managers.

    :param args: parsed configuration options.
    :param vnc_lib: VNC API client handle.
    :param label_cache: shared label cache (unused here).
    :param logger: manager logger instance.
    :param kube: kubernetes API client handle.
    """
    self._args = args
    self._kube = kube
    self._vnc_lib = vnc_lib
    # Lazily-resolved network state; filled in on first use.
    self._vn_obj = None
    self._service_subnet_uuid = None
    self._fip_pool_obj = None
    # Instantiate each helper manager by dotted path with the same
    # vnc_lib/logger pair this handler was constructed with.
    lb_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbManager'
    listener_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbListenerManager'
    pool_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager'
    member_cls_path = 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager'
    self.service_lb_mgr = importutils.import_object(
        lb_cls_path, vnc_lib, logger)
    self.service_ll_mgr = importutils.import_object(
        listener_cls_path, vnc_lib, logger)
    self.service_lb_pool_mgr = importutils.import_object(
        pool_cls_path, vnc_lib, logger)
    self.service_lb_member_mgr = importutils.import_object(
        member_cls_path, vnc_lib, logger)
def post_init(self, vnc_lib, args=None):
    """Second-stage initialization run once the API server is reachable.

    Creates the default service templates, then loads the vrouter
    scheduler and the per-hypervisor instance managers.

    :param vnc_lib: connected VNC API client handle.
    :param args: unused here; kept for interface compatibility.
    """
    # api server
    self._vnc_lib = vnc_lib
    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._args)
    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.instance_manager.VirtualMachineManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._args)
    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self.db, self.logger,
        self.vrouter_scheduler, self._args)
def __init__(self, vnc_lib=None, label_cache=None, args=None, logger=None,
             kube=None):
    """Initialize the service handler.

    Caches kubernetes API server coordinates, decides whether the
    link-local service should be created, and wires up the four
    loadbalancer helper managers.

    :param vnc_lib: VNC API client handle.
    :param label_cache: shared label cache.
    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param kube: kubernetes API client handle.
    """
    self._vnc_lib = vnc_lib
    self._label_cache = label_cache
    self.logger = logger
    self.kube = kube

    # Cache kubernetes API server params.
    self._kubernetes_api_secure_ip = args.kubernetes_api_secure_ip
    self._kubernetes_api_secure_port = int(args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = args.kubernetes_service_name

    # Config knob to control enable/disable of link local service.
    if args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    # BUG FIX: the original tested the IP twice and never validated the
    # port; check the port in the second clause.
    if not self._kubernetes_api_secure_ip or\
            not self._kubernetes_api_secure_port:
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager', vnc_lib, logger)
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager', vnc_lib,
        logger)
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager', vnc_lib,
        logger)
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager', vnc_lib,
        logger)
def load_drivers(self):
    """Instantiate a loadbalancer driver for every service appliance
    set that declares one.

    For each set, a config section named after the set is populated
    with its key/value pairs, HA mode and per-appliance credentials
    before the driver class is imported and constructed.
    """
    for sas in ServiceApplianceSetSM.values():
        if sas.driver:
            config = self._args.config_sections
            config.add_section(sas.name)
            for kvp in sas.kvpairs or []:
                config.set(sas.name, kvp["key"], kvp["value"])
            if sas.ha_mode:
                config.set(sas.name, "ha_mode", str(sas.ha_mode))
            for sa in sas.service_appliances or []:
                saobj = ServiceApplianceSM.get(sa)
                config.set(sas.name, "device_ip", saobj.ip_address)
                config.set(sas.name, "user",
                           saobj.user_credential["username"])
                config.set(sas.name, "password",
                           saobj.user_credential["password"])
            self._loadbalancer_driver[sas.name] = importutils.import_object(
                sas.driver, sas.name, self._svc_mon, self._vnc_lib,
                self._cassandra, self._args
            )
def __init__(self, args=None, logger=None, q=None, kube=None):
    """Bootstrap the kube network manager.

    Connects to the VNC API server, initializes DB and AMQP access,
    records nested-mode, syncs the API server DB into the local cache,
    provisions the cluster, then instantiates one handler per watched
    k8s resource.

    :param args: parsed configuration options.
    :param logger: manager logger instance.
    :param q: event queue shared with the kubernetes monitors.
    :param kube: kubernetes API client handle.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    # init vnc connection
    self.vnc_lib = self._vnc_connect()
    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)
    # If nested mode is enabled via config, then record the directive.
    # BUG FIX: was `is '1'` — identity comparison against a string
    # literal is not guaranteed to be True for equal strings; use `==`.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)
    # init rabbit connection
    self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP,
                                'kube_manager', args=self.args)
    self.rabbit.establish()
    # sync api server db in local cache, then unblock AMQP consumers
    self._sync_km()
    self.rabbit._db_resync_done.set()
    # provision cluster
    self._provision_cluster()
    # handle events: one manager object per kubernetes resource type
    self.label_cache = label_cache.LabelCache()
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        vnc_lib=self.vnc_lib, logger=self.logger,
        cluster_pod_subnets=self.args.pod_subnets)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.vnc_lib,
        self.label_cache, self.args, self.logger, self.kube)
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy',
        self.vnc_lib, self.label_cache, self.logger)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.vnc_lib,
        self.label_cache, self.args, self.logger, self.service_mgr,
        self.network_policy_mgr, self.q,
        svc_fip_pool=self._get_cluster_service_fip_pool())
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints',
        self.vnc_lib, self.logger, self.kube)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.args, self.q,
        self.vnc_lib, self.label_cache, self.logger, self.kube)
def load_drivers(self):
    """Instantiate a loadbalancer driver for every service appliance
    set that declares one.

    For each set, a config section named after the set is populated
    with its key/value pairs, HA mode and per-appliance credentials
    before the driver class is imported and constructed.
    """
    for sas in ServiceApplianceSetSM.values():
        if sas.driver:
            config = self._args.config_sections
            config.add_section(sas.name)
            for kvp in sas.kvpairs or []:
                config.set(sas.name, kvp['key'], kvp['value'])
            if sas.ha_mode:
                config.set(sas.name, 'ha_mode', str(sas.ha_mode))
            for sa in sas.service_appliances or []:
                saobj = ServiceApplianceSM.get(sa)
                config.set(sas.name, 'device_ip', saobj.ip_address)
                config.set(sas.name, 'user',
                           saobj.user_credential['username'])
                config.set(sas.name, 'password',
                           saobj.user_credential['password'])
            self._loadbalancer_driver[sas.name] = \
                importutils.import_object(sas.driver, sas.name,
                                          self._svc_mon, self._vnc_lib,
                                          self.lb_db, self._args)
def load_driver(self, sas):
    """Instantiate the loadbalancer driver for one service appliance
    set, unless one is already loaded for it.

    A config section named after the set is populated with its
    key/value pairs, HA mode and per-appliance credentials before the
    driver class is imported and constructed.

    :param sas: service appliance set cache object.
    """
    if sas.name in self._loadbalancer_driver:
        # Driver already loaded for this set; nothing to do.
        return
    if sas.driver:
        config = self._args.config_sections
        config.add_section(sas.name)
        for kvp in sas.kvpairs or []:
            config.set(sas.name, kvp['key'], kvp['value'])
        if sas.ha_mode:
            config.set(sas.name, 'ha_mode', sas.ha_mode)
        for sa in sas.service_appliances or []:
            # BUG FIX: sibling load_drivers/load_driver variants look
            # the appliance up via the ServiceApplianceSM cache class;
            # 'ServiceApplianceSet' is not the cache accessor.
            saobj = ServiceApplianceSM.get(sa)
            config.set(sas.name, 'device_ip', saobj.ip_address)
            config.set(sas.name, 'user', saobj.user_credential['username'])
            config.set(sas.name, 'password',
                       saobj.user_credential['password'])
        self._loadbalancer_driver[sas.name] = \
            importutils.import_object(sas.driver, sas.name,
                                      self._svc_mon, self._vnc_lib,
                                      self.lb_db, self._args)
def load_driver(self, sas):
    """(Re)instantiate the loadbalancer driver for one service
    appliance set.

    Any previously loaded driver and its config section are discarded
    first, then the section is repopulated with the set's key/value
    pairs, HA mode and per-appliance credentials before the driver
    class is imported and constructed.

    :param sas: service appliance set cache object.
    """
    if sas.name in self._loadbalancer_driver:
        # Drop the stale driver so it is rebuilt from current config.
        del (self._loadbalancer_driver[sas.name])
    if sas.driver:
        config = self._args.config_sections
        try:
            # Remove any stale section; ignore if it does not exist.
            config.remove_section(sas.name)
        except Exception:
            pass
        config.add_section(sas.name)
        for kvp in sas.kvpairs or []:
            config.set(sas.name, kvp['key'], kvp['value'])
        if sas.ha_mode:
            config.set(sas.name, 'ha_mode', sas.ha_mode)
        for sa in sas.service_appliances or []:
            saobj = ServiceApplianceSM.get(sa)
            config.set(sas.name, 'device_ip', saobj.ip_address)
            config.set(sas.name, 'user', saobj.user_credential['username'])
            config.set(sas.name, 'password',
                       saobj.user_credential['password'])
        self._loadbalancer_driver[sas.name] = \
            importutils.import_object(sas.driver, sas.name,
                                      self._svc_mon, self._vnc_lib,
                                      self._cassandra, self._args)
def sanitize_resources(self):
    """Build the ordered map of resource loaders and warn about any
    requested resource types that are not supported.

    Populates ``self._resource_map`` with one loader object per
    supported resource type, each configured with the DB/ZK handles and
    the requested amounts from the resource-distribution config.
    """
    # Typo fix in log message: was "Santizing".
    logger.debug("Sanitizing resources distribution")
    self._resource_map = OrderedDict()
    for resource_type in self._SUPPORTED_RESOURCES:
        # e.g. 'virtual-network' -> contrail_db_loader.resources.
        # virtual_network.VirtualNetwork
        object_path = 'contrail_db_loader.resources.%s.%s' %\
            (resource_type.replace('-', '_'), camel_case(resource_type))
        kwargs = {
            'db_manager': self._object_db,
            'batch_size': self._cassandra_batch_size,
            'zk_client': self._zk_client,
            'project_amount': self._resource_distribution.get('project', 0),
            'amount_per_project': self._resource_distribution.get(
                resource_type, 0),
        }
        self._resource_map[resource_type] = import_object(object_path,
                                                          **kwargs)
    # Anything asked for in the distribution but not supported is only
    # warned about, never loaded.
    resources_not_supported = (set(self._resource_distribution.keys()) -
                               set(self._SUPPORTED_RESOURCES))
    if resources_not_supported:
        logger.warning('Loading resources %s are not supported' %
                       ', '.join(resources_not_supported))
def test_import_object(self):
    """A dotted class path with no args should yield an instance."""
    instance = importutils.import_object('datetime.time')
    expected_cls = sys.modules['datetime'].time
    self.assertTrue(isinstance(instance, expected_cls))
def test_import_object_with_args(self):
    """Positional args should be forwarded to the constructor."""
    created = importutils.import_object('datetime.datetime', 2012, 4, 5)
    expected_cls = sys.modules['datetime'].datetime
    self.assertTrue(isinstance(created, expected_cls))
    self.assertEqual(created, datetime.datetime(2012, 4, 5))
def test_import_object_optional_arg_not_present(self):
    """A class whose args are all optional should construct with none."""
    created = importutils.import_object('cfgm_common.tests.fake.FakeDriver')
    self.assertEqual(created.__class__.__name__, 'FakeDriver')
def test_import_object_required_arg_present(self):
    """Keyword args should be forwarded to the constructor."""
    created = importutils.import_object('cfgm_common.tests.fake.FakeDriver2',
                                        first_arg=False)
    self.assertEqual(created.__class__.__name__, 'FakeDriver2')
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Bring up the VNC side of kube-manager.

    Sequence (order matters): connect to the VNC API server, cache shared
    config, disable flow aging for nested-mode control connections, attach
    to the config DB, sync the local cache, connect to rabbit, register
    label callbacks, provision the cluster, then instantiate the per-object
    event managers and default security policies.

    :param args: parsed kube-manager arguments (nested_mode, cluster ids,
        server lists, ports); presumably an argparse-style namespace —
        TODO confirm against caller.
    :param logger: sandesh-backed logger (``logger._sandesh`` is read).
    :param q: event queue shared with the kube API watcher.
    :param kube: kubernetes API client handle.
    :param vnc_kubernetes_config_dict: optional pre-built shared-config
        dict; when given it replaces the locally derived IPAM FQ names.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    # Cluster IPAM fully-qualified names; filled lazily after provisioning.
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    #
    # In nested mode, kube-manager connects to contrail components running
    # in underlay via global link local services. TCP flows established on
    # link local services will be torn down by vrouter, if there is no
    # activity for configured (or default) timeout. So disable flow timeout
    # on these connections, so these flows will persist.
    #
    # Note: The way to disable flow timeout is to set timeout to max
    # possible value (2147483647 == INT32_MAX).
    #
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        if self.args.vnc_endpoint_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                2147483647)

        for collector in self.args.collectors:
            collector_port = collector.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", collector_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection; the queue name embeds cluster id and name so
    # multiple kube-managers do not share a queue.
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(
        self.logger._sandesh, self.logger, DBBaseKM,
        reaction_map.REACTION_MAP,
        self.args.cluster_id + '-' + self.args.cluster_name +
        '-kube_manager', rabbitmq_cfg, self.args.host_ip)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    label_cache.XLabelCache.register_label_add_callback(
        VncKubernetes.create_tags)
    label_cache.XLabelCache.register_label_delete_callback(
        VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config with the IPAM FQ names derived during
        # cluster provisioning.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=self.
            _get_cluster_service_ipam_fq_name(),
            cluster_ip_fabric_ipam_fq_name=self.
            _get_cluster_ip_fabric_ipam_fq_name())

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    # Per-object event managers; creation order matters because later
    # managers take earlier ones as constructor arguments.
    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.network_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network.VncNetwork')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS (application policy set).
    VncSecurityPolicy.tag_cluster_application_policy_set()
def __init__(self, args=None, logger=None, q=None, kube=None):
    """Bring up the VNC side of kube-manager (pre-security-policy variant).

    Connects to the VNC API server, attaches to the config DB and rabbit,
    syncs the local cache, provisions the cluster, then instantiates the
    per-object event managers.

    :param args: parsed kube-manager arguments (nested_mode,
        cassandra_server_list, ...).
    :param logger: manager logger.
    :param q: event queue shared with the kube API watcher.
    :param kube: kubernetes API client handle.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # HACK ALERT.
    # Till we have an alternate means to get config objects, we will
    # directly connect to cassandra. Such a persistent connection is
    # discouraged, but is the only option we have for now.
    #
    # Disable flow timeout on this connection, so the flow persists.
    #
    # BUGFIX: was `is '1'` -- identity comparison against a string literal
    # is unreliable (and a SyntaxWarning on modern Python); use equality.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # init rabbit connection
    self.rabbit = VncAmqpHandle(self.logger, DBBaseKM, REACTION_MAP,
                                'kube_manager', args=self.args)
    self.rabbit.establish()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    # sync api server db in local cache
    self._sync_km()
    self.rabbit._db_resync_done.set()

    # provision cluster
    self._provision_cluster()

    # handle events
    self.label_cache = label_cache.LabelCache()

    # Update common config with values derived during provisioning.
    self.vnc_kube_config.update(
        label_cache=self.label_cache,
        cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
        cluster_service_fip_pool=self._get_cluster_service_fip_pool())

    # Per-object event managers; later managers take earlier ones as
    # constructor arguments.
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress')
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')

    VncKubernetes._vnc_kubernetes = self
def _load_driver(self, args):
    """Register the OpenContrail loadbalancer driver; return its name."""
    # TODO: load the driver from a config option instead of hard-coding.
    driver_name = "opencontrail"
    driver = importutils.import_object(
        "neutron_plugin_contrail.plugins.opencontrail.loadbalancer.driver.OpencontrailLoadbalancerDriver",
        self._vnc_lib)
    self._loadbalancer_driver[driver_name] = driver
    return driver_name
def post_init(self, vnc_lib, args=None):
    """Finish service-monitor bring-up once the VNC API client exists.

    Loads the nova client (best effort), the vrouter scheduler and all
    instance managers/agents, resyncs the local DB, installs the default
    service templates, and finally launches pending services.
    """
    # api server
    self._vnc_lib = vnc_lib
    try:
        self._nova_client = importutils.import_object(
            'svc_monitor.nova_client.ServiceMonitorNovaClient',
            self._args, self.logger)
    except Exception:
        # Best effort: run without nova if the client cannot be built.
        # NOTE(review): presumably the managers tolerate a None nova
        # client -- confirm. (Unused `as e` binding removed.)
        self._nova_client = None

    # agent manager
    self._agent_manager = AgentManager()

    # load vrouter scheduler
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._nova_client,
        self._disc, self.logger, self._args)

    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.virtual_machine_manager.VirtualMachineManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load a vrouter instance manager
    self.vrouter_manager = importutils.import_object(
        'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load PNF instance manager
    self.ps_manager = importutils.import_object(
        'svc_monitor.physical_service_manager.PhysicalServiceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load a loadbalancer agent
    self.loadbalancer_agent = LoadbalancerAgent(self, self._vnc_lib,
                                                self._cassandra,
                                                self._args)
    self._agent_manager.register_agent(self.loadbalancer_agent)

    # load a snat agent
    self.snat_agent = SNATAgent(self, self._vnc_lib,
                                self._cassandra, self._args)
    self._agent_manager.register_agent(self.snat_agent)

    # load port tuple agent
    self.port_tuple_agent = PortTupleAgent(self, self._vnc_lib,
                                           self._cassandra, self._args,
                                           self.logger)
    self._agent_manager.register_agent(self.port_tuple_agent)

    # Read the cassandra and populate the entry in ServiceMonitor DB
    self.sync_sm()

    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    self._create_default_template('docker-template', 'firewall',
                                  svc_mode='transparent',
                                  image_name="ubuntu",
                                  hypervisor_type='vrouter-instance',
                                  vrouter_instance_type='docker',
                                  instance_data={
                                      "command": "/bin/bash"
                                  })

    # upgrade handling
    self.upgrade()

    # check services
    self.vrouter_scheduler.vrouters_running()
    self.launch_services()

    self.rabbit._db_resync_done.set()
def load_driver(self, sas):
    """Instantiate the driver for *sas* unless one is already cached."""
    already_loaded = sas.name in self._loadbalancer_driver
    if already_loaded or not sas.driver:
        return
    driver = importutils.import_object(sas.driver, self._svc_mon,
                                       self._vnc_lib)
    self._loadbalancer_driver[sas.name] = driver
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Bring up the VNC side of kube-manager.

    Connects to the VNC API server, caches shared config, disables flow
    aging for nested-mode control connections, attaches to the config DB
    and rabbit, registers label callbacks, provisions the cluster and
    instantiates the per-object event managers and default policies.

    :param args: parsed kube-manager arguments.
    :param logger: sandesh-backed logger (``logger._sandesh`` is read).
    :param q: event queue shared with the kube API watcher.
    :param kube: kubernetes API client handle.
    :param vnc_kubernetes_config_dict: optional pre-built shared-config
        dict; when given it replaces the locally derived IPAM FQ names.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    #
    # In nested mode, kube-manager connects to contrail components running
    # in underlay via global link local services. TCP flows established on
    # link local services will be torn down by vrouter, if there is no
    # activity for configured (or default) timeout. So disable flow timeout
    # on these connections, so these flows will persist.
    #
    # Note: The way to disable flow timeout is to set timeout to max
    # possible value (2147483647 == INT32_MAX).
    #
    # BUGFIX: was `is '1'` -- identity comparison against a string literal
    # is unreliable (and a SyntaxWarning on modern Python); use equality.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        if self.args.vnc_endpoint_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                2147483647)

        for collector in self.args.collectors:
            collector_port = collector.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", collector_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg, self.args.host_ip)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
    XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config with the IPAM FQ names derived during
        # cluster provisioning.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=(
                self._get_cluster_service_ipam_fq_name()),
            cluster_ip_fabric_ipam_fq_name=(
                self._get_cluster_ip_fabric_ipam_fq_name()))

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    # Per-object event managers; later managers take earlier ones as
    # constructor arguments, so creation order matters.
    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.network_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network.VncNetwork')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS (application policy set).
    VncSecurityPolicy.tag_cluster_application_policy_set()
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Bring up the VNC side of kube-manager (fip-pool variant).

    Connects to the VNC API server, caches shared config, disables flow
    aging for the nested-mode cassandra connection, attaches to the config
    DB and rabbit, provisions the cluster, then instantiates the
    per-object event managers.

    :param args: parsed kube-manager arguments.
    :param logger: sandesh-backed logger (``logger._sandesh`` is read).
    :param q: event queue shared with the kube API watcher.
    :param kube: kubernetes API client handle.
    :param vnc_kubernetes_config_dict: optional pre-built shared-config
        dict; when given it replaces the locally derived values.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    # HACK ALERT.
    # Till we have an alternate means to get config objects, we will
    # directly connect to cassandra. Such a persistent connection is
    # discouraged, but is the only option we have for now.
    #
    # Disable flow timeout on this connection, so the flow persists.
    #
    # BUGFIX: was `is '1'` -- identity comparison against a string literal
    # is unreliable (and a SyntaxWarning on modern Python); use equality.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config with values derived during provisioning.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_fip_pool=self._get_cluster_service_fip_pool())

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    # Per-object event managers; later managers take earlier ones as
    # constructor arguments.
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress')
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')

    VncKubernetes._vnc_kubernetes = self
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Bring up the VNC side of kube-manager (security-policy variant).

    Connects to the VNC API server, caches shared config, disables flow
    aging for nested-mode cassandra/rabbit connections, attaches to the
    config DB and rabbit, registers label callbacks, provisions the
    cluster and instantiates the per-object event managers and default
    security policies.

    :param args: parsed kube-manager arguments.
    :param logger: sandesh-backed logger (``logger._sandesh`` is read).
    :param q: event queue shared with the kube API watcher.
    :param kube: kubernetes API client handle.
    :param vnc_kubernetes_config_dict: optional pre-built shared-config
        dict; when given it replaces the locally derived IPAM FQ names.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    # HACK ALERT.
    # Till we have an alternate means to get config objects, we will
    # directly connect to cassandra and rabbitmq. Such a persistent
    # connection is discouraged, but is the only option we have for now.
    #
    # Disable flow timeout on this connection, so the flow persists.
    #
    # BUGFIX: was `is '1'` -- identity comparison against a string literal
    # is unreliable (and a SyntaxWarning on modern Python); use equality.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
    XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config with the IPAM FQ names derived during
        # cluster provisioning.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=(
                self._get_cluster_service_ipam_fq_name()),
            cluster_ip_fabric_ipam_fq_name=(
                self._get_cluster_ip_fabric_ipam_fq_name()))

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    # Per-object event managers; later managers take earlier ones as
    # constructor arguments.
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress')
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS (application policy set).
    VncSecurityPolicy.tag_cluster_application_policy_set()
def post_init(self, vnc_lib, args=None):
    """Finish service-monitor bring-up once the VNC API client exists.

    Loads the nova client, the vrouter scheduler and all instance
    managers/agents, resyncs the local DB from cassandra, installs the
    default service templates, and finally launches pending services.
    Order matters: managers take the scheduler and nova client as
    constructor arguments, and template creation requires a synced DB.
    """
    # api server
    self._vnc_lib = vnc_lib
    self._nova_client = importutils.import_object(
        'svc_monitor.nova_client.ServiceMonitorNovaClient',
        self._args, self.logger)

    # agent manager
    self._agent_manager = AgentManager()

    # load vrouter scheduler (class chosen by config)
    self.vrouter_scheduler = importutils.import_object(
        self._args.si_netns_scheduler_driver,
        self._vnc_lib, self._nova_client, self._args)

    # load virtual machine instance manager
    self.vm_manager = importutils.import_object(
        'svc_monitor.virtual_machine_manager.VirtualMachineManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load network namespace instance manager
    self.netns_manager = importutils.import_object(
        'svc_monitor.instance_manager.NetworkNamespaceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load a vrouter instance manager
    self.vrouter_manager = importutils.import_object(
        'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load PNF instance manager
    self.ps_manager = importutils.import_object(
        'svc_monitor.physical_service_manager.PhysicalServiceManager',
        self._vnc_lib, self._cassandra, self.logger,
        self.vrouter_scheduler, self._nova_client, self._agent_manager,
        self._args)

    # load a loadbalancer agent
    self.loadbalancer_agent = LoadbalancerAgent(
        self, self._vnc_lib, self._cassandra, self._args)
    self._agent_manager.register_agent(self.loadbalancer_agent)

    # load a snat agent
    self.snat_agent = SNATAgent(self, self._vnc_lib,
                                self._cassandra, self._args)
    self._agent_manager.register_agent(self.snat_agent)

    # Read the cassandra and populate the entry in ServiceMonitor DB
    self.sync_sm()

    # create default analyzer template
    self._create_default_template('analyzer-template', 'analyzer',
                                  flavor='m1.medium',
                                  image_name='analyzer')
    # create default NAT template
    self._create_default_template('nat-template', 'firewall',
                                  svc_mode='in-network-nat',
                                  image_name='analyzer',
                                  flavor='m1.medium')
    # create default netns SNAT template
    self._create_default_template('netns-snat-template', 'source-nat',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # create default loadbalancer template
    self._create_default_template('haproxy-loadbalancer-template',
                                  'loadbalancer',
                                  svc_mode='in-network-nat',
                                  hypervisor_type='network-namespace',
                                  scaling=True)
    # default docker firewall template
    self._create_default_template('docker-template', 'firewall',
                                  svc_mode='transparent',
                                  image_name="ubuntu",
                                  hypervisor_type='vrouter-instance',
                                  vrouter_instance_type='docker',
                                  instance_data={
                                      "command": "/bin/bash"
                                  })

    # upgrade handling
    self.upgrade()

    # check services
    self.launch_services()

    self._db_resync_done.set()
def load_drivers(self):
    """Instantiate a loadbalancer driver for every appliance set naming one."""
    for sas in ServiceApplianceSetSM.values():
        if not sas.driver:
            continue
        driver = importutils.import_object(sas.driver, self._svc_mon,
                                           self._vnc_lib)
        self._loadbalancer_driver[sas.name] = driver