def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj, pod_vn_obj,
                              service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(
            policy_name, proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj, ip_fabric_policy,
                            cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
                            cluster_service_network_policy,
                            cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(
                policy_name, proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)

    def _provision_cluster(self):
        # Pre-create the default project before any namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre-create the kube-system project before any namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, ip_fabric_ipam_obj,
                ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, pod_ipam_obj, pod_ipam_update,
                ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                                   cluster_pod_vn_obj, cluster_service_vn_obj,
                                   cluster_vn_obj)
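
The ip_fabric_forwarding branch above changes only which IPAM backs the cluster pod network; the provider network is the ip-fabric VN in both cases. A condensed sketch of the same choice (a hypothetical refactor, reusing names from the _provision_cluster scope):

        # Hypothetical condensation of the if/else above; only the IPAM
        # backing the pod network differs between the two modes.
        if self.args.ip_fabric_forwarding:
            ipam_obj, ipam_update = ip_fabric_ipam_obj, ip_fabric_ipam_update
        else:
            ipam_obj, ipam_update = pod_ipam_obj, pod_ipam_update
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(),
            'pod-network', proj_obj, ipam_obj, ipam_update, ip_fabric_vn_obj)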

    def _update_default_virtual_network_perms2(self, ns_name, proj_uuid,
                                               oper='add'):
        if DBBaseKM.is_nested():
            return
        try:
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()
            pod_vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn_fq_name)
            vn_fq_name = \
                vnc_kube_config.cluster_default_service_network_fq_name()
            service_vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn_fq_name)
        except NoIdError:
            return
        for vn_obj in [pod_vn_obj, service_vn_obj]:
            perms2 = vn_obj.perms2
            share = perms2.share
            tenant_found = False
            for item in share:
                if item.tenant == proj_uuid:
                    tenant_found = True
                    break
            if oper == 'add':
                if tenant_found:
                    continue
                share.append(ShareType(tenant=proj_uuid,
                                       tenant_access=PERMS_R))
            else:
                # Nothing to remove if the tenant is not in the share list.
                if not tenant_found:
                    continue
                share.remove(item)
            perms2.share = share
            vn_obj.perms2 = perms2
            self._vnc_lib.virtual_network_update(vn_obj)
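
The share-list manipulation above is the standard perms2 pattern for granting a tenant read access to a shared virtual network. A minimal standalone sketch of the same add/remove logic, assuming vnc_api's ShareType container and the conventional read mask PERMS_R = 4:

from vnc_api.gen.resource_xsd import ShareType

PERMS_R = 4  # read bit, matching the constant used above

def update_share_list(share, proj_uuid, oper='add'):
    """Mutate a perms2 share list in place; return True if it changed."""
    existing = [item for item in share if item.tenant == proj_uuid]
    if oper == 'add':
        if existing:
            return False
        share.append(ShareType(tenant=proj_uuid, tenant_access=PERMS_R))
        return True
    if not existing:
        return False
    share.remove(existing[0])
    return True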

    @classmethod
    def destroy_instance(cls):
        inst = cls.get_instance()
        if inst is None:
            return
        inst.rabbit.close()
        for obj_cls in list(DBBaseKM.get_obj_type_map().values()):
            obj_cls.reset()
        DBBase.clear()
        inst._db = None
        VncKubernetes._vnc_kubernetes = None
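
A hedged usage sketch for the teardown above: destroy_instance() pairs with the singleton accessor, typically in test cleanup (the test class here is hypothetical):

import unittest

class VncKubernetesTestBase(unittest.TestCase):
    def tearDown(self):
        # Drop the kube-manager singleton and all cached KM state so
        # nothing leaks into the next test case.
        VncKubernetes.destroy_instance()
        super(VncKubernetesTestBase, self).tearDown()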
Example 5
    def __init__(self, ingress_mgr):
        self._k8s_event_type = 'Service'
        super(VncService, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._ingress_mgr = ingress_mgr
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache(self._k8s_event_type)
        self._labels.reset_resource()
        self._args = vnc_kube_config.args()
        self.logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self.kube = vnc_kube_config.kube()
        self._fip_pool_obj = None

        # Cache kubernetes API server params.
        self._kubernetes_api_server = self._args.kubernetes_api_server
        self._kubernetes_api_secure_port =\
            int(self._args.kubernetes_api_secure_port)

        # Cache kubernetes service name.
        self._kubernetes_service_name = self._args.kubernetes_service_name

        # Config knob to enable/disable the link-local service.
        api_service_ll_enable = \
            (self._args.api_service_link_local == 'True')

        # If Kubernetes API server info is incomplete, disable link-local create,
        # as create is not possible.
        if not self._kubernetes_api_server:
            self._create_linklocal = False
        elif vnc_kube_config.is_cluster_network_configured(
        ) and DBBaseKM.is_nested():
            # In nested mode, if cluster network is configured, then the k8s api
            # server is in the same network as the k8s cluster. So there is no
            # need for link local.
            self._create_linklocal = False
        else:
            self._create_linklocal = api_service_ll_enable

        self.service_lb_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbManager')
        self.service_ll_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
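
The link-local decision in __init__ above reduces to a small predicate; a standalone sketch (a hypothetical helper, not part of the original class) makes the three cases explicit:

def should_create_linklocal(api_server, ll_knob,
                            cluster_net_configured, nested):
    # No API server info: a link-local service cannot be created.
    if not api_server:
        return False
    # Nested mode with a configured cluster network: the API server is
    # already reachable from the cluster network, so skip link-local.
    if cluster_net_configured and nested:
        return False
    # Otherwise honor the (stringly-typed) config knob.
    return ll_knob == 'True'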
Example 6
    def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj,
                              pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
                                                      pod_vn_obj,
                                                      service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._cluster_service_policy,
                            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._nested_underlay_policy)
Example 8
    def create_virtual_machine(self, name, vn, ipaddress):
        vm = VirtualMachine(name)
        self._vnc_lib.virtual_machine_create(vm)
        VirtualMachineKM.locate(vm.uuid)

        vmi = VirtualMachineInterface(parent_type='virtual-machine',
                                      fq_name=[name, '0'])
        vmi.set_virtual_machine(vm)
        vmi.set_virtual_network(vn)
        if DBBaseKM.is_nested():
            vmi.set_virtual_machine_interface_bindings(
                KeyValuePairs([KeyValuePair('host_id', 'WHATEVER')]))
        self._vnc_lib.virtual_machine_interface_create(vmi)
        VirtualMachineInterfaceKM.locate(vmi.uuid)

        ip = InstanceIp(vm.name + '.0')
        ip.set_virtual_machine_interface(vmi)
        ip.set_virtual_network(vn)
        ip.set_instance_ip_address(ipaddress)
        self._vnc_lib.instance_ip_create(ip)
        InstanceIpKM.locate(ip.uuid)

        return vm, vmi, ip
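
A hedged usage sketch for the helper above (test stands for the object providing create_virtual_machine; the vnc_lib handle, fq_name, and address are placeholders):

# Read any existing virtual network, then place a test VM on it.
vn_obj = vnc_lib.virtual_network_read(
    fq_name=['default-domain', 'default-project', 'pod-network'])
vm, vmi, ip = test.create_virtual_machine('test-pod', vn_obj, '10.32.0.10')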
Example 10
    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()
Example 11
    @staticmethod
    def init():
        DBMock.db = {}
        for cls in list(DBBaseKM.get_obj_type_map().values()):
            DBMock.db[cls.obj_type] = {}
Example 13
    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=namespace_id,
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s] "
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project
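
A hedged invocation sketch for vnc_namespace_add (ns_mgr stands for the VncNamespace instance; the UID, name, and labels are placeholders):

project = ns_mgr.vnc_namespace_add(
    namespace_id='k8s-namespace-uid',
    name='dev',
    labels={'team': 'platform'})
if project:
    print(project.fq_name)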
    def _sync_km(self):
        for cls in list(DBBaseKM.get_obj_type_map().values()):
            for obj in cls.list_obj():
                cls.locate(obj['uuid'], obj)
    @staticmethod
    def reset():
        for cls in list(DBBaseKM.get_obj_type_map().values()):
            cls.reset()
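
Several snippets above (destroy_instance, _sync_km, init, reset) share one pattern: iterate every cached KM class via DBBaseKM.get_obj_type_map(). A generic sketch of that pattern (the helper itself is hypothetical):

def for_each_km_class(fn):
    # Apply fn to every kube-manager DB cache class (ProjectKM, PodKM, ...).
    for cls in list(DBBaseKM.get_obj_type_map().values()):
        fn(cls)

# Example: a full cache reset, equivalent to reset() above.
# for_each_km_class(lambda c: c.reset())
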
    def __init__(self,
                 args=None,
                 logger=None,
                 q=None,
                 kube=None,
                 vnc_kubernetes_config_dict=None):
        self._name = type(self).__name__
        self.args = args
        self.logger = logger
        self.q = q
        self.kube = kube
        self._cluster_pod_ipam_fq_name = None
        self._cluster_service_ipam_fq_name = None
        self._cluster_ip_fabric_ipam_fq_name = None

        # init vnc connection
        self.vnc_lib = self._vnc_connect()

        # Cache common config.
        self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                               vnc_lib=self.vnc_lib,
                                               args=self.args,
                                               queue=self.q,
                                               kube=self.kube)

        #
        # In nested mode, kube-manager connects to contrail components
        # running in the underlay via global link-local services. TCP flows
        # established on link-local services are torn down by vrouter if
        # there is no activity for the configured (or default) timeout. To
        # make these flows persist, disable flow timeout on these
        # connections.
        #
        # Note: Flow timeout is disabled by setting the timeout to the
        #       maximum possible value.
        #
        if self.args.nested_mode == '1':
            for cassandra_server in self.args.cassandra_server_list:
                cassandra_port = cassandra_server.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", cassandra_port, 2147483647)

            if self.args.rabbit_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

            if self.args.vnc_endpoint_port:
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                    2147483647)

            for collector in self.args.collectors:
                collector_port = collector.split(':')[-1]
                flow_aging_manager.create_flow_aging_timeout_entry(
                    self.vnc_lib, "tcp", collector_port, 2147483647)

        # init access to db
        self._db = db.KubeNetworkManagerDB(self.args, self.logger)
        DBBaseKM.init(self, self.logger, self._db)

        # If nested mode is enabled via config, then record the directive.
        if self.args.nested_mode == '1':
            DBBaseKM.set_nested(True)

        # sync api server db in local cache
        self._sync_km()

        # init rabbit connection
        rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
        self.rabbit = VncAmqpHandle(
            self.logger._sandesh, self.logger, DBBaseKM,
            reaction_map.REACTION_MAP,
            self.args.cluster_id + '-' + self.args.cluster_name +
            '-kube_manager',
            rabbitmq_cfg, self.args.host_ip)
        self.rabbit.establish()
        self.rabbit._db_resync_done.set()

        # Register label add and delete callbacks with label management entity.
        label_cache.XLabelCache.register_label_add_callback(
            VncKubernetes.create_tags)
        label_cache.XLabelCache.register_label_delete_callback(
            VncKubernetes.delete_tags)

        # Instantiate and init Security Policy Manager.
        self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                     VncKubernetes.get_tags)

        # provision cluster
        self._provision_cluster()

        if vnc_kubernetes_config_dict:
            self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
        else:
            # Update common config.
            self.vnc_kube_config.update(
                cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
                cluster_service_ipam_fq_name=(
                    self._get_cluster_service_ipam_fq_name()),
                cluster_ip_fabric_ipam_fq_name=(
                    self._get_cluster_ip_fabric_ipam_fq_name()))

        # handle events
        self.label_cache = label_cache.LabelCache()
        self.vnc_kube_config.update(label_cache=self.label_cache)

        self.tags_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_tags.VncTags')
        self.network_policy_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
        self.namespace_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_namespace.VncNamespace',
            self.network_policy_mgr)
        self.ingress_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
        self.service_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
        self.pod_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
            self.network_policy_mgr)
        self.endpoints_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_endpoints.VncEndpoints')
        self.network_mgr = importutils.import_object(
            'kube_manager.vnc.vnc_network.VncNetwork')

        # Create system default security policies.
        VncSecurityPolicy.create_deny_all_security_policy()
        VncSecurityPolicy.create_allow_all_security_policy()
        self.ingress_mgr.create_ingress_security_policy()

        VncKubernetes._vnc_kubernetes = self

        # Associate cluster with the APS.
        VncSecurityPolicy.tag_cluster_application_policy_set()
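
The four flow-aging blocks in __init__ repeat one pattern: take a host:port endpoint, extract the port, and install a TCP aging entry with the maximum 32-bit signed timeout (2147483647 seconds), which effectively disables aging. A consolidation sketch, assuming only the flow_aging_manager.create_flow_aging_timeout_entry call shown above (the _disable_flow_aging helper itself is hypothetical):

MAX_FLOW_TIMEOUT = 2147483647  # 2**31 - 1: flows effectively never age out

def _disable_flow_aging(vnc_lib, args):
    # Collect every underlay port kube-manager reaches over link-local.
    ports = [ep.split(':')[-1] for ep in args.cassandra_server_list]
    ports += [ep.split(':')[-1] for ep in args.collectors]
    ports += [p for p in (args.rabbit_port, args.vnc_endpoint_port) if p]
    for port in ports:
        flow_aging_manager.create_flow_aging_timeout_entry(
            vnc_lib, 'tcp', port, MAX_FLOW_TIMEOUT)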