def _network_policy_sync(self):
    """Validate and synchronize network policy config.

    Heals inconsistencies in the cluster security policy, then deletes
    Contrail firewall policies whose backing K8s network policy no
    longer exists.
    """
    # Validate current network policy config.
    # (idiomatic truthiness test instead of '== False')
    if not VncSecurityPolicy.validate_cluster_security_policy():
        # Validation of current network policy config failed.
        self._logger.error(
            "%s - Periodic validation of cluster security policy failed."
            " Attempting to heal." % (self._name))
        # Attempt to heal the inconsistency in network policy config.
        VncSecurityPolicy.recreate_cluster_security_policy()

    # Validate and sync the K8s API and Contrail API.
    # This handles the cases where kube-manager could have missed delete
    # events from K8s API, which is possible if kube-manager was down when
    # the policy was deleted.
    headless_fw_policy_uuids = \
        VncSecurityPolicy.sync_cluster_security_policy()

    # Delete config objects for network policies not found in K8s API
    # server but found in Contrail API.
    for fw_policy_uuid in headless_fw_policy_uuids:
        self._logger.error(
            "%s - Generating delete event for orphaned FW policy [%s]"
            % (self._name, fw_policy_uuid))
        self._create_network_policy_delete_event(fw_policy_uuid)
def _provision_cluster(self):
    """Provision default cluster config objects in Contrail.

    Creates the default and kube-system projects, the cluster
    application policy set, pod/service IPAMs and their networks, and
    attaches the cluster network policy.
    """
    proj_obj = self._create_project(
        vnc_kube_config.cluster_default_project_name())

    # Create application policy set for the cluster project.
    VncSecurityPolicy.create_application_policy_set(
        vnc_kube_config.application_policy_set_name())

    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    ip_fabric_vn_obj = self.vnc_lib.virtual_network_read(
        fq_name=ip_fabric_fq_name)

    self._create_project('kube-system')

    # Create Pod IPAM. The created subnets are not used here, so they are
    # deliberately discarded.
    pod_ipam_update, pod_ipam_obj, _ = self._create_ipam(
        'pod-ipam', self.args.pod_subnets, proj_obj)

    # Cache cluster pod ipam name.
    # This will be referenced by ALL pods that are spawned in the cluster.
    self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

    # Create a cluster-pod-network.
    cluster_pod_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_pod_network_name(), proj_obj,
        pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)

    # Create Service IPAM.
    service_ipam_update, service_ipam_obj, _ = self._create_ipam(
        'service-ipam', self.args.service_subnets, proj_obj)
    self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()

    # Create a cluster-service-network.
    cluster_service_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_service_network_name(), proj_obj,
        service_ipam_obj, service_ipam_update)

    self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                               cluster_pod_vn_obj, cluster_service_vn_obj)
def _network_policy_sync(self):
    """Validate and synchronize network policy config.

    Heals inconsistencies in the cluster security policy, then deletes
    Contrail firewall policies whose backing K8s network policy no
    longer exists.
    """
    # Validate current network policy config.
    # (idiomatic truthiness test instead of '== False')
    if not VncSecurityPolicy.validate_cluster_security_policy():
        # Validation of current network policy config failed.
        self._logger.error(
            "%s - Periodic validation of cluster security policy failed."
            " Attempting to heal." % (self._name))
        # Attempt to heal the inconsistency in network policy config.
        VncSecurityPolicy.recreate_cluster_security_policy()

    # Validate and sync the K8s API and Contrail API.
    # This handles the cases where kube-manager could have missed delete
    # events from K8s API, which is possible if kube-manager was down when
    # the policy was deleted.
    headless_fw_policy_uuids = \
        VncSecurityPolicy.sync_cluster_security_policy()

    # Delete config objects for network policies not found in K8s API
    # server but found in Contrail API.
    for fw_policy_uuid in headless_fw_policy_uuids:
        self._logger.error(
            "%s - Generating delete event for orphaned FW policy [%s]"
            % (self._name, fw_policy_uuid))
        self._create_network_policy_delete_event(fw_policy_uuid)
def _provision_cluster(self):
    """Provision default cluster config objects in Contrail.

    Creates default projects, the cluster application policy set, SNAT
    port pools, ip-fabric/pod/service IPAMs and networks, and attaches
    the cluster network policy.
    """
    # Pre creating default project before namespace add event.
    proj_obj = self._create_project('default')

    # Create application policy set for the cluster project.
    VncSecurityPolicy.create_application_policy_set(
        vnc_kube_config.application_policy_set_name())

    # Allocate fabric snat port translation pools.
    self._allocate_fabric_snat_port_translation_pools()

    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    ip_fabric_vn_obj = self.vnc_lib.virtual_network_read(
        fq_name=ip_fabric_fq_name)

    cluster_vn_obj = None
    if DBBaseKM.is_nested():
        try:
            cluster_vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vnc_kube_config.cluster_default_network_fq_name())
        except NoIdError:
            # Cluster network is optional in nested mode; proceed without.
            pass

    # Pre creating kube-system project before namespace add event.
    self._create_project('kube-system')

    # Create ip-fabric IPAM. Subnet lists returned by _create_ipam are not
    # needed here and are discarded.
    ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
    ip_fabric_ipam_update, ip_fabric_ipam_obj, _ = self._create_ipam(
        ipam_name, self.args.ip_fabric_subnets, proj_obj)
    self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()

    # Create Pod IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
    pod_ipam_update, pod_ipam_obj, _ = self._create_ipam(
        ipam_name, self.args.pod_subnets, proj_obj)

    # Cache cluster pod ipam name.
    # This will be referenced by ALL pods that are spawned in the cluster.
    self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

    # Create a cluster-pod-network. When ip-fabric forwarding is enabled,
    # the pod network is backed by the ip-fabric IPAM instead of the pod
    # IPAM; the network creation call is otherwise identical.
    if self.args.ip_fabric_forwarding:
        pod_net_ipam_obj = ip_fabric_ipam_obj
        pod_net_ipam_update = ip_fabric_ipam_update
    else:
        pod_net_ipam_obj = pod_ipam_obj
        pod_net_ipam_update = pod_ipam_update
    cluster_pod_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_pod_network_name(), 'pod-network',
        proj_obj, pod_net_ipam_obj, pod_net_ipam_update, ip_fabric_vn_obj)

    # Create Service IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
    service_ipam_update, service_ipam_obj, _ = self._create_ipam(
        ipam_name, self.args.service_subnets, proj_obj)
    self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()

    # Create a cluster-service-network.
    cluster_service_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_service_network_name(),
        'service-network', proj_obj, service_ipam_obj, service_ipam_update)

    self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                               cluster_pod_vn_obj, cluster_service_vn_obj,
                               cluster_vn_obj)
def create_ingress_security_policy(self):
    """Create the firewall policy housing all ingress-to-service rules."""
    if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        # Policy already exists; nothing to do.
        return
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        self._k8s_event_type, None, None, is_global=True)
    VncSecurityPolicy.ingress_svc_fw_policy_uuid = policy_uuid
    VncSecurityPolicy.add_firewall_policy(policy_uuid)
def vnc_network_policy_add(self, event, namespace, name, uid):
    """Handle a K8s network-policy add: create and register a FW policy."""
    spec = event['object']['spec']
    if not spec:
        # No spec on the event; nothing can be translated.
        self._logger.error(
            "%s - %s:%s Spec Not Found" % (self._name, name, uid))
        return
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        name, namespace, spec)
    VncSecurityPolicy.add_firewall_policy(policy_uuid)
def create_ingress_security_policy(self):
    """Create the firewall policy housing all ingress-to-service rules."""
    if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        # Policy already exists; nothing to do.
        return
    policy_name = "-".join(
        [vnc_kube_config.cluster_name(), self._k8s_event_type])
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        policy_name, None, None, is_global=True)
    VncSecurityPolicy.ingress_svc_fw_policy_uuid = policy_uuid
    VncSecurityPolicy.add_firewall_policy(policy_uuid)
def delete_namespace_security_policy(self, ns_name):
    """Delete the firewall rule enforcing default behavior on a namespace."""
    if not VncSecurityPolicy.allow_all_fw_policy_uuid:
        return
    # Look up the rule created for this namespace, then dis-associate it
    # from the global allow policy and delete it.
    rule_name = self._get_namespace_firewall_rule_name(ns_name)
    rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
    VncSecurityPolicy.delete_firewall_rule(
        VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)
def vnc_network_policy_add(self, event, namespace, name, uid):
    """Create a FW policy for a K8s network policy and cache its FQ name."""
    spec = event['object']['spec']
    if not spec:
        # No spec on the event; nothing can be translated.
        self._logger.error(
            "%s - %s:%s Spec Not Found" % (self._name, name, uid))
        return
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        name, namespace, spec)
    VncSecurityPolicy.add_firewall_policy(policy_uuid)

    # Update kube config db entry for the network policy.
    np = NetworkPolicyKM.find_by_name_or_uuid(uid)
    if np:
        fw_policy_obj = self._vnc_lib.firewall_policy_read(id=policy_uuid)
        np.set_vnc_fq_name(":".join(fw_policy_obj.get_fq_name()))
def vnc_network_policy_add(self, event, namespace, name, uid):
    """Create a FW policy for a K8s network policy and cache its FQ name."""
    spec = event['object']['spec']
    if not spec:
        # No spec on the event; nothing can be translated.
        self._logger.error(
            "%s - %s:%s Spec Not Found" % (self._name, name, uid))
        return
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        name, namespace, spec, k8s_uuid=uid)
    VncSecurityPolicy.add_firewall_policy(policy_uuid)

    # Update kube config db entry for the network policy.
    np = NetworkPolicyKM.find_by_name_or_uuid(uid)
    if np:
        fw_policy_obj = self._vnc_lib.firewall_policy_read(id=policy_uuid)
        np.set_vnc_fq_name(":".join(fw_policy_obj.get_fq_name()))
def delete_ingress_to_service_rule(cls, ns_name, ingress_name, service_name):
    """Delete the ingress-to-service allow rule from the ingress FW policy.

    Returns the uuid of the deleted rule, or None if no matching rule
    (or no ingress firewall policy) was found.
    """
    if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        return None
    rule_name = VncIngress._get_ingress_firewall_rule_name(
        ns_name, ingress_name, service_name)
    # Get the rule id of the rule to be deleted.
    rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
    if rule_uuid:
        # Delete the rule.
        VncSecurityPolicy.delete_firewall_rule(
            VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)
    return rule_uuid
def add_ingress_to_service_rule(cls, ns_name, ingress_name, service_name):
    """Add an ingress-to-service allow rule to the ingress firewall policy.

    Returns the uuid of the created rule, or None when the ingress
    firewall policy does not exist.
    """
    if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        return None
    ingress_labels = XLabelCache.get_ingress_label(
        cls.get_ingress_label_name(ns_name, ingress_name))
    service_labels = XLabelCache.get_service_label(service_name)
    rule_name = VncIngress._get_ingress_firewall_rule_name(
        ns_name, ingress_name, service_name)
    # Allow traffic from the ingress endpoints to the service endpoints.
    fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all(
        rule_name, service_labels, ingress_labels)
    VncSecurityPolicy.add_firewall_rule(
        VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid)
    return fw_rule_uuid
def add_namespace_security_policy(self, k8s_namespace_uuid):
    """Create a firewall rule for default behavior on a namespace."""
    ns = self._get_namespace(k8s_namespace_uuid)
    if not ns or ns.firewall_rule_uuid:
        # Unknown namespace, or rule already provisioned.
        return
    rule_name = self._get_namespace_firewall_rule_name(ns.name)
    # Add custom namespace label on the namespace object.
    self._labels.append(k8s_namespace_uuid,
                        self._labels.get_namespace_label(ns.name))
    # Create a rule for default allow behavior on this namespace.
    ns.firewall_rule_uuid = \
        VncSecurityPolicy.create_firewall_rule_allow_all(
            rule_name, self._labels.get_namespace_label(ns.name))
    # Add default allow rule to the "global allow" firewall policy.
    VncSecurityPolicy.add_firewall_rule(
        VncSecurityPolicy.allow_all_fw_policy_uuid, ns.firewall_rule_uuid)
def add_namespace_security_policy(self, k8s_namespace_uuid):
    """Create ingress/egress firewall rules for default namespace behavior."""
    ns = self._get_namespace(k8s_namespace_uuid)
    if not ns:
        return

    # Add custom namespace label on the namespace object.
    self._labels.append(k8s_namespace_uuid,
                        self._labels.get_namespace_label(ns.name))

    if not ns.firewall_ingress_allow_rule_uuid:
        # Create a rule for default ingress allow behavior on this
        # namespace and attach it to the "global allow" firewall policy.
        ingress_rule_name = \
            self._get_namespace_firewall_ingress_rule_name(ns.name)
        ns.firewall_ingress_allow_rule_uuid = \
            VncSecurityPolicy.create_firewall_rule_allow_all(
                ingress_rule_name,
                self._labels.get_namespace_label(ns.name))
        VncSecurityPolicy.add_firewall_rule(
            VncSecurityPolicy.allow_all_fw_policy_uuid,
            ns.firewall_ingress_allow_rule_uuid)

    if not ns.firewall_egress_allow_rule_uuid:
        # Create a rule for default egress allow behavior on this
        # namespace and attach it to the "global allow" firewall policy.
        egress_rule_name = \
            self._get_namespace_firewall_egress_rule_name(ns.name)
        ns.firewall_egress_allow_rule_uuid = \
            VncSecurityPolicy.create_firewall_rule_allow_all(
                egress_rule_name, {},
                self._labels.get_namespace_label(ns.name))
        VncSecurityPolicy.add_firewall_rule(
            VncSecurityPolicy.allow_all_fw_policy_uuid,
            ns.firewall_egress_allow_rule_uuid)
def delete_namespace_security_policy(self, ns_name):
    """Delete the firewall rules enforcing default behavior on a namespace."""
    if not VncSecurityPolicy.allow_all_fw_policy_uuid:
        return
    # Dis-associate and delete the ingress rule from namespace policy.
    ingress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
        self._get_namespace_firewall_ingress_rule_name(ns_name))
    VncSecurityPolicy.delete_firewall_rule(
        VncSecurityPolicy.allow_all_fw_policy_uuid, ingress_rule_uuid)
    # Dis-associate and delete egress rule from namespace policy.
    egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
        self._get_namespace_firewall_egress_rule_name(ns_name))
    VncSecurityPolicy.delete_firewall_rule(
        VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)
def delete_ingress_to_service_rule_by_id(cls, rule_uuid):
    """Delete a rule from the ingress firewall policy by its uuid."""
    if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        # No ingress firewall policy to delete from.
        return
    VncSecurityPolicy.delete_firewall_rule(
        VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)
def vnc_network_policy_delete(self, namespace, name, uuid):
    """Handle a K8s network-policy delete event.

    Removes the firewall policy created for this network policy.
    Deletion is keyed on name/namespace; the k8s uuid parameter is
    accepted for interface compatibility but not used here.
    """
    VncSecurityPolicy.delete_firewall_policy(name, namespace)
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Initialize VNC connectivity, DB caches, managers and cluster config.

    :param args: parsed kube-manager configuration arguments.
    :param logger: sandesh logger.
    :param q: event queue shared with the kube monitors.
    :param kube: kube API server client.
    :param vnc_kubernetes_config_dict: pre-built common config to apply
        instead of the locally derived IPAM names, if provided.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    #
    # In nested mode, kube-manager connects to contrail components running
    # in underlay via global link local services. TCP flows established on
    # link local services will be torn down by vrouter, if there is no
    # activity for configured(or default) timeout. So disable flow timeout
    # on these connections, so these flows will persist.
    #
    # Note: The way to disable flow timeout is to set timeout to max
    # possible value.
    #
    # NOTE: compare with '==', not 'is' — identity checks against str
    # literals rely on interning and are not guaranteed to hold.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        if self.args.vnc_endpoint_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                2147483647)

        for collector in self.args.collectors:
            collector_port = collector.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", collector_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg, self.args.host_ip)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
    XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=(
                self._get_cluster_service_ipam_fq_name()),
            cluster_ip_fabric_ipam_fq_name=(
                self._get_cluster_ip_fabric_ipam_fq_name()))

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.network_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network.VncNetwork')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS.
    VncSecurityPolicy.tag_cluster_application_policy_set()
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Initialize VNC connectivity, DB caches, managers and cluster config.

    :param args: parsed kube-manager configuration arguments.
    :param logger: sandesh logger.
    :param q: event queue shared with the kube monitors.
    :param kube: kube API server client.
    :param vnc_kubernetes_config_dict: pre-built common config to apply
        instead of the locally derived IPAM names, if provided.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    #
    # In nested mode, kube-manager connects to contrail components running
    # in underlay via global link local services. TCP flows established on
    # link local services will be torn down by vrouter, if there is no
    # activity for configured(or default) timeout. So disable flow timeout
    # on these connections, so these flows will persist.
    #
    # Note: The way to disable flow timeout is to set timeout to max
    # possible value.
    #
    # NOTE: compare with '==', not 'is' — identity checks against str
    # literals rely on interning and are not guaranteed to hold.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        if self.args.vnc_endpoint_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                2147483647)

        for collector in self.args.collectors:
            collector_port = collector.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", collector_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg, self.args.host_ip)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
    XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=(
                self._get_cluster_service_ipam_fq_name()),
            cluster_ip_fabric_ipam_fq_name=(
                self._get_cluster_ip_fabric_ipam_fq_name()))

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.network_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network.VncNetwork')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS.
    VncSecurityPolicy.tag_cluster_application_policy_set()
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Initialize VNC connectivity, DB caches, managers and cluster config.

    :param args: parsed kube-manager configuration arguments.
    :param logger: sandesh logger.
    :param q: event queue shared with the kube monitors.
    :param kube: kube API server client.
    :param vnc_kubernetes_config_dict: pre-built common config to apply
        instead of the locally derived IPAM names, if provided.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
        vnc_lib=self.vnc_lib, args=self.args, queue=self.q, kube=self.kube)

    # HACK ALERT.
    # Till we have an alternate means to get config objects, we will
    # directly connect to cassandra and rabbitmq. Such a persistent
    # connection is discouraged, but is the only option we have for now.
    #
    # Disable flow timeout on this connection, so the flow persists.
    #
    # NOTE: compare with '==', not 'is' — identity checks against str
    # literals rely on interning and are not guaranteed to hold.
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger,
                                DBBaseKM, REACTION_MAP, 'kube_manager',
                                rabbitmq_cfg)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    XLabelCache.register_label_add_callback(VncKubernetes.create_tags)
    XLabelCache.register_label_delete_callback(VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=(
                self._get_cluster_service_ipam_fq_name()),
            cluster_ip_fabric_ipam_fq_name=(
                self._get_cluster_ip_fabric_ipam_fq_name()))

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress')
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS.
    VncSecurityPolicy.tag_cluster_application_policy_set()