def _provision_cluster(self):
    """Provision base Contrail config objects for the kubernetes cluster.

    Creates (in order): default/kube-system projects, the cluster
    application policy set, SNAT port pools, the ip-fabric/pod/service
    IPAMs, the cluster pod and service virtual networks, and finally
    attaches the cluster network policy.  Statement order matters: later
    objects reference the ones created earlier.
    """
    # Pre creating default project before namespace add event.
    proj_obj = self._create_project('default')

    # Create application policy set for the cluster project.
    VncSecurityPolicy.create_application_policy_set(
        vnc_kube_config.application_policy_set_name())

    # Allocate fabric snat port translation pools.
    self._allocate_fabric_snat_port_translation_pools()

    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    ip_fabric_vn_obj = self.vnc_lib. \
        virtual_network_read(fq_name=ip_fabric_fq_name)

    cluster_vn_obj = None
    if DBBaseKM.is_nested():
        # In nested mode the cluster runs inside an existing overlay
        # network; that network may legitimately not exist yet.
        try:
            cluster_vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vnc_kube_config.cluster_default_network_fq_name())
        except NoIdError:
            pass

    # Pre creating kube-system project before namespace add event.
    self._create_project('kube-system')

    # Create ip-fabric IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
    ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
    self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()

    # Create Pod IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
    pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
    # Cache cluster pod ipam name.
    # This will be referenced by ALL pods that are spawned in the cluster.
    self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

    # Create a cluster-pod-network.
    # With ip-fabric forwarding the pod network borrows the fabric IPAM;
    # otherwise it uses the dedicated pod IPAM created above.
    if self.args.ip_fabric_forwarding:
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(),
            'pod-network', proj_obj, ip_fabric_ipam_obj,
            ip_fabric_ipam_update, ip_fabric_vn_obj)
    else:
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(),
            'pod-network', proj_obj, pod_ipam_obj, pod_ipam_update,
            ip_fabric_vn_obj)

    # Create Service IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
    service_ipam_update, service_ipam_obj, service_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
    self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()

    # Create a cluster-service-network.
    cluster_service_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_service_network_name(),
        'service-network', proj_obj, service_ipam_obj, service_ipam_update)

    # Wire the created networks together with the cluster policy.
    self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                               cluster_pod_vn_obj, cluster_service_vn_obj,
                               cluster_vn_obj)
def _network_policy_sync(self):
    """
    Validate and synchronize network policy config.

    Runs a periodic self-heal: if the cluster security policy fails
    validation, attempt to recreate it.  Then reconcile K8s API state
    against Contrail API state and emit delete events for firewall
    policies that exist only on the Contrail side.
    """
    # Step 1: validate current network policy config; heal on failure.
    if not VncSecurityPolicy.validate_cluster_security_policy():
        self._logger.error(
            "%s - Periodic validation of cluster security policy failed."
            " Attempting to heal." % (self._name))
        VncSecurityPolicy.recreate_cluster_security_policy()

    # Step 2: sync K8s API with Contrail API.  This covers the case where
    # kube-manager was down when a policy was deleted and therefore missed
    # the delete event from the K8s API server.
    orphan_fw_policy_uuids = \
        VncSecurityPolicy.sync_cluster_security_policy()

    # Step 3: purge config objects for policies that are gone from the
    # K8s API server but still present in the Contrail API.
    for orphan_uuid in orphan_fw_policy_uuids:
        self._logger.error(
            "%s - Generating delete event for orphaned FW policy [%s]" %
            (self._name, orphan_uuid))
        self._create_network_policy_delete_event(orphan_uuid)
def _validate_network_policy_resources(self, name, uuid, spec=None,
                                       validate_delete=False,
                                       namespace=None):
    """Validate cached and config state for a k8s network policy.

    Args:
        name: Network policy name.
        uuid: K8s UUID of the network policy.
        spec: Policy spec dict; empty/None means "no spec" (FW policy
            should not exist).  Defaults to None instead of a mutable
            ``{}`` default to avoid the shared-mutable-default pitfall;
            behavior is unchanged since both are falsy.
        validate_delete: If True, assert the policy has been deleted.
        namespace: Namespace to validate in; defaults to self.ns_name.
    """
    if spec is None:
        spec = {}
    ns_name = namespace if namespace else self.ns_name
    np_event_obj = NetworkPolicyKM.find_by_name_or_uuid(uuid)
    if validate_delete:
        # Deleted policy: KM cache entry must be gone.
        self.assertIsNone(np_event_obj)
    elif not spec:
        # Policy event seen but no spec: FW policy must not exist.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, ns_name)
        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        self.assertIsNotNone(np_event_obj)
        self.assertIsNone(fw_policy)
    else:
        # Policy with a spec: both cache entry and FW policy must exist,
        # and the FW policy must reflect the spec.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, ns_name)
        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        self.assertIsNotNone(np_event_obj)
        self.assertIsNotNone(fw_policy)
        # Validate network policy spec.
        self._validate_spec(spec, fw_policy)
def create_ingress_security_policy(self):
    """
    Create a FW policy to house all ingress-to-service rules.

    Idempotent: does nothing if the ingress-service FW policy already
    exists.  On first call, creates a global FW policy, attaches it to
    the cluster APS and records its UUID on VncSecurityPolicy.
    """
    if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        # Already provisioned; nothing to do.
        return
    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        self._k8s_event_type, None, None, is_global=True)
    VncSecurityPolicy.add_firewall_policy(policy_uuid)
    VncSecurityPolicy.ingress_svc_fw_policy_uuid = policy_uuid
def test_add_network_policy_scaling(self):
    """Create several network policies and verify APS ordering.

    Adds nine policies, checks their FW policies appear on the APS with
    strictly increasing sequence numbers, then deletes them all.
    """
    policy_uuids = {}
    indices = list(range(1, 10))

    # Create one network policy per index and validate each.
    for idx in indices:
        np_spec = {'podSelector': {}, 'ingress': [{}]}
        np_name = "-".join([unittest.TestCase.id(self), str(idx)])
        policy_uuids[idx] = self._add_update_network_policy(np_name,
                                                            np_spec)
        self._validate_network_policy_resources(np_name,
                                                policy_uuids[idx],
                                                np_spec)

    prev_sequence = None
    aps_uid = VncSecurityPolicy.cluster_aps_uuid
    aps_obj = self._get_default_application_policy_set()
    policy_refs = aps_obj.get_firewall_policy_refs()

    # Sequence numbers on the APS must increase in creation order.
    for idx in indices:
        np_name = "-".join([unittest.TestCase.id(self), str(idx)])
        expected_name = VncSecurityPolicy.get_firewall_policy_name(
            np_name, self.ns_name, False)
        for ref in (policy_refs or []):
            if expected_name == ref['to'][-1]:
                if prev_sequence:
                    self.assertTrue(
                        prev_sequence < ref['attr'].get_sequence())
                prev_sequence = ref['attr'].get_sequence()
                break

    # Cleanup.
    # NOTE(review): delete uses the bare test id as name and the np_name
    # left over from the last loop iteration for validation — presumably
    # deletion is keyed on the uuid; confirm against helper signatures.
    for idx in indices:
        self._delete_network_policy(unittest.TestCase.id(self),
                                    policy_uuids[idx])
        self._validate_network_policy_resources(np_name,
                                                policy_uuids[idx],
                                                np_spec,
                                                validate_delete=True)
def test_add_network_policy_scaling(self):
    """Scale test: nine network policies must be sequenced on the APS
    in creation order, then delete cleanly.
    """
    created = {}
    id_range = range(1, 10)

    # Phase 1: create and validate each policy.
    for n in id_range:
        np_spec = {'podSelector': {}, 'ingress': [{}]}
        np_name = "-".join([unittest.TestCase.id(self), str(n)])
        created[n] = self._add_update_network_policy(np_name, np_spec)
        self._validate_network_policy_resources(np_name, created[n],
                                                np_spec)

    # Phase 2: verify strictly increasing sequence numbers on the APS.
    last_seq = None
    aps_uid = VncSecurityPolicy.cluster_aps_uuid
    aps_obj = self._get_default_application_policy_set()
    refs = aps_obj.get_firewall_policy_refs()
    for n in id_range:
        np_name = "-".join([unittest.TestCase.id(self), str(n)])
        wanted = VncSecurityPolicy.get_firewall_policy_name(
            np_name, self.ns_name, False)
        for ref in (refs or []):
            if wanted == ref['to'][-1]:
                if last_seq:
                    self.assertTrue(last_seq < ref['attr'].get_sequence())
                last_seq = ref['attr'].get_sequence()
                break

    # Phase 3: delete everything and validate removal.
    for n in id_range:
        self._delete_network_policy(unittest.TestCase.id(self),
                                    created[n])
        self._validate_network_policy_resources(np_name, created[n],
                                                np_spec,
                                                validate_delete=True)
def test_network_policy_ordering_resolve_during_modify(self):
    """Verify FW policy ordering errors are detected and then resolved
    when tail / post-tail policies are (re-)added to the APS.
    """
    # Check if we have a valid config to start with.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)

    # Get deny-all object handle.
    self.assertIsNotNone(VncSecurityPolicy.deny_all_fw_policy_uuid)
    fw_policy_obj = self._vnc_lib.firewall_policy_read(
        id=VncSecurityPolicy.deny_all_fw_policy_uuid)
    aps_obj = self._get_default_application_policy_set()
    self.assertIsNotNone(fw_policy_obj)
    self.assertIsNotNone(aps_obj)

    # Detach deny-all policy from APS to introduce error.
    aps_obj.del_firewall_policy(fw_policy_obj)
    self._vnc_lib.application_policy_set_update(aps_obj)

    # Verify that validation of APS will fail.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertFalse(valid)

    # Add deny-all policy to TAIL.
    # This is essentially an error condition where there is already
    # post-tail objects in the APS, but the deny-all gets added after
    # post-tail objects.
    VncSecurityPolicy.add_firewall_policy(
        VncSecurityPolicy.deny_all_fw_policy_uuid, tail=True)

    # Verify that validation of APS will fail.
    # Validation will fail because "tail" object is found after objects
    # that are marked as post-tail.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertFalse(valid)

    # Get allow-all object handle.
    self.assertIsNotNone(VncSecurityPolicy.allow_all_fw_policy_uuid)
    fw_policy_obj = self._vnc_lib.firewall_policy_read(
        id=VncSecurityPolicy.allow_all_fw_policy_uuid)
    aps_obj = self._get_default_application_policy_set()
    self.assertIsNotNone(fw_policy_obj)
    self.assertIsNotNone(aps_obj)

    # Re-add attempt of object marked post-tail should cause the post-tail
    # object to be re-arranged after tail, even though the object is already
    # present on the APS.
    VncSecurityPolicy.add_firewall_policy(
        VncSecurityPolicy.allow_all_fw_policy_uuid, append_after_tail=True)

    # Validation of APS should now succeed.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)
def delete_ingress_to_service_rule(cls, ns_name, ingress_name,
                                   service_name):
    """
    Delete the ingress-to-service allow rule added to ingress firewall
    policy.

    Returns the UUID of the deleted rule, or None if the ingress FW
    policy does not exist or no matching rule was found.
    """
    if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        return None
    rule_name = VncIngress._get_ingress_firewall_rule_name(
        ns_name, ingress_name, service_name)
    # Look up the rule to be deleted by its derived name.
    rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
    if rule_uuid:
        VncSecurityPolicy.delete_firewall_rule(
            VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)
    return rule_uuid
def vnc_network_policy_add(self, event, namespace, name, uid):
    """Handle a k8s network-policy add event.

    Creates the backing firewall policy, attaches it to the cluster APS
    and records its FQ name on the cached NetworkPolicyKM object.
    """
    spec = event['object']['spec']
    if not spec:
        # Nothing to translate without a spec.
        self._logger.error("%s - %s:%s Spec Not Found" %
                           (self._name, name, uid))
        return

    policy_uuid = VncSecurityPolicy.create_firewall_policy(
        name, namespace, spec, k8s_uuid=uid)
    VncSecurityPolicy.add_firewall_policy(policy_uuid)

    # Update kube config db entry for the network policy.
    cached_np = NetworkPolicyKM.find_by_name_or_uuid(uid)
    if cached_np:
        policy_obj = self._vnc_lib.firewall_policy_read(id=policy_uuid)
        cached_np.set_vnc_fq_name(":".join(policy_obj.get_fq_name()))
def add_ingress_to_service_rule(cls, ns_name, ingress_name,
                                service_name):
    """
    Add a ingress-to-service allow rule to ingress firewall policy.

    Returns the UUID of the created rule, or None if the ingress FW
    policy does not exist.
    """
    if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        return None
    # Resolve the labels that identify ingress and service endpoints.
    ingress_labels = XLabelCache.get_ingress_label(
        cls.get_ingress_label_name(ns_name, ingress_name))
    service_labels = XLabelCache.get_service_label(service_name)
    rule_name = VncIngress._get_ingress_firewall_rule_name(
        ns_name, ingress_name, service_name)
    # Create the allow rule and attach it to the ingress FW policy.
    fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all(
        rule_name, service_labels, ingress_labels)
    VncSecurityPolicy.add_firewall_rule(
        VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid)
    return fw_rule_uuid
def test_deny_all_policy_periodic_validate(self): """ Validate network policy periodic self-healing when deny-all firewall policy is detached from APS. """ # Check if we have a valid config to start with. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Create namespace. self._create_namespace(self.ns_name, None, True) # Create a network policy. np_name = unittest.TestCase.id(self) np_spec = {'podSelector': {}, 'policyTypes': ['Ingress', 'Egress']} # Create a user network policy. np_uuid = self._add_update_network_policy(np_name, np_spec) self._validate_network_policy_resources(np_name, np_uuid, np_spec, namespace=self.ns_name) # Validate that config is sane after user policy add. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Get some basic object handles. self.assertIsNotNone(VncSecurityPolicy.deny_all_fw_policy_uuid) fw_policy_obj = self._vnc_lib.firewall_policy_read( id=VncSecurityPolicy.deny_all_fw_policy_uuid) aps_obj = self._get_default_application_policy_set() self.assertIsNotNone(fw_policy_obj) self.assertIsNotNone(aps_obj) # Detach deny-all policy from APS to introduce error. aps_obj.del_firewall_policy(fw_policy_obj) self._vnc_lib.application_policy_set_update(aps_obj) # Verify that validation of APS will fail. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertFalse(valid) # Fix the inconsisteny in APS. VncSecurityPolicy.recreate_cluster_security_policy() # Verify that validation of APS will succeed now. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Cleanup user created network policy. self._delete_network_policy(np_name, np_uuid, np_spec) self._validate_network_policy_resources(np_name, np_uuid, np_spec, validate_delete=True, namespace=self.ns_name)
def _validate_network_policy_resources(self, name, uuid, spec=None,
                                       validate_delete=False,
                                       namespace=None):
    """Validate cached and config state for a k8s network policy.

    Args:
        name: Network policy name.
        uuid: K8s UUID of the network policy.
        spec: Policy spec dict; empty/None means "no spec" (FW policy
            should not exist).  Defaults to None rather than the
            original mutable ``{}`` default (shared-mutable-default
            pitfall); behavior is unchanged since both are falsy.
        validate_delete: If True, assert the policy has been deleted.
        namespace: Namespace to validate in; defaults to self.ns_name.
    """
    if spec is None:
        spec = {}
    ns_name = namespace if namespace else self.ns_name
    np_event_obj = NetworkPolicyKM.find_by_name_or_uuid(uuid)
    if validate_delete:
        # Deleted policy: KM cache entry must be gone.
        self.assertIsNone(np_event_obj)
    elif not spec:
        # Policy event seen but no spec: FW policy must not exist.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, ns_name)
        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        self.assertIsNotNone(np_event_obj)
        self.assertIsNone(fw_policy)
    else:
        # Policy with a spec: both cache entry and FW policy must exist,
        # and the FW policy must reflect the spec.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, ns_name)
        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        self.assertIsNotNone(np_event_obj)
        self.assertIsNotNone(fw_policy)
        # Validate network policy spec.
        self._validate_spec(spec, fw_policy)
def test_deny_all_policy_periodic_validate(self): """ Validate network policy periodic self-healing when deny-all firewall policy is detached from APS. """ # Check if we have a valid config to start with. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Create namespace. self._create_namespace(self.ns_name, None, True) # Create a network policy. np_name = unittest.TestCase.id(self) np_spec = { 'podSelector': {}, 'policyTypes': ['Ingress', 'Egress'] } # Create a user network policy. np_uuid = self._add_update_network_policy(np_name, np_spec) self._validate_network_policy_resources(np_name, np_uuid, np_spec, namespace=self.ns_name) # Validate that config is sane after user policy add. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Get some basic object handles. self.assertIsNotNone(VncSecurityPolicy.deny_all_fw_policy_uuid) fw_policy_obj = self._vnc_lib.firewall_policy_read( id=VncSecurityPolicy.deny_all_fw_policy_uuid) aps_obj = self._get_default_application_policy_set() self.assertIsNotNone(fw_policy_obj) self.assertIsNotNone(aps_obj) # Detach deny-all policy from APS to introduce error. aps_obj.del_firewall_policy(fw_policy_obj) self._vnc_lib.application_policy_set_update(aps_obj) # Verify that validation of APS will fail. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertFalse(valid) # Fix the inconsisteny in APS. VncSecurityPolicy.recreate_cluster_security_policy() # Verify that validation of APS will succeed now. valid = VncSecurityPolicy.validate_cluster_security_policy() self.assertTrue(valid) # Cleanup user created network policy. self._delete_network_policy(np_name, np_uuid, np_spec) self._validate_network_policy_resources(np_name, np_uuid, np_spec, validate_delete=True, namespace=self.ns_name)
def delete_namespace_security_policy(self, ns_name):
    """
    Delete firewall rules created to enforce default behavior on this
    namespace (ingress rule first, then egress rule).
    """
    if not VncSecurityPolicy.allow_all_fw_policy_uuid:
        return
    # Dis-associate and delete the ingress and egress rules from the
    # namespace's allow-all policy, in that order.
    rule_names = (
        self._get_namespace_firewall_ingress_rule_name(ns_name),
        self._get_namespace_firewall_egress_rule_name(ns_name),
    )
    for rule_name in rule_names:
        rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
        VncSecurityPolicy.delete_firewall_rule(
            VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)
def add_namespace_security_policy(self, k8s_namespace_uuid):
    """
    Create firewall rules for default behavior on a namespace.

    Tags the namespace with its custom label, then creates (once) the
    default ingress-allow and egress-allow rules and attaches them to
    the global allow-all firewall policy.
    """
    ns = self._get_namespace(k8s_namespace_uuid)
    if not ns:
        return

    # Add custom namespace label on the namespace object.
    self._labels.append(k8s_namespace_uuid,
                        self._labels.get_namespace_label(ns.name))

    if not ns.firewall_ingress_allow_rule_uuid:
        rule_name = self._get_namespace_firewall_ingress_rule_name(
            ns.name)
        # Rule allowing all ingress traffic within this namespace.
        ns.firewall_ingress_allow_rule_uuid = \
            VncSecurityPolicy.create_firewall_rule_allow_all(
                rule_name, self._labels.get_namespace_label(ns.name))
        # Attach the rule to the "global allow" firewall policy.
        VncSecurityPolicy.add_firewall_rule(
            VncSecurityPolicy.allow_all_fw_policy_uuid,
            ns.firewall_ingress_allow_rule_uuid)

    if not ns.firewall_egress_allow_rule_uuid:
        rule_name = self._get_namespace_firewall_egress_rule_name(
            ns.name)
        # Rule allowing all egress traffic from this namespace.
        ns.firewall_egress_allow_rule_uuid = \
            VncSecurityPolicy.create_firewall_rule_allow_all(
                rule_name, {},
                self._labels.get_namespace_label(ns.name))
        # Attach the rule to the "global allow" firewall policy.
        VncSecurityPolicy.add_firewall_rule(
            VncSecurityPolicy.allow_all_fw_policy_uuid,
            ns.firewall_egress_allow_rule_uuid)
def vnc_network_policy_delete(self, namespace, name, uuid):
    """Handle a k8s network-policy delete event.

    Deletion is keyed on (name, namespace); the k8s uuid parameter is
    accepted for handler-signature parity but not used here.
    """
    VncSecurityPolicy.delete_firewall_policy(name, namespace)
def test_periodic_validate_with_user_policies(self):
    """
    Validate network policy periodic self-healing when multiple user
    created policies are present.
    """
    np_uuid_dict = {}
    test_range = list(range(1, 10))
    # Create several user network policies and validate each.
    for i in test_range:
        np_spec = {'podSelector': {}, 'ingress': [{}]}
        np_name = "-".join([unittest.TestCase.id(self), str(i)])
        np_uuid_dict[i] = self._add_update_network_policy(np_name, np_spec)
        self._validate_network_policy_resources(np_name, np_uuid_dict[i],
                                                np_spec)

    # Check if we have a valid config to start with.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)

    # Get some basic object handles.
    self.assertIsNotNone(VncSecurityPolicy.allow_all_fw_policy_uuid)
    fw_policy_obj = self._vnc_lib.firewall_policy_read(
        id=VncSecurityPolicy.allow_all_fw_policy_uuid)
    aps_obj = self._get_default_application_policy_set()
    self.assertIsNotNone(fw_policy_obj)
    self.assertIsNotNone(aps_obj)

    # Detach allow-all policy from APS to introduce error.
    aps_obj.del_firewall_policy(fw_policy_obj)
    self._vnc_lib.application_policy_set_update(aps_obj)

    # Verify that validation of APS will fail.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertFalse(valid)

    # Fix the inconsistency in APS.
    VncSecurityPolicy.recreate_cluster_security_policy()

    # Verify that validation of APS will succeed now.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)

    #
    # After self-healing, verify that the first on the APS, the FW policies
    # are ordered as follows:
    #  - Ingress-svc fw policy
    #  - User created policies
    #  - Deny-all fw policy
    #  - Allow-all fw policy
    #
    previous_sequence = None
    aps = ApplicationPolicySetKM.locate(aps_obj.get_uuid())
    aps.update()
    fw_policy_refs = aps.get_firewall_policy_refs_sorted()

    # Locate the cluster-owned ingress-svc policy; it must come first.
    ingress_fw_policy_idx = None
    for index, fw_policy_ref in enumerate(fw_policy_refs):
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.owner and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.ingress_svc_fw_policy_uuid)
            ingress_fw_policy_idx = index
            break

    # User policies follow, with strictly increasing sequence numbers.
    last_user_policy_index = None
    loop_start_index = ingress_fw_policy_idx + 1
    for i in test_range:
        np_name = "-".join([unittest.TestCase.id(self), str(i)])
        fw_policy_name = VncSecurityPolicy.get_firewall_policy_name(
            np_name, self.ns_name, False)
        for index, fw_policy in enumerate(
                fw_policy_refs[loop_start_index:]):
            if fw_policy_name == fw_policy['to'][-1]:
                if previous_sequence:
                    self.assertTrue(previous_sequence <
                                    fw_policy['attr']['sequence'])
                previous_sequence = fw_policy['attr']['sequence']
                last_user_policy_index = loop_start_index + index
                break

    # The first cluster policy after the user policies must be deny-all.
    deny_all_policy_index = None
    loop_start_index = last_user_policy_index + 1
    for index, fw_policy_ref in enumerate(
            fw_policy_refs[loop_start_index:]):
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.cluster_name and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.deny_all_fw_policy_uuid)
            deny_all_policy_index = loop_start_index + index
            break

    # And the cluster policy after deny-all must be allow-all.
    loop_start_index = deny_all_policy_index + 1
    for fw_policy_ref in fw_policy_refs[loop_start_index:]:
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.cluster_name and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.allow_all_fw_policy_uuid)
            break

    # Cleanup the user created policies.
    for i in test_range:
        self._delete_network_policy(unittest.TestCase.id(self),
                                    np_uuid_dict[i])
        self._validate_network_policy_resources(np_name, np_uuid_dict[i],
                                                np_spec,
                                                validate_delete=True)
def delete_ingress_to_service_rule_by_id(cls, rule_uuid):
    """Delete an ingress-to-service rule from the ingress FW policy,
    addressed directly by the rule's UUID.
    """
    policy_uuid = VncSecurityPolicy.ingress_svc_fw_policy_uuid
    if policy_uuid:
        # Dis-associate and delete the rule.
        VncSecurityPolicy.delete_firewall_rule(policy_uuid, rule_uuid)
def _create_application_policy_set(self, name, parent_obj=None):
    """Thin wrapper: create an application policy set via
    VncSecurityPolicy and return the result.
    """
    return VncSecurityPolicy.create_application_policy_set(name,
                                                           parent_obj)
def _create_application_policy_set(self, name, parent_obj=None):
    """Delegate APS creation to VncSecurityPolicy and pass the result
    through to the caller.
    """
    aps = VncSecurityPolicy.create_application_policy_set(name, parent_obj)
    return aps
def __init__(self, args=None, logger=None, q=None, kube=None,
             vnc_kubernetes_config_dict=None):
    """Bring up the VNC side of kube-manager.

    Order matters throughout: VNC connection -> flow-aging tweaks
    (nested mode) -> DB init -> cache sync -> rabbit -> security policy
    manager -> cluster provisioning -> per-resource managers -> default
    security policies.
    """
    self._name = type(self).__name__
    self.args = args
    self.logger = logger
    self.q = q
    self.kube = kube
    # FQ names of cluster IPAMs; populated by _provision_cluster().
    self._cluster_pod_ipam_fq_name = None
    self._cluster_service_ipam_fq_name = None
    self._cluster_ip_fabric_ipam_fq_name = None

    # init vnc connection
    self.vnc_lib = self._vnc_connect()

    # Cache common config.
    self.vnc_kube_config = vnc_kube_config(logger=self.logger,
                                           vnc_lib=self.vnc_lib,
                                           args=self.args, queue=self.q,
                                           kube=self.kube)

    #
    # In nested mode, kube-manager connects to contrail components running
    # in underlay via global link local services. TCP flows established on
    # link local services will be torn down by vrouter, if there is no
    # activity for configured(or default) timeout. So disable flow timeout
    # on these connections, so these flows will persist.
    #
    # Note: The way to disable flow timeout is to set timeout to max
    #       possible value.
    #
    if self.args.nested_mode == '1':
        for cassandra_server in self.args.cassandra_server_list:
            cassandra_port = cassandra_server.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", cassandra_port, 2147483647)

        if self.args.rabbit_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.rabbit_port, 2147483647)

        if self.args.vnc_endpoint_port:
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", self.args.vnc_endpoint_port,
                2147483647)

        for collector in self.args.collectors:
            collector_port = collector.split(':')[-1]
            flow_aging_manager.create_flow_aging_timeout_entry(
                self.vnc_lib, "tcp", collector_port, 2147483647)

    # init access to db
    self._db = db.KubeNetworkManagerDB(self.args, self.logger)
    DBBaseKM.init(self, self.logger, self._db)

    # If nested mode is enabled via config, then record the directive.
    if self.args.nested_mode == '1':
        DBBaseKM.set_nested(True)

    # sync api server db in local cache
    self._sync_km()

    # init rabbit connection
    rabbitmq_cfg = kube_args.rabbitmq_args(self.args)
    self.rabbit = VncAmqpHandle(
        self.logger._sandesh, self.logger, DBBaseKM,
        reaction_map.REACTION_MAP,
        self.args.cluster_id + '-' + self.args.cluster_name +
        '-kube_manager',
        rabbitmq_cfg, self.args.host_ip)
    self.rabbit.establish()
    self.rabbit._db_resync_done.set()

    # Register label add and delete callbacks with label management entity.
    label_cache.XLabelCache.register_label_add_callback(
        VncKubernetes.create_tags)
    label_cache.XLabelCache.register_label_delete_callback(
        VncKubernetes.delete_tags)

    # Instantiate and init Security Policy Manager.
    self.security_policy_mgr = VncSecurityPolicy(self.vnc_lib,
                                                 VncKubernetes.get_tags)

    # provision cluster
    self._provision_cluster()

    # Either take the provided common config wholesale, or derive the
    # IPAM FQ names from what _provision_cluster() just created.
    if vnc_kubernetes_config_dict:
        self.vnc_kube_config.update(**vnc_kubernetes_config_dict)
    else:
        # Update common config.
        self.vnc_kube_config.update(
            cluster_pod_ipam_fq_name=self._get_cluster_pod_ipam_fq_name(),
            cluster_service_ipam_fq_name=self.
            _get_cluster_service_ipam_fq_name(),
            cluster_ip_fabric_ipam_fq_name=self.
            _get_cluster_ip_fabric_ipam_fq_name())

    # handle events
    self.label_cache = label_cache.LabelCache()
    self.vnc_kube_config.update(label_cache=self.label_cache)

    # Instantiate the per-resource event managers.
    self.tags_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_tags.VncTags')
    self.network_policy_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network_policy.VncNetworkPolicy')
    self.namespace_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_namespace.VncNamespace',
        self.network_policy_mgr)
    self.ingress_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_ingress.VncIngress', self.tags_mgr)
    self.service_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_service.VncService', self.ingress_mgr)
    self.pod_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_pod.VncPod', self.service_mgr,
        self.network_policy_mgr)
    self.endpoints_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_endpoints.VncEndpoints')
    self.network_mgr = importutils.import_object(
        'kube_manager.vnc.vnc_network.VncNetwork')

    # Create system default security policies.
    VncSecurityPolicy.create_deny_all_security_policy()
    VncSecurityPolicy.create_allow_all_security_policy()
    self.ingress_mgr.create_ingress_security_policy()

    VncKubernetes._vnc_kubernetes = self

    # Associate cluster with the APS.
    VncSecurityPolicy.tag_cluster_application_policy_set()
def test_periodic_validate_with_user_policies(self):
    """
    Validate network policy periodic self-healing when multiple user
    created policies are present.
    """
    np_uuid_dict = {}
    test_range = range(1, 10)
    # Create several user network policies and validate each.
    for i in test_range:
        np_spec = {'podSelector': {}, 'ingress': [{}]}
        np_name = "-".join([unittest.TestCase.id(self), str(i)])
        np_uuid_dict[i] = self._add_update_network_policy(np_name, np_spec)
        self._validate_network_policy_resources(np_name, np_uuid_dict[i],
                                                np_spec)

    # Check if we have a valid config to start with.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)

    # Get some basic object handles.
    self.assertIsNotNone(VncSecurityPolicy.allow_all_fw_policy_uuid)
    fw_policy_obj = self._vnc_lib.firewall_policy_read(
        id=VncSecurityPolicy.allow_all_fw_policy_uuid)
    aps_obj = self._get_default_application_policy_set()
    self.assertIsNotNone(fw_policy_obj)
    self.assertIsNotNone(aps_obj)

    # Detach allow-all policy from APS to introduce error.
    aps_obj.del_firewall_policy(fw_policy_obj)
    self._vnc_lib.application_policy_set_update(aps_obj)

    # Verify that validation of APS will fail.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertFalse(valid)

    # Fix the inconsistency in APS.
    VncSecurityPolicy.recreate_cluster_security_policy()

    # Verify that validation of APS will succeed now.
    valid = VncSecurityPolicy.validate_cluster_security_policy()
    self.assertTrue(valid)

    #
    # After self-healing, verify that the first on the APS, the FW policies
    # are ordered as follows:
    #  - Ingress-svc fw policy
    #  - User created policies
    #  - Deny-all fw policy
    #  - Allow-all fw policy
    #
    previous_sequence = None
    aps = ApplicationPolicySetKM.locate(aps_obj.get_uuid())
    aps.update()
    fw_policy_refs = aps.get_firewall_policy_refs_sorted()

    # Locate the cluster-owned ingress-svc policy; it must come first.
    ingress_fw_policy_idx = None
    for index, fw_policy_ref in enumerate(fw_policy_refs):
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.owner and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.ingress_svc_fw_policy_uuid)
            ingress_fw_policy_idx = index
            break

    # User policies follow, with strictly increasing sequence numbers.
    last_user_policy_index = None
    loop_start_index = ingress_fw_policy_idx + 1
    for i in test_range:
        np_name = "-".join([unittest.TestCase.id(self), str(i)])
        fw_policy_name = VncSecurityPolicy.get_firewall_policy_name(
            np_name, self.ns_name, False)
        for index, fw_policy in enumerate(
                fw_policy_refs[loop_start_index:]):
            if fw_policy_name == fw_policy['to'][-1]:
                if previous_sequence:
                    self.assertTrue(previous_sequence <
                                    fw_policy['attr']['sequence'])
                previous_sequence = fw_policy['attr']['sequence']
                last_user_policy_index = loop_start_index + index
                break

    # The first cluster policy after the user policies must be deny-all.
    deny_all_policy_index = None
    loop_start_index = last_user_policy_index + 1
    for index, fw_policy_ref in enumerate(
            fw_policy_refs[loop_start_index:]):
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.cluster_name and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.deny_all_fw_policy_uuid)
            deny_all_policy_index = loop_start_index + index
            break

    # And the cluster policy after deny-all must be allow-all.
    loop_start_index = deny_all_policy_index + 1
    for fw_policy_ref in fw_policy_refs[loop_start_index:]:
        fw_policy = FirewallPolicyKM.locate(fw_policy_ref['uuid'])
        if fw_policy.cluster_name and\
           fw_policy.cluster_name == self.cluster_name():
            self.assertTrue(fw_policy.uuid ==
                            VncSecurityPolicy.allow_all_fw_policy_uuid)
            break

    # Cleanup the user created policies.
    for i in test_range:
        self._delete_network_policy(unittest.TestCase.id(self),
                                    np_uuid_dict[i])
        self._validate_network_policy_resources(np_name, np_uuid_dict[i],
                                                np_spec,
                                                validate_delete=True)