def create_security_group(self, knp, project_id):
    """Create an empty security group for a KuryrNetworkPolicy.

    :param knp: KuryrNetworkPolicy CRD dict; its namespace and name build
                the SG name, and its unique name goes in the description.
    :param project_id: Neutron project (tenant) the SG belongs to.
    :returns: ID of the created security group.
    :raises: ``os_exc.SDKException`` or ``exceptions.ResourceNotReady``
             re-raised after recording a Kubernetes warning event.
    """
    sg_name = driver_utils.get_resource_name(
        knp['metadata']['namespace'] + '-' + knp['metadata']['name'],
        prefix='sg/')
    desc = ("Kuryr-Kubernetes Network Policy %s SG"
            % utils.get_res_unique_name(knp))
    try:
        # Create initial security group
        sg = self.os_net.create_security_group(name=sg_name,
                                               project_id=project_id,
                                               description=desc)
        driver_utils.tag_neutron_resources([sg])
        # NOTE(dulek): Neutron populates every new SG with two rules
        #              allowing egress on IPv4 and IPv6. This collides with
        #              how network policies are supposed to work, because
        #              initially even egress traffic should be blocked.
        #              To work around this we will delete those two SG
        #              rules just after creation.
        for sgr in sg.security_group_rules:
            self.os_net.delete_security_group_rule(sgr['id'])
    except (os_exc.SDKException, exceptions.ResourceNotReady) as exc:
        # Surface the failure on the originating NetworkPolicy (if it
        # still exists) so users can see it with `kubectl describe`.
        np = utils.get_referenced_object(knp, 'NetworkPolicy')
        if np:
            self.kubernetes.add_event(np, 'FailedToAddSecurityGroup',
                                      f'Adding new security group or '
                                      f'security group rules for '
                                      f'corresponding network policy has '
                                      f'failed: {exc}', 'Warning')
        # Fixed doubled space that the previous two-part literal produced
        # ("policy  %s").
        LOG.exception("Error creating security group for network policy "
                      "%s", knp['metadata']['name'])
        raise
    return sg.id
def test_get_resource_name_with_sufix(self):
    """A suffix passed as keyword is appended verbatim to the name."""
    base = 'another'
    tail = '/something-else'
    result = utils.get_resource_name(base, suffix=tail)
    self.assertEqual(f'{base}{tail}', result)
def test_get_resource_name_with_prefix(self):
    """A prefix passed positionally is prepended verbatim to the name."""
    base = 'fun_name'
    head = 'something/'
    result = utils.get_resource_name(base, head)
    self.assertEqual(f'{head}{base}', result)
def test_get_resource_name_non_ascii(self):
    """Non-ASCII names pass through unchanged between prefix and suffix."""
    base = 'Ру́сский вое́нный кора́бль, иди́ на хуй!'
    head = 'bar:'
    tail = ':baz'
    result = utils.get_resource_name(base, head, tail)
    self.assertEqual(f'{head}{base}{tail}', result)
def test_get_resource_name_with_sane_name(self):
    """A short name with both affixes is returned untouched in the middle."""
    base = 'myns'
    head = 'ns/'
    tail = '-foo'
    result = utils.get_resource_name(base, head, tail)
    self.assertEqual(f'{head}{base}{tail}', result)
def test_get_resource_name_with_too_long_name(self):
    """An over-long name is truncated so the full result fits 255 chars."""
    base = 'a' * 253
    head = 'ns/'
    tail = '-net'
    result = utils.get_resource_name(base, head, tail)
    # 255 total minus len('ns/') and len('-net') leaves 248 name chars.
    self.assertEqual(head + 'a' * 248 + tail, result)
    self.assertEqual(255, len(result))
def create_network(self, ns, project_id):
    """Ensure a Neutron network exists for the given namespace.

    Looks the network up by current and legacy names; creates (and tags)
    it if missing.

    :param ns: Kubernetes Namespace dict; its name and uid are used for
               the network name and description.
    :param project_id: Neutron project the network belongs to.
    :returns: ID of the existing or newly created network.
    :raises: ``os_exc.SDKException`` if network creation fails.
    """
    os_net = clients.get_network_client()
    ns_name = ns['metadata']['name']
    ns_uid = ns['metadata']['uid']
    net_name = c_utils.get_resource_name(ns_name)
    old_net_name = c_utils.get_resource_name(ns_name, prefix='ns/',
                                             suffix='-net')
    # TODO(gryf): remove old_net_name support in next release, and precise
    # the query by adding additional query parameter 'description' which
    # should contain namespace uid.
    networks = os_net.networks(name=(net_name, old_net_name))
    try:
        # NOTE(ltomasbo): only one network must exists
        net = next(networks)
        if net.name == net_name and net.description != ns_uid:
            # this condition would be unnecessary when guard for old names
            # would be eventually removed.
            # NOTE(review): ValueError is deliberately used as control
            # flow here — a name match with a mismatched uid means the
            # network belongs to a different (stale) namespace, so we
            # jump to the creation path below.
            raise ValueError
        # NOTE(gryf): It might happen, that network has been created, but
        # for some reason tagging has failed.
        if TAGS and not set(TAGS).issubset(set(net.tags)):
            c_utils.tag_neutron_resources([net], exceptions=True)
        return net.id
    except (StopIteration, ValueError):
        # No usable network found (none returned, or uid mismatch above);
        # fall through and create one.
        LOG.debug('Network does not exist. Creating.')
    mtu_cfg = oslo_cfg.CONF.neutron_defaults.network_device_mtu
    # The namespace uid in the description ties the network back to the
    # namespace (see uid check above).
    attrs = {'name': net_name, 'project_id': project_id,
             'description': ns_uid}
    if mtu_cfg:
        attrs['mtu'] = mtu_cfg
    try:
        net = os_net.create_network(**attrs)
    except os_exc.SDKException:
        LOG.exception("Error creating neutron resources for the namespace "
                      "%s", ns_name)
        raise
    c_utils.tag_neutron_resources([net], exceptions=True)
    return net.id
def create_subnet(self, ns, project_id, net_id):
    """Ensure a subnet exists on the namespace network.

    Reuses (and re-tags if needed) an existing subnet of ``net_id``;
    otherwise allocates one from the configured pod subnet pool.

    :param ns: Kubernetes Namespace dict providing the subnet name.
    :param project_id: Neutron project the subnet belongs to.
    :param net_id: ID of the namespace network to attach the subnet to.
    :returns: tuple of (subnet id, subnet CIDR).
    :raises: ``exceptions.ResourceNotReady`` on a Neutron conflict so the
             caller retries the creation.
    """
    os_net = clients.get_network_client()
    ns_name = ns['metadata']['name']
    # NOTE(gryf): assumption is, that all the subnets (well, currently
    # only one) in specific k8s namespaces are under exactly one network,
    # which have proper namespace uid in its description, so there is no
    # need to put it on the subnet as well.
    subnet_name = c_utils.get_resource_name(ns_name)
    for existing in os_net.subnets(network_id=net_id):
        # Only one subnet is expected per namespace network; reuse the
        # first one found, restoring tags if they went missing.
        if TAGS and not set(TAGS).issubset(set(existing.tags)):
            c_utils.tag_neutron_resources([existing], exceptions=True)
        return existing.id, existing.cidr
    LOG.debug('Subnet does not exist. Creating.')
    # create subnet with namespace as name
    pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
    ip_version = utils.get_subnetpool_version(pool_id)
    try:
        neutron_subnet = os_net.create_subnet(
            network_id=net_id,
            ip_version=ip_version,
            name=subnet_name,
            enable_dhcp=False,
            subnetpool_id=pool_id,
            project_id=project_id)
    except os_exc.ConflictException:
        LOG.debug("Max number of retries on neutron side achieved, "
                  "raising ResourceNotReady to retry subnet creation "
                  "for %s", subnet_name)
        raise exceptions.ResourceNotReady(subnet_name)
    c_utils.tag_neutron_resources([neutron_subnet], exceptions=True)
    return neutron_subnet.id, neutron_subnet.cidr
def update_lbaas_sg(self, service, sgs):
    """Propagate security groups to the service's KuryrLoadBalancer.

    Patches the CRD status with ``sgs`` and applies the member security
    group rules for every service port that has a matching listener.

    :param service: Kubernetes Service dict.
    :param sgs: list of security group IDs to set on the load balancer.
    :raises: ``k_exc.ResourceNotReady`` when the KuryrLoadBalancer CRD is
             missing or not populated yet (caller retries);
             ``k_exc.K8sClientException`` on CRD patch failure.
    """
    LOG.debug('Setting SG for LBaaS VIP port')
    svc_namespace = service['metadata']['namespace']
    svc_name = service['metadata']['name']
    svc_ports = service['spec'].get('ports', [])
    lbaas_name = c_utils.get_resource_name(svc_name,
                                           prefix=svc_namespace + "/")
    endpoints_link = utils.get_endpoints_link(service)
    k8s = clients.get_kubernetes_client()
    try:
        k8s.get(endpoints_link)
    except k_exc.K8sResourceNotFound:
        LOG.debug("Endpoint not Found. Skipping LB SG update for "
                  "%s as the LB resources are not present", lbaas_name)
        return
    try:
        klb = k8s.get(f'{k_const.K8S_API_CRD_NAMESPACES}/{svc_namespace}/'
                      f'kuryrloadbalancers/{svc_name}')
    except k_exc.K8sResourceNotFound:
        LOG.debug('No KuryrLoadBalancer for service %s created yet.',
                  lbaas_name)
        raise k_exc.ResourceNotReady(svc_name)
    if (not klb.get('status', {}).get('loadbalancer') or
            klb.get('status', {}).get('listeners') is None):
        LOG.debug('KuryrLoadBalancer for service %s not populated yet.',
                  lbaas_name)
        raise k_exc.ResourceNotReady(svc_name)
    # Mutate the local copy too, so `lb` passed to the member-SG helper
    # below already carries the new groups.
    klb['status']['loadbalancer']['security_groups'] = sgs
    lb = klb['status']['loadbalancer']
    try:
        k8s.patch_crd('status/loadbalancer', utils.get_res_link(klb),
                      {'security_groups': sgs})
    except k_exc.K8sResourceNotFound:
        LOG.debug('KuryrLoadBalancer CRD not found %s', lbaas_name)
        return
    except k_exc.K8sClientException:
        # Fixed typo: "KuryLoadBalancer" -> "KuryrLoadBalancer".
        LOG.exception('Error updating KuryrLoadBalancer CRD %s',
                      lbaas_name)
        raise
    lsnr_ids = {(listener['protocol'], listener['port']): listener['id']
                for listener in klb['status']['listeners']}
    for port in svc_ports:
        port_protocol = port['protocol']
        lbaas_port = port['port']
        target_port = port['targetPort']
        suffix = f"{port_protocol}:{lbaas_port}"
        sg_rule_name = c_utils.get_resource_name(lbaas_name,
                                                 suffix=':' + suffix)
        listener_id = lsnr_ids.get((port_protocol, lbaas_port))
        if listener_id is None:
            # Reassembled log string that was broken across lines by an
            # extraction artifact (unterminated literal).
            LOG.warning("There is no listener associated to the protocol "
                        "%s and port %s. Skipping", port_protocol,
                        lbaas_port)
            continue
        self._apply_members_security_groups(lb, lbaas_port, target_port,
                                            port_protocol, sg_rule_name,
                                            listener_id, sgs)