def _get_vifs(self, pod):
    """Fetch the VIFs recorded on the pod's KuryrPort CRD.

    Returns an empty dict when the CRD cannot be retrieved from the API.
    """
    k8s = clients.get_kubernetes_client()
    crd_path = (f'{k_const.K8S_API_CRD_NAMESPACES}/'
                f'{pod["metadata"]["namespace"]}/'
                f'kuryrports/{pod["metadata"]["name"]}')
    try:
        kuryrport_crd = k8s.get(crd_path)
    except k_exc.K8sClientException:
        # Best-effort: no CRD (or API trouble) means no VIFs to report.
        return {}
    LOG.debug("Got CRD: %r", kuryrport_crd)
    vifs_dict = utils.get_vifs_from_crd(kuryrport_crd)
    LOG.debug("Got vifs: %r", vifs_dict)
    return vifs_dict
def __init__(self):
    """Wire up the OpenStack clients and driver instances the handler uses."""
    super(KuryrNetworkPolicyHandler, self).__init__()
    self.os_net = clients.get_network_client()
    self.k8s = clients.get_kubernetes_client()
    self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance()
    self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
    # The multi-pool VIF driver needs its backing VIF driver set before use.
    pool_driver = drivers.VIFPoolDriver.get_instance(
        specific_driver='multi_pool')
    pool_driver.set_vif_driver()
    self._drv_vif_pool = pool_driver
    self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
    self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
    self._drv_lbaas = drivers.LBaaSDriver.get_instance()
    # Migrate any CRDs persisted in the legacy format before handling events.
    self._convert_old_crds()
def on_present(self, pod, *args, **kwargs):
    """Handle a pod appearing or being updated.

    Ensures the Kuryr pod finalizer is set, releases VIFs of completed
    pods, and creates a KuryrPort CRD for pods that do not have one yet.
    Host-networking and not-yet-scheduled pods are ignored.

    :param pod: dict with the Kubernetes Pod object
    :raises k_exc.ResourceNotReady: when KuryrPort CRD creation fails and
        should be retried
    """
    if (driver_utils.is_host_network(pod) or
            not self._is_pod_scheduled(pod)):
        # REVISIT(ivc): consider an additional configurable check that
        # would allow skipping pods to enable heterogeneous environments
        # where certain pods/namespaces/nodes can be managed by other
        # networking solutions/CNI drivers.
        return
    # NOTE(gryf): Set the finalizer as soon, as we have pod created. On
    # subsequent updates of the pod, add_finalizer will ignore this if
    # finalizer exists.
    k8s = clients.get_kubernetes_client()
    try:
        if not k8s.add_finalizer(pod, constants.POD_FINALIZER):
            # NOTE(gryf) It might happen that pod will be deleted even
            # before we got here.
            return
    except k_exc.K8sClientException as ex:
        LOG.exception("Failed to add finalizer to pod object: %s", ex)
        raise
    kp = driver_utils.get_kuryrport(pod)
    if self._is_pod_completed(pod):
        # A completed pod no longer needs networking; release the VIFs
        # via the finalize path (only if a KuryrPort exists).
        if kp:
            LOG.debug("Pod has completed execution, removing the vifs")
            self.on_finalize(pod)
        else:
            LOG.debug("Pod has completed execution, no KuryrPort found."
                      " Skipping")
        return
    LOG.debug("Got KuryrPort: %r", kp)
    if not kp:
        try:
            self._add_kuryrport_crd(pod)
        except k_exc.K8sNamespaceTerminating:
            # The underlying namespace is being terminated, we can
            # ignore this and let `on_finalize` handle this now.
            LOG.warning(
                'Namespace %s is being terminated, ignoring Pod '
                '%s in that namespace.',
                pod['metadata']['namespace'],
                pod['metadata']['name'])
            return
        except k_exc.K8sClientException as ex:
            LOG.exception(
                "Kubernetes Client Exception creating "
                "KuryrPort CRD: %s", ex)
            # Signal the handler framework to retry this pod later.
            raise k_exc.ResourceNotReady(pod)
def _set_pod_info(self, pod, info):
    """Persist the (labels, ip) *info* pair onto *pod* as annotations.

    A falsy labels element clears the label annotation while keeping
    the IP annotation value.
    """
    if info[0]:
        annotation = jsonutils.dumps(info[0], sort_keys=True), info[1]
        LOG.debug("Setting info annotations: %r", annotation)
    else:
        LOG.debug("Removing info annotations: %r", info)
        annotation = None, info[1]
    k8s = clients.get_kubernetes_client()
    k8s.annotate(
        utils.get_res_link(pod),
        {
            constants.K8S_ANNOTATION_LABEL: annotation[0],
            constants.K8S_ANNOTATION_IP: annotation[1],
        },
        resource_version=pod['metadata']['resourceVersion'])
def set_lbaas_state(endpoints, lbaas_state):
    """Annotate *endpoints* with the serialized LBaaS state.

    Passing ``None`` as *lbaas_state* clears the annotation.
    """
    # TODO(ivc): extract annotation interactions
    annotation = None
    if lbaas_state is None:
        LOG.debug("Removing LBaaSState annotation: %r", lbaas_state)
    else:
        lbaas_state.obj_reset_changes(recursive=True)
        LOG.debug("Setting LBaaSState annotation: %r", lbaas_state)
        annotation = jsonutils.dumps(lbaas_state.obj_to_primitive(),
                                     sort_keys=True)
    k8s = clients.get_kubernetes_client()
    k8s.annotate(get_res_link(endpoints),
                 {constants.K8S_ANNOTATION_LBAAS_STATE: annotation},
                 resource_version=endpoints['metadata']['resourceVersion'])
def _bump_nps(self):
    """Bump NetworkPolicy objects to have the SG rules recalculated."""
    k8s = clients.get_kubernetes_client()
    # NOTE(dulek): Listing KuryrNetworkPolicies instead of NetworkPolicies,
    # as we only care about NPs already handled.
    knps = k8s.get(constants.K8S_API_CRD_KURYRNETWORKPOLICIES)
    for knp in knps.get('items', []):
        np_link = knp['metadata']['annotations']['networkPolicyLink']
        fresh_marker = {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())}
        try:
            k8s.annotate(np_link, fresh_marker)
        except exceptions.K8sResourceNotFound:
            # Had to be deleted in the meanwhile.
            pass
def _delete_host_networking_ports(self):
    """Remove KuryrPort CRDs mistakenly created for host-networking pods."""
    k8s = clients.get_kubernetes_client()
    pods = k8s.get('/api/v1/pods')['items']
    kuryrports = k8s.get(constants.K8S_API_CRD_KURYRPORTS)['items']
    for kuryrport, pod in driver_utils.zip_resources(kuryrports, pods):
        if not driver_utils.is_host_network(pod):
            continue
        LOG.warning(f'Found unnecessary KuryrPort '
                    f'{utils.get_res_unique_name(kuryrport)} created '
                    f'for host networking pod. Deleting it.')
        try:
            k8s.delete(utils.get_res_link(kuryrport))
        except k_exc.K8sResourceNotFound:
            # Already gone; nothing to clean up.
            pass
def _set_vif(self, pod, vif):
    """Write (or clear, when *vif* is None) the VIF annotation on *pod*."""
    # TODO(ivc): extract annotation interactions
    annotation = None
    if vif is None:
        LOG.debug("Removing VIF annotation: %r", vif)
    else:
        vif.obj_reset_changes(recursive=True)
        LOG.debug("Setting VIF annotation: %r", vif)
        annotation = jsonutils.dumps(vif.obj_to_primitive(),
                                     sort_keys=True)
    k8s = clients.get_kubernetes_client()
    # NOTE(review): 'metadata.selfLink' is deprecated/removed in recent
    # Kubernetes releases — confirm whether a helper like
    # utils.get_res_link() should be used here instead.
    k8s.annotate(pod['metadata']['selfLink'],
                 {constants.K8S_ANNOTATION_VIF: annotation},
                 resource_version=pod['metadata']['resourceVersion'])
def _get_namespace_labels(namespace):
    """Return the labels dict of *namespace*, or None if it has none.

    :raises exceptions.K8sResourceNotFound: when the namespace is gone
    :raises exceptions.K8sClientException: on other API failures
    """
    kubernetes = clients.get_kubernetes_client()
    path = '{}/{}'.format(constants.K8S_API_NAMESPACES, namespace)
    try:
        namespaces = kubernetes.get(path)
    except exceptions.K8sResourceNotFound:
        LOG.exception("Namespace not found")
        raise
    except exceptions.K8sClientException:
        LOG.exception("Kubernetes Client Exception")
        raise
    LOG.debug("Return Namespace: %s", namespaces)
    return namespaces['metadata'].get('labels')
def _get_kuryrnetpolicy_crd(self, policy):
    """Fetch the KuryrNetPolicy CRD matching *policy*.

    Returns None when no matching CRD exists.
    """
    kubernetes = clients.get_kubernetes_client()
    crd_path = '{}/{}/kuryrnetpolicies/{}'.format(
        constants.K8S_API_CRD_NAMESPACES,
        policy['metadata']['namespace'],
        "np-" + policy['metadata']['name'])
    try:
        return kubernetes.get(crd_path)
    except exceptions.K8sResourceNotFound:
        return None
    except exceptions.K8sClientException:
        LOG.exception("Kubernetes Client Exception.")
        raise
def __init__(self):
    """Set up the service drivers and pick the Octavia LB provider."""
    super(ServiceHandler, self).__init__()
    self._drv_project = drv_base.ServiceProjectDriver.get_instance()
    self._drv_subnets = drv_base.ServiceSubnetsDriver.get_instance()
    self._drv_sg = drv_base.ServiceSecurityGroupsDriver.get_instance()
    self._drv_lbaas = drv_base.LBaaSDriver.get_instance()
    self.k8s = clients.get_kubernetes_client()
    # Provider stays None unless the LBaaS driver exposes provider support;
    # then the configured provider wins over the 'amphora' default.
    provider = None
    if self._drv_lbaas.providers_supported():
        configured = CONF.kubernetes.endpoints_driver_octavia_provider
        provider = configured if configured != 'default' else 'amphora'
    self._lb_provider = provider
def get_services(namespace=None):
    """List Kubernetes Services, optionally restricted to one namespace.

    :param namespace: namespace name, or None for the cluster-wide listing
    :returns: dict as returned by the Kubernetes API
    :raises k_exc.K8sClientException: on API failure
    """
    kubernetes = clients.get_kubernetes_client()
    if namespace:
        path = '{}/namespaces/{}/services'.format(constants.K8S_API_BASE,
                                                  namespace)
    else:
        path = '{}/services'.format(constants.K8S_API_BASE)
    try:
        services = kubernetes.get(path)
    except k_exc.K8sClientException:
        LOG.exception('Exception when getting K8s services.')
        raise
    return services
def bump_networkpolicy(knp):
    """Annotate the NetworkPolicy behind *knp* to force rule recalculation."""
    kubernetes = clients.get_kubernetes_client()
    np_link = knp['metadata']['annotations']['networkPolicyLink']
    marker = {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())}
    try:
        kubernetes.annotate(np_link, marker)
    except k_exc.K8sResourceNotFound:
        # Propagate NotFound without the generic failure log below.
        raise
    except k_exc.K8sClientException:
        LOG.exception(
            "Failed to annotate network policy %s to force its "
            "recalculation.", utils.get_res_unique_name(knp))
        raise
def request_additional_vifs(self, pod, project_id, security_groups):
    """Request a VIF for each additional network attached to *pod*.

    For every network listed in the pod's NPWG annotation, resolves the
    network-attachment-definition, picks a VIF driver and a subnet from
    its kuryr-config annotation (falling back to the configured defaults),
    and requests a VIF from that driver.

    :param pod: dict with the Kubernetes Pod object
    :param project_id: OpenStack project to own the ports
    :param security_groups: security groups to apply to the ports
    :returns: list of the VIF objects that were successfully requested
    :raises exceptions.InvalidKuryrNetworkAnnotation: when a network entry
        lacks a 'name'
    """
    vifs = []
    networks = self._get_networks(pod)
    if not networks:
        return vifs
    kubernetes = clients.get_kubernetes_client()
    namespace = pod['metadata']['namespace']
    for network in networks:
        if 'name' not in network:
            raise exceptions.InvalidKuryrNetworkAnnotation()
        if 'namespace' in network:
            # A network entry may point at a NAD in another namespace;
            # note this namespace carries over to subsequent entries too.
            namespace = network['namespace']
        try:
            url = '%s/namespaces/%s/network-attachment-definitions/%s' % (
                constants.K8S_API_NPWG_CRD, namespace, network['name'])
            nad_obj = kubernetes.get(url)
        except exceptions.K8sClientException:
            LOG.exception("Kubernetes Client Exception")
            raise
        # The NAD carries Kuryr-specific settings in a JSON annotation.
        config = jsonutils.loads(nad_obj['metadata']['annotations']
                                 ['openstack.org/kuryr-config'])
        subnet_id = config.get(constants.K8S_ANNOTATION_NPWG_CRD_SUBNET_ID)
        neutron_defaults = kuryr_config.CONF.neutron_defaults
        if constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE not in config:
            # No driver alias configured: use the pooled VIF driver and
            # the default pod subnet unless the NAD named one.
            vif_drv = self._drv_vif_pool
            if not subnet_id:
                subnet_id = neutron_defaults.pod_subnet
        else:
            # Driver alias configured: load that specific VIF driver and
            # look its subnet up in the configured mapping.
            alias = config[constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE]
            vif_drv = base.PodVIFDriver.get_instance(specific_driver=alias)
            if not subnet_id:
                try:
                    subnet_id = neutron_defaults.subnet_mapping[alias]
                except KeyError:
                    subnet_id = neutron_defaults.pod_subnet
                    LOG.debug(
                        "Default subnet mapping in config file "
                        "doesn't contain any subnet for %s driver "
                        "alias. Default pod_subnet was used.", alias)
        subnet = {subnet_id: utils.get_subnet(subnet_id)}
        vif = vif_drv.request_vif(pod, project_id, subnet, security_groups)
        if vif:
            vifs.append(vif)
    return vifs
def get_referenced_object(obj, kind):
    """Get referenced object.

    Helper function for getting objects out of the CRDs like
    KuryrLoadBalancer, KuryrNetworkPolicy or KuryrPort needed solely for
    creating Event object, so there will be no exceptions raises from this
    function.
    """
    # First try the cheap path: reconstruct the reference from the
    # ownerReferences already present on the object.
    for ref in obj['metadata'].get('ownerReferences', []):
        if ref['kind'] != kind:
            continue
        try:
            return {
                'kind': kind,
                'apiVersion': ref['apiVersion'],
                'metadata': {
                    'namespace': obj['metadata']['namespace'],
                    'name': ref['name'],
                    'uid': ref['uid']
                }
            }
        except KeyError:
            LOG.debug(
                "Not all needed keys was found in ownerReferences "
                "list: %s", ref)
    # There was no ownerReferences field, let's query API
    k8s = clients.get_kubernetes_client()
    # Build a minimal stand-in object just to derive the resource link.
    data = {
        'metadata': {
            'name': obj['metadata']['name']
        },
        'kind': kind,
        'apiVersion': API_VER_MAP[kind]
    }
    if obj['metadata'].get('namespace'):
        data['metadata']['namespace'] = obj['metadata']['namespace']
    try:
        url = get_res_link(data)
    except KeyError:
        LOG.debug("Not all needed data was found in provided object: %s",
                  data)
        # Intentionally returns None: caller treats this as "no reference".
        return
    try:
        return k8s.get(url)
    except exceptions.K8sClientException:
        # Best-effort only; events are not worth failing the caller over.
        LOG.debug('Error when fetching %s to add an event %s, ignoring',
                  kind, get_res_unique_name(obj))
def on_finalize(self, service, *args, **kwargs):
    """Delete the KuryrLoadBalancer CRD backing a removed Service."""
    k8s = clients.get_kubernetes_client()
    svc_name = service['metadata']['name']
    svc_namespace = service['metadata']['namespace']
    klb_crd_path = (f"{k_const.K8S_API_CRD_NAMESPACES}/"
                    f"{svc_namespace}/kuryrloadbalancers/{svc_name}")
    # Bump all the NPs in the namespace to force SG rules
    # recalculation.
    self._bump_network_policies(service)
    try:
        k8s.delete(klb_crd_path)
    except k_exc.K8sResourceNotFound:
        # CRD already gone — just release the Service finalizer ourselves.
        k8s.remove_finalizer(service, k_const.SERVICE_FINALIZER)
def _handle_namespace(self, namespace):
    """Evaluate if the Namespace should be handled

    Fetches all the Pods in the Namespace and check if there is any Pod in
    that Namespace on Pods Network.

    :param namespace: Namespace name
    :returns: True if the Namespace resources should be created, False if
              otherwise.
    """
    kubernetes = clients.get_kubernetes_client()
    path = '{}/namespaces/{}/pods'.format(constants.K8S_API_BASE, namespace)
    pods = kubernetes.get(path)
    for pod in pods.get('items', []):
        # One pod on the pods network is enough to require the resources.
        if not utils.is_host_network(pod):
            return True
    return False
def create_namespace_sg_rules(self, namespace):
    """Recalculate NP security group rules affected by a new namespace.

    Re-fetches the namespace, then re-parses ingress/egress rules of every
    KuryrNetPolicy CRD against it and patches the CRDs that matched.

    :param namespace: dict with the Kubernetes Namespace object
    """
    kubernetes = clients.get_kubernetes_client()
    ns_name = namespace['metadata']['name']
    LOG.debug("Creating sg rule for namespace: %s", ns_name)
    namespace = kubernetes.get('{}/namespaces/{}'.format(
        constants.K8S_API_BASE, ns_name))
    knp_crds = utils.get_kuryrnetpolicy_crds()
    # Fix: default to [] — 'items' may be absent from the listing, and
    # iterating None would raise TypeError.
    for crd in knp_crds.get('items', []):
        crd_selector = crd['spec'].get('podSelector')
        i_matched, i_rules = _parse_rules('ingress', crd, namespace)
        e_matched, e_rules = _parse_rules('egress', crd, namespace)
        # Only patch CRDs whose selectors actually matched this namespace.
        if i_matched or e_matched:
            utils.patch_kuryr_crd(crd, i_rules, e_rules, crd_selector)
def _get_in_use_ports(self):
    """Collect Neutron port IDs referenced by pod VIF annotations."""
    kubernetes = clients.get_kubernetes_client()
    ports = []
    running_pods = kubernetes.get(constants.K8S_API_BASE + '/pods')
    for pod in running_pods['items']:
        try:
            raw_vif = pod['metadata']['annotations'][
                constants.K8S_ANNOTATION_VIF]
        except KeyError:
            LOG.debug("Skipping pod without kuryr VIF annotation: %s",
                      pod)
            continue
        annotations = jsonutils.loads(raw_vif)
        ports.append(annotations['versioned_object.data']['id'])
    return ports
def bump_networkpolicies(namespace=None):
    """Annotate all NPs (optionally of one namespace) to force recalculation.

    Best-effort: deleted NPs are ignored and other API failures are only
    logged.
    """
    k8s = clients.get_kubernetes_client()
    for np in get_networkpolicies(namespace):
        marker = {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())}
        try:
            k8s.annotate(utils.get_res_link(np), marker)
        except k_exc.K8sResourceNotFound:
            # Ignore if NP got deleted.
            pass
        except k_exc.K8sClientException:
            LOG.warning(
                "Failed to annotate network policy %s to force its "
                "recalculation.", utils.get_res_unique_name(np))
def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False):
    """Patch the status of a KuryrNetwork CRD.

    :param kuryrnet_crd: dict with the KuryrNetwork CRD to update
    :param status: status payload to patch in
    :param labels: when True, uses the ``patch_crd`` client call instead
        of the plain ``patch`` call
    :raises k_exc.K8sClientException: on API failures other than NotFound
    """
    kubernetes = clients.get_kubernetes_client()
    LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd)
    try:
        # NOTE(review): both branches patch the 'status' field and only
        # differ in the client method used — confirm that `labels=True`
        # is really meant to go through patch_crd here.
        if labels:
            kubernetes.patch_crd('status',
                                 utils.get_res_link(kuryrnet_crd),
                                 status)
        else:
            kubernetes.patch('status',
                             utils.get_res_link(kuryrnet_crd),
                             status)
    except k_exc.K8sResourceNotFound:
        # CRD disappeared underneath us; nothing to patch.
        LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd)
    except k_exc.K8sClientException:
        LOG.exception('Error updating kuryrNetwork CRD %s', kuryrnet_crd)
        raise
def _get_in_use_ports(self):
    """Return the VIF port IDs of all pods carrying a Kuryr VIF annotation."""
    kubernetes = clients.get_kubernetes_client()
    in_use = []
    running_pods = kubernetes.get(constants.K8S_API_BASE + '/pods')
    for pod in running_pods['items']:
        try:
            annotations = jsonutils.loads(
                pod['metadata']['annotations'][
                    constants.K8S_ANNOTATION_VIF])
            pod_state = utils.extract_pod_annotation(annotations)
        except KeyError:
            LOG.debug("Skipping pod without kuryr VIF annotation: %s",
                      pod)
        else:
            # A pod may hold several VIFs; every one counts as in use.
            in_use.extend(vif.id for vif in pod_state.vifs.values())
    return in_use
def test_setup_clients(self, m_neutron, m_k8s, m_cfg):
    """setup_clients() wires the configured clients into the accessors."""
    api_root = 'http://127.0.0.1:1234'
    fake_neutron = object()
    fake_k8s = object()
    m_cfg.kubernetes.api_root = api_root
    m_neutron.return_value = fake_neutron
    m_k8s.return_value = fake_k8s
    clients.setup_clients()
    # The K8s client must be built from the configured API root, and both
    # accessors must hand back the exact instances the factories produced.
    m_k8s.assert_called_with(api_root)
    self.assertIs(fake_k8s, clients.get_kubernetes_client())
    self.assertIs(fake_neutron, clients.get_neutron_client())
def _update_crd_spec(self, loadbalancer_crd, service):
    """Patch the KuryrLoadBalancer CRD spec to match *service*.

    :param loadbalancer_crd: dict with the existing KuryrLoadBalancer CRD
    :param service: dict with the Kubernetes Service object
    :raises k_exc.ResourceNotReady: on a patch conflict (retry later)
    :raises k_exc.K8sClientException: on other API failures
    """
    svc_name = service['metadata']['name']
    kubernetes = clients.get_kubernetes_client()
    spec = self._build_kuryrloadbalancer_spec(service)
    LOG.debug('Patching KuryrLoadBalancer CRD %s', loadbalancer_crd)
    try:
        kubernetes.patch_crd('spec', loadbalancer_crd['metadata'][
            'selfLink'], spec)
    except k_exc.K8sResourceNotFound:
        LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd)
    except k_exc.K8sConflict:
        raise k_exc.ResourceNotReady(svc_name)
    except k_exc.K8sClientException:
        # Fix: message used to say "kuryrnet CRD" although this function
        # patches a KuryrLoadBalancer CRD.
        LOG.exception('Error updating KuryrLoadBalancer CRD %s',
                      loadbalancer_crd)
        raise
def _annotate_device(self, pod_link, pci, old_driver, new_driver, port_id):
    """Record a PCI device's driver transition in the pod's annotation."""
    k8s = clients.get_kubernetes_client()
    devices = self._get_pod_devices(pod_link)
    devices[pci] = {
        constants.K8S_ANNOTATION_OLD_DRIVER: old_driver,
        constants.K8S_ANNOTATION_CURRENT_DRIVER: new_driver,
        constants.K8S_ANNOTATION_NEUTRON_PORT: port_id,
    }
    LOG.debug(
        "Trying to annotate pod %s with pci %s, old driver %s "
        "and new driver %s", pod_link, pci, old_driver, new_driver)
    serialized = jsonutils.dumps(devices)
    k8s.annotate(pod_link,
                 {constants.K8S_ANNOTATION_PCI_DEVICES: serialized})
def _update_lb_status(self, endpoints, lb_ip_address):
    """Patch the Service status with the load balancer's ingress IP."""
    ingress = [{"ip": lb_ip_address.format()}]
    status_data = {"loadBalancer": {"ingress": ingress}}
    k8s = clients.get_kubernetes_client()
    svc_link = self._get_service_link(endpoints)
    try:
        k8s.patch("status", svc_link, status_data)
    except k_exc.K8sClientException:
        # REVISIT(ivc): only raise ResourceNotReady for NotFound
        raise k_exc.ResourceNotReady(svc_link)
def create_crd_spec(self, service):
    """Create a KuryrLoadBalancer CRD for *service*.

    :param service: dict with the Kubernetes Service object
    :returns: the CRD dict that was posted
    :raises k_exc.ResourceNotReady: when the CRD already exists (conflict)
    :raises k_exc.K8sClientException: on other API failures
    """
    svc_name = service['metadata']['name']
    svc_namespace = service['metadata']['namespace']
    kubernetes = clients.get_kubernetes_client()
    svc_ip = self._get_service_ip(service)
    spec_lb_ip = service['spec'].get('loadBalancerIP')
    ports = service['spec'].get('ports')
    for port in ports:
        # The CRD schema expects targetPort as a string.
        # Fix: isinstance() instead of type() == int comparison.
        if isinstance(port['targetPort'], int):
            port['targetPort'] = str(port['targetPort'])
    project_id = self._drv_project.get_project(service)
    sg_ids = self._drv_sg.get_security_groups(service, project_id)
    subnet_id = self._get_subnet_id(service, project_id, svc_ip)
    spec_type = service['spec'].get('type')
    loadbalancer_crd = {
        'apiVersion': 'openstack.org/v1',
        'kind': 'KuryrLoadBalancer',
        'metadata': {
            'name': svc_name,
            'finalizers': [k_const.KURYRLB_FINALIZER],
        },
        'spec': {
            'ip': svc_ip,
            'ports': ports,
            'project_id': project_id,
            'security_groups_ids': sg_ids,
            'subnet_id': subnet_id,
            'type': spec_type
        },
        'status': {}
    }
    if spec_lb_ip is not None:
        loadbalancer_crd['spec']['lb_ip'] = spec_lb_ip
    try:
        kubernetes.post(
            '{}/{}/kuryrloadbalancers'.format(
                k_const.K8S_API_CRD_NAMESPACES, svc_namespace),
            loadbalancer_crd)
    except k_exc.K8sConflict:
        raise k_exc.ResourceNotReady(svc_name)
    except k_exc.K8sClientException as ex:
        # Fix: log the caught exception instance lazily; the original
        # interpolated the exception *class* with eager %-formatting.
        LOG.exception("Kubernetes Client Exception creating "
                      "kuryrloadbalancer CRD. %s", ex)
        raise
    return loadbalancer_crd
def get_namespace_subnet_cidr(namespace):
    """Return the subnet CIDR recorded on the namespace's KuryrNet CRD.

    :raises k_exc.ResourceNotReady: when the namespace lacks the net-CRD
        annotation (i.e. the namespace handler is not enabled)
    :raises k_exc.K8sClientException: on API failure
    """
    kubernetes = clients.get_kubernetes_client()
    try:
        ns_name = namespace['metadata']['annotations'][
            constants.K8S_ANNOTATION_NET_CRD]
    except KeyError:
        LOG.exception('Namespace handler must be enabled to support '
                      'Network Policies with namespaceSelector')
        raise k_exc.ResourceNotReady(namespace)
    try:
        net_crd = kubernetes.get('{}/kuryrnets/{}'.format(
            constants.K8S_API_CRD, ns_name))
    except k_exc.K8sClientException:
        LOG.exception("Kubernetes Client Exception.")
        raise
    return net_crd['spec']['subnetCIDR']
def get_port_annot_pci_info(nodename, neutron_port):
    """Read the PCI device info annotation for *neutron_port* from a node.

    :param nodename: name of the Kubernetes node to inspect
    :param neutron_port: Neutron port ID appended to the annotation name
    :returns: dict with the parsed PCI info; empty when the annotation is
        absent or cannot be parsed
    """
    k8s = clients.get_kubernetes_client()
    annot_name = constants.K8S_ANNOTATION_NODE_PCI_DEVICE_INFO
    annot_name = annot_name + '-' + neutron_port
    node_info = k8s.get('/api/v1/nodes/{}'.format(nodename))
    annotations = node_info['metadata']['annotations']
    # Fix: pre-initialize so the broad except branch below cannot leave
    # pci_info unbound (previously that path raised UnboundLocalError at
    # the return statement).
    pci_info = {}
    try:
        json_pci_info = annotations[annot_name]
        pci_info = jsonutils.loads(json_pci_info)
    except KeyError:
        # Annotation not present on the node; treat as "no PCI info".
        pci_info = {}
    except Exception:
        LOG.exception('Exception when reading annotations '
                      '%s and converting from json', annot_name)
    return pci_info
def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules):
    """Patch a KuryrNetworkPolicy CRD with new ingress/egress SG rules.

    :param crd: dict with the KuryrNetworkPolicy CRD to patch
    :param i_rules: ingress security-group rules to set
    :param e_rules: egress security-group rules to set
    :raises k_exc.K8sClientException: on API failures other than NotFound
    """
    kubernetes = clients.get_kubernetes_client()
    crd_name = crd['metadata']['name']
    # Fix: lazy logging args instead of eager %-formatting, matching the
    # other LOG calls in this function.
    LOG.debug('Patching KuryrNetworkPolicy CRD %s', crd_name)
    try:
        spec = {
            'ingressSgRules': i_rules,
            'egressSgRules': e_rules,
        }
        kubernetes.patch_crd('spec', crd['metadata']['selfLink'], spec)
    except k_exc.K8sResourceNotFound:
        LOG.debug('KuryrNetworkPolicy CRD not found %s', crd_name)
    except k_exc.K8sClientException:
        LOG.exception('Error updating KuryrNetworkPolicy CRD %s', crd_name)
        raise