    def _patch_kuryrnetwork_crd(self, kuryrnet_crd, status, labels=False):
        LOG.debug('Patching KuryrNetwork CRD %s', kuryrnet_crd)
        try:
            if labels:
                self.k8s.patch_crd('status',
                                   utils.get_res_link(kuryrnet_crd), status)
            else:
                self.k8s.patch('status', utils.get_res_link(kuryrnet_crd),
                               status)
        except k_exc.K8sResourceNotFound:
            LOG.debug('KuryrNetwork CRD not found %s', kuryrnet_crd)
        except k_exc.K8sClientException:
            LOG.exception('Error updating KuryrNetwork CRD %s', kuryrnet_crd)
            raise

    def _remove_unused_pools(self, loadbalancer_crd):
        removed_ids = set()

        for pool in loadbalancer_crd['status'].get('pools', []):
            if self._is_pool_in_spec(pool, loadbalancer_crd):
                continue
            self._drv_lbaas.release_pool(
                loadbalancer_crd['status']['loadbalancer'], pool)
            removed_ids.add(pool['id'])
        if removed_ids:
            loadbalancer_crd['status']['pools'] = [
                p for p in loadbalancer_crd['status'].get('pools', [])
                if p['id'] not in removed_ids
            ]
            loadbalancer_crd['status']['members'] = [
                m for m in loadbalancer_crd['status'].get('members', [])
                if m['pool_id'] not in removed_ids
            ]

            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd),
                                     loadbalancer_crd['status'])
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          loadbalancer_crd)
            except k_exc.K8sClientException:
                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                              loadbalancer_crd)
                raise
        return bool(removed_ids)
    def _add_new_pools(self, loadbalancer_crd):
        changed = False

        current_listeners_ids = {
            pool['listener_id']
            for pool in loadbalancer_crd['status'].get('pools', [])
        }
        for listener in loadbalancer_crd['status'].get('listeners', []):
            if listener['id'] in current_listeners_ids:
                continue
            pool = self._drv_lbaas.ensure_pool(
                loadbalancer_crd['status']['loadbalancer'], listener)
            if not pool:
                continue
            # Create status['pools'] on first use, then append the new pool.
            loadbalancer_crd['status'].setdefault('pools', []).append(pool)
            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd),
                                     loadbalancer_crd['status'])
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          loadbalancer_crd)
            except k_exc.K8sClientException:
                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                              loadbalancer_crd)
                raise
            changed = True
        return changed
Example 4
def neutron_to_osvif_vif_dpdk(os_port, subnets, pod):
    """Converts Neutron port to VIF object for nested dpdk containers.

    :param os_port: dict containing port information as returned by
                    neutron client's 'show_port'
    :param subnets: subnet mapping as returned by PodSubnetsDriver.get_subnets
    :param pod: pod object received by k8s and containing profile details
    :return: os-vif VIF object
    """

    details = os_port.get('binding:vif_details', {})
    profile = osv_vif.VIFPortProfileK8sDPDK(
        l3_setup=False,
        selflink=utils.get_res_link(pod))

    return k_vif.VIFDPDKNested(
        id=os_port['id'],
        port_profile=profile,
        address=os_port['mac_address'],
        network=_make_vif_network(os_port, subnets),
        has_traffic_filtering=details.get('port_filter', False),
        preserve_on_delete=False,
        active=_is_port_active(os_port),
        plugin=const.K8S_OS_VIF_NOOP_PLUGIN,
        pci_address="",
        dev_driver="",
        vif_name=_get_vif_name(os_port))
Example 5
    def _sync_lbaas_sgs(self, klb_crd):
        lb = klb_crd['status'].get('loadbalancer')
        svc_name = klb_crd['metadata']['name']
        svc_namespace = klb_crd['metadata']['namespace']
        k8s = clients.get_kubernetes_client()
        try:
            service = k8s.get(f'{k_const.K8S_API_NAMESPACES}/{svc_namespace}/'
                              f'services/{svc_name}')
        except k_exc.K8sResourceNotFound:
            LOG.debug('Service %s not found.', svc_name)
            return
        except k_exc.K8sClientException:
            LOG.exception('Error retrieving Service %s.', svc_name)
            raise

        project_id = self._drv_svc_project.get_project(service)
        lb_sgs = self._drv_sg.get_security_groups(service, project_id)
        lb['security_groups'] = lb_sgs

        try:
            k8s.patch_crd('status/loadbalancer', utils.get_res_link(klb_crd),
                          {'security_groups': lb_sgs})
        except k_exc.K8sResourceNotFound:
            LOG.debug('KuryrLoadBalancer %s not found', svc_name)
            return None
        except k_exc.K8sUnprocessableEntity:
            LOG.debug('KuryrLoadBalancer entity not processable '
                      'due to missing loadbalancer field.')
            return None
        except k_exc.K8sClientException:
            LOG.exception('Error syncing KuryrLoadBalancer %s', svc_name)
            raise
        return klb_crd
    def test_update_kuryrport_crd(self, ged, k8s):
        ged.return_value = [self._driver]
        kp = kuryrport.KuryrPortHandler()

        kp._update_kuryrport_crd(self._kp, self._vifs)
        self._vif1.obj_reset_changes()
        self._vif2.obj_reset_changes()
        vif1 = self._vif1.obj_to_primitive()
        vif2 = self._vif2.obj_to_primitive()

        arg = {
            'vifs': {
                'eth0': {
                    'default': True,
                    'vif': vif1
                },
                'eth1': {
                    'default': False,
                    'vif': vif2
                }
            }
        }
        kp.k8s.patch_crd.assert_called_once_with('status',
                                                 utils.get_res_link(self._kp),
                                                 arg)
Example 7
    def request_vif(self, pod, project_id, subnets, security_groups):
        pod_name = pod['metadata']['name']
        os_net = clients.get_network_client()
        vif_plugin = 'sriov'
        subnet_id = next(iter(subnets))
        physnet = self._get_physnet_for_subnet_id(subnet_id)
        LOG.debug("Pod {} handling {}".format(pod_name, physnet))

        amount = self._get_remaining_sriov_vfs(pod, physnet)
        if not amount:
            LOG.error("SRIOV VIF request failed due to lack of "
                      "available VFs for the current pod creation")
            return None
        rq = self._get_port_request(pod, project_id, subnets, security_groups)

        port = os_net.create_port(**rq)
        self._check_port_binding([port])
        if not self._tag_on_creation:
            c_utils.tag_neutron_resources([port])
        vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
        vif.physnet = physnet
        vif.pod_name = pod_name
        vif.pod_link = utils.get_res_link(pod)

        LOG.debug("{} vifs are available for the pod {}".format(
            amount, pod_name))

        self._reduce_remaining_sriov_vfs(pod, physnet)
        return vif
    def _remove_unused_listeners(self, loadbalancer_crd):
        current_listeners = {
            p['listener_id']
            for p in loadbalancer_crd['status'].get('pools', [])
        }
        removed_ids = set()
        for listener in loadbalancer_crd['status'].get('listeners', []):
            if listener['id'] in current_listeners:
                continue
            self._drv_lbaas.release_listener(
                loadbalancer_crd['status']['loadbalancer'], listener)
            removed_ids.add(listener['id'])
        if removed_ids:
            loadbalancer_crd['status']['listeners'] = [
                lsnr for lsnr
                in loadbalancer_crd['status'].get('listeners', [])
                if lsnr['id'] not in removed_ids
            ]

            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd),
                                     loadbalancer_crd['status'])
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          loadbalancer_crd)
            except k_exc.K8sClientException:
                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                              loadbalancer_crd)
                raise
        return bool(removed_ids)
    def _remove_unused_members(self, loadbalancer_crd):
        lb_crd_name = loadbalancer_crd['metadata']['name']
        spec_ports = {}
        pools = loadbalancer_crd['status'].get('pools', [])
        for pool in pools:
            port = self._get_port_in_pool(pool, loadbalancer_crd)
            if port:
                if not port.get('name'):
                    port['name'] = None
                spec_ports[port['name']] = pool['id']

        ep_slices = loadbalancer_crd['spec'].get('endpointSlices', [])
        current_targets = [utils.get_current_endpoints_target(
                           ep, p, spec_ports, lb_crd_name)
                           for ep_slice in ep_slices
                           for ep in ep_slice['endpoints']
                           for p in ep_slice['ports']
                           if p.get('name') in spec_ports]

        removed_ids = set()
        for member in loadbalancer_crd['status'].get('members', []):
            member_name = member.get('name', '')
            try:
                # NOTE: The member name is composed of:
                # NAMESPACE_NAME/POD_NAME:PROTOCOL_PORT
                pod_name = member_name.split('/')[1].split(':')[0]
            except AttributeError:
                pod_name = ""

            if ((str(member['ip']), pod_name, member['port'],
                 member['pool_id']) in current_targets):
                continue

            self._drv_lbaas.release_member(
                loadbalancer_crd['status']['loadbalancer'], member)
            removed_ids.add(member['id'])

        if removed_ids:
            loadbalancer_crd['status']['members'] = [
                m for m in loadbalancer_crd['status']['members']
                if m['id'] not in removed_ids
            ]

            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd),
                                     loadbalancer_crd['status'])
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          loadbalancer_crd)
            except k_exc.K8sClientException:
                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                              loadbalancer_crd)
                raise
        return bool(removed_ids)
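
The pod name parsing above relies on the NAMESPACE_NAME/POD_NAME:PROTOCOL_PORT convention described in the comment. A quick standalone check of that split chain, using a made-up member name:

member_name = 'default/my-pod:8080'
pod_name = member_name.split('/')[1].split(':')[0]
assert pod_name == 'my-pod'
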
Example 10
    def _update_kuryrport_crd(self, kuryrport_crd, vifs):
        LOG.debug('Updatting CRD %s', kuryrport_crd["metadata"]["name"])
        vif_dict = {}
        for ifname, data in vifs.items():
            data['vif'].obj_reset_changes(recursive=True)
            vif_dict[ifname] = {'default': data['default'],
                                'vif': data['vif'].obj_to_primitive()}

        self.k8s.patch_crd('status', utils.get_res_link(kuryrport_crd),
                           {'vifs': vif_dict})
Example 11
    def test_get_res_link_no_namespace(self):
        res = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': 'ns-1'
            }
        }

        self.assertEqual(utils.get_res_link(res), '/api/v1/namespaces/ns-1')
Example 12
    def _patch_kuryrnetwork_crd(self, kns_crd, populated=True):
        kubernetes = clients.get_kubernetes_client()
        crd_name = kns_crd['metadata']['name']
        LOG.debug('Patching KuryrNetwork CRD %s', crd_name)
        try:
            kubernetes.patch_crd('status', utils.get_res_link(kns_crd),
                                 {'populated': populated})
        except exceptions.K8sClientException:
            LOG.exception('Error updating KuryrNetwork CRD %s', crd_name)
            raise

    def _sync_lbaas_loadbalancer(self, loadbalancer_crd):
        changed = False
        lb = loadbalancer_crd['status'].get('loadbalancer')

        if lb and lb['ip'] != loadbalancer_crd['spec'].get('ip'):
            # if loadbalancerIP was associated to lbaas VIP, disassociate it.

            try:
                pub_info = loadbalancer_crd['status']['service_pub_ip_info']
            except KeyError:
                pub_info = None

            if pub_info:
                self._drv_service_pub_ip.disassociate_pub_ip(
                    loadbalancer_crd['status']['service_pub_ip_info'])
                self._drv_service_pub_ip.release_pub_ip(
                    loadbalancer_crd['status']['service_pub_ip_info'])

            self._drv_lbaas.release_loadbalancer(
                loadbalancer=lb)

            lb = {}
            loadbalancer_crd['status'] = {}

        if not lb:
            if loadbalancer_crd['spec'].get('ip'):
                lb_name = self._drv_lbaas.get_service_loadbalancer_name(
                    loadbalancer_crd['metadata']['namespace'],
                    loadbalancer_crd['metadata']['name'])
                lb = self._drv_lbaas.ensure_loadbalancer(
                    name=lb_name,
                    project_id=loadbalancer_crd['spec'].get('project_id'),
                    subnet_id=loadbalancer_crd['spec'].get('subnet_id'),
                    ip=loadbalancer_crd['spec'].get('ip'),
                    security_groups_ids=loadbalancer_crd['spec'].get(
                        'security_groups_ids'),
                    service_type=loadbalancer_crd['spec'].get('type'),
                    provider=loadbalancer_crd['spec'].get('provider'))
                loadbalancer_crd['status']['loadbalancer'] = lb

            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd),
                                     loadbalancer_crd['status'])
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          loadbalancer_crd)
            except k_exc.K8sClientException:
                LOG.exception('Error updating KuryrLoadbalancer CRD %s',
                              loadbalancer_crd)
                raise
            changed = True

        return changed
    def on_present(self, loadbalancer_crd):
        if self._should_ignore(loadbalancer_crd):
            LOG.debug("Ignoring Kubernetes service %s",
                      loadbalancer_crd['metadata']['name'])
            return

        crd_lb = loadbalancer_crd['status'].get('loadbalancer')
        if crd_lb:
            lb_provider = crd_lb.get('provider')
            spec_lb_provider = loadbalancer_crd['spec'].get('provider')
            # amphora to ovn upgrade
            if not lb_provider or lb_provider in OCTAVIA_DEFAULT_PROVIDERS:
                if (spec_lb_provider and
                        spec_lb_provider not in OCTAVIA_DEFAULT_PROVIDERS):
                    self._ensure_release_lbaas(loadbalancer_crd)

            # ovn to amphora downgrade
            elif lb_provider and lb_provider not in OCTAVIA_DEFAULT_PROVIDERS:
                if (not spec_lb_provider or
                        spec_lb_provider in OCTAVIA_DEFAULT_PROVIDERS):
                    self._ensure_release_lbaas(loadbalancer_crd)

        if self._sync_lbaas_members(loadbalancer_crd):
            # Note(yboaron) For LoadBalancer services, we should allocate FIP,
            # associate it to LB VIP and update K8S service status
            lb_ip = loadbalancer_crd['spec'].get('lb_ip')
            pub_info = loadbalancer_crd['status'].get(
                    'service_pub_ip_info')
            if pub_info is None and loadbalancer_crd['spec'].get('type'):
                service_pub_ip_info = (
                    self._drv_service_pub_ip.acquire_service_pub_ip_info(
                        loadbalancer_crd['spec']['type'],
                        lb_ip,
                        loadbalancer_crd['spec']['project_id'],
                        loadbalancer_crd['status']['loadbalancer'][
                            'port_id']))
                if service_pub_ip_info:
                    self._drv_service_pub_ip.associate_pub_ip(
                        service_pub_ip_info, loadbalancer_crd['status'][
                            'loadbalancer']['port_id'])
                    loadbalancer_crd['status'][
                        'service_pub_ip_info'] = service_pub_ip_info
                    self._update_lb_status(loadbalancer_crd)
                    kubernetes = clients.get_kubernetes_client()
                    try:
                        kubernetes.patch_crd('status', utils.get_res_link(
                            loadbalancer_crd), loadbalancer_crd['status'])
                    except k_exc.K8sResourceNotFound:
                        LOG.debug('KuryrLoadbalancer CRD not found %s',
                                  loadbalancer_crd)
                    except k_exc.K8sClientException:
                        LOG.exception('Error updating KuryrLoadbalancer '
                                      'CRD %s', loadbalancer_crd)
                        raise
Example 15
    def _trigger_reconciliation(self, loadbalancer_crds):
        LOG.debug("Reconciling the KuryrLoadBalancer CRDs")
        lbaas = clients.get_loadbalancer_client()
        resources_fn = {
            'loadbalancer': lbaas.load_balancers,
            'listener': lbaas.listeners,
            'pool': lbaas.pools
        }
        resources = {'loadbalancer': [], 'listener': [], 'pool': []}

        for klb in loadbalancer_crds:
            if klb['metadata'].get('deletionTimestamp'):
                continue

            selflink = utils.get_res_link(klb)
            lb_id = klb.get('status', {}).get('loadbalancer', {}).get('id')

            if lb_id:
                resources['loadbalancer'].append({
                    'id': lb_id,
                    'selflink': selflink,
                    'klb': klb
                })

            for lbl in klb.get('status', {}).get('listeners', []):
                resources['listener'].append({
                    'id': lbl['id'],
                    'selflink': selflink,
                    'lklb': klb
                })
            for pl in klb.get('status', {}).get('pools', []):
                resources['pool'].append({
                    'id': pl['id'],
                    'selflink': selflink,
                    'pklb': klb
                })

        resources_already_triggered = []
        # let's reconcile load balancers first, listeners and then pools
        resource_types = ('loadbalancer', 'listener', 'pool')
        for resource_type in resource_types:
            filters = {}
            self._drv_lbaas.add_tags(resource_type, filters)
            os_list = resources_fn[resource_type]
            os_resources = os_list(**filters)
            os_resources_id = [rsrc['id'] for rsrc in os_resources]
            for data in resources[resource_type]:
                if data['selflink'] in resources_already_triggered:
                    continue
                if data['id'] not in os_resources_id:
                    resources_already_triggered.append(data['selflink'])
                    LOG.debug("Reconciling KuryrLoadBalancer CRD: %s",
                              data['selflink'])
                    self._reconcile_lb(data)
Example 16
    def test_get_res_link_core_res(self):
        res = {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'pod-1',
                'namespace': 'default'
            }
        }
        self.assertEqual(utils.get_res_link(res),
                         '/api/v1/namespaces/default/pods/pod-1')
Example 17
    def test_get_res_link_beta_res(self):
        res = {
            'apiVersion': 'networking.k8s.io/v2beta2',
            'kind': 'NetworkPolicy',
            'metadata': {
                'name': 'np-1',
                'namespace': 'default'
            }
        }
        self.assertEqual(
            utils.get_res_link(res), '/apis/networking.k8s.io/'
            'v2beta2/namespaces/default/networkpolicies/np-1')
    def add(self, params):
        kp_name = self._get_obj_name(params)
        timeout = CONF.cni_daemon.vif_annotation_timeout

        # Try to confirm if CRD in the registry is not stale cache. If it is,
        # remove it.
        with lockutils.lock(kp_name, external=True):
            if kp_name in self.registry:
                cached_kp = self.registry[kp_name]['kp']
                try:
                    kp = self.k8s.get(k_utils.get_res_link(cached_kp))
                except Exception:
                    LOG.exception('Error when getting KuryrPort %s', kp_name)
                    raise exceptions.ResourceNotReady(kp_name)

                if kp['metadata']['uid'] != cached_kp['metadata']['uid']:
                    LOG.warning(
                        'Stale KuryrPort %s detected in cache. (API '
                        'uid=%s, cached uid=%s). Removing it from '
                        'cache.', kp_name, kp['metadata']['uid'],
                        cached_kp['metadata']['uid'])
                    del self.registry[kp_name]

        vifs = self._do_work(params, b_base.connect, timeout)

        # NOTE(dulek): Saving containerid to be able to distinguish old DEL
        #              requests that we should ignore. We need a lock to
        #              prevent race conditions and replace whole object in the
        #              dict for multiprocessing.Manager to notice that.
        with lockutils.lock(kp_name, external=True):
            d = self.registry[kp_name]
            d['containerid'] = params.CNI_CONTAINERID
            self.registry[kp_name] = d
            LOG.debug('Saved containerid = %s for CRD %s',
                      params.CNI_CONTAINERID, kp_name)

        # Wait for timeout sec, 1 sec between tries, retry when even one
        # vif is not active.
        @retrying.retry(stop_max_delay=timeout * 1000,
                        wait_fixed=RETRY_DELAY,
                        retry_on_result=utils.any_vif_inactive)
        def wait_for_active(kp_name):
            return self.registry[kp_name]['vifs']

        vifs = wait_for_active(kp_name)
        for vif in vifs.values():
            if not vif.active:
                LOG.error("Timed out waiting for vifs to become active")
                raise exceptions.ResourceNotReady(kp_name)

        return vifs[k_const.DEFAULT_IFNAME]
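
wait_for_active above leans on the retrying library's retry_on_result hook: the decorated function is re-invoked, with a fixed delay, for as long as the predicate applied to its return value is truthy. A self-contained sketch of that mechanism, with a toy predicate standing in for kuryr's utils.any_vif_inactive:

import retrying

calls = {'n': 0}

@retrying.retry(stop_max_delay=5000,   # give up after 5 s overall
                wait_fixed=200,        # 200 ms between attempts
                retry_on_result=lambda result: not result)
def poll():
    # A falsy return triggers a retry; a truthy one stops the loop.
    calls['n'] += 1
    return calls['n'] >= 3

poll()  # returns True after the third attempt
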
Example 19
    def _create_knp_crd(self, policy, i_rules, e_rules):
        networkpolicy_name = policy['metadata']['name']
        namespace = policy['metadata']['namespace']
        pod_selector = policy['spec'].get('podSelector')
        policy_types = policy['spec'].get('policyTypes', [])

        owner_reference = {'apiVersion': policy['apiVersion'],
                           'kind': policy['kind'],
                           'name': policy['metadata']['name'],
                           'uid': policy['metadata']['uid']}

        netpolicy_crd = {
            'apiVersion': 'openstack.org/v1',
            'kind': constants.K8S_OBJ_KURYRNETWORKPOLICY,
            'metadata': {
                'name': networkpolicy_name,
                'namespace': namespace,
                'annotations': {
                    'networkPolicyLink': utils.get_res_link(policy)
                },
                'finalizers': [constants.NETWORKPOLICY_FINALIZER],
                'ownerReferences': [owner_reference]
            },
            'spec': {
                'ingressSgRules': i_rules,
                'egressSgRules': e_rules,
                'podSelector': pod_selector,
                'policyTypes': policy_types,
            },
            'status': {
                'securityGroupRules': [],
            },
        }

        try:
            LOG.debug("Creating KuryrNetworkPolicy CRD %s" % netpolicy_crd)
            url = '{}/{}/kuryrnetworkpolicies'.format(
                constants.K8S_API_CRD_NAMESPACES,
                namespace)
            netpolicy_crd = self.kubernetes.post(url, netpolicy_crd)
        except exceptions.K8sNamespaceTerminating:
            raise
        except exceptions.K8sClientException as exc:
            self.kubernetes.add_event(policy, 'FailedToCreateNetworkPolicyCRD',
                                      f'Adding corresponding Kuryr Network '
                                      f'Policy CRD has failed: {exc}',
                                      'Warning')
            LOG.exception("Kubernetes Client Exception creating "
                          "KuryrNetworkPolicy CRD.")
            raise
        return netpolicy_crd
Example 20
    def _set_pod_info(self, pod, info):
        if not info[0]:
            LOG.debug("Removing info annotations: %r", info)
            annotation = None, info[1]
        else:
            annotation = jsonutils.dumps(info[0], sort_keys=True), info[1]
            LOG.debug("Setting info annotations: %r", annotation)

        k8s = clients.get_kubernetes_client()
        k8s.annotate(utils.get_res_link(pod), {
            constants.K8S_ANNOTATION_LABEL: annotation[0],
            constants.K8S_ANNOTATION_IP: annotation[1]
        },
                     resource_version=pod['metadata']['resourceVersion'])
Example 21
def bump_networkpolicies(namespace=None):
    k8s = clients.get_kubernetes_client()
    nps = get_networkpolicies(namespace)
    for np in nps:
        try:
            k8s.annotate(utils.get_res_link(np),
                         {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())})
        except k_exc.K8sResourceNotFound:
            # Ignore if NP got deleted.
            pass
        except k_exc.K8sClientException:
            LOG.warning("Failed to annotate network policy %s to force its "
                        "recalculation.", utils.get_res_unique_name(np))
            continue
Example 22
    def test_get_res_link_custom_api(self):
        res = {
            'apiVersion': 'openstack.org/v1',
            'kind': 'KuryrPort',
            'metadata': {
                'name': 'kp-1',
                'namespace': 'default'
            }
        }

        self.assertEqual(
            utils.get_res_link(res),
            '/apis/openstack.org/v1/namespaces/default/'
            'kuryrports/kp-1')
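
Taken together, the get_res_link tests in this collection (Examples 11, 16, 17 and this one) pin down the path layout: core-group resources live under /api/<version>, everything else under /apis/<group>/<version>, with a namespaces segment only for namespaced resources. A minimal sketch consistent with those four cases, assuming naive lowercase pluralization; the actual kuryr-kubernetes helper may handle more kinds and edge cases:

def get_res_link_sketch(res):
    # Hypothetical reconstruction for illustration only.
    api_version = res['apiVersion']
    # The core group ('v1') has no '/', named groups do.
    prefix = '/apis' if '/' in api_version else '/api'
    kind = res['kind'].lower()
    # Naive pluralization: Pod -> pods, NetworkPolicy -> networkpolicies.
    plural = kind[:-1] + 'ies' if kind.endswith('y') else kind + 's'
    parts = [prefix, api_version]
    namespace = res['metadata'].get('namespace')
    if namespace:
        parts += ['namespaces', namespace]
    parts += [plural, res['metadata']['name']]
    return '/'.join(parts)

assert get_res_link_sketch(
    {'apiVersion': 'v1', 'kind': 'Namespace',
     'metadata': {'name': 'ns-1'}}) == '/api/v1/namespaces/ns-1'
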
    def test_neutron_to_osvif_nested_dpdk(self, m_mk_port_profile, m_mk_vif,
                                          m_make_vif_network, m_is_port_active,
                                          m_get_vif_name):
        vif_plugin = const.K8S_OS_VIF_NOOP_PLUGIN
        port_id = mock.sentinel.port_id
        mac_address = mock.sentinel.mac_address
        port_filter = mock.sentinel.port_filter
        subnets = mock.sentinel.subnets
        network = mock.sentinel.network
        port_active = mock.sentinel.port_active
        vif_name = mock.sentinel.vif_name
        vif = mock.sentinel.vif
        port_profile = mock.sentinel.port_profile

        m_make_vif_network.return_value = network
        m_is_port_active.return_value = port_active
        m_get_vif_name.return_value = vif_name
        m_mk_vif.return_value = vif
        m_mk_port_profile.return_value = port_profile

        pod = fake.get_k8s_pod()

        port = {
            'id': port_id,
            'mac_address': mac_address,
            'binding:vif_details': {
                'port_filter': port_filter
            },
        }

        self.assertEqual(vif,
                         ovu.neutron_to_osvif_vif_dpdk(port, subnets, pod))

        m_make_vif_network.assert_called_once_with(port, subnets)
        m_is_port_active.assert_called_once_with(port)
        m_get_vif_name.assert_called_once_with(port)
        m_mk_port_profile.assert_called_once_with(
            l3_setup=False, selflink=utils.get_res_link(pod))

        m_mk_vif.assert_called_once_with(id=port_id,
                                         port_profile=port_profile,
                                         address=mac_address,
                                         network=network,
                                         has_traffic_filtering=port_filter,
                                         preserve_on_delete=False,
                                         active=port_active,
                                         plugin=vif_plugin,
                                         pci_address="",
                                         dev_driver="",
                                         vif_name=vif_name)
Example 24
    def _delete_host_networking_ports(self):
        k8s = clients.get_kubernetes_client()
        pods = k8s.get('/api/v1/pods')['items']
        kuryrports = k8s.get(constants.K8S_API_CRD_KURYRPORTS)['items']
        pairs = driver_utils.zip_resources(kuryrports, pods)
        for kuryrport, pod in pairs:
            if driver_utils.is_host_network(pod):
                LOG.warning(f'Found unnecessary KuryrPort '
                            f'{utils.get_res_unique_name(kuryrport)} created '
                            f'for host networking pod. Deleting it.')
                try:
                    k8s.delete(utils.get_res_link(kuryrport))
                except k_exc.K8sResourceNotFound:
                    pass
Example 25
    def _update_crd_spec(self, loadbalancer_crd, service):
        svc_name = service['metadata']['name']
        kubernetes = clients.get_kubernetes_client()
        spec = self._build_kuryrloadbalancer_spec(service)
        LOG.debug('Patching KuryrLoadBalancer CRD %s', loadbalancer_crd)
        try:
            kubernetes.patch_crd('spec',
                                 utils.get_res_link(loadbalancer_crd), spec)
        except k_exc.K8sResourceNotFound:
            LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd)
        except k_exc.K8sConflict:
            raise k_exc.ResourceNotReady(svc_name)
        except k_exc.K8sClientException:
            LOG.exception('Error updating KuryrLoadBalancer CRD %s',
                          loadbalancer_crd)
            raise
Example 26
    def _has_timeout_changes(self, service, loadbalancer_crd):
        link = utils.get_res_link(service)
        cli_timeout, mem_timeout = self._get_data_timeout_annotation(service)

        for spec_value, current_value in [
            (loadbalancer_crd['spec'].get('timeout_client_data'), cli_timeout),
            (loadbalancer_crd['spec'].get('timeout_member_data'), mem_timeout)
        ]:
            if not spec_value and not current_value:
                continue
            elif spec_value != current_value:
                LOG.debug("LBaaS spec listener timeout {} != {} for {}".format(
                    spec_value, current_value, link))
                return True

        return False
Example 27
def patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules):
    kubernetes = clients.get_kubernetes_client()
    crd_name = crd['metadata']['name']
    LOG.debug('Patching KuryrNetworkPolicy CRD %s', crd_name)
    try:
        spec = {
            'ingressSgRules': i_rules,
            'egressSgRules': e_rules,
        }

        kubernetes.patch_crd('spec', utils.get_res_link(crd), spec)
    except k_exc.K8sResourceNotFound:
        LOG.debug('KuryrNetworkPolicy CRD not found %s', crd_name)
    except k_exc.K8sClientException:
        LOG.exception('Error updating KuryrNetworkPolicy CRD %s', crd_name)
        raise
    def _patch_kuryrnetworkpolicy_crd(self, knp, field, data,
                                      action='replace'):
        name = knp['metadata']['name']
        LOG.debug('Patching KuryrNetworkPolicy CRD %s', name)
        try:
            status = self.k8s.patch_crd(field, utils.get_res_link(knp),
                                        data, action=action)
        except exceptions.K8sResourceNotFound:
            LOG.debug('KuryrNetworkPolicy CRD not found %s', name)
            return None
        except exceptions.K8sClientException:
            LOG.exception('Error updating KuryrNetworkPolicy CRD %s', name)
            raise

        knp['status'] = status
        return knp
Example 29
    def __call__(self, event, *args, **kwargs):
        start_time = time.time()
        deadline = time.time() + self._timeout
        for attempt in itertools.count(1):
            if event.get('type') in ['MODIFIED', 'ADDED']:
                obj = event.get('object')
                if obj:
                    try:
                        obj_link = utils.get_res_link(obj)
                    except KeyError:
                        LOG.debug("Unknown object, skipping: %s", obj)
                    else:
                        try:
                            self._k8s.get(obj_link)
                        except exceptions.K8sResourceNotFound:
                            LOG.debug(
                                "There is no need to process the "
                                "retry as the object %s has already "
                                "been deleted.", obj_link)
                            return
                        except (exceptions.K8sClientException,
                                requests.ConnectionError):
                            LOG.debug("Kubernetes client error getting the "
                                      "object. Continuing with handler "
                                      "execution.")
            try:
                info = {'elapsed': time.time() - start_time}
                self._handler(event, *args, retry_info=info, **kwargs)
                break
            except os_exc.ConflictException as ex:
                if ex.details.startswith('Quota exceeded for resources'):
                    with excutils.save_and_reraise_exception() as ex:
                        if self._sleep(deadline, attempt, ex.value):
                            ex.reraise = False
                else:
                    raise
            except self._exceptions:
                with excutils.save_and_reraise_exception() as ex:
                    if self._sleep(deadline, attempt, ex.value):
                        ex.reraise = False
                    else:
                        LOG.debug('Report handler unhealthy %s',
                                  self._handler)
                        self._handler.set_liveness(alive=False)
            except Exception:
                LOG.exception('Report handler unhealthy %s', self._handler)
                self._handler.set_liveness(alive=False)
                raise
Example 30
    def _get_subnet_id(self, service, project_id, ip):
        subnets_mapping = self._drv_subnets.get_subnets(service, project_id)
        subnet_ids = {
            subnet_id
            for subnet_id, network in subnets_mapping.items()
            for subnet in network.subnets.objects if ip in subnet.cidr
        }

        if len(subnet_ids) != 1:
            raise k_exc.IntegrityError(
                _("Found %(num)s subnets for service %(link)s IP %(ip)s") % {
                    'link': utils.get_res_link(service),
                    'ip': ip,
                    'num': len(subnet_ids)
                })

        return subnet_ids.pop()
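
_get_subnet_id keeps only the subnets whose CIDR contains the service IP and insists on exactly one match. The same membership test, illustrated with the stdlib ipaddress module on made-up data; the real code walks os-vif network and subnet objects instead:

import ipaddress

subnets = {
    'subnet-a': ipaddress.ip_network('10.0.0.0/24'),
    'subnet-b': ipaddress.ip_network('10.0.1.0/24'),
}
ip = ipaddress.ip_address('10.0.1.14')
matches = {sid for sid, cidr in subnets.items() if ip in cidr}
assert matches == {'subnet-b'}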