Example no. 1
0
    def test_is_network_policy_enabled(self):
        """Both the 'policy' handler and the 'policy' SG driver are required
        for network policy support to report as enabled.
        """
        def _configure(handlers, sg_driver):
            # Set the two options that is_network_policy_enabled inspects.
            CONF.set_override('enabled_handlers', handlers,
                              group='kubernetes')
            CONF.set_override('service_security_groups_driver', sg_driver,
                              group='kubernetes')

        # Neither the handler nor the SG driver is the policy one.
        _configure(['fake_handler'], 'foo')
        self.assertFalse(utils.is_network_policy_enabled())

        # Handler enabled, but the SG driver still isn't 'policy'.
        _configure(['policy'], 'foo')
        self.assertFalse(utils.is_network_policy_enabled())

        # Both pieces in place: the feature must report enabled.
        _configure(['policy'], 'policy')
        self.addCleanup(CONF.clear_override, 'enabled_handlers',
                        group='kubernetes')
        self.addCleanup(CONF.clear_override,
                        'service_security_groups_driver',
                        group='kubernetes')
        self.assertTrue(utils.is_network_policy_enabled())
    def on_finalize(self, kuryrnet_crd, *args, **kwargs):
        """Tear down the Neutron resources backing a KuryrNetwork CRD.

        Releases pooled ports and the namespace subnet, removes the
        namespace security group rules, then drops the CRD finalizer.
        """
        LOG.debug("Deleting kuryrnetwork CRD resources: %s", kuryrnet_crd)

        network_id = kuryrnet_crd.get('status', {}).get('netId')
        if network_id:
            self._drv_vif_pool.delete_network_pools(network_id)
            try:
                self._drv_subnets.delete_namespace_subnet(kuryrnet_crd)
            except k_exc.ResourceNotReady:
                LOG.debug("Subnet is not ready to be removed.")
                # TODO(ltomasbo): Once KuryrPort CRDs is supported, we should
                # execute a delete network ports method here to remove the
                # ports associated to the namespace/subnet, ensuring next
                # retry will be successful
                raise

        ns_name = kuryrnet_crd['spec']['nsName']
        crd_selectors = self._drv_sg.delete_namespace_sg_rules(
            {'metadata': {'name': ns_name}})

        if (driver_utils.is_network_policy_enabled()
                and crd_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            # Affected services need their load balancer SGs refreshed.
            project_id = kuryrnet_crd['spec']['projectId']
            services = driver_utils.get_services()
            self._update_services(services, crd_selectors, project_id)

        LOG.debug('Removing finalizer for KuryrNet CRD %s', kuryrnet_crd)
        try:
            self.k8s.remove_finalizer(kuryrnet_crd,
                                      constants.KURYRNETWORK_FINALIZER)
        except k_exc.K8sClientException:
            LOG.exception('Error removing kuryrnetwork CRD finalizer for %s',
                          kuryrnet_crd)
            raise
Example no. 3
0
    def on_present(self, kuryrport_crd, *args, **kwargs):
        """Ensure every VIF referenced by the KuryrPort CRD becomes active."""
        if not kuryrport_crd['status']['vifs']:
            # No VIFs recorded yet: request them. get_vifs returning a falsy
            # value means this event should be ignored.
            if not self.get_vifs(kuryrport_crd):
                return

        retry_info = kwargs.get('retry_info')

        # Rebuild VIF objects from their serialized (primitive) form.
        vifs = {}
        for ifname, data in kuryrport_crd['status']['vifs'].items():
            vif_obj = (objects.base.VersionedObject
                       .obj_from_primitive(data['vif']))
            vifs[ifname] = {'default': data['default'], 'vif': vif_obj}

        if all(entry['vif'].active for entry in vifs.values()):
            # Nothing left to activate.
            return

        activated = False
        pod = self._get_pod(kuryrport_crd)

        try:
            for ifname, data in vifs.items():
                vif = data['vif']
                if (vif.plugin == constants.KURYR_VIF_TYPE_SRIOV
                        and oslo_cfg.CONF.sriov.enable_node_annotations):
                    pod_node = kuryrport_crd['spec']['podNodeName']
                    # TODO(gryf): This probably will need adoption, so it will
                    # add information to CRD instead of the pod.
                    driver_utils.update_port_pci_info(pod_node, vif)
                if vif.active:
                    continue
                try:
                    self._drv_vif_pool.activate_vif(vif, pod=pod,
                                                    retry_info=retry_info)
                    activated = True
                except os_exc.ResourceNotFound:
                    LOG.debug("Port not found, possibly already deleted. "
                              "No need to activate it")
        finally:
            # Persist any activation even if a later VIF raised.
            if activated:
                project_id = self._drv_project.get_project(pod)

                try:
                    self._update_kuryrport_crd(kuryrport_crd, vifs)
                except k_exc.K8sResourceNotFound as ex:
                    # CRD is gone: release the ports back to the pool.
                    LOG.exception("Failed to update KuryrPort CRD: %s", ex)
                    security_groups = self._drv_sg.get_security_groups(
                        pod, project_id)
                    for ifname, data in vifs.items():
                        self._drv_vif_pool.release_vif(pod, data['vif'],
                                                       project_id,
                                                       security_groups)
                except k_exc.K8sClientException:
                    raise k_exc.ResourceNotReady(pod['metadata']['name'])

                if driver_utils.is_network_policy_enabled():
                    crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                    if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                        services = driver_utils.get_services()
                        self._update_services(services, crd_pod_selectors,
                                              project_id)
Example no. 4
0
 def __init__(self):
     """Load the drivers this KuryrNetwork handler delegates to.

     NOTE(review): driver factories are instantiated in the original
     order — they may register state on first load, so do not reorder.
     """
     super(KuryrNetworkHandler, self).__init__()
     self._drv_project = drivers.NamespaceProjectDriver.get_instance()
     self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
     self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
     # 'multi_pool' dispatches to per-VIF-type pool drivers.
     self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
         specific_driver='multi_pool')
     self._drv_vif_pool.set_vif_driver()
     # LBaaS/service-SG drivers are only needed to keep load balancer
     # security groups in sync when network policy support is on.
     if driver_utils.is_network_policy_enabled():
         self._drv_lbaas = drivers.LBaaSDriver.get_instance()
         self._drv_svc_sg = (
             drivers.ServiceSecurityGroupsDriver.get_instance())
Example no. 5
0
 def __init__(self):
     """Load the drivers this KuryrPort handler delegates to.

     NOTE(review): driver factories are instantiated in the original
     order — they may register state on first load, so do not reorder.
     """
     super(KuryrPortHandler, self).__init__()
     self._drv_project = drivers.PodProjectDriver.get_instance()
     self._drv_subnets = drivers.PodSubnetsDriver.get_instance()
     self._drv_sg = drivers.PodSecurityGroupsDriver.get_instance()
     # REVISIT(ltomasbo): The VIF Handler should not be aware of the pool
     # directly. Due to the lack of a mechanism to load and set the
     # VIFHandler driver, for now it is aware of the pool driver, but this
     # will be reverted as soon as a mechanism is in place.
     self._drv_vif_pool = drivers.VIFPoolDriver.get_instance(
         specific_driver='multi_pool')
     self._drv_vif_pool.set_vif_driver()
     self._drv_multi_vif = drivers.MultiVIFDriver.get_enabled_drivers()
     # LBaaS/service-SG drivers are only needed to keep load balancer
     # security groups in sync when network policy support is on.
     if driver_utils.is_network_policy_enabled():
         self._drv_lbaas = drivers.LBaaSDriver.get_instance()
         self._drv_svc_sg = (drivers.ServiceSecurityGroupsDriver
                             .get_instance())
     self.k8s = clients.get_kubernetes_client()
Example no. 6
0
    def on_present(self, kuryrnet_crd):
        """Create or refresh Neutron resources for a KuryrNetwork CRD.

        Ensures the namespace network, subnet and router attachment exist,
        recording each created id on the CRD status, then refreshes the
        namespace security group rules when needed.
        """
        ns_name = kuryrnet_crd['spec']['nsName']
        project_id = kuryrnet_crd['spec']['projectId']
        kns_status = kuryrnet_crd.get('status', {})

        freshly_created = False
        net_id = kns_status.get('netId')
        if not net_id:
            # Network missing: create it and persist the id on the CRD.
            net_id = self._drv_subnets.create_network(ns_name, project_id)
            self._patch_kuryrnetwork_crd(kuryrnet_crd, {'netId': net_id})
            freshly_created = True
        subnet_id = kns_status.get('subnetId')
        if not subnet_id or freshly_created:
            subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
                ns_name, project_id, net_id)
            self._patch_kuryrnetwork_crd(
                kuryrnet_crd,
                {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr})
            freshly_created = True
        if not kns_status.get('routerId') or freshly_created:
            router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
            self._patch_kuryrnetwork_crd(
                kuryrnet_crd, {'routerId': router_id, 'populated': False})
            freshly_created = True

        # Refresh SG rules whenever something was (re)created or the
        # namespace labels changed since the last reconciliation.
        spec_labels = kuryrnet_crd['spec']['nsLabels']
        if freshly_created or kns_status.get('nsLabels', {}) != spec_labels:
            namespace = driver_utils.get_namespace(ns_name)
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            if (driver_utils.is_network_policy_enabled() and crd_selectors and
                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)
            # Record the labels we just reconciled against.
            self._patch_kuryrnetwork_crd(kuryrnet_crd,
                                         {'nsLabels': spec_labels},
                                         labels=True)
Example no. 7
0
 def _bump_network_policies(self, svc):
     """Trigger re-evaluation of network policies in the service namespace."""
     if not driver_utils.is_network_policy_enabled():
         return
     driver_utils.bump_networkpolicies(svc['metadata']['namespace'])
    def on_present(self, kuryrnet_crd, *args, **kwargs):
        """Reconcile Neutron resources for a KuryrNetwork CRD.

        Ensures the namespace network, subnet and router attachment exist,
        patching the CRD status and emitting Kubernetes events as each step
        succeeds or fails. Finally refreshes security group rules when
        resources were just created or the namespace labels changed.

        :param kuryrnet_crd: the KuryrNetwork CRD dict being handled.
        :raises os_exc.SDKException: on network creation or router
            attachment failures.
        :raises os_exc.ConflictException: on subnet creation conflicts.
        """
        ns_name = kuryrnet_crd['spec']['nsName']
        project_id = kuryrnet_crd['spec']['projectId']
        kns_status = kuryrnet_crd.get('status', {})
        namespace = driver_utils.get_namespace(ns_name)

        crd_creation = False
        net_id = kns_status.get('netId')
        if not net_id:
            try:
                net_id = self._drv_subnets.create_network(namespace,
                                                          project_id)
            except os_exc.SDKException as ex:
                self.k8s.add_event(kuryrnet_crd, 'CreateNetworkFailed',
                                   f'Error during creating Neutron network: '
                                   f'{ex.details}', 'Warning')
                raise
            status = {'netId': net_id}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'CreateNetworkSucceed',
                               f'Neutron network {net_id} for namespace')
            crd_creation = True
        subnet_id = kns_status.get('subnetId')
        if not subnet_id or crd_creation:
            try:
                subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
                    namespace, project_id, net_id)
            except os_exc.ConflictException as ex:
                self.k8s.add_event(kuryrnet_crd, 'CreateSubnetFailed',
                                   f'Error during creating Neutron subnet '
                                   f'for network {net_id}: {ex.details}',
                                   'Warning')
                raise
            status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'CreateSubnetSucceed',
                               f'Neutron subnet {subnet_id} for network '
                               f'{net_id}')
            crd_creation = True
        if not kns_status.get('routerId') or crd_creation:
            try:
                router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
            except os_exc.SDKException as ex:
                # NOTE: router_id is unassigned when add_subnet_to_router
                # raises, so it must not appear in the event message —
                # referencing it here used to raise a NameError that masked
                # the original SDKException.
                self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterFailed',
                                   f'Error adding Neutron subnet {subnet_id} '
                                   f'to the router: {ex.details}',
                                   'Warning')
                raise
            status = {'routerId': router_id, 'populated': False}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterSucceed',
                               f'Neutron subnet {subnet_id} added to router '
                               f'{router_id}')
            crd_creation = True

        # check labels to create sg rules
        ns_labels = kns_status.get('nsLabels', {})
        if (crd_creation or
                ns_labels != kuryrnet_crd['spec']['nsLabels']):
            # update SG and svc SGs
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            if (driver_utils.is_network_policy_enabled() and crd_selectors and
                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)
            # update status
            status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)
            self.k8s.add_event(kuryrnet_crd, 'SGUpdateTriggered',
                               'Neutron security groups update has been '
                               'triggered')
Example no. 9
0
    def on_finalize(self, kuryrport_crd, *args, **kwargs):
        """Release resources tied to a KuryrPort CRD being deleted.

        Releases the pod's VIFs back to the pool, removes network-policy
        SG rules and updates affected services, then drops the finalizers
        from both the pod and the KuryrPort CRD.

        :param kuryrport_crd: the KuryrPort CRD dict being finalized.
        :raises k_exc.K8sClientException: if removing the finalizer from an
            orphaned KuryrPort fails.
        """
        name = kuryrport_crd['metadata']['name']
        namespace = kuryrport_crd['metadata']['namespace']
        try:
            pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                               f"/{namespace}/pods/{name}")
        except k_exc.K8sResourceNotFound:
            # Pod already gone: just unblock deletion of the orphaned CRD.
            LOG.error("Pod %s/%s doesn't exists, deleting orphaned KuryrPort",
                      namespace, name)
            # TODO(gryf): Free resources
            try:
                self.k8s.remove_finalizer(kuryrport_crd,
                                          constants.KURYRPORT_FINALIZER)
            except k_exc.K8sClientException as ex:
                LOG.exception("Failed to remove finalizer from KuryrPort %s",
                              ex)
                raise
            return

        if 'deletionTimestamp' not in pod['metadata']:
            # NOTE(gryf): Ignore deleting KuryrPort, since most likely it was
            # removed manually, while we need vifs for corresponding pod
            # object which apparently is still running.
            LOG.warning('Manually triggered KuryrPort %s removal. This '
                        'action should be avoided, since KuryrPort CRDs are '
                        'internal to Kuryr.', name)
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the port
            # associated, there is no need for deleting sg rules associated to
            # it. So this exception could be safely ignored for the current
            # sg drivers. Only the NP driver associates rules to the pods ips,
            # and that waits for annotations to start.
            #
            # NOTE(gryf): perhaps we don't need to handle this case, since
            # during CRD creation all the things, including security groups
            # rules would be created too.
            LOG.debug("Skipping SG rules deletion associated to the pod %s",
                      pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first the
            # namespace security group driver will raise a ResourceNotReady
            # exception as it cannot access anymore the kuryrnetwork CRD
            # annotated on the namespace object. In such case we set security
            # groups to empty list so that if pools are enabled they will be
            # properly released.
            security_groups = []

        # Return every VIF recorded on the CRD status to the pool.
        for data in kuryrport_crd['status']['vifs'].values():
            vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
        if (driver_utils.is_network_policy_enabled() and crd_pod_selectors and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            # Keep load balancer SGs consistent with the removed rules.
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)

        # Remove finalizer out of pod.
        self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

        # Finally, remove finalizer from KuryrPort CRD
        self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)