Example #1
    def _get_port_from_pool(self, pool_key, pod, subnets, security_groups):
        try:
            pool_ports = self._available_ports_pools[pool_key]
        except (KeyError, AttributeError):
            raise exceptions.ResourceNotReady(pod)
        try:
            port_id = pool_ports[security_groups].pop()
        except (KeyError, IndexError):
            # Get another port from the pool and update its security groups
            # to the appropriate ones, reusing a port from the security
            # group that was updated least recently.
            pool_updates = self._last_update.get(pool_key, {})
            if not pool_updates:
                # No pool update info; select any non-empty group.
                for sg_group, ports in pool_ports.items():
                    if len(ports) > 0:
                        port_id = pool_ports[sg_group].pop()
                        break
                else:
                    raise exceptions.ResourceNotReady(pod)
            else:
                min_date = -1
                for sg_group, date in pool_updates.items():
                    if pool_ports.get(sg_group):
                        if min_date == -1 or date < min_date:
                            min_date = date
                            min_sg_group = sg_group
                if min_date == -1:
                    # Pool is empty, no port to reuse.
                    raise exceptions.ResourceNotReady(pod)
                port_id = pool_ports[min_sg_group].pop()
            neutron = clients.get_neutron_client()
            neutron.update_port(
                port_id,
                {
                    "port": {
                        'security_groups': list(security_groups)
                    }
                })
        if config.CONF.kubernetes.port_debug:
            neutron = clients.get_neutron_client()
            neutron.update_port(
                port_id,
                {
                    "port": {
                        'name': c_utils.get_port_name(pod),
                        'device_id': pod['metadata']['uid']
                    }
                })
        # Check if the pool needs to be repopulated.
        if (self._get_pool_size(pool_key) <
                oslo_cfg.CONF.vif_pool.ports_pool_min):
            eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
                           security_groups)
        return self._existing_vifs[port_id]
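
The method above assumes a specific shape for the pool bookkeeping. A minimal
sketch of those structures with illustrative values (the exact pool_key
composition is an assumption, not shown in this snippet):

    # Hypothetical contents, inferred from the lookups above:
    # _available_ports_pools maps pool_key -> {security_groups_tuple: [port ids]}
    # _last_update maps pool_key -> {security_groups_tuple: last update time}
    pool_key = ('10.0.0.5', 'project-uuid')  # assumed composition
    self._available_ports_pools = {
        pool_key: {
            ('sg-uuid-1',): ['port-uuid-a', 'port-uuid-b'],
            ('sg-uuid-2',): [],
        },
    }
    self._last_update = {pool_key: {('sg-uuid-1',): 1530000000}}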
Example #2
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports):
        neutron = clients.get_neutron_client()

        rq = self._get_port_request(pod,
                                    project_id,
                                    subnets,
                                    security_groups,
                                    unbound=True)

        bulk_port_rq = {'ports': [rq] * num_ports}
        try:
            ports = neutron.create_port(bulk_port_rq).get('ports')
        except n_exc.NeutronClientException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise
        utils.tag_neutron_resources('ports', [port['id'] for port in ports])

        vif_plugin = self._get_vif_plugin(ports[0])

        # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
        # creation request returning the port objects without binding
        # information, an additional port show is performed to get the binding
        # information
        if vif_plugin == 'unbound':
            port_info = neutron.show_port(ports[0]['id']).get('port')
            vif_plugin = self._get_vif_plugin(port_info)

        vifs = []
        for port in ports:
            vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
            vifs.append(vif)
        return vifs
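
Note that {'ports': [rq] * num_ports} fills the bulk request with num_ports
references to the same dict (the list-comprehension variant in a later example
reuses rq the same way). That is safe here because the request body is only
serialized, never mutated per port. A quick illustration:

    rq = {'port': {'name': 'available-port'}}
    bulk_port_rq = {'ports': [rq] * 3}
    assert all(p is rq for p in bulk_port_rq['ports'])  # same object; fine read-only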
Example #3
    def test_setup_clients_lbaasv2(self, m_neutron, m_k8s, m_cfg):
        k8s_api_root = 'http://127.0.0.1:1234'

        neutron_mock = mock.Mock()
        k8s_dummy = object()

        neutron_mock.list_extensions.return_value = {
            'extensions': [{
                'alias': 'lbaasv2',
                'description': 'Provides Load Balancing',
                'links': [],
                'name': 'Load Balancing v2',
                'updated': '2017-11-28T09:00:00-00:00'
            }]
        }

        m_cfg.kubernetes.api_root = k8s_api_root
        m_neutron.return_value = neutron_mock
        m_k8s.return_value = k8s_dummy

        clients.setup_clients()

        m_k8s.assert_called_with(k8s_api_root)
        self.assertIs(k8s_dummy, clients.get_kubernetes_client())
        self.assertIs(neutron_mock, clients.get_neutron_client())
        self.assertIs(neutron_mock, clients.get_loadbalancer_client())
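
The decorators are not included in this snippet; since mock.patch decorators
are applied bottom-up, the argument order (m_neutron, m_k8s, m_cfg) implies a
stack like the following (the patch targets here are hypothetical):

    @mock.patch('kuryr_kubernetes.config.CONF')         # -> m_cfg (assumed target)
    @mock.patch('kuryr_kubernetes.k8s_client.K8sClient')  # -> m_k8s (assumed target)
    @mock.patch('neutronclient.v2_0.client.Client')     # -> m_neutron (assumed target)
    def test_setup_clients_lbaasv2(self, m_neutron, m_k8s, m_cfg):
        ...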
Example #4
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports):
        neutron = clients.get_neutron_client()

        rq = self._get_port_request(pod, project_id, subnets, security_groups,
                                    unbound=True)

        bulk_port_rq = {'ports': [rq for _ in range(num_ports)]}
        try:
            ports = neutron.create_port(bulk_port_rq).get('ports')
        except n_exc.NeutronClientException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise

        vif_plugin = self._get_vif_plugin(ports[0])

        # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
        # creation request returning the port objects without binding
        # information, an additional (non-bulk) port creation is performed to
        # get the right vif binding information
        if vif_plugin == 'unbound':
            single_port = neutron.create_port(rq).get('port')
            vif_plugin = self._get_vif_plugin(single_port)
            ports.append(single_port)

        vifs = []
        for port in ports:
            vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
            vifs.append(vif)
        return vifs
Example #5
    def _update(self, res_id, vip_port_id):
        response = None
        neutron = clients.get_neutron_client()
        try:
            response = neutron.update_floatingip(
                res_id, {'floatingip': {
                    'port_id': vip_port_id,
                }})
        except n_exc.Conflict:
            LOG.warning(
                "Conflict when assigning floating IP with id %s. "
                "Checking if it's already assigned correctly.", res_id)
            fip = neutron.show_floatingip(res_id).get('floatingip')
            if fip is not None and fip.get('port_id') == vip_port_id:
                LOG.debug('FIP %s already assigned to %s', res_id, vip_port_id)
            else:
                LOG.exception(
                    'Failed to assign FIP %s to VIP port %s. It is '
                    'probably already bound', res_id, vip_port_id)
                raise

        except n_exc.NeutronClientException:
            LOG.error(
                "Failed to update floating IP %s; response: %s",
                res_id, response)
            raise
Example #6
    def allocate_ip(self,
                    pub_net_id,
                    project_id,
                    pub_subnet_id=None,
                    description=None):
        neutron = clients.get_neutron_client()
        request = {
            'floatingip': {
                'tenant_id': project_id,
                'project_id': project_id,
                'floating_network_id': pub_net_id
            }
        }

        if pub_subnet_id is not None:
            request['floatingip']['subnet_id'] = pub_subnet_id
        if description is not None:
            request['floatingip']['description'] = description

        try:
            response = neutron.create_floatingip(request)
        except n_exc.NeutronClientException:
            LOG.error("Failed to create floating IP - netid=%s", pub_net_id)
            raise
        return response['floatingip']['id'], response['floatingip'][
            'floating_ip_address']
Example #7
    def release_loadbalancer(self, loadbalancer):
        neutron = clients.get_neutron_client()
        lbaas = clients.get_loadbalancer_client()
        if lbaas.cascading_capable:
            self._release(
                loadbalancer,
                loadbalancer,
                lbaas.delete,
                lbaas.lbaas_loadbalancer_path % loadbalancer.id,
                params={'cascade': True})

        else:
            self._release(loadbalancer, loadbalancer,
                          lbaas.delete_loadbalancer, loadbalancer.id)

        sg_id = self._find_listeners_sg(loadbalancer)
        if sg_id:
            # Note: reusing activation timeout as deletion timeout
            self._wait_for_deletion(loadbalancer, _ACTIVATION_TIMEOUT)
            try:
                neutron.delete_security_group(sg_id)
            except n_exc.NotFound:
                # NotFound subclasses NeutronClientException, so it must be
                # handled first or this branch is unreachable.
                LOG.debug('Security group %s already deleted', sg_id)
            except n_exc.NeutronClientException:
                LOG.exception('Error when deleting loadbalancer security '
                              'group. Leaving it orphaned.')
Example #8
    def request_vif(self, pod, project_id, subnets, security_groups):
        amount = self._get_remaining_sriov_vfs(pod)
        if not amount:
            LOG.error("SRIOV VIF request failed due to lack of "
                      "available VFs for the current pod creation")
            return None

        pod_name = pod['metadata']['name']
        neutron = clients.get_neutron_client()
        vif_plugin = 'sriov'
        subnet_id = next(iter(subnets))
        physnet = self._get_physnet_for_subnet_id(subnet_id)
        LOG.debug("Pod {} handling {}".format(pod_name, physnet))
        rq = self._get_port_request(pod, project_id, subnets, security_groups)

        port = neutron.create_port(rq).get('port')
        c_utils.tag_neutron_resources('ports', [port['id']])
        vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
        vif.physnet = physnet

        LOG.debug("{} vifs are available for the pod {}".format(
            amount, pod_name))

        self._reduce_remaining_sriov_vfs(pod)
        return vif
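
The subnets argument is consumed with next(iter(subnets)), so it is assumed
to be a mapping keyed by subnet ID (the values being the subnet objects that
neutron_to_osvif_vif consumes):

    # Illustrative shape; a single-entry mapping keyed by the Neutron subnet ID.
    subnets = {'subnet-uuid': subnet_obj}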
Example #9
    def _create_lb_security_group_rule(self, loadbalancer, listener):
        neutron = clients.get_neutron_client()
        sg_id = self._find_listeners_sg(loadbalancer)
        # If an SG for the loadbalancer has not been created yet, create one.
        if not sg_id:
            sg = neutron.create_security_group({
                'security_group': {
                    'name': loadbalancer.name,
                    'project_id': loadbalancer.project_id,
                    },
                })
            sg_id = sg['security_group']['id']
            c_utils.tag_neutron_resources('security-groups', [sg_id])
            loadbalancer.security_groups.append(sg_id)
            vip_port = self._get_vip_port(loadbalancer)
            neutron.update_port(
                vip_port.get('id'),
                {'port': {
                    'security_groups': [sg_id]}})

        try:
            neutron.create_security_group_rule({
                'security_group_rule': {
                    'direction': 'ingress',
                    'port_range_min': listener.port,
                    'port_range_max': listener.port,
                    'protocol': listener.protocol,
                    'security_group_id': sg_id,
                    'description': listener.name,
                },
            })
        except n_exc.NeutronClientException as ex:
            # A conflict means the rule already exists, which is fine.
            if ex.status_code != requests.codes.conflict:
                LOG.exception('Failed when creating security group rule '
                              'for listener %s.', listener.name)
                raise
Example #10
    def _check_quota(self, quota):
        neutron = clients.get_neutron_client()
        sg_quota = quota['security_group']
        sg_func = neutron.list_security_groups
        if utils.has_limit(sg_quota):
            return utils.is_available('security_groups', sg_quota, sg_func)
        return True
Example #11
    def request_vifs(self, pod, project_id, subnets, security_groups,
                     num_ports, trunk_ip=None):
        """This method creates subports and returns a list with their vifs.

        It creates up to num_ports subports and attaches them to the trunk
        port.

        If there are not enough VLAN IDs available for all the requested
        subports, it creates as many subports as there are available VLAN
        IDs.

        Note that neutron's trunk_add_subports is an atomic operation that
        will either attach all or none of the subports. Therefore, if there
        is a VLAN ID collision, all the created ports are deleted and the
        exception is re-raised.
        """
        neutron = clients.get_neutron_client()
        if trunk_ip:
            parent_port = self._get_parent_port_by_host_ip(neutron, trunk_ip)
        else:
            parent_port = self._get_parent_port(neutron, pod)
        trunk_id = self._get_trunk_id(parent_port)

        port_rq, subports_info = self._create_subports_info(
            pod, project_id, subnets, security_groups,
            trunk_id, num_ports, unbound=True)

        if not subports_info:
            LOG.error("There are no vlan ids available to create subports")
            return []

        bulk_port_rq = {'ports': [port_rq] * len(subports_info)}
        try:
            ports = neutron.create_port(bulk_port_rq).get('ports')
        except n_exc.NeutronClientException:
            LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
            raise
        utils.tag_neutron_resources('ports', [port['id'] for port in ports])

        for index, port in enumerate(ports):
            subports_info[index]['port_id'] = port['id']

        try:
            try:
                neutron.trunk_add_subports(trunk_id,
                                           {'sub_ports': subports_info})
            except n_exc.Conflict:
                LOG.error("vlan ids already in use on trunk")
                for port in ports:
                    neutron.delete_port(port['id'])
                raise
        except n_exc.NeutronClientException:
            LOG.exception("Error happened during subport addition to trunk")
            raise

        vifs = []
        for index, port in enumerate(ports):
            vlan_id = subports_info[index]['segmentation_id']
            vif = ovu.neutron_to_osvif_vif_nested_vlan(port, subnets, vlan_id)
            vifs.append(vif)
        return vifs
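
For reference, the sub_ports payload that trunk_add_subports expects pairs
each port with a segmentation type and ID; with illustrative values:

    # Illustrative payload; 'vlan' segmentation matches the nested-VLAN driver.
    subports_info = [
        {'port_id': 'port-uuid-1',
         'segmentation_type': 'vlan',
         'segmentation_id': 101},
    ]
    neutron.trunk_add_subports(trunk_id, {'sub_ports': subports_info})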
Example #12
    def _recover_precreated_ports(self):
        neutron = clients.get_neutron_client()
        available_ports = self._get_ports_by_attrs(
            name='available-port', device_owner='trunk:subport')

        if not available_ports:
            return

        trunk_ports = neutron.list_trunks().get('trunks')
        for trunk in trunk_ports:
            try:
                host_addr = self._get_parent_port_ip(trunk['port_id'])
            except n_exc.PortNotFoundClient:
                LOG.debug('Unable to find parent port for trunk port %s.',
                          trunk['port_id'])
                continue

            for subport in trunk.get('sub_ports'):
                kuryr_subport = None
                for port in available_ports:
                    if port['id'] == subport['port_id']:
                        kuryr_subport = port
                        break

                if kuryr_subport:
                    pool_key = (host_addr, kuryr_subport['project_id'],
                                tuple(kuryr_subport['security_groups']))
                    subnet_id = kuryr_subport['fixed_ips'][0]['subnet_id']
                    subnet = {subnet_id: default_subnet._get_subnet(subnet_id)}
                    vif = ovu.neutron_to_osvif_vif_nested_vlan(
                        kuryr_subport, subnet, subport['segmentation_id'])

                    self._existing_vifs[subport['port_id']] = vif
                    self._available_ports_pools.setdefault(
                        pool_key, []).append(subport['port_id'])
Example #13
    def _create_security_group_rule(
            self, security_group_id, direction, port_range_min,
            port_range_max=None, protocol='TCP', ethertype='IPv4',
            description="Kuryr-Kubernetes NetPolicy SG rule"):
        if not port_range_max:
            port_range_max = port_range_min
        security_group_rule_body = {
            "security_group_rule": {
                "ethertype": ethertype,
                "security_group_id": security_group_id,
                "description": description,
                "direction": direction,
                "protocol": protocol,
                "port_range_min": port_range_min,
                "port_range_max": port_range_max
            }
        }
        LOG.debug("Creating sg rule %s", security_group_rule_body)
        neutron = clients.get_neutron_client()
        try:
            sg_rule = neutron.create_security_group_rule(
                body=security_group_rule_body)
        except n_exc.NeutronClientException:
            LOG.exception("Error creating security group rule for the "
                          "network policy.")
            raise
        return sg_rule
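
A hypothetical call, relying on the defaults above (protocol TCP, IPv4, and
port_range_max falling back to port_range_min):

    # Illustrative: allow ingress TCP/8080 on the policy's security group.
    self._create_security_group_rule(
        security_group_id='sg-uuid', direction='ingress', port_range_min=8080)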
Example #14
    def ensure_network_policy(self, policy, project_id):
        neutron = clients.get_neutron_client()
        LOG.debug("Creating network policy %s", policy['metadata']['name'])
        if self._get_kuryrnetpolicy_crd(policy):
            LOG.debug("Already existing CRD")
            return
        security_group_body = {
            "security_group": {
                "name": policy['metadata']['name'],
                "project_id": project_id,
                "description": "Kuryr-Kubernetes NetPolicy SG"
            }
        }
        try:
            sg = neutron.create_security_group(body=security_group_body)
            i_rules, e_rules = self.apply_network_policy_rules(policy, sg)
        except n_exc.NeutronClientException:
            LOG.exception("Error creating security group for network policy.")
            raise
        try:
            self._add_kuryrnetpolicy_crd(policy, project_id,
                                         sg['security_group']['id'], i_rules,
                                         e_rules)
        except exceptions.K8sClientException:
            LOG.exception("Rolling back security groups")
            neutron.delete_security_group(sg['security_group']['id'])
            raise
Example #15
    def _trigger_return_to_pool(self):
        if not hasattr(self, '_recyclable_ports'):
            LOG.info("Kuryr-controller not yet ready to return ports to "
                     "pools.")
            return
        neutron = clients.get_neutron_client()
        sg_current = {}
        if not config.CONF.kubernetes.port_debug:
            kuryr_subports = self._get_ports_by_attrs(
                device_owner=['trunk:subport', kl_const.DEVICE_OWNER])
            for subport in kuryr_subports:
                if subport['id'] in self._recyclable_ports:
                    sg_current[subport['id']] = subport['security_groups']

        for port_id, pool_key in self._recyclable_ports.copy().items():
            if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
                self._get_pool_size(pool_key) <
                    oslo_cfg.CONF.vif_pool.ports_pool_max):
                port_name = (constants.KURYR_PORT_NAME
                             if config.CONF.kubernetes.port_debug
                             else '')
                if (config.CONF.kubernetes.port_debug or
                        list(pool_key[2]) != sg_current.get(port_id)):
                    try:
                        neutron.update_port(
                            port_id,
                            {
                                "port": {
                                    'name': port_name,
                                    'security_groups': list(pool_key[2])
                                }
                            })
                    except n_exc.NeutronClientException:
                        LOG.warning("Error preparing port %s to be "
                                    "reused, put back on the cleanable "
                                    "pool.", port_id)
                        continue
                self._available_ports_pools.setdefault(
                    pool_key, []).append(port_id)
            else:
                trunk_id = self._get_trunk_id(neutron, pool_key)
                try:
                    self._drv_vif._remove_subport(neutron, trunk_id,
                                                  port_id)
                    self._drv_vif._release_vlan_id(
                        self._existing_vifs[port_id].vlan_id)
                    del self._existing_vifs[port_id]
                    neutron.delete_port(port_id)
                except n_exc.PortNotFoundClient:
                    LOG.debug('Unable to release port %s as it no longer '
                              'exists.', port_id)
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port_id)
                except n_exc.NeutronClientException:
                    LOG.warning('Error removing the subport %s', port_id)
                    continue
            try:
                del self._recyclable_ports[port_id]
            except KeyError:
                LOG.debug('Port already recycled: %s', port_id)
Example #16
    def delete_network_pools(self, net_id):
        if not hasattr(self, '_available_ports_pools'):
            LOG.info("Kuryr-controller not yet ready to delete network pools"
                     "pools.")
            raise exceptions.ResourceNotReady(net_id)
        neutron = clients.get_neutron_client()

        # NOTE(ltomasbo): Note the pods should already be deleted, but their
        # associated ports may not have been recycled yet, therefore not being
        # on the available_ports_pools dict. The next call forces it to be on
        # that dict before cleaning it up
        self._trigger_return_to_pool()
        for pool_key, ports_id in self._available_ports_pools.items():
            if self._get_pool_key_net(pool_key) != net_id:
                continue
            self._available_ports_pools[pool_key] = []
            for port_id in ports_id:
                try:
                    del self._existing_vifs[port_id]
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port_id)
                try:
                    neutron.delete_port(port_id)
                except n_exc.PortNotFoundClient:
                    LOG.debug(
                        'Unable to release port %s as it no longer '
                        'exists.', port_id)
Example #17
    def delete_namespace_subnet(self, net_crd):
        neutron = clients.get_neutron_client()

        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
        subnet_id = net_crd['spec']['subnetId']
        net_id = net_crd['spec']['netId']

        try:
            neutron.remove_interface_router(router_id,
                                            {"subnet_id": subnet_id})
        except n_exc.NotFound:
            LOG.debug("Subnet %(subnet)s not attached to router %(router)s", {
                'subnet': subnet_id,
                'router': router_id
            })
        except n_exc.NeutronClientException:
            LOG.exception(
                "Error deleting subnet %(subnet)s from router "
                "%(router)s.", {
                    'subnet': subnet_id,
                    'router': router_id
                })
            raise

        try:
            neutron.delete_network(net_id)
        except n_exc.NotFound:
            LOG.debug("Neutron Network not found: %s", net_id)
        except n_exc.NetworkInUseClient:
            LOG.exception("One or more ports in use on the network %s.",
                          net_id)
            raise exceptions.ResourceNotReady(net_id)
        except n_exc.NeutronClientException:
            LOG.exception("Error deleting network %s.", net_id)
            raise
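
The net_crd consumed above only needs to carry the two IDs read from its
spec; an illustrative minimal shape:

    net_crd = {'spec': {'subnetId': 'subnet-uuid', 'netId': 'net-uuid'}}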
Example #18
    def _ensure_security_groups(self, loadbalancer, service_type):
        # We only handle SGs for legacy LBaaSv2, Octavia handles it dynamically
        # according to listener ports.
        if loadbalancer.provider == const.NEUTRON_LBAAS_HAPROXY_PROVIDER:
            neutron = clients.get_neutron_client()
            sg_id = None
            try:
                # NOTE(dulek): We're creating another security group to
                #              overcome LBaaS v2 limitations and handle SGs
                #              ourselves.
                if service_type == 'LoadBalancer':
                    sg_id = self._find_listeners_sg(loadbalancer)
                    if not sg_id:
                        sg = neutron.create_security_group({
                            'security_group': {
                                'name': loadbalancer.name,
                                'project_id': loadbalancer.project_id,
                            },
                        })
                        sg_id = sg['security_group']['id']
                    loadbalancer.security_groups.append(sg_id)

                neutron.update_port(
                    loadbalancer.port_id,
                    {'port': {
                        'security_groups': loadbalancer.security_groups}})
            except n_exc.NeutronClientException:
                LOG.exception('Failed to set SG for LBaaS v2 VIP port %s.',
                              loadbalancer.port_id)
                if sg_id:
                    neutron.delete_security_group(sg_id)
                raise
Example #19
    def delete_network_pools(self, net_id):
        neutron = clients.get_neutron_client()
        # NOTE(ltomasbo): Note the pods should already be deleted, but their
        # associated ports may not have been recycled yet, therefore not being
        # on the available_ports_pools dict. The next call forces it to be on
        # that dict before cleaning it up
        self._trigger_return_to_pool()
        for pool_key, ports_ids in self._available_ports_pools.items():
            if self._get_pool_key_net(pool_key) != net_id:
                continue
            self._available_ports_pools[pool_key] = []
            trunk_id = self._get_trunk_id(neutron, pool_key)
            try:
                self._drv_vif._remove_subports(neutron, trunk_id, ports_ids)
            except n_exc.NeutronClientException:
                LOG.exception('Error removing subports from trunk: %s',
                              trunk_id)
                continue

            for port_id in ports_ids:
                try:
                    self._drv_vif._release_vlan_id(
                        self._existing_vifs[port_id].vlan_id)
                    del self._existing_vifs[port_id]
                except KeyError:
                    LOG.debug('Port %s is not in the ports list.', port_id)
                try:
                    neutron.delete_port(port_id)
                except n_exc.PortNotFoundClient:
                    LOG.debug(
                        'Unable to delete subport %s as it no longer '
                        'exists.', port_id)
Example #20
    def _find_listeners_sg(self, loadbalancer, lb_name=None):
        neutron = clients.get_neutron_client()
        if lb_name:
            sgs = neutron.list_security_groups(
                name=lb_name, project_id=loadbalancer.project_id)
            # NOTE(ltomasbo): lb_name parameter is only passed when sg_mode
            # is 'create' and in that case there is only one sg associated
            # to the loadbalancer
            try:
                sg_id = sgs['security_groups'][0]['id']
            except IndexError:
                sg_id = None
                LOG.debug("Security Group not created yet for LBaaS.")
            return sg_id
        try:
            sgs = neutron.list_security_groups(
                name=loadbalancer.name, project_id=loadbalancer.project_id)
            for sg in sgs['security_groups']:
                sg_id = sg['id']
                if sg_id in loadbalancer.security_groups:
                    return sg_id
        except n_exc.NeutronClientException:
            LOG.exception('Cannot list security groups for loadbalancer %s.',
                          loadbalancer.name)

        return None
Example #21
    def _delete_rule_if_no_match(self, rule, all_pod_rules):
        for pod_rule in all_pod_rules:
            if pod_rule['remote_ip_prefix'] == rule['remote_ip_prefix']:
                return
        neutron = clients.get_neutron_client()
        LOG.debug("Deleting sg rule: %r", rule['id'])
        neutron.delete_security_group_rule(rule['id'])
Example #22
    def create_namespace_sg(self, namespace, project_id, crd_spec):
        neutron = clients.get_neutron_client()

        sg_name = "ns/" + namespace + "-sg"
        # create the associated SG for the namespace
        try:
            # The default namespace is handled differently from the rest:
            # it allows traffic from everywhere, while the other namespaces
            # can be accessed from the default one.
            sg = neutron.create_security_group({
                "security_group": {
                    "name": sg_name,
                    "project_id": project_id
                }
            }).get('security_group')
            utils.tag_neutron_resources('security-groups', [sg['id']])
            neutron.create_security_group_rule({
                "security_group_rule": {
                    "direction": "ingress",
                    "remote_ip_prefix": crd_spec['subnetCIDR'],
                    "security_group_id": sg['id']
                }
            })
        except n_exc.NeutronClientException:
            LOG.exception(
                "Error creating security group for the namespace "
                "%s", namespace)
            raise
        return {'sgId': sg['id']}
Example #23
    def is_ready(self, quota):
        neutron = clients.get_neutron_client()
        port_quota = quota['port']
        port_func = neutron.list_ports
        if utils.has_limit(port_quota):
            return utils.is_available('ports', port_quota, port_func)
        return True
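
The two helpers are not shown in this snippet; a plausible sketch of their
contract, assuming a quota value of -1 means unlimited:

    # Assumed semantics, not the verified utils implementation:
    def has_limit(quota):
        return quota != -1

    def is_available(resources, resource_quota, list_func):
        qnt_resources = len(list_func().get(resources, []))
        return resource_quota > qnt_resources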
Example #24
    def _return_ports_to_pool(self):
        """Recycle ports to be reused by future pods.

        For each port in the recyclable_ports dict it reaplies
        security group and changes the port name to available_port.
        Upon successful port update, the port_id is included in the dict
        with the available_ports.

        If a maximun number of ports per pool is set, the port will be
        deleted if the maximun has been already reached.
        """
        neutron = clients.get_neutron_client()
        while True:
            for port_id, pool_key in self._recyclable_ports.copy().items():
                if (not oslo_cfg.CONF.vif_pool.ports_pool_max or
                    self._get_pool_size(pool_key) <
                        oslo_cfg.CONF.vif_pool.ports_pool_max):
                    port_name = (constants.KURYR_PORT_NAME
                                 if config.CONF.kubernetes.port_debug
                                 else '')
                    try:
                        neutron.update_port(
                            port_id,
                            {
                                "port": {
                                    'name': port_name,
                                    'security_groups': list(pool_key[2])
                                }
                            })
                    except n_exc.NeutronClientException:
                        LOG.warning("Error preparing port %s to be reused, put"
                                    " back on the cleanable pool.", port_id)
                        continue
                    self._available_ports_pools.setdefault(
                        pool_key, []).append(port_id)
                else:
                    trunk_id = self._known_trunk_ids.get(pool_key, None)
                    if not trunk_id:
                        p_port = self._drv_vif._get_parent_port_by_host_ip(
                            neutron, pool_key[0])
                        trunk_id = self._drv_vif._get_trunk_id(p_port)
                        self._known_trunk_ids[pool_key] = trunk_id
                    try:
                        self._drv_vif._remove_subport(neutron, trunk_id,
                                                      port_id)
                        self._drv_vif._release_vlan_id(
                            self._existing_vifs[port_id].vlan_id)
                        del self._existing_vifs[port_id]
                        neutron.delete_port(port_id)
                    except n_exc.PortNotFoundClient:
                        LOG.debug('Unable to release port %s as it no longer '
                                  'exists.', port_id)
                    except KeyError:
                        LOG.debug('Port %s is not in the ports list.', port_id)
                    except n_exc.NeutronClientException:
                        LOG.warning('Error removing the subport %s', port_id)
                        continue
                del self._recyclable_ports[port_id]
            eventlet.sleep(oslo_cfg.CONF.vif_pool.ports_pool_update_frequency)
Example #25
    def _get_subnet_cidr(self, subnet_id):
        neutron = clients.get_neutron_client()
        try:
            subnet_obj = neutron.show_subnet(subnet_id)
        except n_exc.NeutronClientException:
            LOG.exception("Subnet %s CIDR not found!", subnet_id)
            raise
        return subnet_obj.get('subnet')['cidr']
Example #26
    def _get_in_use_vlan_ids_set(self, trunk_id):
        vlan_ids = set()
        neutron = clients.get_neutron_client()
        trunk = neutron.show_trunk(trunk_id)
        for port in trunk['trunk']['sub_ports']:
            vlan_ids.add(port['segmentation_id'])

        return vlan_ids
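
A caller could use the returned set to pick a free segmentation ID, for
example (illustrative only; the real driver keeps its own VLAN ID
bookkeeping):

    in_use = self._get_in_use_vlan_ids_set(trunk_id)
    free_vlan_id = next(v for v in range(1, 4095) if v not in in_use)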
Example #27
    def _update_port_address_pairs(self, port_id, address_pairs,
                                   revision_number=None):
        neutron = clients.get_neutron_client()
        neutron.update_port(
            port_id,
            {'port': {'allowed_address_pairs': address_pairs}},
            revision_number=revision_number
        )
Example #28
    def release_vif(self, pod, vif):
        neutron = clients.get_neutron_client()

        try:
            neutron.delete_port(vif.id)
        except n_exc.PortNotFoundClient:
            LOG.debug('Unable to release port %s as it no longer exists.',
                      vif.id)
Example #29
    def release_vif(self, pod, vif, project_id=None, security_groups=None):
        neutron = clients.get_neutron_client()

        try:
            neutron.delete_port(vif.id)
        except n_exc.PortNotFoundClient:
            LOG.debug('Unable to release port %s as it no longer exists.',
                      vif.id)
Example #30
    def request_vif(self, pod, project_id, subnets, security_groups):
        neutron = clients.get_neutron_client()

        rq = self._get_port_request(pod, project_id, subnets, security_groups)
        port = neutron.create_port(rq).get('port')
        vif_plugin = self._get_vif_plugin(port)

        return ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)