def request_vifs(self, pod, project_id, subnets, security_groups, num_ports):
    """Create ``num_ports`` Neutron ports in one bulk request.

    Returns a list of os-vif VIF objects, one per created port.
    Raises the underlying SDK exception if the bulk creation fails.
    """
    os_net = clients.get_network_client()
    port_rq = self._get_port_request(pod, project_id, subnets,
                                     security_groups, unbound=True)
    bulk_port_rq = {'ports': [port_rq] * num_ports}
    try:
        ports = list(os_net.create_ports(bulk_port_rq))
    except os_exc.SDKException:
        LOG.exception("Error creating bulk ports: %s", bulk_port_rq)
        raise
    utils.tag_neutron_resources(ports)

    vif_plugin = ports[0].binding_vif_type
    # NOTE(ltomasbo): Due to the bug (1696051) on neutron bulk port
    # creation request returning the port objects without binding
    # information, an additional port show is performed to get the binding
    # information
    if vif_plugin == 'unbound':
        vif_plugin = os_net.get_port(ports[0].id).binding_vif_type

    return [ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
            for port in ports]
def _cleanup_leftover_ports(self):
    """Remove DOWN Kuryr-owned ports left behind by previous runs.

    With resource tags configured, ports on the deployment networks are
    deleted when they either carry binding details but miss the expected
    tags, or have no binding at all. Without tags, only unbound ports
    are removed. Deletion is best-effort in the tagged path: a failure
    to delete one port must not abort the rest of the cleanup.
    """
    os_net = clients.get_network_client()
    existing_ports = os_net.ports(device_owner=kl_const.DEVICE_OWNER,
                                  status='DOWN')

    def _try_delete(port_id):
        # Best-effort delete shared by both tagged-path branches
        # (previously duplicated inline).
        try:
            os_net.delete_port(port_id)
        except os_exc.SDKException:
            LOG.debug("Problem deleting leftover port %s. "
                      "Skipping.", port_id)

    tags = config.CONF.neutron_defaults.resource_tags
    if tags:
        nets = os_net.networks(tags=tags)
        nets_ids = [n.id for n in nets]
        for port in existing_ports:
            if port.network_id not in nets_ids:
                continue
            if port.binding_host_id:
                if set(tags).difference(set(port.tags)):
                    # delete the port if it has binding details, it
                    # belongs to the deployment subnet and it does not
                    # have the right tags
                    _try_delete(port.id)
            else:
                # delete port if they have no binding but belong to the
                # deployment networks, regardless of their tagging
                _try_delete(port.id)
    else:
        for port in existing_ports:
            if not port.binding_host_id:
                os_net.delete_port(port.id)
def delete_network_pools(self, net_id):
    """Empty every port pool that belongs to the given network.

    Raises ResourceNotReady until the pools have been recovered.
    """
    if not self._recovered_pools:
        LOG.info("Kuryr-controller not yet ready to delete network "
                 "pools.")
        raise exceptions.ResourceNotReady(net_id)
    os_net = clients.get_network_client()
    # NOTE(ltomasbo): Note the pods should already be deleted, but their
    # associated ports may not have been recycled yet, therefore not
    # being on the available_ports_pools dict. The next call forces it
    # to be on that dict before cleaning it up
    self._trigger_return_to_pool()
    for pool_key, ports in list(self._available_ports_pools.items()):
        if self._get_pool_key_net(pool_key) != net_id:
            continue
        pooled_ids = [p_id for sg_ports in ports.values()
                      for p_id in sg_ports]
        for port_id in pooled_ids:
            try:
                del self._existing_vifs[port_id]
            except KeyError:
                LOG.debug('Port %s is not in the ports list.', port_id)
            # NOTE(gryf): openstack client doesn't return information,
            # if the port does not exist
            os_net.delete_port(port_id)
        self._available_ports_pools[pool_key] = {}
def _delete_rule_if_no_match(self, rule, all_pod_rules):
    """Delete the SG rule unless some pod rule shares its remote prefix."""
    if any(pod_rule['remote_ip_prefix'] == rule['remote_ip_prefix']
           for pod_rule in all_pod_rules):
        return
    os_net = clients.get_network_client()
    LOG.debug("Deleting sg rule: %r", rule.id)
    os_net.delete_security_group_rule(rule.id)
def remove_sg_from_pools(self, sg_id, net_id):
    """Dissolve pools keyed on a removed SG within the given network.

    For every pool of ``net_id`` whose SG-tuple key contains ``sg_id``,
    the pool entry is dropped, all SGs are cleared from its ports, and
    the ports are moved to the default (empty SG tuple) pool, marked as
    the most outdated so they get reused first.
    """
    os_net = clients.get_network_client()
    for pool_key, pool_ports in list(self._available_ports_pools.items()):
        if self._get_pool_key_net(pool_key) != net_id:
            continue
        for sg_key, ports in list(pool_ports.items()):
            if sg_id not in sg_key:
                continue
            # remove the pool associated to that SG
            try:
                del self._available_ports_pools[pool_key][sg_key]
            except KeyError:
                LOG.debug("SG already removed from the pool. Ports "
                          "already re-used, no need to change their "
                          "associated SGs.")
                continue
            for port_id in ports:
                # remove all SGs from the port to be reused
                os_net.update_port(port_id, security_groups=None)
                # add the port to the default pool
                self._available_ports_pools[pool_key].setdefault(
                    tuple([]), []).append(port_id)
            # NOTE(ltomasbo): as this ports were not created for this
            # pool, ensuring they are used first, marking them as the
            # most outdated
            self._last_update[pool_key] = {tuple([]): 0}
def update_port_pci_info(pod, vif):
    """Push PCI info from the pod's node annotation into the port's
    binding profile."""
    host = get_host_id(pod)
    pci_info = get_port_annot_pci_info(host, vif.id)
    os_net = clients.get_network_client()
    LOG.debug("Neutron port %s is updated with binding:profile info %s",
              vif.id, pci_info)
    os_net.update_port(vif.id, binding_profile=pci_info)
def create_network(self, ns_name, project_id):
    """Return the id of the namespace network, creating it if missing."""
    os_net = clients.get_network_client()
    net_name = 'ns/' + ns_name + '-net'
    query = {'name': net_name}
    tags = oslo_cfg.CONF.neutron_defaults.resource_tags
    if tags:
        query['tags'] = tags
    networks = os_net.networks(**query)
    try:
        # NOTE(ltomasbo): only one network must exists
        return next(networks).id
    except StopIteration:
        LOG.debug('Network does not exist. Creating.')

    attrs = {'name': net_name, 'project_id': project_id}
    mtu_cfg = oslo_cfg.CONF.neutron_defaults.network_device_mtu
    if mtu_cfg:
        attrs['mtu'] = mtu_cfg
    # create network with namespace as name
    try:
        neutron_net = os_net.create_network(**attrs)
        c_utils.tag_neutron_resources([neutron_net])
    except os_exc.SDKException:
        LOG.exception("Error creating neutron resources for the "
                      "namespace %s", ns_name)
        raise
    return neutron_net.id
def create_subnet(self, ns_name, project_id, net_id):
    """Return ``(id, cidr)`` of the namespace subnet, creating it if
    needed.

    Raises ResourceNotReady when Neutron keeps reporting conflicts on
    subnet allocation so the caller can retry.
    """
    os_net = clients.get_network_client()
    subnet_name = "ns/" + ns_name + "-subnet"
    query = {'name': subnet_name}
    tags = oslo_cfg.CONF.neutron_defaults.resource_tags
    if tags:
        query['tags'] = tags
    subnets = os_net.subnets(**query)
    try:
        # NOTE(ltomasbo): only one subnet must exists
        existing = next(subnets)
        return existing.id, existing.cidr
    except StopIteration:
        LOG.debug('Subnet does not exist. Creating.')

    # create subnet with namespace as name
    subnet_pool_id = oslo_cfg.CONF.namespace_subnet.pod_subnet_pool
    ip_version = utils.get_subnetpool_version(subnet_pool_id)
    try:
        neutron_subnet = os_net.create_subnet(
            network_id=net_id,
            ip_version=ip_version,
            name=subnet_name,
            enable_dhcp=False,
            subnetpool_id=subnet_pool_id,
            project_id=project_id)
    except os_exc.ConflictException:
        LOG.debug("Max number of retries on neutron side achieved, "
                  "raising ResourceNotReady to retry subnet creation "
                  "for %s", subnet_name)
        raise exceptions.ResourceNotReady(subnet_name)
    c_utils.tag_neutron_resources([neutron_subnet])
    return neutron_subnet.id, neutron_subnet.cidr
def create_security_group_rule(body):
    """Create a SG rule from a CRD-style dict and return its id.

    On a conflict with an already existing rule, the existing rule's
    id is parsed out of the exception message instead of failing;
    quota conflicts are re-raised.
    """
    os_net = clients.get_network_client()
    params = dict(body)
    # NOTE(gryf): in openstacksdk, there is ether_type attribute in
    # the security_group_rule object, in CRD we have 'ethertype'
    # instead, just like it was returned by the neutron client.
    if 'ethertype' in params:
        params['ether_type'] = params.pop('ethertype')
    try:
        return os_net.create_security_group_rule(**params).id
    except os_exc.ConflictException as ex:
        if 'quota' in ex.details.lower():
            LOG.error("Failed to create security group rule %s: %s",
                      body, ex.details)
            raise
        LOG.debug("Failed to create already existing security group "
                  "rule %s", body)
        # Get existent sg rule id from exception message
        return str(ex).split()[-1][:-1]
    except os_exc.SDKException:
        LOG.debug("Error creating security group rule")
        raise
def release_vif(self, pod, vif, project_id=None, security_groups=None):
    """Detach the subport VIF from the pod's trunk, free its VLAN id
    and delete the port. ``project_id`` and ``security_groups`` are
    unused here but kept for driver-interface compatibility."""
    os_net = clients.get_network_client()
    parent = self._get_parent_port(pod)
    trunk_id = self._get_trunk_id(parent)
    self._remove_subport(trunk_id, vif.id)
    self._release_vlan_id(vif.vlan_id)
    os_net.delete_port(vif.id)
def _delete_namespace_network_resources(self, subnet_id, net_id):
    """Detach the namespace subnet from the router and delete the network.

    When ports still block network deletion, the leftover ones are
    removed and ResourceNotReady is raised so the caller retries.
    """
    os_net = clients.get_network_client()
    if subnet_id:
        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
        try:
            clients.handle_neutron_errors(
                os_net.remove_interface_from_router, router_id,
                subnet_id=subnet_id)
        except os_exc.NotFoundException as e:
            # Nothing to worry about, either router or subnet is no
            # more, or subnet is already detached.
            LOG.debug(e.message)
        except os_exc.SDKException:
            LOG.exception("Error deleting subnet %(subnet)s from router "
                          "%(router)s.",
                          {'subnet': subnet_id, 'router': router_id})
            raise

    try:
        os_net.delete_network(net_id)
    except os_exc.ConflictException:
        LOG.warning("One or more ports in use on the network %s. "
                    "Deleting leftovers ports before retrying", net_id)
        # NOTE(dulek): '' is there because Neutron seems to unset
        # device_owner on detach.
        owners = ('', 'trunk:subport', kl_const.DEVICE_OWNER)
        leftovers = [p for p in os_net.ports(network_id=net_id)
                     if p.device_owner in owners]
        c_utils.delete_ports(leftovers)
        raise exceptions.ResourceNotReady(net_id)
    except os_exc.SDKException:
        LOG.exception("Error deleting network %s.", net_id)
        raise
def _find_listeners_sg(self, loadbalancer, lb_name=None):
    """Find the id of the security group used by the LB listeners.

    When ``lb_name`` is given the lookup is purely by name (NOTE below:
    only one SG exists in that mode). Otherwise an SG must both match
    ``loadbalancer.name`` and appear in ``loadbalancer.security_groups``.
    Returns None when nothing matches or listing fails; re-raises the
    TypeError when ``loadbalancer.security_groups`` is not set.
    """
    os_net = clients.get_network_client()
    if lb_name:
        sgs = os_net.security_groups(name=lb_name,
                                     project_id=loadbalancer.project_id)
        # NOTE(ltomasbo): lb_name parameter is only passed when sg_mode
        # is 'create' and in that case there is only one sg associated
        # to the loadbalancer
        try:
            sg_id = next(sgs).id
        except StopIteration:
            sg_id = None
            LOG.debug("Security Group not created yet for LBaaS.")
        return sg_id
    try:
        sgs = os_net.security_groups(name=loadbalancer.name,
                                     project_id=loadbalancer.project_id)
        for sg in sgs:
            try:
                if sg.id in loadbalancer.security_groups:
                    return sg.id
            except TypeError:
                # 'in' raises TypeError when security_groups is not a
                # container (e.g. None); log and propagate.
                LOG.exception('Loadbalancer %s does not have '
                              'security_groups defined.',
                              loadbalancer.name)
                raise
    except os_exc.SDKException:
        LOG.exception('Cannot list security groups for loadbalancer %s.',
                      loadbalancer.name)
    return None
def _create_lb_security_group_rule(self, loadbalancer, listener):
    """Open the listener's port in the loadbalancer's security group.

    Creates the SG (and attaches it to the VIP port) on first use;
    an already-existing rule is silently ignored.
    """
    os_net = clients.get_network_client()
    sg_id = self._find_listeners_sg(loadbalancer)
    # if an SG for the loadbalancer has not being created, create one
    if not sg_id:
        sg = os_net.create_security_group(
            name=loadbalancer.name, project_id=loadbalancer.project_id)
        c_utils.tag_neutron_resources([sg])
        loadbalancer.security_groups.append(sg.id)
        vip_port = self._get_vip_port(loadbalancer)
        os_net.update_port(vip_port.id, security_groups=[sg.id])
        sg_id = sg.id

    try:
        os_net.create_security_group_rule(
            direction='ingress',
            port_range_min=listener.port,
            port_range_max=listener.port,
            protocol=listener.protocol,
            security_group_id=sg_id,
            description=listener.name)
    except os_exc.ConflictException:
        # Rule already present; nothing to do.
        pass
    except os_exc.SDKException:
        LOG.exception('Failed when creating security group rule for '
                      'listener %s.', listener.name)
def update_vif_sgs(self, pod, security_groups):
    """Apply the given security groups to the pod's default VIF port."""
    os_net = clients.get_network_client()
    pod_state = utils.get_pod_state(pod)
    if not pod_state:
        return
    # NOTE(ltomasbo): It just updates the default_vif security group
    default_port = pod_state.vifs[constants.DEFAULT_IFNAME].id
    os_net.update_port(default_port,
                       security_groups=list(security_groups))
def request_vif(self, pod, project_id, subnets, security_groups):
    """Create an SR-IOV port for the pod and return its VIF.

    Returns None when the node has no spare VFs on the physnet.
    """
    pod_name = pod['metadata']['name']
    os_net = clients.get_network_client()
    vif_plugin = 'sriov'
    subnet_id = next(iter(subnets))
    physnet = self._get_physnet_for_subnet_id(subnet_id)
    LOG.debug("Pod {} handling {}".format(pod_name, physnet))

    amount = self._get_remaining_sriov_vfs(pod, physnet)
    if not amount:
        LOG.error("SRIOV VIF request failed due to lack of "
                  "available VFs for the current pod creation")
        return None

    port_request = self._get_port_request(pod, project_id, subnets,
                                          security_groups)
    port = os_net.create_port(**port_request)
    self._check_port_binding([port])
    c_utils.tag_neutron_resources([port])

    vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnets)
    vif.physnet = physnet
    vif.pod_name = pod_name
    vif.pod_link = pod['metadata']['selfLink']
    LOG.debug("{} vifs are available for the pod {}".format(
        amount, pod_name))
    self._reduce_remaining_sriov_vfs(pod, physnet)
    return vif
def delete_network_pools(self, net_id):
    """Empty the pools of the given network for the nested (trunk) case.

    Subports are removed from their trunk in one bulk call per pool,
    then each port's VLAN id is released and the port deleted. Raises
    ResourceNotReady until the pools have been recovered.
    """
    if not self._recovered_pools:
        LOG.info("Kuryr-controller not yet ready to delete network "
                 "pools.")
        raise exceptions.ResourceNotReady(net_id)
    os_net = clients.get_network_client()
    # NOTE(ltomasbo): Note the pods should already be deleted, but their
    # associated ports may not have been recycled yet, therefore not being
    # on the available_ports_pools dict. The next call forces it to be on
    # that dict before cleaning it up
    self._trigger_return_to_pool()
    for pool_key, ports in list(self._available_ports_pools.items()):
        if self._get_pool_key_net(pool_key) != net_id:
            continue
        trunk_id = self._get_trunk_id(pool_key)
        ports_id = [p_id for sg_ports in ports.values()
                    for p_id in sg_ports]
        try:
            self._drv_vif._remove_subports(trunk_id, ports_id)
        except (os_exc.SDKException, os_exc.HttpException):
            # Leave this pool untouched; it will be retried later.
            LOG.exception('Error removing subports from trunk: %s',
                          trunk_id)
            continue

        for port_id in ports_id:
            try:
                self._drv_vif._release_vlan_id(
                    self._existing_vifs[port_id].vlan_id)
                del self._existing_vifs[port_id]
            except KeyError:
                LOG.debug('Port %s is not in the ports list.', port_id)
            os_net.delete_port(port_id)

        self._available_ports_pools[pool_key] = {}
def _get_in_use_vlan_ids_set(self, trunk_id):
    """Return the set of segmentation ids used by the trunk's subports."""
    os_net = clients.get_network_client()
    trunk = os_net.get_trunk(trunk_id)
    return {subport['segmentation_id'] for subport in trunk.sub_ports}
def get_subnet_cidr(subnet_id):
    """Return the CIDR of the given subnet; re-raise if it is missing."""
    os_net = clients.get_network_client()
    try:
        subnet = os_net.get_subnet(subnet_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Subnet %s CIDR not found!", subnet_id)
        raise
    return subnet.cidr
def request_vif(self, pod, project_id, subnets, security_groups):
    """Create a Neutron port for the pod and wrap it as an os-vif VIF."""
    os_net = clients.get_network_client()
    port_request = self._get_port_request(pod, project_id, subnets,
                                          security_groups)
    port = os_net.create_port(**port_request)
    utils.tag_neutron_resources([port])
    return ovu.neutron_to_osvif_vif(port.binding_vif_type, port, subnets)
def get_subnetpool_version(subnetpool_id):
    """Return the IP version of the given subnet pool; re-raise if
    missing."""
    os_net = clients.get_network_client()
    try:
        pool = os_net.get_subnet_pool(subnetpool_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Subnetpool %s not found!", subnetpool_id)
        raise
    return pool.ip_version
def _update_port_address_pairs(self, port_id, address_pairs,
                               revision_number=None):
    """Set ``allowed_address_pairs`` on a port.

    When ``revision_number`` is provided, the update carries an
    ``if-match`` precondition so that concurrent modifications of the
    port are detected by Neutron. Previously a bogus
    ``revision_number=None`` precondition was sent when no revision
    was supplied; now ``if_match`` is only included when meaningful.
    """
    os_net = clients.get_network_client()
    if revision_number is None:
        os_net.update_port(port_id, allowed_address_pairs=address_pairs)
    else:
        os_net.update_port(port_id, allowed_address_pairs=address_pairs,
                           if_match=f'revision_number={revision_number}')
def get_subnet_id(**filters):
    """Return the id of the first subnet matching ``filters``, or None."""
    os_net = clients.get_network_client()
    matches = os_net.subnets(**filters)
    return next((subnet.id for subnet in matches), None)
def update_vif_sgs(self, pod, security_groups):
    """Apply the given security groups to the pod's default VIF port."""
    os_net = clients.get_network_client()
    kp = utils.get_kuryrport(pod)
    vifs = utils.get_vifs(kp)
    if not vifs:
        return
    # NOTE(ltomasbo): It just updates the default_vif security group
    default_port = vifs[constants.DEFAULT_IFNAME].id
    os_net.update_port(default_port,
                       security_groups=list(security_groups))
def free_ip(self, res_id):
    """Delete a floating IP; return True on success, False on failure."""
    os_net = clients.get_network_client()
    try:
        os_net.delete_ip(res_id)
    except os_exc.SDKException:
        LOG.error("Failed to delete floating_ip_id =%s !", res_id)
        return False
    else:
        return True
def _recover_precreated_ports(self):
    """Rebuild the port pools from pre-created Kuryr ports in Neutron.

    Ports lacking binding information (controller running without the
    rights to read it) are deleted instead of recovered; subports of
    existing trunks are skipped as they belong to the nested driver.
    Fix: the network client was needlessly re-fetched inside the loop;
    the client obtained at the top is reused.
    """
    os_net = clients.get_network_client()
    attrs = {'device_owner': kl_const.DEVICE_OWNER}
    tags = config.CONF.neutron_defaults.resource_tags
    if tags:
        attrs['tags'] = tags

    if config.CONF.kubernetes.port_debug:
        attrs['name'] = constants.KURYR_PORT_NAME
        available_ports = os_net.ports(**attrs)
    else:
        kuryr_ports = os_net.ports(**attrs)
        in_use_ports = self._get_in_use_ports()
        available_ports = [port for port in kuryr_ports
                           if port.id not in in_use_ports]

    _, available_subports, _ = self._get_trunks_info()
    for port in available_ports:
        # NOTE(ltomasbo): ensure subports are not considered for
        # recovering in the case of multi pools
        if available_subports.get(port.id):
            continue
        if not port.binding_vif_type or not port.binding_host_id:
            # NOTE(ltomasbo): kuryr-controller is running without the
            # rights to get the needed information to recover the ports.
            # Thus, removing the port instead
            os_net.delete_port(port.id)
            continue
        subnet_id = port.fixed_ips[0]['subnet_id']
        subnet = {subnet_id: utils.get_subnet(subnet_id)}
        vif = ovu.neutron_to_osvif_vif(port.binding_vif_type, port,
                                       subnet)
        net_obj = subnet[subnet_id]
        pool_key = self._get_pool_key(port.binding_host_id,
                                      port.project_id, net_obj.id, None)

        self._existing_vifs[port.id] = vif
        self._available_ports_pools.setdefault(
            pool_key, {}).setdefault(
                tuple(sorted(port.security_group_ids)),
                []).append(port.id)

    LOG.info("PORTS POOL: pools updated with pre-created ports")
    self._create_healthcheck_file()
def delete_security_group_rule(security_group_rule_id):
    """Delete a SG rule by id, re-raising SDK errors after logging."""
    os_net = clients.get_network_client()
    LOG.debug("Deleting sg rule with ID: %s", security_group_rule_id)
    try:
        os_net.delete_security_group_rule(security_group_rule_id)
    except os_exc.SDKException:
        LOG.debug("Error deleting security group rule: %s",
                  security_group_rule_id)
        raise
def _delete_namespace_network_resources(self, subnet_id, net_id):
    """Detach the namespace subnet from the router and delete the network.

    On a conflict (ports still present), the DOWN leftover ports are
    removed -- detaching any that are still trunk subports first -- and
    ResourceNotReady is raised so the caller retries the deletion.
    """
    os_net = clients.get_network_client()
    if subnet_id:
        router_id = oslo_cfg.CONF.namespace_subnet.pod_router
        try:
            clients.handle_neutron_errors(
                os_net.remove_interface_from_router, router_id,
                subnet_id=subnet_id)
        except os_exc.NotFoundException as e:
            # Nothing to worry about, either router or subnet is no more,
            # or subnet is already detached.
            LOG.debug(e.message)
            pass
        except os_exc.SDKException:
            LOG.exception("Error deleting subnet %(subnet)s from router "
                          "%(router)s.",
                          {'subnet': subnet_id, 'router': router_id})
            raise

    try:
        os_net.delete_network(net_id)
    except os_exc.ConflictException:
        LOG.exception("One or more ports in use on the network %s. "
                      "Deleting leftovers ports before retrying", net_id)

        leftover_ports = os_net.ports(status='DOWN', network_id=net_id)
        for leftover_port in leftover_ports:
            try:
                # NOTE(gryf): there is unlikely, that we get an exception
                # like PortNotFound or something, since openstacksdk
                # doesn't raise an exception if port doesn't exists nor
                # return any information.
                os_net.delete_port(leftover_port.id)
            except os_exc.SDKException as e:
                if "currently a subport for trunk" in str(e):
                    LOG.warning("Port %s is in DOWN status but still "
                                "associated to a trunk. This should not "
                                "happen. Trying to delete it from the "
                                "trunk.", leftover_port.id)
                    # Get the trunk_id from the error message
                    trunk_id = (
                        str(e).split('trunk')[1].split('.')[0].strip())
                    try:
                        os_net.delete_trunk_subports(
                            trunk_id, [{'port_id': leftover_port.id}])
                    except os_exc.NotFoundException:
                        LOG.debug("Port %s already removed from trunk %s",
                                  leftover_port['id'], trunk_id)
                else:
                    LOG.exception("Unexpected error deleting leftover "
                                  "port %s. Skiping it and continue with "
                                  "the other rest.", leftover_port.id)
                    raise

        raise exceptions.ResourceNotReady(net_id)
    except os_exc.SDKException:
        LOG.exception("Error deleting network %s.", net_id)
        raise
def get_service_subnet_version():
    """Return the IP version of the configured service subnet."""
    os_net = clients.get_network_client()
    subnet_id = CONF.neutron_defaults.service_subnet
    try:
        subnet = os_net.get_subnet(subnet_id)
    except os_exc.ResourceNotFound:
        LOG.exception("Service subnet %s not found", subnet_id)
        raise
    return subnet.ip_version
def _get_port_from_pool(self, pool_key, pod, subnets, security_groups):
    """Take a port from the pool for the pod, preferring an exact SG
    match.

    Falls back to a port from the least recently updated SG grouping
    (updating its SGs to the requested ones) when no port with the
    exact security groups is available. Raises ResourceNotReady when
    the pool is missing or empty. Also triggers an async repopulation
    when the pool drops below the configured minimum.
    """
    try:
        pool_ports = self._available_ports_pools[pool_key]
    except (KeyError, AttributeError):
        raise exceptions.ResourceNotReady(pod)
    try:
        port_id = pool_ports[security_groups].pop()
    except (KeyError, IndexError):
        # Get another port from the pool and update the SG to the
        # appropriate one. It uses a port from the group that was updated
        # longer ago
        pool_updates = self._last_update.get(pool_key, {})
        if not pool_updates:
            # No pools update info. Selecting a random one
            for sg_group, ports in list(pool_ports.items()):
                if len(ports) > 0:
                    port_id = pool_ports[sg_group].pop()
                    break
            else:
                raise exceptions.ResourceNotReady(pod)
        else:
            min_date = -1
            for sg_group, date in list(pool_updates.items()):
                if pool_ports.get(sg_group):
                    if min_date == -1 or date < min_date:
                        min_date = date
                        min_sg_group = sg_group
            if min_date == -1:
                # pool is empty, no port to reuse
                raise exceptions.ResourceNotReady(pod)
            port_id = pool_ports[min_sg_group].pop()
        os_net = clients.get_network_client()
        os_net.update_port(port_id,
                           security_groups=list(security_groups))
    if config.CONF.kubernetes.port_debug:
        os_net = clients.get_network_client()
        os_net.update_port(port_id, name=c_utils.get_port_name(pod),
                           device_id=pod['metadata']['uid'])
    # check if the pool needs to be populated
    if (self._get_pool_size(pool_key) <
            oslo_cfg.CONF.vif_pool.ports_pool_min):
        eventlet.spawn(self._populate_pool, pool_key, pod, subnets,
                       security_groups)
    return self._existing_vifs[port_id]
def __init__(self):
    """Set up the Flask application, the /metrics endpoint and the
    Neutron client used by the exporter."""
    self.application = flask.Flask('prometheus-exporter')
    self.ctx = None
    self.headers = {'Connection': 'close'}
    self.application.add_url_rule('/metrics', methods=['GET'],
                                  view_func=self.metrics)
    self._os_net = clients.get_network_client()
    self._project_id = config.CONF.neutron_defaults.project
    self._create_metrics()