def test_get_subnet(self, m_osv_subnet, m_osv_network):
    """get_subnet() fetches subnet+network from Neutron, converts both to
    os-vif objects, appends the subnet to the network and returns it."""
    neutron = self.useFixture(k_fix.MockNeutronClient()).client
    subnet_obj = mock.MagicMock()
    network_obj = mock.MagicMock()
    subnet_id = mock.sentinel.subnet_id
    network_id = mock.sentinel.network_id
    neutron_subnet = {'network_id': network_id}
    neutron_network = mock.sentinel.neutron_network

    neutron.show_subnet.return_value = {'subnet': neutron_subnet}
    neutron.show_network.return_value = {'network': neutron_network}
    m_osv_subnet.return_value = subnet_obj
    m_osv_network.return_value = network_obj

    result = utils.get_subnet(subnet_id)

    self.assertEqual(network_obj, result)
    neutron.show_subnet.assert_called_once_with(subnet_id)
    neutron.show_network.assert_called_once_with(network_id)
    m_osv_subnet.assert_called_once_with(neutron_subnet)
    m_osv_network.assert_called_once_with(neutron_network)
    network_obj.subnets.objects.append.assert_called_once_with(subnet_obj)
def _recover_precreated_ports(self):
    """Repopulate the ports pools from existing Kuryr-owned Neutron ports.

    In port-debug mode ports are found by their well-known name; otherwise
    all Kuryr-owned ports not currently in use by pods are recovered.
    """
    if config.CONF.kubernetes.port_debug:
        # Debug mode: recover by the distinctive Kuryr port name.
        available_ports = self._get_ports_by_attrs(
            name=constants.KURYR_PORT_NAME,
            device_owner=[kl_const.DEVICE_OWNER])
    else:
        kuryr_ports = self._get_ports_by_attrs(
            device_owner=kl_const.DEVICE_OWNER)
        in_use_ports = self._get_in_use_ports()
        available_ports = [port for port in kuryr_ports
                           if port['id'] not in in_use_ports]

    for port in available_ports:
        subnet_id = port['fixed_ips'][0]['subnet_id']
        subnet = {subnet_id: utils.get_subnet(subnet_id)}
        vif_plugin = self._drv_vif._get_vif_plugin(port)
        vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnet)

        net_obj = subnet[subnet_id]
        pool_key = self._get_pool_key(port['binding:host_id'],
                                      port['project_id'],
                                      port['security_groups'],
                                      net_obj.id, None)
        self._existing_vifs[port['id']] = vif
        self._available_ports_pools.setdefault(pool_key, []).append(
            port['id'])

    LOG.info("PORTS POOL: pools updated with pre-created ports")
    self._create_healthcheck_file()
def test_get_subnet(self, m_osv_subnet, m_osv_network):
    """get_subnet() fetches subnet+network via the SDK client, converts both
    to os-vif objects, appends the subnet to the network and returns it."""
    os_net = self.useFixture(k_fix.MockNetworkClient()).client
    subnet_obj = mock.MagicMock()
    network_obj = mock.MagicMock()
    subnet_id = mock.sentinel.subnet_id
    network_id = mock.sentinel.network_id
    neutron_subnet = munch.Munch({'network_id': network_id})
    neutron_network = mock.sentinel.neutron_network

    os_net.get_subnet.return_value = neutron_subnet
    os_net.get_network.return_value = neutron_network
    m_osv_subnet.return_value = subnet_obj
    m_osv_network.return_value = network_obj

    result = utils.get_subnet(subnet_id)

    self.assertEqual(network_obj, result)
    os_net.get_subnet.assert_called_once_with(subnet_id)
    os_net.get_network.assert_called_once_with(network_id)
    m_osv_subnet.assert_called_once_with(neutron_subnet)
    m_osv_network.assert_called_once_with(neutron_network)
    network_obj.subnets.objects.append.assert_called_once_with(subnet_obj)
def _get_trunks_info(self):
    """Returns information about trunks and their subports.

    This method searches for parent ports and subports among the active
    neutron ports. To find the parent ports it filters the ones with
    trunk_details, i.e., the ones that are the parent port of a trunk.
    To find the subports to recover, it filters out the ports that are
    already in use by running kubernetes pods, as well as the ports whose
    device_owner is not related to subports (e.g. active ports allocated
    to running VMs).

    At the same time it collects information about ports subnets to
    minimize the number of interactions with the Neutron API.

    :return: 3 dicts with the trunk details (Key: trunk_id; Value: dict
    containing ip and subports), subport details (Key: port_id; Value:
    port_object), and subnet details (Key: subnet_id; Value: subnet dict)
    """
    # REVISIT(ltomasbo): there is no need to recover the subports
    # belonging to trunk ports whose parent port is DOWN as that means no
    # pods can be scheduled there. We may need to update this if we allow
    # lively extending the kubernetes cluster with VMs that already have
    # precreated subports. For instance by shutting down and up a
    # kubernetes Worker VM with subports already attached, and the
    # controller is restarted in between.
    parent_ports = {}
    subports = {}
    subnets = {}

    all_active_ports = self._get_ports_by_attrs(status='ACTIVE')
    in_use_ports = self._get_in_use_ports()

    for port in all_active_ports:
        trunk_details = port.get('trunk_details')
        if trunk_details:
            # This port is the parent of a trunk.
            parent_ports[trunk_details['trunk_id']] = {
                'ip': port['fixed_ips'][0]['ip_address'],
                'subports': trunk_details['sub_ports'],
            }
        elif (port['id'] not in in_use_ports and
              port['device_owner'] in ['trunk:subport',
                                       kl_const.DEVICE_OWNER]):
            # A recoverable subport that no running pod is using.
            subports[port['id']] = port
            # NOTE(ltomasbo): _get_subnet can be costly as it needs to
            # call neutron to get network and subnet information. This
            # ensures it is only called once per subnet in use.
            subnet_id = port['fixed_ips'][0]['subnet_id']
            if not subnets.get(subnet_id):
                subnets[subnet_id] = {
                    subnet_id: utils.get_subnet(subnet_id)}

    return parent_ports, subports, subnets
def get_subnets(self, service, project_id):
    """Return {subnet_id: subnet} for the configured service subnet.

    :raises cfg.RequiredOptError: when ``service_subnet`` is not set.
    """
    subnet_id = config.CONF.neutron_defaults.service_subnet
    if subnet_id:
        return {subnet_id: utils.get_subnet(subnet_id)}
    # NOTE(ivc): this option is only required for
    # DefaultServiceSubnetDriver and its subclasses, but it may be
    # optional for other drivers (e.g. when each namespace has own
    # subnet)
    raise cfg.RequiredOptError('service_subnet',
                               cfg.OptGroup('neutron_defaults'))
def request_additional_vifs(self, pod, project_id, security_groups):
    """Request one VIF per additional network annotated on the pod.

    Each network annotation is resolved to its NetworkAttachmentDefinition
    CRD, whose kuryr-config determines the VIF driver and subnet to use.
    Falls back to the configured defaults when the CRD omits them.
    """
    networks = self._get_networks(pod)
    if not networks:
        return []

    kubernetes = clients.get_kubernetes_client()
    namespace = pod['metadata']['namespace']
    vifs = []
    for network in networks:
        if 'name' not in network:
            raise exceptions.InvalidKuryrNetworkAnnotation()
        if 'namespace' in network:
            namespace = network['namespace']

        url = '%s/namespaces/%s/network-attachment-definitions/%s' % (
            constants.K8S_API_NPWG_CRD, namespace, network['name'])
        try:
            nad_obj = kubernetes.get(url)
        except exceptions.K8sClientException:
            LOG.exception("Kubernetes Client Exception")
            raise

        nad_conf = jsonutils.loads(
            nad_obj['metadata']['annotations']['openstack.org/kuryr-config'])
        subnet_id = nad_conf.get(constants.K8S_ANNOTATION_NPWG_CRD_SUBNET_ID)
        neutron_defaults = kuryr_config.CONF.neutron_defaults

        if constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE in nad_conf:
            alias = nad_conf[constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE]
            vif_drv = base.PodVIFDriver.get_instance(specific_driver=alias)
            if not subnet_id:
                # Prefer the per-driver subnet mapping; fall back to the
                # default pod subnet when the alias is not mapped.
                try:
                    subnet_id = neutron_defaults.subnet_mapping[alias]
                except KeyError:
                    subnet_id = neutron_defaults.pod_subnet
                    LOG.debug(
                        "Default subnet mapping in config file "
                        "doesn't contain any subnet for %s driver "
                        "alias. Default pod_subnet was used.", alias)
        else:
            vif_drv = self._drv_vif_pool
            if not subnet_id:
                subnet_id = neutron_defaults.pod_subnet

        subnet = {subnet_id: utils.get_subnet(subnet_id)}
        vif = vif_drv.request_vif(pod, project_id, subnet, security_groups)
        if vif:
            vifs.append(vif)
    return vifs
def _recover_precreated_ports(self):
    """Repopulate the ports pools (keyed by security groups) from existing
    Kuryr-owned Neutron ports, skipping trunk subports and deleting ports
    that cannot be recovered for lack of information."""
    attrs = {'device_owner': kl_const.DEVICE_OWNER}
    tags = config.CONF.neutron_defaults.resource_tags
    if tags:
        attrs['tags'] = tags

    if config.CONF.kubernetes.port_debug:
        # Debug mode: recover by the distinctive Kuryr port name.
        attrs['name'] = constants.KURYR_PORT_NAME
        available_ports = c_utils.get_ports_by_attrs(**attrs)
    else:
        kuryr_ports = c_utils.get_ports_by_attrs(**attrs)
        in_use_ports = self._get_in_use_ports()
        available_ports = [port for port in kuryr_ports
                           if port['id'] not in in_use_ports]

    _, available_subports, _ = self._get_trunks_info()
    for port in available_ports:
        # NOTE(ltomasbo): ensure subports are not considered for
        # recovering in the case of multi pools
        if available_subports.get(port['id']):
            continue

        vif_plugin = self._drv_vif._get_vif_plugin(port)
        port_host = port['binding:host_id']
        if not vif_plugin or not port_host:
            # NOTE(ltomasbo): kuryr-controller is running without the
            # rights to get the needed information to recover the ports.
            # Thus, removing the port instead
            neutron = clients.get_neutron_client()
            neutron.delete_port(port['id'])
            continue

        subnet_id = port['fixed_ips'][0]['subnet_id']
        subnet = {subnet_id: utils.get_subnet(subnet_id)}
        vif = ovu.neutron_to_osvif_vif(vif_plugin, port, subnet)

        net_obj = subnet[subnet_id]
        pool_key = self._get_pool_key(port_host, port['project_id'],
                                      net_obj.id, None)
        self._existing_vifs[port['id']] = vif
        sg_key = tuple(sorted(port['security_groups']))
        self._available_ports_pools.setdefault(pool_key, {}).setdefault(
            sg_key, []).append(port['id'])

    LOG.info("PORTS POOL: pools updated with pre-created ports")
    self._create_healthcheck_file()
def request_additional_vifs(self, pod, project_id, security_groups):
    """Request one VIF per additional network annotated on the pod.

    Each network annotation is resolved to its NetworkAttachmentDefinition
    CRD; its kuryr-config must carry the subnet id (a missing subnet id
    raises KeyError) and may select a specific VIF driver alias.
    """
    networks = self._get_networks(pod)
    if not networks:
        return []

    kubernetes = clients.get_kubernetes_client()
    namespace = pod['metadata']['namespace']
    vifs = []
    for network in networks:
        if 'name' not in network:
            raise exceptions.InvalidKuryrNetworkAnnotation()
        if 'namespace' in network:
            namespace = network['namespace']

        url = '%s/namespaces/%s/network-attachment-definitions/%s' % (
            constants.K8S_API_NPWG_CRD, namespace, network['name'])
        try:
            nad_obj = kubernetes.get(url)
        except exceptions.K8sClientException:
            LOG.exception("Kubernetes Client Exception")
            raise

        nad_conf = jsonutils.loads(
            nad_obj['metadata']['annotations']['openstack.org/kuryr-config'])
        subnet_id = nad_conf[constants.K8S_ANNOTATION_NPWG_CRD_SUBNET_ID]
        subnet = {subnet_id: utils.get_subnet(subnet_id)}

        if constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE in nad_conf:
            alias = nad_conf[constants.K8S_ANNOTATION_NPWG_CRD_DRIVER_TYPE]
            vif_drv = base.PodVIFDriver.get_instance(specific_driver=alias)
        else:
            vif_drv = self._drv_vif_pool

        vif = vif_drv.request_vif(pod, project_id, subnet, security_groups)
        if vif:
            vifs.append(vif)
    return vifs
def get_namespace_subnet(self, namespace, subnet_id=None):
    """Return {subnet_id: subnet} for the namespace.

    When ``subnet_id`` is not given, it is resolved from the namespace.
    """
    resolved_id = subnet_id or self._get_namespace_subnet_id(namespace)
    return {resolved_id: utils.get_subnet(resolved_id)}
def get_subnets(self, pod, project_id):
    """Return {subnet_id: subnet} for the pod's namespace subnet."""
    namespace = pod['metadata']['namespace']
    subnet_id = self._get_namespace_subnet(namespace)
    return {subnet_id: utils.get_subnet(subnet_id)}