def test_run_image_cache_manager_pass(self, mock_instance_list):

    def fake_instances(ctxt):
        instances = []
        for x in range(2):
            instances.append(
                fake_instance.fake_db_instance(
                    image_ref=uuids.fake_image_ref,
                    uuid=getattr(uuids, 'instance_%s' % x),
                    name='instance-%s' % x,
                    vm_state='',
                    task_state=''))
        return objects.instance._make_instance_list(
            ctxt, objects.InstanceList(), instances, None)

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        ctxt = context.get_admin_context()
        mock_instance_list.return_value = fake_instances(ctxt)

        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        compute._run_image_cache_manager_pass(ctxt)

        filters = {
            'host': ['fake-mini'],
            'deleted': False,
            'soft_deleted': True,
        }
        mock_instance_list.assert_called_once_with(ctxt, filters,
                                                   expected_attrs=[],
                                                   use_slave=True)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True,
             *args, **kwargs):
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    self.servicegroup_api = servicegroup.API()
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    self.conductor_api = conductor.API(use_local=db_allowed)
    self.conductor_api.wait_until_ready(context.get_admin_context())
def __init__(self):
    super(API, self).__init__()
    self.last_neutron_extension_sync = None
    self.extensions = {}
    self.conductor_api = conductor.API()
    self.security_group_api = (
        openstack_driver.get_openstack_security_group_driver())
def __init__(self, *args, **kwargs):
    test = kwargs.get('test')
    if not CONF.memcached_servers and not test:
        raise RuntimeError(_('memcached_servers not defined'))
    self.mc = memorycache.get_client()
    self.db_allowed = kwargs.get('db_allowed', True)
    self.conductor_api = conductor.API(use_local=self.db_allowed)
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, *args, **kwargs):
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    self.servicegroup_api = servicegroup.API()
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.rpcserver = None
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    if objects_base.NovaObject.indirection_api:
        conductor_api = conductor.API()
        conductor_api.wait_until_ready(context.get_admin_context())
    setup_profiler(binary, self.host)
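# For context: a hedged sketch (not the verbatim service code) of how the
# indirection_api checked above is typically installed in this era of Nova.
# The import path follows contemporary nova.cmd entry points and is an
# assumption here. With a remote conductor, remotable NovaObject methods
# are routed through conductor RPC instead of direct DB access.
from nova.conductor import rpcapi as conductor_rpcapi
from nova.objects import base as objects_base

# Any method decorated as remotable now goes via conductor RPC rather
# than hitting the database from the compute host.
objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()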
def test_compute_manager(self):
    was = {'called': False}

    def fake_get_all_by_filters(context, *args, **kwargs):
        was['called'] = True
        instances = []
        for x in xrange(2):
            instances.append(
                fake_instance.fake_db_instance(image_ref='1',
                                               uuid=x,
                                               name=x,
                                               vm_state='',
                                               task_state=''))
        return instances

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.stubs.Set(db, 'instance_get_all_by_filters',
                       fake_get_all_by_filters)

        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        compute._run_image_cache_manager_pass(None)
        self.assertTrue(was['called'])
def test_compute_manager(self):
    was = {'called': False}

    def fake_get_all(context, *args, **kwargs):
        was['called'] = True
        return [{'image_ref': '1',
                 'host': CONF.host,
                 'name': 'instance-1',
                 'uuid': '123',
                 'vm_state': '',
                 'task_state': ''},
                {'image_ref': '1',
                 'host': CONF.host,
                 'name': 'instance-2',
                 'uuid': '456',
                 'vm_state': '',
                 'task_state': ''}]

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.stubs.Set(db, 'instance_get_all', fake_get_all)

        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        compute._run_image_cache_manager_pass(None)
        self.assertTrue(was['called'])
def test_run_image_cache_manager_pass(self):
    was = {'called': False}

    def fake_get_all_by_filters(context, *args, **kwargs):
        was['called'] = True
        instances = []
        for x in range(2):
            instances.append(
                fake_instance.fake_db_instance(
                    image_ref=uuids.fake_image_ref,
                    uuid=getattr(uuids, 'instance_%s' % x),
                    name='instance-%s' % x,
                    vm_state='',
                    task_state=''))
        return instances

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        self.stub_out('nova.db.instance_get_all_by_filters',
                      fake_get_all_by_filters)

        compute = importutils.import_object(CONF.compute_manager)
        self.flags(use_local=True, group='conductor')
        compute.conductor_api = conductor.API()
        ctxt = context.get_admin_context()
        compute._run_image_cache_manager_pass(ctxt)
        self.assertTrue(was['called'])
def __init__(self, host, binary, topic, manager, report_interval=None,
             periodic_enable=None, periodic_fuzzy_delay=None,
             periodic_interval_max=None, db_allowed=True,
             *args, **kwargs):
    super(Service, self).__init__()
    self.host = host
    self.binary = binary
    self.topic = topic
    self.manager_class_name = manager
    # NOTE(russellb) We want to make sure to create the servicegroup API
    # instance early, before creating other things such as the manager,
    # that will also create a servicegroup API instance. Internally, the
    # servicegroup only allocates a single instance of the driver API and
    # we want to make sure that our value of db_allowed is there when it
    # gets created. For that to happen, this has to be the first instance
    # of the servicegroup API.
    self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
    manager_class = importutils.import_class(self.manager_class_name)
    self.manager = manager_class(host=self.host, *args, **kwargs)
    self.report_interval = report_interval
    self.periodic_enable = periodic_enable
    self.periodic_fuzzy_delay = periodic_fuzzy_delay
    self.periodic_interval_max = periodic_interval_max
    self.saved_args, self.saved_kwargs = args, kwargs
    self.backdoor_port = None
    self.conductor_api = conductor.API(use_local=db_allowed)
    self.conductor_api.wait_until_ready(context.get_admin_context())
def __init__(self, scheduler_driver=None, *args, **kwargs):
    super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
                                             *args, **kwargs)
    self.mc = memorycache.get_client()
    self.conductor_api = conductor.API()
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.cells_rpcapi = cells_rpcapi.CellsAPI()
def __init__(self, host, driver, nodename):
    self.host = host
    self.driver = driver
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
def __init__(self):
    self.host = CONF.wiki_host
    self.site = None
    self.kclient = {}
    self.tenant_manager = {}
    self.user_manager = {}
    self._wiki_logged_in = False
    self._image_service = image.glance.get_default_image_service()
    self.conductor_api = conductor.API()
def _add_instance_fault(self, error, exc_info):
    LOG.warning(_("Ignoring error while configuring instance with "
                  "agent: %s") % error,
                instance=self.instance, exc_info=True)
    try:
        ctxt = context.get_admin_context()
        capi = conductor.API()
        compute_utils.add_instance_fault_from_exc(
            ctxt, capi, self.instance, error, exc_info=exc_info)
    except Exception:
        LOG.debug(_("Error setting instance fault."), exc_info=True)
def _test_bandwidth(claim):
    host = claim.tracker.host
    if not claim.tracker.pci_tracker:
        LOG.debug("pci_tracker is null.")
        return
    pci_pools = claim.tracker.pci_tracker.pci_stats.pools

    request_bandwidth = {}
    pre_request_metadata = claim.instance.get('metadata')
    if isinstance(pre_request_metadata, list):
        request_metadata = {}
        for metadata in pre_request_metadata:
            request_metadata[metadata['key']] = metadata['value']
    else:
        request_metadata = pre_request_metadata or {}
    for phy_net, __, bandwidth in _get_nw_info_from_metadata(
            claim, request_metadata):
        if phy_net in request_bandwidth:
            request_bandwidth[phy_net] += int(bandwidth)
        else:
            request_bandwidth[phy_net] = int(bandwidth)
    if len(request_bandwidth) == 0:
        return

    total_bandwidth = {}
    for pool in pci_pools:
        total_bandwidth[pool['physical_network']] = int(pool['bandwidths'])

    used_bandwidth = {}
    instance_list = conductor.API().instance_get_all_by_host(
        claim.context, host)
    for instance in instance_list:
        if 'deleting' == instance.get('task_state'):
            continue
        if claim.instance.get('uuid') == instance.get('uuid'):
            continue
        metadata_dict = {}
        for metadata in instance.get('metadata', []):
            metadata_dict[metadata['key']] = metadata['value']
        for phy_net, __, bandwidth in _get_nw_info_from_metadata(
                claim, metadata_dict):
            if phy_net in used_bandwidth:
                used_bandwidth[phy_net] += int(bandwidth)
            else:
                used_bandwidth[phy_net] = int(bandwidth)

    for phy_net, bandwidth in request_bandwidth.iteritems():
        if phy_net not in total_bandwidth:
            raise exception.ComputeResourcesUnavailable(
                reason="Not Enough Bandwidth")
        free = total_bandwidth[phy_net] - used_bandwidth.get(phy_net, 0)
        if bandwidth > free:
            raise exception.ComputeResourcesUnavailable(
                reason="Not Enough Bandwidth")
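# Toy illustration of _test_bandwidth's final availability check; all
# numbers are invented and no Nova imports are involved.
total_bandwidth = {'physnet1': 10000}    # capacity derived from PCI pools
used_bandwidth = {'physnet1': 7000}      # summed from other instances on host
request_bandwidth = {'physnet1': 4000}   # what this claim asks for

for phy_net, bandwidth in request_bandwidth.items():
    free = total_bandwidth[phy_net] - used_bandwidth.get(phy_net, 0)
    # 4000 > 10000 - 7000, so this request would be rejected with
    # ComputeResourcesUnavailable("Not Enough Bandwidth").
    print(phy_net, 'rejected' if bandwidth > free else 'granted')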
def __init__(self, image_api=None, network_api=None, volume_api=None,
             security_group_api=None, **kwargs):
    super(HuaweiAPI, self).__init__(image_api, network_api, volume_api,
                                    security_group_api, **kwargs)
    self.conductor_api = conductor.API()
    self.compute_rpcapi = hw_rpcapi.HuaweiComputeAPI()
    self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
def __init__(self, host, driver, nodename):
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
    monitor_handler = monitors.ResourceMonitorHandler()
    self.monitors = monitor_handler.choose_monitors(self)
def __init__(self, *args, **kwargs): """Creates an instance of the DB-based servicegroup driver. Valid kwargs are: db_allowed - Boolean. False if direct db access is not allowed and alternative data access (conductor) should be used instead. """ self.db_allowed = kwargs.get('db_allowed', True) self.conductor_api = conductor.API(use_local=self.db_allowed) self.service_down_time = CONF.service_down_time
def __init__(self, host, driver, nodename):
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
    monitor_handler = monitors.ResourceMonitorHandler()
    self.monitors = monitor_handler.choose_monitors(self)
    self.notifier = rpc.get_notifier()
    # Patched by Arie to disable resource tracker
    self.arieMode = True
    self.isDisabled = False
def __init__(self, host, driver, nodename):
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    self.conductor_api = conductor.API()
    monitor_handler = monitors.ResourceMonitorHandler()
    self.monitors = monitor_handler.choose_monitors(self)
    self.ext_resources_handler = \
        ext_resources.ResourceHandler(CONF.compute_resources)
    self.notifier = rpc.get_notifier()
    self.old_resources = {}
    self.scheduler_client = scheduler_client.SchedulerClient()
def __init__(self, host, driver, nodename):
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    self.nodename = nodename
    self.compute_node = None
    self.stats = importutils.import_object(CONF.compute_stats_class)
    self.tracked_instances = {}
    self.tracked_migrations = {}
    monitor_handler = monitors.MonitorHandler(self)
    self.monitors = monitor_handler.monitors
    self.ext_resources_handler = \
        ext_resources.ResourceHandler(CONF.compute_resources)
    self.old_resources = objects.ComputeNode()
    self.scheduler_client = scheduler_client.SchedulerClient()
    self.ram_allocation_ratio = CONF.ram_allocation_ratio
    self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
    # PF9 Change: begin
    self.conductor_api = conductor.API()
    self.resources_pf9 = self.get_resources_pf9()
def handle_password(req, meta_data):
    ctxt = context.get_admin_context()
    if req.method == 'GET':
        return meta_data.password
    elif req.method == 'POST':
        # NOTE(vish): The conflict will only happen once the metadata cache
        #             updates, but it isn't a huge issue if it can be set
        #             for a short window.
        if meta_data.password:
            raise exc.HTTPConflict()
        if (req.content_length > MAX_SIZE or len(req.body) > MAX_SIZE):
            msg = _("Request is too large.")
            raise exc.HTTPBadRequest(explanation=msg)
        conductor_api = conductor.API()
        instance = conductor_api.instance_get_by_uuid(ctxt, meta_data.uuid)
        sys_meta = utils.metadata_to_dict(instance['system_metadata'])
        sys_meta.update(convert_password(ctxt, req.body))
        conductor_api.instance_update(ctxt, meta_data.uuid,
                                      system_metadata=sys_meta)
    else:
        raise exc.HTTPBadRequest()
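# Guest-side sketch of reaching handle_password() through the metadata
# service. Assumptions: the `requests` library is available in the guest,
# and the password endpoint lives at the usual OpenStack metadata path
# (adjust the version segment for your deployment).
import requests

URL = 'http://169.254.169.254/openstack/latest/password'
encrypted_password = b'<rsa-encrypted blob>'  # placeholder payload

# The first POST stores the value; once the metadata cache sees it, a
# second POST is rejected with 409 Conflict, as in the handler above.
requests.post(URL, data=encrypted_password).raise_for_status()

# GET returns whatever was stored.
print(requests.get(URL).text)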
def __init__(self):
    self._cache = memorycache.get_client()
    self.conductor_api = conductor.API()
def __init__(self, *args, **kwargs):
    self.db_allowed = kwargs.get('db_allowed', True)
    self.conductor_api = conductor.API(use_local=self.db_allowed)
    self.service_down_time = CONF.service_down_time
def test_import_conductor_rpc(self):
    self.flags(use_local=False, group='conductor')
    self.assertTrue(isinstance(conductor.API(), conductor_api.API))
def test_import_conductor_override_to_local(self):
    self.flags(use_local=False, group='conductor')
    self.assertTrue(
        isinstance(conductor.API(use_local=True), conductor_api.LocalAPI))
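# These two tests pin down the factory contract. A hedged sketch of a
# conductor.API() entry point consistent with them (an illustration, not
# the verbatim nova/conductor/__init__.py): the use_local kwarg or the
# [conductor] use_local flag selects the in-process LocalAPI; otherwise
# the RPC-backed API is returned.
from oslo_config import cfg

from nova.conductor import api as conductor_api


def API(*args, **kwargs):
    use_local = kwargs.pop('use_local', False)
    if cfg.CONF.conductor.use_local or use_local:
        api_cls = conductor_api.LocalAPI
    else:
        api_cls = conductor_api.API
    return api_cls(*args, **kwargs)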
def conductor(self):
    if not hasattr(self, '_conductor'):
        from nova import conductor
        self._conductor = conductor.API()
    return self._conductor
def test_api_not_called(self, mock_wait):
    self.useFixture(fixtures.NoopConductorFixture())
    conductor.API().wait_until_ready()
    self.assertFalse(mock_wait.called)
def __init__(self):
    self.conductor_api = conductor.API()
def __init__(self, session, virtapi):
    self._session = session
    self._virtapi = virtapi
    self._conductor_api = conductor.API()
class API(base.Base):
    """API for interacting with the neutron 2.x API."""

    conductor_api = conductor.API()
    security_group_api = (
        openstack_driver.get_openstack_security_group_driver())

    def __init__(self):
        super(API, self).__init__()
        self.last_neutron_extension_sync = None
        self.extensions = {}

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures."""

    def _get_available_networks(self, context, project_id, net_ids=None):
        """Return a network list available for the tenant.

        The list contains networks owned by the tenant and public networks.
        If net_ids is specified, it searches networks with requested IDs only.
        """
        neutron = neutronv2.get_client(context)

        # If the user has specified to attach the instance only to specific
        # networks, add them to **search_opts.
        # (1) Retrieve non-public network list owned by the tenant.
        search_opts = {"tenant_id": project_id, 'shared': False}
        if net_ids:
            search_opts['id'] = net_ids
        nets = neutron.list_networks(**search_opts).get('networks', [])
        # (2) Retrieve public network list.
        search_opts = {'shared': True}
        if net_ids:
            search_opts['id'] = net_ids
        nets += neutron.list_networks(**search_opts).get('networks', [])

        _ensure_requested_network_ordering(
            lambda x: x['id'],
            nets,
            net_ids)

        return nets

    @refresh_cache
    def allocate_for_instance(self, context, instance, **kwargs):
        """Allocate network resources for the instance.

        :param requested_networks: optional value containing
            network_id, fixed_ip, and port_id
        :param security_groups: security groups to allocate for instance
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
            NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
            to arbitrary networks, which requires openflow switches to
            function correctly if more than one network is being used with
            the bare metal hypervisor (which is the only one known to limit
            MAC addresses).
        """
        hypervisor_macs = kwargs.get('macs', None)
        available_macs = None
        if hypervisor_macs is not None:
            # Make a copy we can mutate: records macs that have not been used
            # to create a port on a network. If we find a mac with a
            # pre-allocated port we also remove it from this set.
            available_macs = set(hypervisor_macs)
        neutron = neutronv2.get_client(context)
        LOG.debug(_('allocate_for_instance() for %s'),
                  instance['display_name'])
        if not instance['project_id']:
            msg = _('empty project id for instance %s')
            raise exception.InvalidInput(
                reason=msg % instance['display_name'])
        requested_networks = kwargs.get('requested_networks')
        ports = {}
        fixed_ips = {}
        net_ids = []
        if requested_networks:
            for network_id, fixed_ip, port_id in requested_networks:
                if port_id:
                    port = neutron.show_port(port_id)['port']
                    if hypervisor_macs is not None:
                        if port['mac_address'] not in hypervisor_macs:
                            raise exception.PortNotUsable(
                                port_id=port_id,
                                instance=instance['display_name'])
                        else:
                            # Don't try to use this MAC if we need to create
                            # a port on the fly later. Identical MACs may be
                            # configured by users into multiple ports so we
                            # discard rather than popping.
                            available_macs.discard(port['mac_address'])
                    network_id = port['network_id']
                    ports[network_id] = port
                elif fixed_ip and network_id:
                    fixed_ips[network_id] = fixed_ip
                if network_id:
                    net_ids.append(network_id)

        nets = self._get_available_networks(context, instance['project_id'],
                                            net_ids)

        if not nets:
            LOG.warn(_("No network configured!"), instance=instance)
            return []

        security_groups = kwargs.get('security_groups', [])
        security_group_ids = []

        # TODO(arosen) Should optimize more to do direct query for security
        # group if len(security_groups) == 1
        if len(security_groups):
            search_opts = {'tenant_id': instance['project_id']}
            user_security_groups = neutron.list_security_groups(
                **search_opts).get('security_groups')

        for security_group in security_groups:
            name_match = None
            uuid_match = None
            for user_security_group in user_security_groups:
                if user_security_group['name'] == security_group:
                    if name_match:
                        msg = (_("Multiple security groups found matching"
                                 " '%s'. Use an ID to be more specific.")
                               % security_group)
                        raise exception.NoUniqueMatch(msg)
                    name_match = user_security_group['id']
                if user_security_group['id'] == security_group:
                    uuid_match = user_security_group['id']

            # If a user names the security group the same as
            # another's security group uuid, the name takes priority.
            if not name_match and not uuid_match:
                raise exception.SecurityGroupNotFound(
                    security_group_id=security_group)
            elif name_match:
                security_group_ids.append(name_match)
            elif uuid_match:
                security_group_ids.append(uuid_match)

        touched_port_ids = []
        created_port_ids = []
        for network in nets:
            # If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
            # implement the port-security extension which requires
            # 'port_security_enabled' to be True for security groups.
            # That is why True is returned if 'port_security_enabled'
            # is not found.
            if (security_groups and not (
                    network['subnets']
                    and network.get('port_security_enabled', True))):
                raise exception.SecurityGroupCannotBeApplied()
            network_id = network['id']
            zone = 'compute:%s' % instance['availability_zone']
            port_req_body = {'port': {'device_id': instance['uuid'],
                                      'device_owner': zone}}
            try:
                port = ports.get(network_id)
                self._populate_neutron_extension_values(instance,
                                                        port_req_body)
                # Requires admin creds to set port bindings
                port_client = (neutron if not
                               self._has_port_binding_extension() else
                               neutronv2.get_client(context, admin=True))
                if port:
                    port_client.update_port(port['id'], port_req_body)
                    touched_port_ids.append(port['id'])
                else:
                    fixed_ip = fixed_ips.get(network_id)
                    if fixed_ip:
                        port_req_body['port']['fixed_ips'] = [
                            {'ip_address': fixed_ip}]
                    port_req_body['port']['network_id'] = network_id
                    port_req_body['port']['admin_state_up'] = True
                    port_req_body['port']['tenant_id'] = \
                        instance['project_id']
                    if security_group_ids:
                        port_req_body['port']['security_groups'] = (
                            security_group_ids)
                    if available_macs is not None:
                        if not available_macs:
                            raise exception.PortNotFree(
                                instance=instance['display_name'])
                        mac_address = available_macs.pop()
                        port_req_body['port']['mac_address'] = mac_address
                    created_port_ids.append(
                        port_client.create_port(port_req_body)['port']['id'])
            except Exception:
                with excutils.save_and_reraise_exception():
                    for port_id in touched_port_ids:
                        try:
                            port_req_body = {'port': {'device_id': None}}
                            # Requires admin creds to set port bindings
                            if self._has_port_binding_extension():
                                port_req_body['port']['binding:host_id'] = \
                                    None
                                port_client = neutronv2.get_client(
                                    context, admin=True)
                            else:
                                port_client = neutron
                            port_client.update_port(port_id, port_req_body)
                        except Exception:
                            msg = _("Failed to update port %s")
                            LOG.exception(msg, port_id)

                    for port_id in created_port_ids:
                        try:
                            neutron.delete_port(port_id)
                        except Exception:
                            msg = _("Failed to delete port %s")
                            LOG.exception(msg, port_id)

        nw_info = self._get_instance_nw_info(context, instance,
                                             networks=nets)
        # NOTE(danms): Only return info about ports we created in this run.
        # In the initial allocation case, this will be everything we created,
        # and in later runs will only be what was created that time. Thus,
        # this only affects the attach case, not the original use for this
        # method.
        return network_model.NetworkInfo([port for port in nw_info
                                          if port['id'] in created_port_ids +
                                          touched_port_ids])

    def _refresh_neutron_extensions_cache(self):
        """Refresh the neutron extensions cache when necessary."""
        if (not self.last_neutron_extension_sync or
            ((time.time() - self.last_neutron_extension_sync)
             >= CONF.neutron_extension_sync_interval)):
            neutron = neutronv2.get_client(context.get_admin_context())
            extensions_list = neutron.list_extensions()['extensions']
            self.last_neutron_extension_sync = time.time()
            self.extensions.clear()
            self.extensions = dict((ext['name'], ext)
                                   for ext in extensions_list)

    def _has_port_binding_extension(self, refresh_cache=False):
        if refresh_cache:
            self._refresh_neutron_extensions_cache()
        return constants.PORTBINDING_EXT in self.extensions

    def _populate_neutron_extension_values(self, instance, port_req_body):
        """Populate neutron extension values for the instance.

        If the extension contains nvp-qos then get the rxtx_factor.
""" self._refresh_neutron_extensions_cache() if 'nvp-qos' in self.extensions: instance_type = flavors.extract_flavor(instance) rxtx_factor = instance_type.get('rxtx_factor') port_req_body['port']['rxtx_factor'] = rxtx_factor if self._has_port_binding_extension(): port_req_body['port']['binding:host_id'] = instance.get('host') def deallocate_for_instance(self, context, instance, **kwargs): """Deallocate all network resources related to the instance.""" LOG.debug(_('deallocate_for_instance() for %s'), instance['display_name']) search_opts = {'device_id': instance['uuid']} data = neutronv2.get_client(context).list_ports(**search_opts) ports = [port['id'] for port in data.get('ports', [])] requested_networks = kwargs.get('requested_networks') or {} ports_to_skip = [port_id for nets, fips, port_id in requested_networks] ports = set(ports) - set(ports_to_skip) for port in ports: try: neutronv2.get_client(context).delete_port(port) except Exception: LOG.exception(_("Failed to delete neutron port %(portid)s") % {'portid': port}) @refresh_cache def allocate_port_for_instance(self, context, instance, port_id, network_id=None, requested_ip=None, conductor_api=None): """Allocate a port for the instance.""" return self.allocate_for_instance(context, instance, requested_networks=[(network_id, requested_ip, port_id)], conductor_api=conductor_api) @refresh_cache def deallocate_port_for_instance(self, context, instance, port_id, conductor_api=None): """Remove a specified port from the instance. Return network information for the instance """ try: neutronv2.get_client(context).delete_port(port_id) except Exception as ex: LOG.exception(_("Failed to delete neutron port %s") % port_id) return self._get_instance_nw_info(context, instance) def list_ports(self, context, **search_opts): """List ports for the client based on search options.""" return neutronv2.get_client(context).list_ports(**search_opts) def show_port(self, context, port_id): """Return the port for the client given the port id.""" return neutronv2.get_client(context).show_port(port_id) def get_instance_nw_info(self, context, instance, networks=None): """Return network information for specified instance and update cache. 
""" result = self._get_instance_nw_info(context, instance, networks) update_instance_info_cache(self, context, instance, result, update_cells=False) return result def _get_instance_nw_info(self, context, instance, networks=None): LOG.debug(_('get_instance_nw_info() for %s'), instance['display_name']) nw_info = self._build_network_info_model(context, instance, networks) return network_model.NetworkInfo.hydrate(nw_info) @refresh_cache def add_fixed_ip_to_instance(self, context, instance, network_id, conductor_api=None): """Add a fixed ip to the instance from specified network.""" search_opts = {'network_id': network_id} data = neutronv2.get_client(context).list_subnets(**search_opts) ipam_subnets = data.get('subnets', []) if not ipam_subnets: raise exception.NetworkNotFoundForInstance( instance_id=instance['uuid']) zone = 'compute:%s' % instance['availability_zone'] search_opts = {'device_id': instance['uuid'], 'device_owner': zone, 'network_id': network_id} data = neutronv2.get_client(context).list_ports(**search_opts) ports = data['ports'] for p in ports: for subnet in ipam_subnets: fixed_ips = p['fixed_ips'] fixed_ips.append({'subnet_id': subnet['id']}) port_req_body = {'port': {'fixed_ips': fixed_ips}} try: neutronv2.get_client(context).update_port(p['id'], port_req_body) return except Exception as ex: msg = _("Unable to update port %(portid)s on subnet " "%(subnet_id)s with failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'subnet_id': subnet['id'], 'exception': ex}) raise exception.NetworkNotFoundForInstance( instance_id=instance['uuid']) @refresh_cache def remove_fixed_ip_from_instance(self, context, instance, address, conductor_api=None): """Remove a fixed ip from the instance.""" zone = 'compute:%s' % instance['availability_zone'] search_opts = {'device_id': instance['uuid'], 'device_owner': zone, 'fixed_ips': 'ip_address=%s' % address} data = neutronv2.get_client(context).list_ports(**search_opts) ports = data['ports'] for p in ports: fixed_ips = p['fixed_ips'] new_fixed_ips = [] for fixed_ip in fixed_ips: if fixed_ip['ip_address'] != address: new_fixed_ips.append(fixed_ip) port_req_body = {'port': {'fixed_ips': new_fixed_ips}} try: neutronv2.get_client(context).update_port(p['id'], port_req_body) except Exception as ex: msg = _("Unable to update port %(portid)s with" " failure: %(exception)s") LOG.debug(msg, {'portid': p['id'], 'exception': ex}) return raise exception.FixedIpNotFoundForSpecificInstance( instance_uuid=instance['uuid'], ip=address) def validate_networks(self, context, requested_networks): """Validate that the tenant can use the requested networks.""" LOG.debug(_('validate_networks() for %s'), requested_networks) if not requested_networks: nets = self._get_available_networks(context, context.project_id) if len(nets) > 1: # Attaching to more than one network by default doesn't # make sense, as the order will be arbitrary and the guest OS # won't know which to configure msg = _("Multiple possible networks found, use a Network " "ID to be more specific.") raise exception.NetworkAmbiguous(msg) return net_ids = [] for (net_id, _i, port_id) in requested_networks: if port_id: try: port = (neutronv2.get_client(context) .show_port(port_id) .get('port')) except neutronv2.exceptions.NeutronClientException as e: if e.status_code == 404: port = None if not port: raise exception.PortNotFound(port_id=port_id) if port.get('device_id', None): raise exception.PortInUse(port_id=port_id) net_id = port['network_id'] if net_id in net_ids: raise 
exception.NetworkDuplicated(network_id=net_id) net_ids.append(net_id) # Now check to see if all requested networks exist nets = self._get_available_networks(context, context.project_id, net_ids) if len(nets) != len(net_ids): requsted_netid_set = set(net_ids) returned_netid_set = set([net['id'] for net in nets]) lostid_set = requsted_netid_set - returned_netid_set id_str = '' for _id in lostid_set: id_str = id_str and id_str + ', ' + _id or _id raise exception.NetworkNotFound(network_id=id_str) def _get_instance_uuids_by_ip(self, context, address): """Retrieve instance uuids associated with the given ip address. :returns: A list of dicts containing the uuids keyed by 'instance_uuid' e.g. [{'instance_uuid': uuid}, ...] """ search_opts = {"fixed_ips": 'ip_address=%s' % address} data = neutronv2.get_client(context).list_ports(**search_opts) ports = data.get('ports', []) return [{'instance_uuid': port['device_id']} for port in ports if port['device_id']] def get_instance_uuids_by_ip_filter(self, context, filters): """Return a list of dicts in the form of [{'instance_uuid': uuid}] that matched the ip filter. """ # filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.') ip = filters.get('ip') # we remove ^$\ in the ip filer if ip[0] == '^': ip = ip[1:] if ip[-1] == '$': ip = ip[:-1] ip = ip.replace('\\.', '.') return self._get_instance_uuids_by_ip(context, ip) def _get_port_id_by_fixed_address(self, client, instance, address): """Return port_id from a fixed address.""" zone = 'compute:%s' % instance['availability_zone'] search_opts = {'device_id': instance['uuid'], 'device_owner': zone} data = client.list_ports(**search_opts) ports = data['ports'] port_id = None for p in ports: for ip in p['fixed_ips']: if ip['ip_address'] == address: port_id = p['id'] break if not port_id: raise exception.FixedIpNotFoundForAddress(address=address) return port_id @refresh_cache def associate_floating_ip(self, context, instance, floating_address, fixed_address, affect_auto_assigned=False): """Associate a floating ip with a fixed ip.""" # Note(amotoki): 'affect_auto_assigned' is not respected # since it is not used anywhere in nova code and I could # find why this parameter exists. 
        client = neutronv2.get_client(context)
        port_id = self._get_port_id_by_fixed_address(client, instance,
                                                     fixed_address)
        fip = self._get_floating_ip_by_address(client, floating_address)
        param = {'port_id': port_id,
                 'fixed_ip_address': fixed_address}
        client.update_floatingip(fip['id'], {'floatingip': param})

        if fip['port_id']:
            port = client.show_port(fip['port_id'])['port']
            orig_instance_uuid = port['device_id']

            msg_dict = dict(address=floating_address,
                            instance_id=orig_instance_uuid)
            LOG.info(_('re-assign floating IP %(address)s from '
                       'instance %(instance_id)s') % msg_dict)
            orig_instance = self.db.instance_get_by_uuid(context,
                                                         orig_instance_uuid)

            # purge cached nw info for the original instance
            update_instance_info_cache(self, context, orig_instance)

    def get_all(self, context):
        """Get all networks for client."""
        client = neutronv2.get_client(context)
        networks = client.list_networks().get('networks')
        for network in networks:
            network['label'] = network['name']
        return networks

    def get(self, context, network_uuid):
        """Get specific network for client."""
        client = neutronv2.get_client(context)
        network = client.show_network(network_uuid).get('network') or {}
        network['label'] = network['name']
        return network

    def delete(self, context, network_uuid):
        """Delete a network for client."""
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        """Get a fixed ip from the id."""
        raise NotImplementedError()

    def get_fixed_ip_by_address(self, context, address):
        """Return instance uuids given an address."""
        uuid_maps = self._get_instance_uuids_by_ip(context, address)
        if len(uuid_maps) == 1:
            return uuid_maps[0]
        elif not uuid_maps:
            raise exception.FixedIpNotFoundForAddress(address=address)
        else:
            raise exception.FixedIpAssociatedWithMultipleInstances(
                address=address)

    def _setup_net_dict(self, client, network_id):
        if not network_id:
            return {}
        pool = client.show_network(network_id)['network']
        return {pool['id']: pool}

    def _setup_port_dict(self, client, port_id):
        if not port_id:
            return {}
        port = client.show_port(port_id)['port']
        return {port['id']: port}

    def _setup_pools_dict(self, client):
        pools = self._get_floating_ip_pools(client)
        return dict([(i['id'], i) for i in pools])

    def _setup_ports_dict(self, client, project_id=None):
        search_opts = {'tenant_id': project_id} if project_id else {}
        ports = client.list_ports(**search_opts)['ports']
        return dict([(p['id'], p) for p in ports])

    def get_floating_ip(self, context, id):
        """Return floating ip object given the floating ip id."""
        client = neutronv2.get_client(context)
        try:
            fip = client.show_floatingip(id)['floatingip']
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                raise exception.FloatingIpNotFound(id=id)
            # re-raise unexpected client errors
            raise
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def _get_floating_ip_pools(self, client, project_id=None):
        search_opts = {constants.NET_EXTERNAL: True}
        if project_id:
            search_opts.update({'tenant_id': project_id})
        data = client.list_networks(**search_opts)
        return data['networks']

    def get_floating_ip_pools(self, context):
        """Return floating ip pools."""
        client = neutronv2.get_client(context)
        pools = self._get_floating_ip_pools(client)
        return [{'name': n['name'] or n['id']} for n in pools]

    def _format_floating_ip_model(self, fip, pool_dict, port_dict):
        pool = pool_dict[fip['floating_network_id']]
        result = {'id': fip['id'],
                  'address': fip['floating_ip_address'],
                  'pool': pool['name'] or pool['id'],
                  'project_id': fip['tenant_id'],
                  # In Neutron v2, an exact fixed_ip_id does not exist.
                  'fixed_ip_id': fip['port_id'],
                  }
        # In the Neutron v2 API, fixed_ip_address and instance uuid
        # (= device_id) are known here, so pass them as a result.
        result['fixed_ip'] = {'address': fip['fixed_ip_address']}
        if fip['port_id']:
            instance_uuid = port_dict[fip['port_id']]['device_id']
            result['instance'] = {'uuid': instance_uuid}
        else:
            result['instance'] = None
        return result

    def get_floating_ip_by_address(self, context, address):
        """Return a floating ip given an address."""
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        pool_dict = self._setup_net_dict(client,
                                         fip['floating_network_id'])
        port_dict = self._setup_port_dict(client, fip['port_id'])
        return self._format_floating_ip_model(fip, pool_dict, port_dict)

    def get_floating_ips_by_project(self, context):
        client = neutronv2.get_client(context)
        project_id = context.project_id
        fips = client.list_floatingips(tenant_id=project_id)['floatingips']
        pool_dict = self._setup_pools_dict(client)
        port_dict = self._setup_ports_dict(client, project_id)
        return [self._format_floating_ip_model(fip, pool_dict, port_dict)
                for fip in fips]

    def get_floating_ips_by_fixed_address(self, context, fixed_address):
        return []

    def get_instance_id_by_floating_address(self, context, address):
        """Return the instance id a floating ip's fixed ip is allocated
        to.
        """
        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if not fip['port_id']:
            return None
        port = client.show_port(fip['port_id'])['port']
        return port['device_id']

    def get_vifs_by_instance(self, context, instance):
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        raise NotImplementedError()

    def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
        search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
        if uuidutils.is_uuid_like(name_or_id):
            search_opts.update({'id': name_or_id})
        else:
            search_opts.update({'name': name_or_id})
        data = client.list_networks(**search_opts)
        nets = data['networks']

        if len(nets) == 1:
            return nets[0]['id']
        elif len(nets) == 0:
            raise exception.FloatingIpPoolNotFound()
        else:
            msg = (_("Multiple floating IP pools found matching name '%s'")
                   % name_or_id)
            raise exception.NovaException(message=msg)

    def allocate_floating_ip(self, context, pool=None):
        """Add a floating ip to a project from a pool."""
        client = neutronv2.get_client(context)
        pool = pool or CONF.default_floating_pool
        pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)

        # TODO(amotoki): handle exception during create_floatingip().
        # At this timing it is ensured that a network for pool exists;
        # a quota error may be returned.
        param = {'floatingip': {'floating_network_id': pool_id}}
        fip = client.create_floatingip(param)
        return fip['floatingip']['floating_ip_address']

    def _get_floating_ip_by_address(self, client, address):
        """Get floatingip from floating ip address."""
        if not address:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        data = client.list_floatingips(floating_ip_address=address)
        fips = data['floatingips']
        if len(fips) == 0:
            raise exception.FloatingIpNotFoundForAddress(address=address)
        elif len(fips) > 1:
            raise exception.FloatingIpMultipleFoundForAddress(
                address=address)
        return fips[0]

    def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
        """Get floatingips from fixed ip and port."""
        try:
            data = client.list_floatingips(fixed_ip_address=fixed_ip,
                                           port_id=port)
        # If a neutron plugin does not implement the L3 API a 404 from
        # list_floatingips will be raised.
        except neutronv2.exceptions.NeutronClientException as e:
            if e.status_code == 404:
                return []
            raise
        return data['floatingips']

    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Remove a floating ip with the given address from a project."""

        # Note(amotoki): We cannot handle a case where multiple pools
        # have overlapping IP address ranges. In this case we cannot use
        # 'address' as a unique key.
        # This is a limitation of the current nova.

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        if fip['port_id']:
            raise exception.FloatingIpAssociated(address=address)
        client.delete_floatingip(fip['id'])

    @refresh_cache
    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociate a floating ip from the instance."""

        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.

        client = neutronv2.get_client(context)
        fip = self._get_floating_ip_by_address(client, address)
        client.update_floatingip(fip['id'],
                                 {'floatingip': {'port_id': None}})

    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that migrating an instance
        # doesn't raise for now.
        pass

    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        if not self._has_port_binding_extension(refresh_cache=True):
            return
        neutron = neutronv2.get_client(context, admin=True)
        search_opts = {'device_id': instance['uuid'],
                       'tenant_id': instance['project_id']}
        data = neutron.list_ports(**search_opts)
        ports = data['ports']
        for p in ports:
            port_req_body = {'port': {'binding:host_id':
                                      instance.get('host')}}
            try:
                neutron.update_port(p['id'], port_req_body)
            except Exception:
                with excutils.save_and_reraise_exception():
                    msg = _("Unable to update host of port %s")
                    LOG.exception(msg, p['id'])

    def add_network_to_project(self, context, project_id,
                               network_uuid=None):
        """Force add a network to the project."""
        raise NotImplementedError()

    def _nw_info_get_ips(self, client, port):
        network_IPs = []
        for fixed_ip in port['fixed_ips']:
            fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
            floats = self._get_floating_ips_by_fixed_and_port(
                client, fixed_ip['ip_address'], port['id'])
            for ip in floats:
                fip = network_model.IP(address=ip['floating_ip_address'],
                                       type='floating')
                fixed.add_floating_ip(fip)
            network_IPs.append(fixed)
        return network_IPs

    def _nw_info_get_subnets(self, context, port, network_IPs):
        subnets = self._get_subnets_from_port(context, port)
        for subnet in subnets:
            subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
                             if fixed_ip.is_in_subnet(subnet)]
        return subnets

    def _nw_info_build_network(self, port, networks, subnets):
        # NOTE(danms): This loop can't fail to find a network since we
        # filtered ports to only the ones matching networks in our parent
        for net in networks:
            if port['network_id'] == net['id']:
                network_name = net['name']
                break

        bridge = None
        ovs_interfaceid = None
        # Network model metadata
        should_create_bridge = None
        vif_type = port.get('binding:vif_type')
        # TODO(berrange) Neutron should pass the bridge name
        # in another binding metadata field
        if vif_type == network_model.VIF_TYPE_OVS:
            bridge = CONF.neutron_ovs_bridge
            ovs_interfaceid = port['id']
        elif vif_type == network_model.VIF_TYPE_BRIDGE:
            bridge = "brq" + port['network_id']
            should_create_bridge = True

        if bridge is not None:
            bridge = bridge[:network_model.NIC_NAME_LEN]

        network = network_model.Network(
            id=port['network_id'],
            bridge=bridge,
            injected=CONF.flat_injected,
            label=network_name,
            tenant_id=net['tenant_id']
        )
        network['subnets'] = subnets
        if should_create_bridge is not None:
            network['should_create_bridge'] = should_create_bridge
        return network, ovs_interfaceid

    def _build_network_info_model(self, context, instance, networks=None):
        search_opts = {'tenant_id': instance['project_id'],
                       'device_id': instance['uuid'], }
        client = neutronv2.get_client(context, admin=True)
        data = client.list_ports(**search_opts)
        ports = data.get('ports', [])
        if networks is None:
            # retrieve networks from info_cache to get correct nic order
            network_cache = self.conductor_api.instance_get_by_uuid(
                context, instance['uuid'])['info_cache']['network_info']
            network_cache = jsonutils.loads(network_cache)
            net_ids = [iface['network']['id'] for iface in network_cache]
            networks = self._get_available_networks(context,
                                                    instance['project_id'])
        # ensure ports are in preferred network order, and filter out
        # those not attached to one of the provided list of networks
        else:
            net_ids = [n['id'] for n in networks]

        ports = [port for port in ports if port['network_id'] in net_ids]
        _ensure_requested_network_ordering(lambda x: x['network_id'],
                                           ports, net_ids)

        nw_info = network_model.NetworkInfo()
        for port in ports:
            network_IPs = self._nw_info_get_ips(client, port)
            subnets = self._nw_info_get_subnets(context, port, network_IPs)

            devname = "tap" + port['id']
            devname = devname[:network_model.NIC_NAME_LEN]

            network, ovs_interfaceid = self._nw_info_build_network(
                port, networks, subnets)

            nw_info.append(network_model.VIF(
                id=port['id'],
                address=port['mac_address'],
                network=network,
                type=port.get('binding:vif_type'),
                ovs_interfaceid=ovs_interfaceid,
                devname=devname))
        return nw_info

    def _get_subnets_from_port(self, context, port):
        """Return the subnets for a given port."""

        fixed_ips = port['fixed_ips']
        # No fixed_ips for the port means there is no subnet associated
        # with the network the port is created on.
        # Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which are
        # not related to the port. To avoid this, the method returns here.
        if not fixed_ips:
            return []
        search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
        data = neutronv2.get_client(context).list_subnets(**search_opts)
        ipam_subnets = data.get('subnets', [])
        subnets = []

        for subnet in ipam_subnets:
            subnet_dict = {'cidr': subnet['cidr'],
                           'gateway': network_model.IP(
                               address=subnet['gateway_ip'],
                               type='gateway'),
                           }

            # attempt to populate DHCP server field
            search_opts = {'network_id': subnet['network_id'],
                           'device_owner': 'network:dhcp'}
            data = neutronv2.get_client(context).list_ports(**search_opts)
            dhcp_ports = data.get('ports', [])
            for p in dhcp_ports:
                for ip_pair in p['fixed_ips']:
                    if ip_pair['subnet_id'] == subnet['id']:
                        subnet_dict['dhcp_server'] = ip_pair['ip_address']
                        break

            subnet_object = network_model.Subnet(**subnet_dict)
            for dns in subnet.get('dns_nameservers', []):
                subnet_object.add_dns(
                    network_model.IP(address=dns, type='dns'))

            # TODO(gongysh) get the routes for this subnet
            subnets.append(subnet_object)
        return subnets

    def get_dns_domains(self, context):
        """Return a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()

    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()

    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()

    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()

    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()

    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()

    def create_public_dns_domain(self, context, domain, project=None):
        """Create a private DNS domain with optional nova project."""
        raise NotImplementedError()