def test_connection_switch(self):
    # Use a file-based sqlite database so data will persist across new
    # connections
    fake_conn = 'sqlite:///' + self.test_filename
    # The 'main' database connection will stay open, so in-memory is fine
    self.useFixture(fixtures.Database(database='main'))
    self.useFixture(fixtures.Database(connection=fake_conn))

    # Make a request context with a cell mapping
    mapping = compute.CellMapping(database_connection=fake_conn)
    # In the tests, the admin context is required in order to read
    # an Instance back after write, for some reason
    ctxt = context.get_admin_context()

    # Create an instance in the cell database
    uuid = uuidutils.generate_uuid()
    with context.target_cell(ctxt, mapping):
        instance = compute.Instance(context=ctxt, uuid=uuid)
        instance.create()

        # Verify the instance is found in the cell database
        inst = compute.Instance.get_by_uuid(ctxt, uuid)
        self.assertEqual(uuid, inst.uuid)

    # Verify the instance isn't found in the main database
    self.assertRaises(exception.InstanceNotFound,
                      compute.Instance.get_by_uuid, ctxt, uuid)
def get_by_network(cls, context, network, host=None):
    ipinfo = db.network_get_associated_fixed_ips(context,
                                                 network['id'],
                                                 host=host)
    if not ipinfo:
        return []

    fips = cls(context=context, objects=[])

    for info in ipinfo:
        inst = objects.Instance(context=context,
                                uuid=info['instance_uuid'],
                                hostname=info['instance_hostname'],
                                created_at=info['instance_created'],
                                updated_at=info['instance_updated'])
        vif = objects.VirtualInterface(context=context,
                                       id=info['vif_id'],
                                       address=info['vif_address'])
        fip = objects.FixedIP(context=context,
                              address=info['address'],
                              instance_uuid=info['instance_uuid'],
                              network_id=info['network_id'],
                              virtual_interface_id=info['vif_id'],
                              allocated=info['allocated'],
                              leased=info['leased'],
                              default_route=info['default_route'],
                              instance=inst,
                              virtual_interface=vif)
        fips.objects.append(fip)
    fips.obj_reset_changes()
    return fips
def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
    if expected_attrs is None:
        expected_attrs = []
    for field in fixedip.fields:
        if field == 'default_route':
            # NOTE(danms): This field is only set when doing a
            # FixedIPList.get_by_network() because it's a relatively
            # special-case thing, so skip it here
            continue
        if field not in FIXED_IP_OPTIONAL_ATTRS:
            fixedip[field] = db_fixedip[field]
    # NOTE(danms): Instance could be deleted, and thus None
    if 'instance' in expected_attrs:
        fixedip.instance = objects.Instance._from_db_object(
            context,
            objects.Instance(context),
            db_fixedip['instance']) if db_fixedip['instance'] else None
    if 'network' in expected_attrs:
        fixedip.network = objects.Network._from_db_object(
            context,
            objects.Network(context),
            db_fixedip['network']) if db_fixedip['network'] else None
    if 'virtual_interface' in expected_attrs:
        db_vif = db_fixedip['virtual_interface']
        vif = objects.VirtualInterface._from_db_object(
            context,
            objects.VirtualInterface(context),
            db_fixedip['virtual_interface']) if db_vif else None
        fixedip.virtual_interface = vif
    if 'floating_ips' in expected_attrs:
        fixedip.floating_ips = obj_base.obj_make_list(
            context, objects.FloatingIPList(context),
            objects.FloatingIP, db_fixedip['floating_ips'])
    fixedip._context = context
    fixedip.obj_reset_changes()
    return fixedip
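# A minimal standalone sketch (not Nova's real object machinery) of the
# _from_db_object() pattern above: plain columns are copied
# unconditionally, while joined relations are hydrated only when the
# caller opted in via expected_attrs, since the join may not have been
# performed and a related row (e.g. a deleted instance) may be None.
# All names here are illustrative.
OPTIONAL_ATTRS = ('instance', 'network', 'virtual_interface')


def from_db_row(fields, db_row, expected_attrs=()):
    obj = {}
    for field in fields:
        if field in OPTIONAL_ATTRS:
            continue                      # joined relations are opt-in
        obj[field] = db_row[field]
    for attr in expected_attrs:
        obj[attr] = db_row.get(attr)      # may legitimately be None
    return obj


row = {'address': '10.0.0.5', 'leased': True, 'instance': None}
print(from_db_row(['address', 'leased', 'instance'], row,
                  expected_attrs=['instance']))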
def set_vm_state_and_notify(context, instance_uuid, service, method,
                            updates, ex, request_spec):
    """Change the VM state and send a notification."""
    LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
                {'service': service, 'method': method, 'ex': ex})

    vm_state = updates['vm_state']
    properties = request_spec.get('instance_properties', {})
    # NOTE(vish): We shouldn't get here unless we have a catastrophic
    #             failure, so just set the instance to its internal state
    notifier = rpc.get_notifier(service)
    state = vm_state.upper()
    LOG.warning(_LW('Setting instance to %s state.'), state,
                instance_uuid=instance_uuid)

    instance = objects.Instance(context=context, uuid=instance_uuid,
                                **updates)
    instance.obj_reset_changes(['uuid'])
    instance.save()
    compute_utils.add_instance_fault_from_exc(
        context, instance, ex, sys.exc_info())

    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_state,
                   method=method,
                   reason=ex)
    event_type = '%s.%s' % (service, method)
    notifier.error(context, event_type, payload)
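# A hedged sketch of the obj_reset_changes(['uuid']) trick above, using
# a toy change-tracking object rather than Nova's NovaObject: uuid is
# needed to address the row but must not itself be written back, so its
# dirty flag is cleared before save() persists the remaining updates.
class MiniObject(object):
    def __init__(self, **kwargs):
        self._changed = set(kwargs)       # every kwarg starts out dirty
        self.__dict__.update(kwargs)

    def obj_reset_changes(self, fields=None):
        # Clear the dirty flag for the given fields (default: all).
        if fields is None:
            self._changed.clear()
        else:
            self._changed -= set(fields)

    def save(self):
        updates = {f: getattr(self, f) for f in self._changed}
        print('UPDATE instances SET %r WHERE uuid=%r'
              % (updates, self.uuid))


obj = MiniObject(uuid='abc-123', vm_state='error', task_state=None)
obj.obj_reset_changes(['uuid'])   # keep uuid as the key, not an update
obj.save()                        # persists only vm_state/task_state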
def instance_delete_everywhere(self, ctxt, instance, delete_type):
    """This is used by the API cell when it doesn't know which cell an
    instance is in, but the instance was requested to be deleted or
    soft-deleted.  So, we'll broadcast this everywhere.
    """
    if isinstance(instance, dict):
        instance = objects.Instance._from_db_object(ctxt,
                                                    objects.Instance(),
                                                    instance)
    self.msg_runner.instance_delete_everywhere(ctxt, instance,
                                               delete_type)
def instance_info_cache_update_at_top(self, ctxt, instance_info_cache):
    """Broadcast up that an instance's info_cache has changed."""
    version = '1.35'
    instance = objects.Instance(uuid=instance_info_cache.instance_uuid,
                                info_cache=instance_info_cache)
    if not self.client.can_send_version('1.35'):
        instance = objects_base.obj_to_primitive(instance)
        version = '1.34'
    cctxt = self.client.prepare(version=version)
    cctxt.cast(ctxt, 'instance_update_at_top', instance=instance)
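# A hedged sketch of the version-pinning pattern above, with toy
# stand-ins: when the remote side cannot accept the newer payload (a
# full object at 1.35), downgrade to a primitive dict and pin the cast
# to the older version. Names and the string version comparison are
# illustrative only.
def pick_payload(client, instance, to_primitive):
    version = '1.35'
    if not client.can_send_version(version):
        instance = to_primitive(instance)   # older peers expect a dict
        version = '1.34'
    return version, instance


class StubClient(object):
    def __init__(self, max_version):
        self.max_version = max_version

    def can_send_version(self, version):
        # toy comparison; real RPC clients compare version tuples
        return version <= self.max_version


print(pick_payload(StubClient('1.34'), {'uuid': 'x'}, dict))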
def deallocate_for_instance(self, context, instance,
                            requested_networks=None):
    """Deallocates all network structures related to instance."""
    # NOTE(vish): We can't do the floating ip deallocation here because
    #             this is called from compute.manager which shouldn't
    #             have db access so we do it on the other side of the
    #             rpc.
    if not isinstance(instance, obj_base.NovaObject):
        instance = objects.Instance._from_db_object(context,
                                                    objects.Instance(),
                                                    instance)
    self.network_rpcapi.deallocate_for_instance(
        context, instance=instance,
        requested_networks=requested_networks)
def _locked_consume_from_request(self, spec_obj):
    disk_mb = (spec_obj.root_gb +
               spec_obj.ephemeral_gb) * 1024
    ram_mb = spec_obj.memory_mb
    vcpus = spec_obj.vcpus
    self.free_ram_mb -= ram_mb
    self.free_disk_mb -= disk_mb
    self.vcpus_used += vcpus

    # Track number of instances on host
    self.num_instances += 1

    pci_requests = spec_obj.pci_requests
    if pci_requests and self.pci_stats:
        pci_requests = pci_requests.requests
    else:
        pci_requests = None

    # Calculate the NUMA usage
    host_numa_topology, _fmt = hardware.host_topology_and_format_from_host(
        self)
    instance_numa_topology = spec_obj.numa_topology
    spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
        host_numa_topology, instance_numa_topology,
        limits=self.limits.get('numa_topology'),
        pci_requests=pci_requests, pci_stats=self.pci_stats)
    if pci_requests:
        instance_cells = None
        if spec_obj.numa_topology:
            instance_cells = spec_obj.numa_topology.cells
        self.pci_stats.apply_requests(pci_requests, instance_cells)

    # NOTE(sbauza): Yeah, that's crap. We should get rid of all of those
    # NUMA helpers because now we're 100% sure that spec_obj.numa_topology
    # is an InstanceNUMATopology object. Unfortunately, since
    # HostState.host_numa_topology is still in limbo between a NUMATopology
    # object (when updated by consume_from_request) and a ComputeNode
    # object (when updated by update_from_compute_node), we need to keep
    # the call to get_host_numa_usage_from_instance until it's fixed (and
    # use a temporary orphaned Instance object as a proxy)
    instance = objects.Instance(numa_topology=spec_obj.numa_topology)

    self.numa_topology = hardware.get_host_numa_usage_from_instance(
        self, instance)

    # NOTE(sbauza): By considering all cases when the scheduler is called
    # and when consume_from_request() is run, we can safely say that there
    # is always an IO operation because we want to move the instance
    self.num_io_ops += 1
def test_attach_interface_no_more_fixed_ips(self, attach_mock, get_mock):
    fake_instance = compute.Instance(uuid=FAKE_UUID1)
    get_mock.return_value = fake_instance
    attach_mock.side_effect = exception.NoMoreFixedIps(net=FAKE_NET_ID1)
    body = {}
    self.assertRaises(exc.HTTPBadRequest,
                      self.attachments.create, self.req, FAKE_UUID1,
                      body=body)
    ctxt = self.req.environ['compute.context']
    attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                        None, None)
    get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                     want_objects=True,
                                     expected_attrs=None)
def test_attach_interface_port_in_use(self, attach_mock, get_mock):
    fake_instance = compute.Instance(uuid=FAKE_UUID1)
    get_mock.return_value = fake_instance
    attach_mock.side_effect = exception.PortInUse(port_id=FAKE_PORT_ID1)
    body = {}
    self.assertRaises(self.in_use_exc,
                      self.attachments.create, self.req, FAKE_UUID1,
                      body=body)
    ctxt = self.req.environ['compute.context']
    attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                        None, None)
    get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                     want_objects=True,
                                     expected_attrs=None)
def _from_db_object(context, block_device_obj, db_block_device,
                    expected_attrs=None):
    if expected_attrs is None:
        expected_attrs = []
    for key in block_device_obj.fields:
        if key in BLOCK_DEVICE_OPTIONAL_ATTRS:
            continue
        block_device_obj[key] = db_block_device[key]

    if 'instance' in expected_attrs:
        my_inst = objects.Instance(context)
        my_inst._from_db_object(context, my_inst,
                                db_block_device['instance'])
        block_device_obj.instance = my_inst

    block_device_obj._context = context
    block_device_obj.obj_reset_changes()
    return block_device_obj
def test_attach_interface_failed_no_network(self, attach_mock, get_mock):
    fake_instance = compute.Instance(uuid=FAKE_UUID1,
                                     project_id=FAKE_UUID2)
    get_mock.return_value = fake_instance
    attach_mock.side_effect = (
        exception.InterfaceAttachFailedNoNetwork(project_id=FAKE_UUID2))
    self.assertRaises(exc.HTTPBadRequest,
                      self.attachments.create, self.req, FAKE_UUID1,
                      body={})
    ctxt = self.req.environ['compute.context']
    attach_mock.assert_called_once_with(ctxt, fake_instance, None,
                                        None, None)
    get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
                                     want_objects=True,
                                     expected_attrs=None)
def _create_instances_here(self, ctxt, instance_uuids,
                           instance_properties, instance_type, image,
                           security_groups, block_device_mapping):
    instance_values = copy.copy(instance_properties)
    # The parent may pass these metadata values as lists, and the
    # create call expects it to be a dict.
    instance_values['metadata'] = utils.instance_meta(instance_values)
    # Pop out things that will get set properly when re-creating the
    # instance record.
    instance_values.pop('id')
    instance_values.pop('name')
    instance_values.pop('info_cache')
    instance_values.pop('security_groups')
    instance_values.pop('flavor')

    # FIXME(danms): The instance was brutally serialized before being
    # sent over RPC to us. Thus, the pci_requests value wasn't really
    # sent in a useful form. Since it was getting ignored for cells
    # before it was part of the Instance, skip it now until cells RPC
    # is sending proper instance objects.
    instance_values.pop('pci_requests', None)

    # FIXME(danms): Same for ec2_ids
    instance_values.pop('ec2_ids', None)

    instances = []
    num_instances = len(instance_uuids)
    security_groups = (self.compute_api.security_group_api.
                       populate_security_groups(security_groups))
    for i, instance_uuid in enumerate(instance_uuids):
        instance = objects.Instance(context=ctxt)
        instance.update(instance_values)
        instance.uuid = instance_uuid
        instance.flavor = instance_type
        instance.old_flavor = None
        instance.new_flavor = None
        instance = self.compute_api.create_db_entry_for_new_instance(
            ctxt,
            instance_type,
            image,
            instance,
            security_groups,
            block_device_mapping,
            num_instances, i)

        instances.append(instance)
        self.msg_runner.instance_update_at_top(ctxt, instance)
    return instances
def migrate_server(self, context, instance, scheduler_hint, live,
                   rebuild, flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True,
                   request_spec=None):
    if instance and not isinstance(instance, nova_object.NovaObject):
        # NOTE(danms): Until v2 of the RPC API, we need to tolerate
        # old-world instance objects here
        attrs = ['metadata', 'system_metadata', 'info_cache',
                 'security_groups']
        instance = objects.Instance._from_db_object(
            context, objects.Instance(), instance,
            expected_attrs=attrs)
    # NOTE: Remove this when we drop support for v1 of the RPC API
    if flavor and not isinstance(flavor, objects.Flavor):
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        flavor = objects.Flavor.get_by_id(context, flavor['id'])

    if live and not rebuild and not flavor:
        self._live_migrate(context, instance, scheduler_hint,
                           block_migration, disk_over_commit,
                           request_spec)
    elif not live and not rebuild and flavor:
        instance_uuid = instance.uuid
        with compute_utils.EventReporter(context, 'cold_migrate',
                                         instance_uuid):
            self._cold_migrate(context, instance, flavor,
                               scheduler_hint['filter_properties'],
                               reservations, clean_shutdown)
    else:
        raise NotImplementedError()
def _schedule_build_to_cells(self, message, instance_uuids,
                             filter_properties, method, method_kwargs):
    """Pick a cell where we should create the new instance(s)."""
    try:
        for i in range(max(0, CONF.cells.scheduler_retries) + 1):
            try:
                target_cells = self._grab_target_cells(filter_properties)
                if target_cells is None:
                    # a filter took care of scheduling.  skip.
                    return

                return method(message, target_cells, instance_uuids,
                              method_kwargs)
            except exception.NoCellsAvailable:
                if i == max(0, CONF.cells.scheduler_retries):
                    raise
                sleep_time = max(1, CONF.cells.scheduler_retry_delay)
                LOG.info(_LI("No cells available when scheduling.  Will "
                             "retry in %(sleep_time)s second(s)"),
                         {'sleep_time': sleep_time})
                time.sleep(sleep_time)
                continue
    except Exception:
        LOG.exception(_LE("Error scheduling instances %(instance_uuids)s"),
                      {'instance_uuids': instance_uuids})
        ctxt = message.ctxt
        for instance_uuid in instance_uuids:
            instance = objects.Instance(context=ctxt,
                                        uuid=instance_uuid,
                                        vm_state=vm_states.ERROR)
            self.msg_runner.instance_update_at_top(ctxt, instance)
            try:
                instance.vm_state = vm_states.ERROR
                instance.save()
            except Exception:
                pass
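# A minimal, generic sketch of the bounded-retry loop above, assuming a
# callable that raises NoCellsAvailable until a cell can be picked.
# Hypothetical names; the CONF knobs become plain arguments.
import time


class NoCellsAvailable(Exception):
    pass


def schedule_with_retries(pick_cell, retries=2, retry_delay=1):
    for attempt in range(max(0, retries) + 1):
        try:
            return pick_cell()
        except NoCellsAvailable:
            if attempt == max(0, retries):
                raise                     # out of attempts: re-raise
            time.sleep(max(1, retry_delay))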
def fake_vpn_instance():
    return compute.Instance(
        id=7, image_ref=CONF.vpn_image_id, vm_state='active',
        created_at=timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
        uuid=uuid, project_id=project_id)
def fake_get_instance(self, *args, **kwargs):
    return compute.Instance(uuid=FAKE_UUID1)
def instance_rules(self, instance, network_info):
    ctxt = context.get_admin_context()
    if isinstance(instance, dict):
        # NOTE(danms): allow old-world instance objects from
        # unconverted callers; all we need is instance.uuid below
        instance = objects.Instance._from_db_object(
            ctxt, objects.Instance(), instance, [])

    ipv4_rules = []
    ipv6_rules = []

    # Initialize with basic rules
    self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
    # Set up rules to allow traffic to/from DHCP server
    self._do_dhcp_rules(ipv4_rules, network_info)

    # Allow project network traffic
    if CONF.allow_same_net_traffic:
        self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                       network_info)
    # We wrap these in CONF.use_ipv6 because they might cause
    # a DB lookup. The other ones are just list operations, so
    # they're not worth the clutter.
    if CONF.use_ipv6:
        # Allow RA responses
        self._do_ra_rules(ipv6_rules, network_info)

    # then, security group chains and rules
    rules = objects.SecurityGroupRuleList.get_by_instance(ctxt, instance)

    for rule in rules:
        if not rule.cidr:
            version = 4
        else:
            version = netutils.get_ip_version(rule.cidr)

        if version == 4:
            fw_rules = ipv4_rules
        else:
            fw_rules = ipv6_rules

        protocol = rule.protocol
        if protocol:
            protocol = rule.protocol.lower()

        if version == 6 and protocol == 'icmp':
            protocol = 'icmpv6'

        args = ['-j ACCEPT']
        if protocol:
            args += ['-p', protocol]

        if protocol in ['udp', 'tcp']:
            args += self._build_tcp_udp_rule(rule, version)
        elif protocol == 'icmp':
            args += self._build_icmp_rule(rule, version)

        if rule.cidr:
            args += ['-s', str(rule.cidr)]
            fw_rules += [' '.join(args)]
        else:
            if rule.grantee_group:
                insts = objects.InstanceList.get_by_security_group(
                    ctxt, rule.grantee_group)
                for inst in insts:
                    if inst.info_cache.deleted:
                        LOG.debug('ignoring deleted cache')
                        continue
                    nw_info = compute_utils.get_nw_info_for_instance(
                        inst)

                    ips = [ip['address'] for ip in nw_info.fixed_ips()
                           if ip['version'] == version]

                    LOG.debug('ips: %r', ips, instance=inst)
                    for ip in ips:
                        subrule = args + ['-s %s' % ip]
                        fw_rules += [' '.join(subrule)]

    ipv4_rules += ['-j $sg-fallback']
    ipv6_rules += ['-j $sg-fallback']
    LOG.debug('Security Group Rules %s translated to ipv4: %r, ipv6: %r',
              list(rules), ipv4_rules, ipv6_rules, instance=instance)
    return ipv4_rules, ipv6_rules
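# A hedged, self-contained sketch of how one security group rule turns
# into iptables arguments in the loop above. The port handling mirrors
# what a _build_tcp_udp_rule()-style helper would contribute, but this
# flattened helper is hypothetical, not Nova's actual API.
def rule_to_iptables_args(protocol, cidr=None, from_port=None,
                          to_port=None):
    args = ['-j ACCEPT']
    if protocol:
        args += ['-p', protocol.lower()]
    if protocol in ('tcp', 'udp') and from_port is not None:
        if from_port == to_port:
            args += ['--dport', str(from_port)]
        else:
            args += ['-m', 'multiport',
                     '--dports', '%s:%s' % (from_port, to_port)]
    if cidr:
        args += ['-s', str(cidr)]
    return ' '.join(args)


print(rule_to_iptables_args('tcp', cidr='10.0.0.0/24',
                            from_port=22, to_port=22))
# -> -j ACCEPT -p tcp --dport 22 -s 10.0.0.0/24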