def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True):
    """Call migrate_server on the remote service.

    Starts from the newest RPC version and, for each capability the
    peer lacks, strips or primitivizes the arguments the older server
    cannot understand before sending.
    """
    msg_kwargs = {
        'instance': instance,
        'scheduler_hint': scheduler_hint,
        'live': live,
        'rebuild': rebuild,
        'flavor': flavor,
        'block_migration': block_migration,
        'disk_over_commit': disk_over_commit,
        'reservations': reservations,
        'clean_shutdown': clean_shutdown,
    }
    version = '1.11'
    if not self.client.can_send_version(version):
        # clean_shutdown is not understood before 1.11.
        msg_kwargs.pop('clean_shutdown')
        version = '1.10'
    if not self.client.can_send_version(version):
        # Pre-1.10 servers expect a primitive flavor, not an object.
        msg_kwargs['flavor'] = objects_base.obj_to_primitive(flavor)
        version = '1.6'
    if not self.client.can_send_version(version):
        # 1.4 expects a fully JSON-safe instance.
        msg_kwargs['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
        version = '1.4'
    cctxt = self.client.prepare(version=version)
    return cctxt.call(context, 'migrate_server', **msg_kwargs)
def build_instances(self, ctxt, **kwargs):
    """Build instances.

    Downgrades the payload step by step for older cells: each version
    gate converts one argument into the primitive form the older RPC
    server expects.
    """
    build_inst_kwargs = kwargs
    instances = build_inst_kwargs['instances']
    build_inst_kwargs['image'] = jsonutils.to_primitive(
        build_inst_kwargs['image'])
    version = '1.34'
    if self.client.can_send_version('1.34'):
        # 1.34+ peers do not take legacy_bdm; drop it if present.
        build_inst_kwargs.pop('legacy_bdm', None)
    else:
        # Older peers want primitive block device mappings.
        build_inst_kwargs['block_device_mapping'] = (
            objects_base.obj_to_primitive(
                build_inst_kwargs['block_device_mapping']))
        version = '1.32'
    if not self.client.can_send_version('1.32'):
        # Pre-1.32 peers want primitive instances.
        build_inst_kwargs['instances'] = [jsonutils.to_primitive(inst)
                                          for inst in instances]
        version = '1.30'
    if not self.client.can_send_version('1.30'):
        # Pre-1.30 peers want a primitive flavor in filter_properties.
        if 'filter_properties' in build_inst_kwargs:
            filter_properties = build_inst_kwargs['filter_properties']
            filter_properties['instance_type'] = (
                objects_base.obj_to_primitive(
                    filter_properties['instance_type']))
        version = '1.8'
    cctxt = self.client.prepare(version=version)
    cctxt.cast(ctxt, 'build_instances',
               build_inst_kwargs=build_inst_kwargs)
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping, legacy_bdm=True):
    """Cast build_instances, adapting arguments for older conductors."""
    image_p = jsonutils.to_primitive(image)
    version = '1.10'
    if not self.client.can_send_version(version):
        # 1.9 and earlier want a primitive flavor in filter_properties.
        version = '1.9'
        if 'instance_type' in filter_properties:
            flavor_p = objects_base.obj_to_primitive(
                filter_properties['instance_type'])
            filter_properties = dict(filter_properties,
                                     instance_type=flavor_p)
    kw = {'instances': instances,
          'image': image_p,
          'filter_properties': filter_properties,
          'admin_password': admin_password,
          'injected_files': injected_files,
          'requested_networks': requested_networks,
          'security_groups': security_groups}
    if not self.client.can_send_version(version):
        # 1.8 expects requested_networks as plain tuples, not an object.
        version = '1.8'
        kw['requested_networks'] = kw['requested_networks'].as_tuples()
    if not self.client.can_send_version('1.7'):
        # Pre-1.7 also receives primitive BDMs plus the legacy flag.
        version = '1.5'
        kw['block_device_mapping'] = objects_base.obj_to_primitive(
            block_device_mapping)
        kw['legacy_bdm'] = legacy_bdm
    cctxt = self.client.prepare(version=version)
    cctxt.cast(context, 'build_instances', **kw)
def test_get_all_host_states(self):
    # Ensure .service is set and we have the values we expect to.
    context = 'fake_context'
    # Mox expectations: the host manager must fetch the compute services
    # and compute nodes from the fake ironic fixtures.
    self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
    self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
    objects.ServiceList.get_by_binary(context, 'patron-compute').AndReturn(
        ironic_fakes.SERVICES)
    objects.ComputeNodeList.get_all(context).AndReturn(
        ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()
    # Patch out the instance lookup so no DB access happens.
    with mock.patch.object(patron.objects.InstanceList, 'get_by_host'):
        self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    # Each (host, node) entry must mirror its fake compute node.
    for i in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[i]
        host = compute_node.host
        node = compute_node.hypervisor_hostname
        state_key = (host, node)
        self.assertEqual(
            host_states_map[state_key].service,
            obj_base.obj_to_primitive(
                ironic_fakes.get_service_by_host(host)))
        self.assertEqual(compute_node.stats,
                         host_states_map[state_key].stats)
        self.assertEqual(compute_node.free_ram_mb,
                         host_states_map[state_key].free_ram_mb)
        # free_disk_gb is stored in GB; host state tracks MB.
        self.assertEqual(compute_node.free_disk_gb * 1024,
                         host_states_map[state_key].free_disk_mb)
def _get_usage_dict(self, object_or_dict, **updates):
    """Make a usage dict _update methods expect.

    Accepts a dict or an Instance or Flavor object, and a set of
    updates.  Converts the object to a dict and applies the updates.

    :param object_or_dict: instance or flavor as an object or just a dict
    :param updates: key-value pairs to update the passed object.
                    Currently only considers 'numa_topology', all other
                    keys are ignored.
    :returns: a dict with all the information from object_or_dict updated
              with updates
    """
    if isinstance(object_or_dict, objects.Instance):
        usage = instance_obj.compat_instance(object_or_dict)
    elif isinstance(object_or_dict, objects.Flavor):
        usage = obj_base.obj_to_primitive(object_or_dict)
    else:
        # Plain dict: copy so the caller's mapping is not mutated.
        usage = dict(object_or_dict)
    for key in ('numa_topology',):
        if key in updates:
            usage[key] = updates[key]
    return usage
def _test_extract_flavor(self, prefix):
    """Round-trip a flavor through system metadata and compare.

    Saves the default flavor into instance system_metadata under the
    given key prefix, extracts it back, and checks that every flavor
    property tracked in system metadata survives the round trip.

    :param prefix: system_metadata key prefix used by save/extract
    """
    instance_type = flavors.get_default_flavor()
    instance_type_p = obj_base.obj_to_primitive(instance_type)

    metadata = {}
    flavors.save_flavor_info(metadata, instance_type, prefix)
    instance = {'system_metadata': self._dict_to_metadata(metadata)}

    _instance_type = flavors.extract_flavor(instance, prefix)
    _instance_type_p = obj_base.obj_to_primitive(_instance_type)

    # Only compare the properties that system metadata tracks.
    props = flavors.system_metadata_flavor_props.keys()
    # Materialize the key list before deleting: mutating a dict while
    # iterating a live keys() view raises RuntimeError on Python 3.
    for key in list(instance_type_p.keys()):
        if key not in props:
            del instance_type_p[key]

    self.assertEqual(instance_type_p, _instance_type_p)
def build_request_spec(ctxt, image, instances, instance_type=None):
    """Build a request_spec for the scheduler.

    The request_spec assumes that all instances to be scheduled are the
    same type.
    """
    instance = instances[0]
    if instance_type is None:
        if isinstance(instance, objects.Instance):
            instance_type = instance.get_flavor()
        else:
            instance_type = flavors.extract_flavor(instance)

    # Primitivize both pieces so the spec is JSON-serializable.
    if isinstance(instance, objects.Instance):
        instance = instance_obj.compat_instance(instance)
    if isinstance(instance_type, objects.Flavor):
        instance_type = obj_base.obj_to_primitive(instance_type)

    request_spec = {
        'image': image or {},
        'instance_properties': instance,
        'instance_type': instance_type,
        'num_instances': len(instances),
    }
    return jsonutils.to_primitive(request_spec)
def test_get_all_host_states(self):
    # Verify each host state carries the service record plus the
    # resource values of its fake compute node.
    context = 'fake_context'
    self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
    self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
    objects.ServiceList.get_by_binary(
        context, 'patron-compute').AndReturn(ironic_fakes.SERVICES)
    objects.ComputeNodeList.get_all(context).AndReturn(
        ironic_fakes.COMPUTE_NODES)
    self.mox.ReplayAll()
    with mock.patch.object(patron.objects.InstanceList, 'get_by_host'):
        self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    for idx in range(4):
        compute_node = ironic_fakes.COMPUTE_NODES[idx]
        state_key = (compute_node.host, compute_node.hypervisor_hostname)
        state = host_states_map[state_key]
        self.assertEqual(
            state.service,
            obj_base.obj_to_primitive(
                ironic_fakes.get_service_by_host(compute_node.host)))
        self.assertEqual(compute_node.stats, state.stats)
        self.assertEqual(compute_node.free_ram_mb, state.free_ram_mb)
        # GB from the node record vs MB on the host state.
        self.assertEqual(compute_node.free_disk_gb * 1024,
                         state.free_disk_mb)
def test_get_all_host_states(self, mock_get_by_host):
    # Exercise get_all_host_states and verify the resulting
    # host_state_map contents, including the warnings logged for
    # inconsistent or orphaned data.
    mock_get_by_host.return_value = objects.InstanceList()
    context = 'fake_context'
    self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
    self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warning')
    objects.ServiceList.get_by_binary(
        context, 'patron-compute').AndReturn(fakes.SERVICES)
    objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # node 3 host physical disk space is greater than database
    host_manager.LOG.warning("Host %(hostname)s has more disk space "
                             "than database expected (%(physical)sgb >"
                             " %(database)sgb)",
                             {'physical': 3333, 'database': 3072,
                              'hostname': 'node3'})
    # Invalid service
    host_manager.LOG.warning("No compute service record found for "
                             "host %(host)s", {'host': 'fake'})
    self.mox.ReplayAll()
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(host_states_map[state_key].service,
                         obj_base.obj_to_primitive(
                             fakes.get_service_by_host(host)))
    # Spot-check the free RAM/disk figures converted onto each state.
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
                     512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    # NUMA topology must round-trip through the DB representation.
    self.assertThat(
        objects.NUMATopology.obj_from_db_obj(
            host_states_map[('host3', 'node3')].numa_topology
        )._to_dict(),
        matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)
def bulk_create(self, context, fixed_ips):
    """Insert many fixed IPs in one DB call, refusing persisted ones."""
    primitives = []
    for fixedip in fixed_ips:
        primitive = obj_base.obj_to_primitive(fixedip)
        # The presence of 'id' means the row already exists in the DB.
        if 'id' in primitive:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        primitives.append(primitive)
    db.fixed_ip_bulk_create(context, primitives)
def test_send_on_task_change(self):
    # A task_state transition alone must emit exactly one notification.
    payload_before = obj_base.obj_to_primitive(self.instance)
    payload_before['task_state'] = None
    # pretend we just transitioned to task SPAWNING:
    self.instance.task_state = task_states.SPAWNING
    notifications.send_update(self.context, payload_before, self.instance)
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
def _get_compute_node(self, context):
    """Returns compute node for the host and nodename.

    Returns None (after logging a warning) when no record exists.
    """
    try:
        node = objects.ComputeNode.get_by_host_and_nodename(
            context, self.host, self.nodename)
    except exception.NotFound:
        LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
                    {'host': self.host, 'node': self.nodename})
    else:
        return obj_base.obj_to_primitive(node)
def test_get_all_host_states(self, mock_get_by_host):
    # Build all host states and verify the map contents, including the
    # warnings expected for inconsistent or orphaned records.
    mock_get_by_host.return_value = objects.InstanceList()
    context = 'fake_context'
    self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
    self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
    self.mox.StubOutWithMock(host_manager.LOG, 'warning')
    objects.ServiceList.get_by_binary(context, 'patron-compute').AndReturn(
        fakes.SERVICES)
    objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
    # node 3 host physical disk space is greater than database
    host_manager.LOG.warning(
        "Host %(hostname)s has more disk space "
        "than database expected (%(physical)sgb >"
        " %(database)sgb)", {
            'physical': 3333,
            'database': 3072,
            'hostname': 'node3'
        })
    # Invalid service
    host_manager.LOG.warning(
        "No compute service record found for "
        "host %(host)s", {'host': 'fake'})
    self.mox.ReplayAll()
    self.host_manager.get_all_host_states(context)
    host_states_map = self.host_manager.host_state_map
    self.assertEqual(len(host_states_map), 4)
    # Check that .service is set properly
    for i in xrange(4):
        compute_node = fakes.COMPUTE_NODES[i]
        host = compute_node['host']
        node = compute_node['hypervisor_hostname']
        state_key = (host, node)
        self.assertEqual(
            host_states_map[state_key].service,
            obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
    # Spot-check the free RAM/disk figures on each host state.
    self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
                     512)
    # 511GB
    self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
                     524288)
    self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
                     1024)
    # 1023GB
    self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
                     1048576)
    self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
                     3072)
    # 3071GB
    self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
                     3145728)
    # NUMA topology must round-trip through its DB representation.
    self.assertThat(
        objects.NUMATopology.obj_from_db_obj(
            host_states_map[('host3', 'node3')].numa_topology)._to_dict(),
        matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
    self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
                     8192)
    # 8191GB
    self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                     8388608)
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
    # An instance without a NUMA topology passes on a NUMA-capable host.
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = None
    spec = {'instance_properties': jsonutils.to_primitive(
        obj_base.obj_to_primitive(instance))}
    filter_properties = {'request_spec': spec}
    host = fakes.FakeHostState('host1', 'node1',
                               {'numa_topology': fakes.NUMA_TOPOLOGY})
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def build_instances(self, context, instances, image, filter_properties,
                    admin_password, injected_files, requested_networks,
                    security_groups, block_device_mapping, legacy_bdm=True):
    """Cast build_instances, degrading the payload for older peers."""
    image_p = jsonutils.to_primitive(image)
    version = '1.10'
    if not self.client.can_send_version(version):
        # 1.9 wants the flavor as a primitive inside filter_properties.
        version = '1.9'
        if 'instance_type' in filter_properties:
            filter_properties = dict(
                filter_properties,
                instance_type=objects_base.obj_to_primitive(
                    filter_properties['instance_type']))
    kw = dict(instances=instances,
              image=image_p,
              filter_properties=filter_properties,
              admin_password=admin_password,
              injected_files=injected_files,
              requested_networks=requested_networks,
              security_groups=security_groups)
    if not self.client.can_send_version(version):
        # 1.8 takes requested networks as plain tuples.
        version = '1.8'
        kw['requested_networks'] = kw['requested_networks'].as_tuples()
    if not self.client.can_send_version('1.7'):
        # Pre-1.7 additionally needs primitive BDMs plus the legacy flag.
        version = '1.5'
        kw.update(
            block_device_mapping=objects_base.obj_to_primitive(
                block_device_mapping),
            legacy_bdm=legacy_bdm)
    cctxt = self.client.prepare(version=version)
    cctxt.cast(context, 'build_instances', **kw)
def test_send_on_vm_change(self):
    # A vm_state transition should emit exactly one notification
    # published by the default 'compute' service on the test host.
    previous = obj_base.obj_to_primitive(self.instance)
    previous['vm_state'] = None
    # pretend we just transitioned to ACTIVE:
    self.instance.vm_state = vm_states.ACTIVE
    notifications.send_update(self.context, previous, self.instance)
    self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
    # service name should default to 'compute'
    notif = fake_notifier.NOTIFICATIONS[0]
    self.assertEqual('compute.testhost', notif.publisher_id)
def output(migrations_obj):
    """Returns the desired output of the API from an object.

    From a MigrationsList's object this method returns a list of
    primitive objects with the only necessary fields.
    """
    primitives = obj_base.obj_to_primitive(migrations_obj)
    for primitive in primitives:
        # Strip soft-delete bookkeeping fields from the API output.
        for field in ('deleted', 'deleted_at'):
            del primitive[field]
    return primitives
def output(migrations_obj):
    """Returns the desired output of the API from an object.

    From a MigrationsList's object this method returns a list of
    primitive objects with the only necessary fields.
    """
    objects = obj_base.obj_to_primitive(migrations_obj)
    for obj in objects:
        # Drop soft-delete bookkeeping; KeyError if absent, as before.
        obj.pop("deleted")
        obj.pop("deleted_at")
    return objects
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
    # No instance NUMA topology -> the filter accepts a NUMA host.
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = None
    props = jsonutils.to_primitive(obj_base.obj_to_primitive(instance))
    filter_properties = {'request_spec': {'instance_properties': props}}
    host = fakes.FakeHostState(
        'host1', 'node1', {'numa_topology': fakes.NUMA_TOPOLOGY})
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
                   flavor, block_migration, disk_over_commit,
                   reservations=None, clean_shutdown=True):
    """Call migrate_server, backlevelling arguments for older servers."""
    kw = dict(instance=instance,
              scheduler_hint=scheduler_hint,
              live=live,
              rebuild=rebuild,
              flavor=flavor,
              block_migration=block_migration,
              disk_over_commit=disk_over_commit,
              reservations=reservations,
              clean_shutdown=clean_shutdown)
    version = '1.11'
    if not self.client.can_send_version(version):
        version = '1.10'
        kw.pop('clean_shutdown')  # not understood before 1.11
    if not self.client.can_send_version(version):
        version = '1.6'
        # Pre-1.10 servers want a primitive flavor.
        kw['flavor'] = objects_base.obj_to_primitive(flavor)
    if not self.client.can_send_version(version):
        version = '1.4'
        # 1.4 wants a fully JSON-safe instance.
        kw['instance'] = jsonutils.to_primitive(
            objects_base.obj_to_primitive(instance))
    cctxt = self.client.prepare(version=version)
    return cctxt.call(context, 'migrate_server', **kw)
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
    # A NUMA instance must be rejected by a host with no NUMA data.
    cell0 = objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512)
    cell1 = objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = objects.InstanceNUMATopology(
        cells=[cell0, cell1])
    filter_properties = {
        'request_spec': {
            'instance_properties': jsonutils.to_primitive(
                obj_base.obj_to_primitive(instance))}}
    host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
    self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
    # NUMA instance + host without topology info -> filter rejects.
    topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
        objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512),
    ])
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = topology
    request_spec = {
        'instance_properties': jsonutils.to_primitive(
            obj_base.obj_to_primitive(instance)),
    }
    host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
    self.assertFalse(
        self.filt_cls.host_passes(host, {'request_spec': request_spec}))
def _create_instances_here(self, ctxt, instance_uuids, instance_properties, instance_type, image, security_groups, block_device_mapping): instance_values = copy.copy(instance_properties) # The parent may pass these metadata values as lists, and the # create call expects it to be a dict. instance_values['metadata'] = utils.instance_meta(instance_values) # Pop out things that will get set properly when re-creating the # instance record. instance_values.pop('id') instance_values.pop('name') instance_values.pop('info_cache') instance_values.pop('security_groups') instance_values.pop('flavor') # FIXME(danms): The instance was brutally serialized before being # sent over RPC to us. Thus, the pci_requests value wasn't really # sent in a useful form. Since it was getting ignored for cells # before it was part of the Instance, skip it now until cells RPC # is sending proper instance objects. instance_values.pop('pci_requests', None) instances = [] num_instances = len(instance_uuids) for i, instance_uuid in enumerate(instance_uuids): instance = objects.Instance(context=ctxt) instance.update(instance_values) instance.uuid = instance_uuid instance.flavor = instance_type instance.old_flavor = None instance.new_flavor = None instance = self.compute_api.create_db_entry_for_new_instance( ctxt, instance_type, image, instance, security_groups, block_device_mapping, num_instances, i) instances.append(instance) instance_p = obj_base.obj_to_primitive(instance) self.msg_runner.instance_update_at_top(ctxt, instance_p) return instances
def test_numa_topology_filter_pass_set_limit(self):
    # Passing the filter must also record the configured allocation
    # ratios as limits on the host state.
    self.flags(cpu_allocation_ratio=21)
    self.flags(ram_allocation_ratio=1.3)
    cells = [objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
             objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)]
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = objects.InstanceNUMATopology(cells=cells)
    filter_properties = {
        'request_spec': {
            'instance_properties': jsonutils.to_primitive(
                obj_base.obj_to_primitive(instance))}}
    host = fakes.FakeHostState('host1', 'node1',
                               {'numa_topology': fakes.NUMA_TOPOLOGY,
                                'pci_stats': None})
    self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
    limits = host.limits['numa_topology']
    self.assertEqual(limits.cpu_allocation_ratio, 21)
    self.assertEqual(limits.ram_allocation_ratio, 1.3)
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
    """Create or update a block device mapping in API cells.

    If create is True, only try to create.  If create is None, try to
    update but fall back to create.  If create is False, only attempt
    to update.  This maps to patron-conductor's behavior.
    """
    if not CONF.cells.enable:
        return
    modern = self.client.can_send_version('1.28')
    version = '1.28' if modern else '1.10'
    if not modern:
        # Pre-1.28 cells expect a primitive BDM.
        bdm = objects_base.obj_to_primitive(bdm)
    cctxt = self.client.prepare(version=version)
    try:
        cctxt.cast(ctxt, 'bdm_update_or_create_at_top',
                   bdm=bdm, create=create)
    except Exception:
        # Best-effort notification: log, never propagate.
        LOG.exception(_LE("Failed to notify cells of BDM update/create."))
def _build_instances(self, message, target_cells, instance_uuids,
                     build_inst_kwargs):
    """Attempt to build instance(s) or send msg to child cell.

    Tries each target cell in order: builds locally when the cell is
    this one, otherwise forwards the request.  Stops at the first cell
    that accepts; raises NoCellsAvailable if every cell fails.
    """
    ctxt = message.ctxt
    instance_properties = obj_base.obj_to_primitive(
        build_inst_kwargs['instances'][0])
    filter_properties = build_inst_kwargs['filter_properties']
    instance_type = filter_properties['instance_type']
    image = build_inst_kwargs['image']
    security_groups = build_inst_kwargs['security_groups']
    block_device_mapping = build_inst_kwargs['block_device_mapping']
    LOG.debug("Building instances with routing_path=%(routing_path)s",
              {'routing_path': message.routing_path})
    for target_cell in target_cells:
        try:
            if target_cell.is_me:
                # Need to create instance DB entries as the conductor
                # expects that the instance(s) already exists.
                instances = self._create_instances_here(
                    ctxt, instance_uuids, instance_properties,
                    instance_type, image, security_groups,
                    block_device_mapping)
                build_inst_kwargs['instances'] = instances
                # Need to record the create action in the db as the
                # conductor expects it to already exist.
                self._create_action_here(ctxt, instance_uuids)
                self.compute_task_api.build_instances(
                    ctxt, **build_inst_kwargs)
                return
            # Not our cell: forward to the child cell and stop.
            self.msg_runner.build_instances(ctxt, target_cell,
                                            build_inst_kwargs)
            return
        except Exception:
            # Best effort: log the failure and try the next cell.
            LOG.exception(_LE("Couldn't communicate with cell '%s'"),
                          target_cell.name)
    # FIXME(comstud): Would be nice to kick this back up so that
    # the parent cell could retry, if we had a parent.
    LOG.error(_LE("Couldn't communicate with any cells"))
    raise exception.NoCellsAvailable()
def test_numa_topology_filter_pass_set_limit(self):
    # When the filter passes, the configured allocation ratios must be
    # stored in host.limits['numa_topology'].
    self.flags(cpu_allocation_ratio=21)
    self.flags(ram_allocation_ratio=1.3)
    topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
        objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512),
    ])
    instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
    instance.numa_topology = topology
    request_spec = {
        'instance_properties': jsonutils.to_primitive(
            obj_base.obj_to_primitive(instance)),
    }
    host = fakes.FakeHostState('host1', 'node1',
                               {'numa_topology': fakes.NUMA_TOPOLOGY,
                                'pci_stats': None})
    self.assertTrue(
        self.filt_cls.host_passes(host, {'request_spec': request_spec}))
    limits = host.limits['numa_topology']
    self.assertEqual(limits.cpu_allocation_ratio, 21)
    self.assertEqual(limits.ram_allocation_ratio, 1.3)
def to_dict(self):
    """Flatten this pool into a plain dict, hoisting tags to top level."""
    flat = base.obj_to_primitive(self)
    for tag_key, tag_val in six.iteritems(flat.pop('tags', {})):
        flat[tag_key] = tag_val
    return flat
def __init__(self, instance, address=None, content=None, extra_md=None,
             conductor_api=None, network_info=None, vd_driver=None):
    """Creation of this object should basically cover all time
    consuming collection.  Methods after that should not cause time
    delays due to network operations or lengthy cpu operations.

    The user should then get a single instance and make multiple method
    calls on it.
    """
    if not content:
        content = []
    ctxt = context.get_admin_context()
    # The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
    self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
    self.instance = instance
    self.extra_md = extra_md
    # Allow a conductor API to be injected (e.g. for testing);
    # otherwise build the default one.
    if conductor_api:
        capi = conductor_api
    else:
        capi = conductor.API()
    self.availability_zone = az.get_instance_availability_zone(ctxt,
                                                               instance)
    self.security_groups = objects.SecurityGroupList.get_by_instance(
        ctxt, instance)
    self.mappings = _format_instance_mapping(ctxt, instance)
    # user_data is stored base64-encoded; decode once up front.
    if instance.user_data is not None:
        self.userdata_raw = base64.b64decode(instance.user_data)
    else:
        self.userdata_raw = None
    self.ec2_ids = capi.get_ec2_ids(ctxt,
                                    obj_base.obj_to_primitive(instance))
    self.address = address
    # expose instance metadata.
    self.launch_metadata = utils.instance_meta(instance)
    self.password = password.extract_password(instance)
    self.uuid = instance.uuid
    self.content = {}
    self.files = []
    # get network info, and the rendered network template
    if network_info is None:
        network_info = instance.info_cache.network_info
    self.ip_info = \
        ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
    self.network_config = None
    cfg = netutils.get_injected_network_template(network_info)
    if cfg:
        # Content keys are zero-padded sequence numbers ("0000", ...).
        key = "%04i" % len(self.content)
        self.content[key] = cfg
        self.network_config = {"name": "network_config",
                               'content_path': "/%s/%s" % (CONTENT_DIR,
                                                           key)}
    # 'content' is passed in from the configdrive code in
    # patron/virt/libvirt/driver.py. That's how we get the injected
    # files (personalities) in. AFAIK they're not stored in the db at
    # all, so are not available later (web service metadata time).
    for (path, contents) in content:
        key = "%04i" % len(self.content)
        self.files.append({'path': path,
                           'content_path': "/%s/%s" % (CONTENT_DIR, key)})
        self.content[key] = contents
    # Vendordata driver: injected class or the configured default.
    if vd_driver is None:
        vdclass = importutils.import_class(CONF.vendordata_driver)
    else:
        vdclass = vd_driver
    self.vddriver = vdclass(instance=instance, address=address,
                            extra_md=extra_md, network_info=network_info)
    self.route_configuration = None
def bdm_update_or_create_at_top(self, ctxt, bdm, create=None):
    """BDM was created/updated in this cell.  Tell the API cells."""
    # TODO(ndipanov): Move inter-cell RPC to use objects
    primitive_bdm = base_obj.obj_to_primitive(bdm)
    self.msg_runner.bdm_update_or_create_at_top(ctxt, primitive_bdm,
                                                create=create)