def spawn(self, context, instance, image_meta, injected_files,
          admin_password, network_info=None, block_device_info=None):
    """Overwrite the original libvirt_driver's spawn method."""
    # add metadata to the instance
    def _append_metadata(target_instance, metadata_dict):
        original_meta = target_instance.get('metadata', None) or list()
        original_meta.append(metadata_dict)
        target_instance['metadata'] = original_meta

    # get meta info related to VM synthesis
    base_sha256_uuid, memory_snap_id, diskhash_snap_id, memhash_snap_id = \
        self._get_basevm_meta_info(image_meta)

    overlay_url = None
    handoff_info = None
    instance_meta = instance.get('metadata', None)
    if instance_meta is not None:
        if "overlay_url" in instance_meta.keys():
            overlay_url = instance_meta.get("overlay_url")
        if "handoff_info" in instance_meta.keys():
            handoff_info = instance_meta.get("handoff_info")

    # original openstack logic
    disk_info = blockinfo.get_disk_info(
        libvirt_driver.CONF.libvirt.virt_type, instance,
        block_device_info, image_meta)
    if hasattr(self, 'to_xml'):  # icehouse
        xml = self.to_xml(context, instance, network_info,
                          disk_info, image_meta,
                          block_device_info=block_device_info,
                          write_to_disk=True)
    elif hasattr(self, '_get_guest_xml'):  # kilo
        xml = self._get_guest_xml(context, instance, network_info,
                                  disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)

    # handle xml configuration to make a portable VM
    xml_obj = ElementTree.fromstring(xml)
    xml = self._polish_VM_configuration(xml_obj)

    # avoid injecting key, password, and metadata since we're resuming a VM
    original_inject_password = libvirt_driver.CONF.libvirt.inject_password
    original_inject_key = libvirt_driver.CONF.libvirt.inject_key
    original_metadata = instance.get('metadata')
    libvirt_driver.CONF.libvirt.inject_password = None
    libvirt_driver.CONF.libvirt.inject_key = None
    instance['metadata'] = {}

    self._create_image(context, instance, disk_info['mapping'],
                       network_info=network_info,
                       block_device_info=block_device_info,
                       files=injected_files,
                       admin_pass=admin_password)

    # restore the original configuration
    libvirt_driver.CONF.libvirt.inject_password = original_inject_password
    libvirt_driver.CONF.libvirt.inject_key = original_inject_key
    instance['metadata'] = original_metadata

    if (overlay_url is not None) and (handoff_info is None):
        # spawn instance using VM synthesis
        LOG.debug(_('cloudlet, synthesis start'))
        self._create_network_only(xml, instance, network_info,
                                  block_device_info)
        synthesized_vm = self._spawn_using_synthesis(
            context, instance, xml, image_meta, overlay_url)
        instance_uuid = str(instance.get('uuid', ''))
        self.synthesized_vm_dics[instance_uuid] = synthesized_vm
    elif handoff_info is not None:
        # spawn instance using VM handoff
        LOG.debug(_('cloudlet, Handoff start'))
        self._create_network_only(xml, instance, network_info,
                                  block_device_info)
        synthesized_vm = self._spawn_using_handoff(
            context, instance, xml, image_meta, handoff_info)
        instance_uuid = str(instance.get('uuid', ''))
        self.synthesized_vm_dics[instance_uuid] = synthesized_vm
    elif memory_snap_id is not None:
        # resume from memory snapshot
        LOG.debug(_('cloudlet, resume from memory snapshot'))
        basedisk_path = self._get_cache_image(context, instance,
                                              image_meta['id'])
        basemem_path = self._get_cache_image(context, instance,
                                             memory_snap_id)
        diskhash_path = self._get_cache_image(context, instance,
                                              diskhash_snap_id)
        memhash_path = self._get_cache_image(context, instance,
                                             memhash_snap_id)
        LOG.debug(_('cloudlet, creating network'))
        self._create_network_only(xml, instance, network_info,
                                  block_device_info)
        LOG.debug(_('cloudlet, resuming base vm'))
        self.resume_basevm(instance, xml, basedisk_path, basemem_path,
                           diskhash_path, memhash_path, base_sha256_uuid)
    else:
        self._create_domain_and_network(context, xml, instance,
                                        network_info, block_device_info)
    LOG.debug(_("Instance is running"), instance=instance)

    def _wait_for_boot():
        """Called at an interval until the VM is running."""
        state = self.get_info(instance).state
        if state == power_state.RUNNING:
            raise loopingcall.LoopingCallDone()

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
    timer.start(interval=0.5).wait()
    LOG.info(_("Instance spawned successfully."), instance=instance)
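

# spawn() above saves the inject_password/inject_key options and the instance
# metadata, nulls them out around _create_image(), and then restores them so
# that nothing is injected into a disk image that will be resumed rather than
# booted. The snippet below is a minimal, self-contained sketch of that
# save/override/restore pattern expressed as a context manager; the
# suppress_injection helper and the _Conf stand-in are illustrative only and
# are not part of the Nova driver.

from contextlib import contextmanager


@contextmanager
def suppress_injection(conf, instance):
    """Temporarily disable key/password injection and blank the metadata."""
    saved = (conf.inject_password, conf.inject_key, instance.get('metadata'))
    conf.inject_password = None
    conf.inject_key = None
    instance['metadata'] = {}
    try:
        yield
    finally:
        # Always restore the original values, even if image creation fails.
        conf.inject_password, conf.inject_key, instance['metadata'] = saved


if __name__ == '__main__':
    class _Conf(object):  # illustrative stand-in for CONF.libvirt
        inject_password = 'secret'
        inject_key = True

    conf = _Conf()
    instance = {'metadata': {'overlay_url': 'http://example.com/overlay'}}
    with suppress_injection(conf, instance):
        assert conf.inject_password is None and instance['metadata'] == {}
    assert conf.inject_password == 'secret'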
def pre_launch(self, context, new_instance_ref, network_info=None,
               block_device_info=None, migration=False,
               skip_image_service=False, image_refs=[], lvm_info={}):

    image_base_path = os.path.join(CONF.instances_path, CONF.base_dir_name)
    if not os.path.exists(image_base_path):
        LOG.debug('Base path %s does not exist. It will be created now.',
                  image_base_path)
        mkdir_as(image_base_path, self.openstack_uid)

    artifact_path = None
    if not skip_image_service and CONF.cobalt_use_image_service:
        artifact_path = image_base_path
        # We need to first download the descriptor and the disk files
        # from the image service.
        LOG.debug("Downloading images %s from the image service." %
                  (image_refs))
        for image_ref in image_refs:
            image = self.image_service.show(context, image_ref)
            # In previous versions name was the filename (*.gc, *.disk) so
            # there was no file_name property. Now that name is more
            # descriptive when uploaded to glance, file_name property is
            # set; use if possible.
            target = os.path.join(
                image_base_path,
                image['properties'].get('file_name', image['name']))

            if migration or not os.path.exists(target):
                # If the path does not exist fetch the data from the image
                # service. NOTE: We always fetch in the case of a
                # migration, as the descriptor may have changed from its
                # previous state. Migrating VMs are the only case where a
                # descriptor for an instance will not be a fixed constant.
                # We download to a temporary location so we can make the
                # file appear atomically from the right user.
                fd, temp_target = tempfile.mkstemp(dir=image_base_path)
                try:
                    os.close(fd)
                    self.image_service.download(context, image_ref,
                                                temp_target)
                    os.chown(temp_target, self.openstack_uid,
                             self.openstack_gid)
                    os.chmod(temp_target, 0644)
                    os.rename(temp_target, target)
                except:
                    os.unlink(temp_target)
                    raise

    # (dscannell): Determine which libvirt_conn to use. If this is for
    # migration, and there exists some lvm information, then use the
    # migration libvirt_conn (that will use the configured image backend).
    # Otherwise, default to the launch libvirt_conn that will always use a
    # qcow2 backend. It is safer to use the launch libvirt_conn for a
    # migration if no lvm_info is given.
    if migration and len(lvm_info) > 0:
        libvirt_conn_type = 'migration'
    else:
        libvirt_conn_type = 'launch'
    libvirt_conn = self.libvirt_connections[libvirt_conn_type]

    # (dscannell) Check to see if we need to convert the network_info
    # object into the legacy format.
    if hasattr(network_info, 'legacy') and libvirt_conn.legacy_nwinfo():
        network_info = network_info.legacy()

    # TODO(dscannell): This method can take an optional image_meta that
    # appears to be the root disk's image metadata. It checks the metadata
    # for the image format (e.g. iso, disk, etc). Right now we are passing
    # in None (default) but we need to double check this.
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
                                        new_instance_ref,
                                        block_device_info)

    # We need to create the libvirt xml and associated files. Pass back
    # the path to the libvirt.xml file.
    working_dir = os.path.join(CONF.instances_path,
                               new_instance_ref['uuid'])

    stubbed_disks = self._stub_disks(libvirt_conn,
                                     new_instance_ref,
                                     disk_info['mapping'],
                                     block_device_info,
                                     lvm_info)

    libvirt_file = os.path.join(working_dir, "libvirt.xml")

    # Make sure that our working directory exists.
    mkdir_as(working_dir, self.openstack_uid)

    # (dscannell) We want to disable any injection. We do this by making a
    # copy of the instance and clearing out some entries. Since OpenStack
    # uses dictionary-list accessors, we can pass this dictionary through
    # that code.
    instance_dict = dict(new_instance_ref)
    # The name attribute is special and does not carry over like the rest
    # of the attributes.
    instance_dict['key_data'] = None
    instance_dict['metadata'] = []
    for network_ref, mapping in network_info:
        network_ref['injected'] = False

    # Stub out an image in the _base directory so libvirt_conn._create_image
    # doesn't try to download the base image that the master was booted
    # from, which isn't necessary because a clone's thin-provisioned disk is
    # overlaid on a disk image accessible via VMS_DISK_URL. Note that we
    # can't just touch the image's file in _base because master instances
    # will need the real _base data. So we trick _create_image into using
    # some bogus id for the image id that'll never be the id of a real
    # image; the new instance's uuid suffices.
    disk_images = {'image_id': new_instance_ref['uuid'],
                   'kernel_id': new_instance_ref['kernel_id'],
                   'ramdisk_id': new_instance_ref['ramdisk_id']}
    touch_as(os.path.join(image_base_path,
                          get_cache_fname(disk_images, 'image_id')),
             self.openstack_uid)

    # (dscannell) This was taken from the core nova project as part of the
    # boot path for normal instances. We basically want to mimic this
    # functionality.
    # (rui-lin) libvirt_xml parameter was removed from 2013.1 to 2013.1.1.
    # Check if the parameter is in the argument list of _create_image to
    # decide which method signature to use, and whether to write the xml
    # file to disk afterwards.
    if 'libvirt_xml' in inspect.getargspec(libvirt_conn._create_image).args:
        xml = libvirt_conn.to_xml(instance_dict, network_info, disk_info,
                                  block_device_info=block_device_info)
        libvirt_conn._create_image(context, instance_dict, xml,
                                   disk_info['mapping'],
                                   network_info=network_info,
                                   block_device_info=block_device_info,
                                   disk_images=disk_images)
    else:
        libvirt_conn._create_image(context, instance_dict,
                                   disk_info['mapping'],
                                   network_info=network_info,
                                   block_device_info=block_device_info,
                                   disk_images=disk_images)
        xml = libvirt_conn.to_xml(instance_dict, network_info, disk_info,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)

    if not migration:
        for disk_name, disk_file in stubbed_disks.iteritems():
            disk_path = disk_file.path
            if os.path.exists(disk_path) and disk_file.source_type == 'file':
                # (dscannell) Remove the fake disk file (if created).
                os.remove(disk_path)

    # Fix up the permissions on the files that we created so that they are
    # owned by the openstack user.
    for root, dirs, files in os.walk(working_dir, followlinks=True):
        for path in dirs + files:
            LOG.debug("chowning path=%s to openstack user %s" %
                      (os.path.join(root, path), self.openstack_uid))
            os.chown(os.path.join(root, path),
                     self.openstack_uid, self.openstack_gid)

    # Return the libvirt file; this will be passed in as the name. This
    # parameter is overloaded in the management interface as a libvirt
    # special case.
    return (libvirt_file, artifact_path)
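

# pre_launch() above fetches each image into a tempfile created with
# tempfile.mkstemp() in the destination directory, fixes ownership and mode,
# and only then os.rename()s it into place, so a partially downloaded file
# never appears under its final name. The helper below is a stand-alone
# sketch of that same fetch-then-rename pattern, assuming `fetch` is any
# callable that writes the payload to the given path; atomic_fetch is an
# illustrative name and is not part of the Cobalt code.

import os
import tempfile


def atomic_fetch(fetch, target, uid=-1, gid=-1, mode=0644):
    """Download into a temp file next to target, then rename into place."""
    directory = os.path.dirname(target)
    fd, temp_target = tempfile.mkstemp(dir=directory)
    try:
        os.close(fd)
        fetch(temp_target)                  # write the payload to the temp path
        os.chown(temp_target, uid, gid)     # -1 leaves owner/group unchanged
        os.chmod(temp_target, mode)
        os.rename(temp_target, target)      # atomic within the same filesystem
    except:
        os.unlink(temp_target)              # never leave partial files behind
        raise

# Example (illustrative): atomic_fetch(
#     lambda path: image_service.download(context, image_ref, path), target)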