def spawn(self, instance):
    """Create a new VM and start it.

    Raises Duplicate if a VM with the instance's name already exists;
    on any failure during creation the instance is destroyed.
    """
    existing = self._lookup(instance.name)
    if existing is not None:
        raise exception.Duplicate(_('Attempt to create duplicate vm %s')
                                  % instance.name)
    auth = manager.AuthManager()
    user = auth.get_user(instance['user_id'])
    project = auth.get_project(instance['project_id'])
    # Fetch the file, assume it is a VHD file.
    vhdfile = "%s.vhd" % os.path.join(FLAGS.instances_path, instance.name)
    images.fetch(instance['image_id'], vhdfile, user, project)
    try:
        self._create_vm(instance)
        self._create_disk(instance['name'], vhdfile)
        self._create_nic(instance['name'], instance['mac_address'])
        LOG.debug(_('Starting VM %s '), instance.name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance.name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
def _fetch_image(context, instance, image_path):
    """Download the instance's image and clone it to its native disk format.

    Fetches the image from glance to ``image_path``, clones it via
    VBoxManage to ``<image_path>.<format>`` and deletes the original
    download.  On VirtualBox or Nova errors, both files are removed and
    the exception is re-raised.

    :param context: request context used for the glance fetch
    :param instance: instance whose image_ref/user_id/project_id are used
    :param image_path: local path for the raw download
    :returns: path of the cloned disk image
    """
    disk_path = None
    try:
        images.fetch(context, instance.image_ref, image_path,
                     instance.user_id, instance.project_id)
        # Avoid conflicts
        vhdutils.check_disk_uuid(image_path)
        disk_info = vhdutils.disk_info(image_path)
        disk_format = disk_info[constants.VHD_IMAGE_TYPE]
        disk_path = image_path + "." + disk_format.lower()
        manage.VBoxManage.clone_hd(image_path, disk_path,
                                   disk_format=disk_format)
        # Drop the raw download once the clone exists.
        manage.VBoxManage.close_medium(constants.MEDIUM_DISK, image_path,
                                       delete=True)
    except (vbox_exc.VBoxException, exception.NovaException):
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup of partially created media.
            for path in (image_path, disk_path):
                if path and os.path.exists(path):
                    manage.VBoxManage.close_medium(constants.MEDIUM_DISK,
                                                   path)
                    pathutils.delete_path(path)
    return disk_path
def _pull_missing_image(self, context, image_meta, instance):
    """Fetch an image from glance into a temp file and load it into docker.

    :returns: docker inspect data for the freshly loaded image
    :raises exception.NovaException: when the repository file cannot be
        loaded into docker
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta['name'])

    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random name so concurrent pulls cannot collide.
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)
            images.fetch(context, image_meta['id'], out_path,
                         instance['user_id'], instance['project_id'])
            self.docker.load_repository_file(
                self._encode_utf8(image_meta['name']),
                out_path
            )
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta['name'])
    return self.docker.inspect_image(self._encode_utf8(image_meta['name']))
def fetch_image_if_not_existing():
    """Return a cached VHD/VHDX path, downloading from glance if absent.

    Also runs the Intel policyagent hooks when the instance metadata
    carries a trust-policy location.
    """
    # Raw string: the original literal depended on invalid escape
    # sequences (e.g. "\P", "\p") passing through unchanged, which is
    # deprecated since Python 3.6 and slated to become an error.
    policyagent = (
        r"C:\Program Files (x86)\Intel\Policyagent\bin\policyagent.py")
    vhd_path = None
    for format_ext in ['vhd', 'vhdx']:
        test_path = base_vhd_path + '.' + format_ext
        if self._pathutils.exists(test_path):
            vhd_path = test_path
            break
    if not vhd_path:
        try:
            images.fetch(context, image_id, base_vhd_path,
                         instance.user_id, instance.project_id)
            if 'mtwilson_trustpolicy_location' in instance['metadata']:
                output, ret = utils.execute(
                    'python', policyagent, 'prepare_trusted_image',
                    base_vhd_path, image_id, instance.name,
                    instance['metadata']['mtwilson_trustpolicy_location'],
                    instance.root_gb)
            # Rename the download to carry its actual format extension.
            format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
            vhd_path = base_vhd_path + '.' + format_ext.lower()
            self._pathutils.rename(base_vhd_path, vhd_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove any partial download before re-raising.
                if self._pathutils.exists(base_vhd_path):
                    self._pathutils.remove(base_vhd_path)
    if 'mtwilson_trustpolicy_location' in instance['metadata']:
        output, ret = utils.execute(
            'python', policyagent, 'create_instance_directory_symlink',
            instance.image_ref, instance.name)
    return vhd_path
def spawn(self, context, instance, image_meta, network_info=None,
          block_device_info=None):
    """Create a new VM and start it.

    The image is assumed to be a VHD; on any creation failure the
    instance is torn down again.
    """
    if self._lookup(instance.name) is not None:
        raise exception.InstanceExists(name=instance.name)
    # Fetch the file, assume it is a VHD file.
    vhdfile = "%s.vhd" % os.path.join(FLAGS.instances_path, instance.name)
    images.fetch(instance['image_ref'], vhdfile,
                 instance['user_id'], instance['project_id'])
    try:
        self._create_vm(instance)
        self._create_disk(instance['name'], vhdfile)
        # Use the first assigned MAC address, if any.
        mac_address = None
        if instance['mac_addresses']:
            mac_address = instance['mac_addresses'][0]['address']
        self._create_nic(instance['name'], mac_address)
        LOG.debug(_('Starting VM %s '), instance.name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance.name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
def fetch_raw_image(context, target, image_id):
    """Download an initrd or kernel image to *target*.

    No raw conversion is attempted because these images are already
    stored in raw format.
    """
    images.fetch(context, image_id, target)
def spawn(self, instance):
    """Create a new VM and start it.

    Raises InstanceExists when the name is taken; on any failure during
    creation the instance is destroyed.
    """
    if self._lookup(instance.name) is not None:
        raise exception.InstanceExists(name=instance.name)
    auth = manager.AuthManager()
    user = auth.get_user(instance['user_id'])
    project = auth.get_project(instance['project_id'])
    # Fetch the file, assume it is a VHD file.
    vhdfile = "%s.vhd" % os.path.join(FLAGS.instances_path, instance.name)
    images.fetch(instance['image_id'], vhdfile, user, project)
    try:
        self._create_vm(instance)
        self._create_disk(instance['name'], vhdfile)
        self._create_nic(instance['name'], instance['mac_address'])
        LOG.debug(_('Starting VM %s '), instance.name)
        self._set_vm_state(instance['name'], 'Enabled')
        LOG.info(_('Started VM %s '), instance.name)
    except Exception as exn:
        LOG.exception(_('spawn vm failed: %s'), exn)
        self.destroy(instance)
def fetch_image_if_not_existing():
    """Return a cached VHD/VHDX/ISO path, downloading from glance if absent.

    Refreshes the cache timestamp on a hit and runs the Intel
    policyagent hooks when the instance metadata carries a trust-policy
    location.
    """
    # Raw string: the original literal depended on invalid escape
    # sequences (e.g. "\P", "\p") passing through unchanged, which is
    # deprecated since Python 3.6 and slated to become an error.
    policyagent = (
        r"C:\Program Files (x86)\Intel\Policyagent\bin\policyagent.py")
    image_path = None
    for format_ext in ['vhd', 'vhdx', 'iso']:
        test_path = base_image_path + '.' + format_ext
        if self._pathutils.exists(test_path):
            image_path = test_path
            self._update_image_timestamp(image_id)
            break
    if not image_path:
        try:
            images.fetch(context, image_id, base_image_path,
                         instance.user_id, instance.project_id)
            # Lazy formatting args instead of eager string concatenation.
            LOG.info("BASE IMAGE PATH : %s", base_image_path)
            if 'mtwilson_trustpolicy_location' in instance['metadata']:
                output, ret = utils.execute(
                    'python', policyagent, 'prepare_trusted_image',
                    base_image_path, image_id, instance.name,
                    instance['metadata']['mtwilson_trustpolicy_location'],
                    instance.root_gb)
            if image_type == 'iso':
                format_ext = 'iso'
            else:
                format_ext = self._vhdutils.get_vhd_format(base_image_path)
            image_path = base_image_path + '.' + format_ext.lower()
            self._pathutils.rename(base_image_path, image_path)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove any partial download before re-raising.
                if self._pathutils.exists(base_image_path):
                    self._pathutils.remove(base_image_path)
    if 'mtwilson_trustpolicy_location' in instance['metadata']:
        output, ret = utils.execute(
            'python', policyagent, 'create_instance_directory_symlink',
            instance.image_ref, instance.name)
    return image_path
def fetch_image_if_not_existing():
    """Return the cached image path, fetching it from glance when missing."""
    # Prefer a previously downloaded copy and refresh its timestamp.
    for ext in ('vhd', 'vhdx', 'iso'):
        cached = base_image_path + '.' + ext
        if self._pathutils.exists(cached):
            self._update_image_timestamp(image_id)
            return cached
    try:
        images.fetch(context, image_id, base_image_path)
        if image_type == 'iso':
            detected = 'iso'
        else:
            detected = self._vhdutils.get_vhd_format(
                base_image_path)
        final_path = base_image_path + '.' + detected.lower()
        self._pathutils.rename(base_image_path, final_path)
        return final_path
    except Exception:
        with excutils.save_and_reraise_exception():
            # Drop any partial download before re-raising.
            if self._pathutils.exists(base_image_path):
                self._pathutils.remove(base_image_path)
def fetch_image_if_not_existing():
    """Return the cached image path, fetching it from glance when missing."""
    # Prefer a previously downloaded copy and refresh its timestamp.
    for ext in ('vhd', 'vhdx', 'iso'):
        cached = base_image_path + '.' + ext
        if self._pathutils.exists(cached):
            self._update_image_timestamp(image_id)
            return cached
    try:
        images.fetch(context, image_id, base_image_path,
                     instance.user_id, instance.project_id)
        if image_type == 'iso':
            detected = 'iso'
        else:
            detected = self._vhdutils.get_vhd_format(
                base_image_path)
        final_path = base_image_path + '.' + detected.lower()
        self._pathutils.rename(base_image_path, final_path)
        return final_path
    except Exception:
        with excutils.save_and_reraise_exception():
            # Drop any partial download before re-raising.
            if self._pathutils.exists(base_image_path):
                self._pathutils.remove(base_image_path)
def fetch_image_if_not_existing():
    """Download the VHD from glance unless a cached copy already exists."""
    if self._pathutils.exists(vhd_path):
        return
    try:
        images.fetch(context, image_id, vhd_path,
                     instance["user_id"], instance["project_id"])
    except Exception:
        with excutils.save_and_reraise_exception():
            # Remove any partial download before re-raising.
            if self._pathutils.exists(vhd_path):
                self._pathutils.remove(vhd_path)
def fetch_image(self, context, image_id, target, user, project):
    """Download *image_id* from glance to *target*.

    :raises exception.ZVMImageError: when the download fails
    """
    LOG.debug(_("Downloading image %s from glance image server")
              % image_id)
    try:
        images.fetch(context, image_id, target, user, project)
    except Exception as exc:
        detail = {'id': image_id, 'err': exc}
        msg = _("Download image file of image %(id)s failed with reason:"
                " %(err)s") % detail
        raise exception.ZVMImageError(msg=msg)
def fetch_image_if_not_existing():
    """Download the VHD from glance unless a cached copy already exists."""
    if self._pathutils.exists(vhd_path):
        return
    try:
        images.fetch(context, image_id, vhd_path,
                     instance['user_id'], instance['project_id'])
    except Exception:
        with excutils.save_and_reraise_exception():
            # Remove any partial download before re-raising.
            if self._pathutils.exists(vhd_path):
                self._pathutils.remove(vhd_path)
def _import_spawn_image(self, context, image_meta_id, image_os_version):
    """Stage a glance image locally and import it into the hypervisor."""
    LOG.debug("Downloading the image %s from glance to nova compute "
              "server", image_meta_id)
    tmp_dir = os.path.normpath(CONF.zvm.image_tmp_path)
    image_path = os.path.join(tmp_dir, image_meta_id)
    # Only download when the local copy is missing.
    if not os.path.exists(image_path):
        images.fetch(context, image_meta_id, image_path)
    self._hypervisor.image_import(image_meta_id,
                                  "file://" + image_path,
                                  {'os_version': image_os_version})
def fetch_raw_image(context, target, image_id):
    """Grab initrd or kernel image.

    This function does not attempt raw conversion, as these images
    will already be in raw format.
    """
    # Removed leftover Python 2 ``print`` timing instrumentation: the
    # print *statement* is a syntax error on Python 3, and ad-hoc
    # stdout timing does not belong in production code (use profiling
    # or LOG.debug at the call site if timings are needed).
    images.fetch(context, image_id, target)
def fetch_image_if_not_existing():
    """Download the image unless cached; return its path either way."""
    if not self._pathutils.exists(image_path):
        try:
            images.fetch(context, image_id, image_path,
                         user_id, project_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Remove any partial download before re-raising.
                if self._pathutils.exists(image_path):
                    self._pathutils.remove(image_path)
    return image_path
def fetch_raw_image(context, target, image_id, trusted_certs=None):
    """Download an initrd or kernel image to *target*.

    No raw conversion is attempted; these images are already raw.

    :param context: nova.context.RequestContext auth request context
    :param target: target path to put the image
    :param image_id: id of the image to fetch
    :param trusted_certs: optional objects.TrustedCerts for image validation
    """
    images.fetch(context, image_id, target, trusted_certs)
def create_volume_from_image(self, context, instance, image_id):
    """Creates a Logical Volume and copies the specified image to it

    :param context: nova context used to retrieve image from glance
    :param instance: instance to create the volume for
    :param image_id: image_id reference used to locate image in glance
    :returns: dictionary with the name of the created
              Logical Volume device in 'device_name' key
    """
    # Cache the compressed image locally; reuse it on later deploys.
    file_name = '.'.join([image_id, 'gz'])
    file_path = os.path.join(CONF.powervm_img_local_path, file_name)
    if not os.path.isfile(file_path):
        LOG.debug(_("Fetching image '%s' from glance") % image_id)
        images.fetch(context, image_id, file_path,
                     instance['user_id'], instance['project_id'])
    else:
        LOG.debug((_("Using image found at '%s'") % file_path))
    LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
    remote_path = CONF.powervm_img_remote_path
    remote_file_name, size = self._copy_image_file(file_path, remote_path)
    # calculate root device size in bytes
    # we respect the minimum root device size in constants
    instance_type = flavors.extract_flavor(instance)
    size_gb = max(instance_type['root_gb'], constants.POWERVM_MIN_ROOT_GB)
    size = size_gb * 1024 * 1024 * 1024
    disk_name = None
    try:
        LOG.debug(_("Creating logical volume of size %s bytes") % size)
        disk_name = self._create_logical_volume(size)
        LOG.debug(_("Copying image to the device '%s'") % disk_name)
        self._copy_file_to_device(remote_file_name, disk_name)
    except Exception:
        LOG.error(_("Error while creating logical volume from image. "
                    "Will attempt cleanup."))
        # attempt cleanup of logical volume before re-raising exception
        with excutils.save_and_reraise_exception():
            if disk_name is not None:
                try:
                    self.delete_volume(disk_name)
                except Exception:
                    msg = _('Error while attempting cleanup of failed '
                            'deploy to logical volume.')
                    LOG.exception(msg)
    return {'device_name': disk_name}
def create_volume_from_image(self, context, instance, image_id):
    """Creates a Logical Volume and copies the specified image to it

    :param context: nova context used to retrieve image from glance
    :param instance: instance to create the volume for
    :param image_id: image_id reference used to locate image in glance
    :returns: dictionary with the name of the created
              Logical Volume device in 'device_name' key
    """
    # Cache the compressed image locally; reuse it on later deploys.
    file_name = '.'.join([image_id, 'gz'])
    file_path = os.path.join(CONF.powervm_img_local_path, file_name)
    if not os.path.isfile(file_path):
        LOG.debug(_("Fetching image '%s' from glance") % image_id)
        images.fetch(context, image_id, file_path,
                     instance['user_id'], instance['project_id'])
    else:
        LOG.debug((_("Using image found at '%s'") % file_path))
    LOG.debug(_("Ensuring image '%s' exists on IVM") % file_path)
    remote_path = CONF.powervm_img_remote_path
    remote_file_name, size = self._copy_image_file(file_path, remote_path)
    # calculate root device size in bytes
    # we respect the minimum root device size in constants
    instance_type = flavors.extract_instance_type(instance)
    size_gb = max(instance_type['root_gb'], constants.POWERVM_MIN_ROOT_GB)
    size = size_gb * 1024 * 1024 * 1024
    disk_name = None
    try:
        LOG.debug(_("Creating logical volume of size %s bytes") % size)
        disk_name = self._create_logical_volume(size)
        LOG.debug(_("Copying image to the device '%s'") % disk_name)
        self._copy_file_to_device(remote_file_name, disk_name)
    except Exception:
        LOG.error(_("Error while creating logical volume from image. "
                    "Will attempt cleanup."))
        # attempt cleanup of logical volume before re-raising exception
        with excutils.save_and_reraise_exception():
            if disk_name is not None:
                try:
                    self.delete_volume(disk_name)
                except Exception:
                    msg = _('Error while attempting cleanup of failed '
                            'deploy to logical volume.')
                    LOG.exception(msg)
    return {'device_name': disk_name}
def _pull_missing_image(self, context, image_meta, instance):
    """Load *image_meta* into docker, preferring a shared-directory copy.

    Falls back to downloading from glance when the shared directory is
    unset, does not hold the image, or the load from it fails.

    :returns: docker inspect data for the loaded image
    :raises exception.NovaException: when the glance-fetched file
        cannot be loaded into docker
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta.name)

    shared_directory = CONF.docker.shared_directory
    if (shared_directory and os.path.exists(
            os.path.join(shared_directory, image_meta.id))):
        LOG.debug('Found %s in shared_directory', image_meta.id)
        try:
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                os.path.join(shared_directory, image_meta.id))
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning(
                'Cannot load repository file from shared '
                'directory: %s', e, instance=instance, exc_info=True)

    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random name so concurrent pulls cannot collide.
            out_path = os.path.join(tmpdir,
                                    uuidutils.generate_uuid(dashed=False))
            LOG.debug('Fetching image with id %s from glance',
                      image_meta.id)
            images.fetch(context, image_meta.id, out_path)
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name), out_path)
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            LOG.warning('Cannot load repository file: %s',
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta.name)
def import_spawn_image(self, context, image_href, image_os_version):
    """Stage a glance image locally and import it through the SDK API."""
    LOG.debug("Downloading the image %s from glance to nova compute "
              "server" % image_href)
    tmp_dir = os.path.normpath(CONF.zvm_image_tmp_path)
    local_path = os.path.join(tmp_dir, image_href)
    # Only download when the local copy is missing.
    if not os.path.exists(local_path):
        images.fetch(context, image_href, local_path)
    remote_host = get_host()
    self._sdk_api.image_import("file://" + local_path,
                               image_meta={'os_version': image_os_version},
                               remote_host=remote_host)
def pre_live_migration(self, context, instance, block_device_info,
                       network_info):
    """Prepare this host to receive a live-migrated instance.

    When CoW images are in use and the instance does not boot from a
    volume, make sure the base VHD is present locally.
    """
    LOG.debug(_("pre_live_migration called"), instance=instance)
    self._livemigrutils.check_live_migration_config()
    if not CONF.use_cow_images:
        return
    boots_from_volume = self._volumeops.volume_in_mapping(
        self._volumeops.get_default_root_device(), block_device_info)
    if boots_from_volume:
        return
    base_vhd_path = self._pathutils.get_base_vhd_path(
        instance["image_ref"])
    if not os.path.exists(base_vhd_path):
        images.fetch(context, instance["image_ref"], base_vhd_path,
                     instance["user_id"], instance["project_id"])
def _pull_missing_image(self, context, image_meta, instance):
    """Load *image_meta* into docker, preferring a shared-directory copy.

    Falls back to downloading from glance when the shared directory is
    unset, does not hold the image, or the load from it fails.

    :returns: docker inspect data for the loaded image
    :raises exception.NovaException: when the glance-fetched file
        cannot be loaded into docker
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta.name)

    shared_directory = CONF.docker.shared_directory
    if (shared_directory and
            os.path.exists(os.path.join(shared_directory,
                                        image_meta.id))):
        LOG.debug('Found %s in shared_directory', image_meta.id)
        try:
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                os.path.join(shared_directory, image_meta.id))
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning('Cannot load repository file from shared '
                        'directory: %s', e, instance=instance,
                        exc_info=True)

    # TODO(imain): It would be nice to do this with file like object
    # passing but that seems a bit complex right now.
    snapshot_directory = CONF.docker.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random name so concurrent pulls cannot collide.
            out_path = os.path.join(tmpdir,
                                    uuidutils.generate_uuid(dashed=False))
            LOG.debug('Fetching image with id %s from glance',
                      image_meta.id)
            images.fetch(context, image_meta.id, out_path)
            LOG.debug('Loading repository file into docker %s',
                      self._encode_utf8(image_meta.name))
            self.docker.load_repository_file(
                self._encode_utf8(image_meta.name),
                out_path
            )
            return self.docker.inspect_image(
                self._encode_utf8(image_meta.name))
        except Exception as e:
            LOG.warning('Cannot load repository file: %s',
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta.name)
def _try_fetch_image(self, context, image, instance, max_size=0):
    """Fetch the instance image; on failure fall back to unpacking *image*.

    Tries to download ``instance.image_ref`` from the image service to
    *image*.  If the fetch fails, assumes *image* is an existing local
    tarball and extracts it into ``self.image_dir`` with ownership
    fixed up to the configured id map.
    """
    try:
        images.fetch(context, instance.image_ref, image,
                     instance.user_id, instance.project_id,
                     max_size=max_size)
    except Exception:
        # BUG FIX: the interpolation dict was previously passed as a
        # second argument to _LE() instead of to LOG.exception(), so
        # %(image_id)s was never substituted.  Pass it to the logger.
        LOG.exception(_LE("Image %(image_id)s doesn't exist anymore on "
                          "image service, attempting to copy image "),
                      {'image_id': instance.image_ref})
        fileutils.ensure_tree(self.image_dir)
        (user, group) = self.idmap.get_user()
        # --anchored/--numeric-owner keep paths and ids exactly as
        # stored in the archive.
        utils.execute('tar', '-C', self.image_dir, '--anchored',
                      '--numeric-owner', '-xpzf', image,
                      run_as_root=True)
        utils.execute('chown', '-R', '%s:%s' % (user, group),
                      self.image_dir, run_as_root=True)
def _download_rpm(self, context, image_meta):
    """Fetch the instance's RPM image into a temp dir, at most once."""
    LOG.info("_download_rpm")
    if self.rpm_path:
        # Already downloaded for this instance.
        return
    # Prefer the image name; fall back to its id.
    name = image_meta['name'] if image_meta['name'] else image_meta['id']
    tempdir = CONF.tempdir if CONF.tempdir else tempfile.gettempdir()
    rpm_path = os.path.join(tempdir, name)
    images.fetch(context, self.instance['image_ref'], rpm_path,
                 self.instance['user_id'], self.instance['project_id'])
    self.rpm_path = rpm_path
def _pull_missing_image(self, context, image_meta, instance):
    """Load *image_meta* into hyper, preferring a shared-directory copy.

    Falls back to downloading from glance when the shared directory is
    unset, does not hold the image, or the load from it fails.

    :returns: hyper inspect data for the loaded image
    :raises exception.NovaException: when the glance-fetched file
        cannot be loaded
    """
    msg = 'Image name "%s" does not exist, fetching it...'
    LOG.debug(msg, image_meta['name'])

    shared_directory = CONF.hyper.shared_directory
    # todo: check image location
    if (shared_directory and
            os.path.exists(os.path.join(shared_directory,
                                        image_meta['id']))):
        try:
            self.hyper.load_image(
                self._encode_utf8(image_meta['name']),
                os.path.join(shared_directory, image_meta['id']))
            return self.hyper.inspect_image(
                self._encode_utf8(image_meta['name']))
        except Exception as e:
            # If failed to load image from shared_directory, continue
            # to download the image from glance then load.
            LOG.warning(_('Cannot load repository file from shared '
                          'directory: %s'),
                        e, instance=instance, exc_info=True)

    snapshot_directory = CONF.hyper.snapshots_directory
    fileutils.ensure_tree(snapshot_directory)
    with utils.tempdir(dir=snapshot_directory) as tmpdir:
        try:
            # Random name so concurrent pulls cannot collide.
            out_path = os.path.join(tmpdir, uuid.uuid4().hex)
            images.fetch(context, image_meta['id'], out_path,
                         instance['user_id'], instance['project_id'])
            self.hyper.load_image(
                self._encode_utf8(image_meta['name']),
                out_path
            )
        except Exception as e:
            LOG.warning(_('Cannot load repository file: %s'),
                        e, instance=instance, exc_info=True)
            msg = _('Cannot load repository file: {0}')
            raise exception.NovaException(msg.format(e),
                                          instance_id=image_meta['name'])
    return self.hyper.inspect_image(self._encode_utf8(image_meta['name']))
def _cache_image(self, instance):
    """Download the template image into the cache unless already present.

    Returns True when a fetch was performed, False when the cached
    copy was reused.
    """
    image_name = "%s.tar.gz" % instance["image_id"]
    full_image_path = "%s/%s" % (FLAGS.ovz_image_template_dir, image_name)
    if os.path.exists(full_image_path):
        return False
    # These objects are required to retrieve images from the object store.
    # This is known only to work with glance so far but as I understand it
    # glance's interface matches that of the other object stores.
    auth = manager.AuthManager()
    user = auth.get_user(instance["user_id"])
    project = auth.get_project(instance["project_id"])
    # Grab image and place it in the image cache
    images.fetch(instance["image_id"], full_image_path, user, project)
    return True
def fetch_image_if_not_existing():
    """Return the cached VHD path, downloading it first when missing."""
    # Reuse a previously downloaded copy when one exists.
    for ext in ('vhd', 'vhdx'):
        cached = base_vhd_path + '.' + ext
        if self._pathutils.exists(cached):
            return cached
    try:
        images.fetch(context, image_id, base_vhd_path)
        detected = self._vhdutils.get_vhd_format(base_vhd_path)
        final_path = base_vhd_path + '.' + detected.lower()
        self._pathutils.rename(base_vhd_path, final_path)
        return final_path
    except Exception:
        with excutils.save_and_reraise_exception():
            # Drop any partial download before re-raising.
            if self._pathutils.exists(base_vhd_path):
                self._pathutils.remove(base_vhd_path)
def fetch_image_if_not_existing():
    """Return the cached VHD path, downloading it first when missing."""
    # Reuse a previously downloaded copy when one exists.
    for ext in ("vhd", "vhdx"):
        cached = base_vhd_path + "." + ext
        if self._pathutils.exists(cached):
            return cached
    try:
        images.fetch(context, image_id, base_vhd_path,
                     instance.user_id, instance.project_id)
        detected = self._vhdutils.get_vhd_format(base_vhd_path)
        final_path = base_vhd_path + "." + detected.lower()
        self._pathutils.rename(base_vhd_path, final_path)
        return final_path
    except Exception:
        with excutils.save_and_reraise_exception():
            # Drop any partial download before re-raising.
            if self._pathutils.exists(base_vhd_path):
                self._pathutils.remove(base_vhd_path)
def fetch_orig_image(context, target, image_id, user_id, project_id):
    """Download the unmodified image *image_id* from glance to *target*."""
    images.fetch(context, image_id, target, user_id, project_id)
def fetch_image(self, target, context, image_id, user, project,
                *args, **kwargs):
    """Download *image_id* to *target*; extra positional/keyword
    arguments are accepted for interface compatibility and ignored."""
    images.fetch(context, image_id, target, user, project)
def pre_launch(self, context, new_instance_ref, network_info=None,
               block_device_info=None, migration=False,
               use_image_service=False, image_refs=[]):
    """Stage images and libvirt artifacts before launching the instance.

    Optionally downloads the descriptor/disk images from the image
    service, builds the instance working directory with a stub 'disk'
    file, renders the libvirt XML and sets up firewall filtering.

    :returns: tuple of (path to libvirt.xml, image base path or None)

    NOTE: mutable default ``image_refs=[]`` is preserved from the
    original; it is never mutated here, so it is safe in practice.
    """
    image_base_path = None
    if use_image_service:
        # We need to first download the descriptor and the disk files
        # from the image service.
        LOG.debug("Downloading images %s from the image service." %
                  (image_refs))
        image_base_path = os.path.join(FLAGS.instances_path, '_base')
        if not os.path.exists(image_base_path):
            LOG.debug('Base path %s does not exist. It will be created now.',
                      image_base_path)
            utilities.make_directories(image_base_path)
            os.chown(image_base_path, self.openstack_uid,
                     self.openstack_gid)
        image_service = nova.image.get_default_image_service()
        for image_ref in image_refs:
            image = image_service.show(context, image_ref)
            target = os.path.join(image_base_path, image['name'])
            if migration or not os.path.exists(target):
                # If the path does not exist fetch the data from the image
                # service.  NOTE: We always fetch in the case of a
                # migration, as the descriptor may have changed from its
                # previous state. Migrating VMs are the only case where a
                # descriptor for an instance will not be a fixed constant.
                images.fetch(context, image_ref, target,
                             new_instance_ref['user_id'],
                             new_instance_ref['project_id'])
                # NOTE(review): chown placement reconstructed from a
                # collapsed source line; assumed to apply to the freshly
                # fetched file — confirm against upstream history.
                os.chown(target, self.openstack_uid, self.openstack_gid)
    # (dscannell) Check to see if we need to convert the network_info
    # object into the legacy format.
    if network_info and self.libvirt_conn.legacy_nwinfo():
        network_info = compute_utils.legacy_network_info(network_info)
    # We need to create the libvirt xml, and associated files. Pass back
    # the path to the libvirt.xml file.
    working_dir = os.path.join(FLAGS.instances_path,
                               new_instance_ref['name'])
    disk_file = os.path.join(working_dir, "disk")
    libvirt_file = os.path.join(working_dir, "libvirt.xml")
    # Make sure that our working directory exists.
    if not(os.path.exists(working_dir)):
        os.makedirs(working_dir)
    if not(os.path.exists(disk_file)):
        # (dscannell) We will write out a stub 'disk' file so that we don't
        # end up copying this file when setting up everything for libvirt.
        # Essentially, this file will be removed, and replaced by vms as an
        # overlay on the blessed root image.
        f = open(disk_file, 'w')
        f.close()
    # (dscannell) We want to disable any injection. We do this by making a
    # copy of the instance and clearing out some entries. Since OpenStack
    # uses dictionary-list accessors, we can pass this dictionary through
    # that code.
    instance_dict = AttribDictionary(dict(new_instance_ref.iteritems()))
    # The name attribute is special and does not carry over like the rest
    # of the attributes.
    instance_dict['name'] = new_instance_ref['name']
    instance_dict.os_type = new_instance_ref.os_type
    instance_dict['key_data'] = None
    instance_dict['metadata'] = []
    for network_ref, mapping in network_info:
        network_ref['injected'] = False
    # (dscannell) This was taken from the core nova project as part of the
    # boot path for normal instances. We basically want to mimic this
    # functionality.
    xml = self.libvirt_conn.to_xml(instance_dict, network_info, False,
                                   block_device_info=block_device_info)
    self.libvirt_conn.firewall_driver.setup_basic_filtering(
        instance_dict, network_info)
    self.libvirt_conn.firewall_driver.prepare_instance_filter(
        instance_dict, network_info)
    self.libvirt_conn._create_image(context, instance_dict, xml,
                                    network_info=network_info,
                                    block_device_info=block_device_info)
    if not(migration):
        # (dscannell) Remove the fake disk file (if created).
        os.remove(disk_file)
    # Fix up the permissions on the files that we created so that they are
    # owned by the openstack user.
    os.chown(working_dir, self.openstack_uid, self.openstack_gid)
    for root, dirs, files in os.walk(working_dir, followlinks=True):
        for path in dirs + files:
            LOG.debug("chowning path=%s to openstack user %s" %
                      (os.path.join(root, path), self.openstack_uid))
            os.chown(os.path.join(root, path), self.openstack_uid,
                     self.openstack_gid)
    # Return the libvirt file, this will be passed in as the name. This
    # parameter is overloaded in the management interface as a libvirt
    # special case.
    return (libvirt_file, image_base_path)
def fetch_image(context, target, image_id, user_id, project_id):
    """Download *image_id* from the image service to *target*."""
    images.fetch(context, image_id, target, user_id, project_id)
def fetch_image(context, target, image_id, user_id, project_id, size=None):
    """Download *image_id* to *target*, extending the disk to *size*
    bytes when a truthy size is given."""
    images.fetch(context, image_id, target, user_id, project_id)
    if size:
        disk.extend(target, size)
def fetch_image_if_not_existing():
    """Download and validate the VHD unless it is already cached."""
    if self._pathutils.exists(vhd_path):
        return
    images.fetch(context, image_id, vhd_path,
                 instance['user_id'], instance['project_id'])
    # Only freshly fetched images are validated.
    self._validate_vhd_image(vhd_path)
def _install_machine(self, context, instance, bmm, cluster_name, vlan_id,
                     update_instance=False):
    """Provision a bare-metal machine via cobbler/PXE and wait for install.

    Stages the image, renders cobbler templates, power-cycles the box
    over IPMI and polls the install state machine
    (install -> install_reboot -> installed) before marking the bmm row
    active/used.  Blocks (greenthread-sleeping) until installation
    completes.
    """
    db.bmm_update(context, bmm["id"], {"instance_id": instance["id"]})
    mac = self._get_pxe_mac(bmm)

    # fetch image
    image_base_path = self._get_cobbler_image_path()
    if not os.path.exists(image_base_path):
        utils.execute('mkdir', '-p', image_base_path)
    image_path = self._get_cobbler_image_path(instance)
    if not os.path.exists(image_path):
        image_meta = images.fetch(context,
                                  instance["image_ref"],
                                  image_path,
                                  instance["user_id"],
                                  instance["project_id"])
    else:
        image_meta = images.show(context, instance["image_ref"])

    # "server" images are dodai-deploy images; everything else is "node".
    image_type = "server"
    image_name = (image_meta["name"] or
                  image_meta["properties"]["image_location"])
    if image_name.find("dodai-deploy") == -1:
        image_type = "node"

    # begin to install os
    pxe_ip = bmm["pxe_ip"] or "None"
    pxe_mac = bmm["pxe_mac"] or "None"
    storage_ip = bmm["storage_ip"] or "None"
    storage_mac = bmm["storage_mac"] or "None"
    service_mac1 = bmm["service_mac1"] or "None"
    service_mac2 = bmm["service_mac2"] or "None"
    instance_path = self._get_cobbler_instance_path(instance)
    if not os.path.exists(instance_path):
        utils.execute('mkdir', '-p', instance_path)
    self._cp_template(
        "create.sh",
        self._get_cobbler_instance_path(instance, "create.sh"),
        {"INSTANCE_ID": instance["id"],
         "IMAGE_ID": instance["image_ref"],
         "COBBLER": FLAGS.cobbler,
         "HOST_NAME": bmm["name"],
         "STORAGE_IP": storage_ip,
         "STORAGE_MAC": storage_mac,
         "PXE_IP": pxe_ip,
         "PXE_MAC": pxe_mac,
         "SERVICE_MAC1": bmm["service_mac1"],
         "SERVICE_MAC2": bmm["service_mac2"],
         "IMAGE_TYPE": image_type,
         "MONITOR_PORT": FLAGS.dodai_monitor_port,
         "ROOT_SIZE": FLAGS.dodai_partition_root_gb,
         "SWAP_SIZE": FLAGS.dodai_partition_swap_gb,
         "EPHEMERAL_SIZE": FLAGS.dodai_partition_ephemeral_gb,
         "KDUMP_SIZE": FLAGS.dodai_partition_kdump_gb})
    self._cp_template(
        "pxeboot_action",
        self._get_pxe_boot_file(mac),
        {"INSTANCE_ID": instance["id"],
         "COBBLER": FLAGS.cobbler,
         "PXE_MAC": pxe_mac,
         "ACTION": "create"})
    LOG.debug("Reboot or power on.")
    self._reboot_or_power_on(bmm["ipmi_ip"])

    # wait until starting to install os
    while self._get_state(context, instance) != "install":
        greenthread.sleep(20)
        LOG.debug("Wait until begin to install instance %s." %
                  instance["id"])
    self._cp_template("pxeboot_start", self._get_pxe_boot_file(mac), {})

    # wait until starting to reboot
    while self._get_state(context, instance) != "install_reboot":
        greenthread.sleep(20)
        LOG.debug("Wait until begin to reboot instance %s after os "
                  "has been installed." % instance["id"])
    power_manager = PowerManager(bmm["ipmi_ip"])
    power_manager.soft_off()
    while power_manager.status() == "on":
        greenthread.sleep(20)
        LOG.debug("Wait unit the instance %s shuts down." % instance["id"])
    power_manager.on()

    # wait until installation of os finished
    while self._get_state(context, instance) != "installed":
        greenthread.sleep(20)
        LOG.debug("Wait until instance %s installation finished." %
                  instance["id"])

    # Machines handed to the resource pool stay "active"; machines
    # assigned to a tenant cluster become "used".
    if cluster_name == "resource_pool":
        status = "active"
    else:
        status = "used"
    db.bmm_update(context, bmm["id"], {"status": status})

    if update_instance:
        db.instance_update(context, instance["id"],
                           {"vm_state": vm_states.ACTIVE})
def _fetch_image(self, target, context, image_id, user, project,
                 *args, **kwargs):
    """Download image `image_id` to the `target` path.

    Thin adapter over the common images.fetch() helper; any extra
    positional or keyword arguments supplied by the caller are
    accepted and ignored.
    """
    images.fetch(context, image_id, target, user, project)
def _install_machine(self, context, instance, bmm, cluster_name, vlan_id,
                     update_instance=False):
    """Provision a bare-metal machine (bmm) with the instance's image.

    Registers the instance on the bmm record, stages the image for
    cobbler, renders the create/pxeboot templates, power-cycles the
    machine, then polls its reported state through the "install",
    "install_reboot" and "installed" phases.  Finally marks the bmm
    (and optionally the instance) as active/used.

    NOTE(review): an earlier, differently formatted definition of
    _install_machine exists in this file; if both live in the same
    class this later one shadows it — confirm which is intended.
    """
    db.bmm_update(context, bmm["id"], {"instance_id": instance["id"]})
    mac = self._get_pxe_mac(bmm)

    # Fetch the image unless it is already cached on disk.
    image_base_path = self._get_cobbler_image_path()
    if not os.path.exists(image_base_path):
        utils.execute('mkdir', '-p', image_base_path)
    image_path = self._get_cobbler_image_path(instance)
    if not os.path.exists(image_path):
        image_meta = images.fetch(context, instance["image_ref"],
                                  image_path, instance["user_id"],
                                  instance["project_id"])
    else:
        image_meta = images.show(context, instance["image_ref"])

    # Images whose name contains "dodai-deploy" are "server" installs;
    # anything else is a plain "node" install.
    image_type = "server"
    image_name = image_meta["name"] or image_meta["properties"][
        "image_location"]
    if image_name.find("dodai-deploy") == -1:
        image_type = "node"

    # Begin to install the OS.  Normalize missing addresses to the
    # literal string "None" for the shell template.
    pxe_ip = bmm["pxe_ip"] or "None"
    pxe_mac = bmm["pxe_mac"] or "None"
    storage_ip = bmm["storage_ip"] or "None"
    storage_mac = bmm["storage_mac"] or "None"
    service_mac1 = bmm["service_mac1"] or "None"
    service_mac2 = bmm["service_mac2"] or "None"

    instance_path = self._get_cobbler_instance_path(instance)
    if not os.path.exists(instance_path):
        utils.execute('mkdir', '-p', instance_path)
    self._cp_template(
        "create.sh",
        self._get_cobbler_instance_path(instance, "create.sh"), {
            "INSTANCE_ID": instance["id"],
            "IMAGE_ID": instance["image_ref"],
            "COBBLER": FLAGS.cobbler,
            "HOST_NAME": bmm["name"],
            "STORAGE_IP": storage_ip,
            "STORAGE_MAC": storage_mac,
            "PXE_IP": pxe_ip,
            "PXE_MAC": pxe_mac,
            # Bug fix: use the normalized locals above (previously the
            # raw bmm values were passed here, bypassing the "None"
            # default applied to every other template value).
            "SERVICE_MAC1": service_mac1,
            "SERVICE_MAC2": service_mac2,
            "IMAGE_TYPE": image_type,
            "MONITOR_PORT": FLAGS.dodai_monitor_port,
            "ROOT_SIZE": FLAGS.dodai_partition_root_gb,
            "SWAP_SIZE": FLAGS.dodai_partition_swap_gb,
            "EPHEMERAL_SIZE": FLAGS.dodai_partition_ephemeral_gb,
            "KDUMP_SIZE": FLAGS.dodai_partition_kdump_gb
        })
    self._cp_template(
        "pxeboot_action", self._get_pxe_boot_file(mac), {
            "INSTANCE_ID": instance["id"],
            "COBBLER": FLAGS.cobbler,
            "PXE_MAC": pxe_mac,
            "ACTION": "create"
        })

    LOG.debug("Reboot or power on.")
    self._reboot_or_power_on(bmm["ipmi_ip"])

    # Wait until the OS install begins.
    while self._get_state(context, instance) != "install":
        greenthread.sleep(20)
        LOG.debug("Wait until begin to install instance %s."
                  % instance["id"])
    self._cp_template("pxeboot_start", self._get_pxe_boot_file(mac), {})

    # Wait until the post-install reboot starts.
    while self._get_state(context, instance) != "install_reboot":
        greenthread.sleep(20)
        LOG.debug(
            "Wait until begin to reboot instance %s after os has been "
            "installed." % instance["id"])

    power_manager = PowerManager(bmm["ipmi_ip"])
    power_manager.soft_off()
    while power_manager.status() == "on":
        greenthread.sleep(20)
        # Typo fix in log message: "unit" -> "until".
        LOG.debug("Wait until the instance %s shuts down."
                  % instance["id"])
    power_manager.on()

    # Wait until the OS installation finishes.
    while self._get_state(context, instance) != "installed":
        greenthread.sleep(20)
        LOG.debug("Wait until instance %s installation finished."
                  % instance["id"])

    if cluster_name == "resource_pool":
        status = "active"
    else:
        status = "used"
    db.bmm_update(context, bmm["id"], {"status": status})

    if update_instance:
        db.instance_update(context, instance["id"],
                          {"vm_state": vm_states.ACTIVE})
def pre_launch(self, context, new_instance_ref, network_info=None,
               block_device_info=None, migration=False,
               skip_image_service=False, image_refs=None):
    """Prepare filesystem and libvirt artifacts before launching a VM.

    Downloads the blessed descriptor/disk images from the image
    service (unless skipped), builds the instance working directory
    with a stub 'disk' file, generates the libvirt XML via the
    underlying libvirt driver, and chowns everything created to the
    openstack user.

    Returns a (libvirt_xml_path, image_base_path) tuple.
    """
    # Bug fix: the original default `image_refs=[]` was a shared
    # mutable default argument.  Use None and normalize here; callers
    # see identical behavior.
    if image_refs is None:
        image_refs = []

    image_base_path = None
    if not skip_image_service and CONF.gridcentric_use_image_service:
        # We need to first download the descriptor and the disk files
        # from the image service.
        LOG.debug("Downloading images %s from the image service." %
                  (image_refs))
        image_base_path = os.path.join(CONF.instances_path, '_base')
        if not os.path.exists(image_base_path):
            LOG.debug('Base path %s does not exist. It will be created '
                      'now.', image_base_path)
            mkdir_as(image_base_path, self.openstack_uid)
        image_service = glance.get_default_image_service()
        for image_ref in image_refs:
            image = image_service.show(context, image_ref)
            target = os.path.join(image_base_path, image['name'])
            if migration or not os.path.exists(target):
                # If the path does not exist fetch the data from the
                # image service.  NOTE: We always fetch in the case of
                # a migration, as the descriptor may have changed from
                # its previous state.  Migrating VMs are the only case
                # where a descriptor for an instance will not be a
                # fixed constant.  We download to a temporary location
                # so we can make the file appear atomically from the
                # right user.
                fd, temp_target = tempfile.mkstemp(dir=image_base_path)
                try:
                    os.close(fd)
                    images.fetch(context, image_ref, temp_target,
                                 new_instance_ref['user_id'],
                                 new_instance_ref['project_id'])
                    os.chown(temp_target, self.openstack_uid,
                             self.openstack_gid)
                    # Bug fix: `0644` is Python-2-only octal syntax
                    # (SyntaxError on Python 3); `0o644` is the same
                    # value and valid on 2.6+.
                    os.chmod(temp_target, 0o644)
                    os.rename(temp_target, target)
                except:
                    # Bare except kept deliberately: remove the temp
                    # file on ANY failure (including BaseException)
                    # and re-raise.
                    os.unlink(temp_target)
                    raise

    # (dscannell) Check to see if we need to convert the network_info
    # object into the legacy format.
    if network_info and self.libvirt_conn.legacy_nwinfo():
        network_info = network_info.legacy()

    # We need to create the libvirt xml and associated files.  Pass
    # back the path to the libvirt.xml file.
    working_dir = os.path.join(CONF.instances_path,
                               new_instance_ref['name'])
    disk_file = os.path.join(working_dir, "disk")
    libvirt_file = os.path.join(working_dir, "libvirt.xml")

    # Make sure that our working directory exists.
    mkdir_as(working_dir, self.openstack_uid)

    if not os.path.exists(disk_file):
        # (dscannell) We will write out a stub 'disk' file so that we
        # don't end up copying this file when setting up everything
        # for libvirt.  Essentially, this file will be removed, and
        # replaced by vms as an overlay on the blessed root image.
        touch_as(disk_file, self.openstack_uid)

    # (dscannell) We want to disable any injection.  We do this by
    # making a copy of the instance and clearing out some entries.
    # Since OpenStack uses dictionary-list accessors, we can pass this
    # dictionary through that code.
    # NOTE(review): iteritems() is Python-2-only — confirm before any
    # Python 3 migration.
    instance_dict = AttribDictionary(dict(new_instance_ref.iteritems()))

    # The name attribute is special and does not carry over like the
    # rest of the attributes.
    instance_dict['name'] = new_instance_ref['name']
    instance_dict.os_type = new_instance_ref['os_type']
    instance_dict['key_data'] = None
    instance_dict['metadata'] = []
    for network_ref, _mapping in network_info:
        network_ref['injected'] = False

    # (dscannell) This was taken from the core nova project as part of
    # the boot path for normal instances.  We basically want to mimic
    # this functionality.
    xml = self.libvirt_conn.to_xml(instance_dict, network_info, False,
                                   block_device_info=block_device_info)
    self.libvirt_conn._create_image(context, instance_dict, xml,
                                    network_info=network_info,
                                    block_device_info=block_device_info)
    if not migration:
        # (dscannell) Remove the fake disk file (if created).
        os.remove(disk_file)

    # Fix up the permissions on the files that we created so that they
    # are owned by the openstack user.
    for root, dirs, files in os.walk(working_dir, followlinks=True):
        for path in dirs + files:
            LOG.debug("chowning path=%s to openstack user %s" %
                      (os.path.join(root, path), self.openstack_uid))
            os.chown(os.path.join(root, path), self.openstack_uid,
                     self.openstack_gid)

    # Return the libvirt file; this will be passed in as the name.
    # This parameter is overloaded in the management interface as a
    # libvirt special case.
    return (libvirt_file, image_base_path)